@nalan90
2018-03-22T15:19:34.000000Z
字数 15611
阅读 759
自动化运维
etcd环境部署请参考:
etcd集群搭建
k8s架构
环境准备
软件及脚本
里面包含master、node节点所需运行的所有二进制包
目录及软件安装
## 批量创建工作目录
work:~ ys$ for item in 161 162 163 164 165;do ssh root@dev-$item 'mkdir -p /opt/kubernetes/{cfg,bin}';done
## Install docker-ce (using the Aliyun docker-ce mirror)
# Step 1: install the required system utilities
yum install -y yum-utils device-mapper-persistent-data lvm2
# Step 2: add the repository definition
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Step 3: refresh the metadata cache and install Docker-CE
yum makecache fast
yum -y install docker-ce
work:~ ys$ for item in 161 162 163 164 165; do ssh root@dev-$item 'yum install -y yum-utils device-mapper-persistent-data lvm2 && yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo && yum makecache fast && yum install -y docker-ce && systemctl enable docker';done
## 安装flannel
work:~ ys$ for item in 161 162 163 164 165; do ssh root@dev-$item 'yum install -y flannel && systemctl enable flanneld';done
## 关闭swap分区
work:~ ys$ for item in 161 162 163 164 165; do ssh root@dev-$item 'swapoff -a';done
master节点部署
[root@dev-161 bin]# ll
total 418592
-rwxr-xr-x 1 root root 193631216 Mar 13 00:47 kube-apiserver
-rwxr-xr-x 1 root root 128511681 Mar 13 00:47 kube-controller-manager
-rwxr-xr-x 1 root root 52496673 Mar 13 00:48 kubectl
-rwxr-xr-x 1 root root 53989846 Mar 13 00:47 kube-scheduler
[root@dev-161 bin]# cp kube-* /opt/kubernetes/bin/
[root@dev-161 bin]# ll /opt/kubernetes/bin/
total 367324
-rwxr-xr-x 1 root root 193631216 Mar 22 11:00 kube-apiserver
-rwxr-xr-x 1 root root 128511681 Mar 22 11:00 kube-controller-manager
-rwxr-xr-x 1 root root 53989846 Mar 22 11:00 kube-scheduler
# apiserver.sh — writes the kube-apiserver environment file and systemd unit,
# then enables and (re)starts the service.
# Usage: sh apiserver.sh [MASTER_ADDRESS] [ETCD_SERVERS] [SERVICE_CLUSTER_IP_RANGE] [ADMISSION_CONTROL]
MASTER_ADDRESS=${1:-"172.16.1.161"}
ETCD_SERVERS=${2:-"http://172.16.1.161:2379,http://172.16.1.162:2379,http://172.16.1.163:2379"}
# NOTE(review): 10.10.10.0/16 is not aligned to a /16 boundary and overlaps the
# flannel pod network 10.10.0.0/16 configured later in this document — confirm intended.
SERVICE_CLUSTER_IP_RANGE=${3:-"10.10.10.0/16"}
# NOTE(review): the empty default produces a literal "--admission-control=" flag
# (visible in the status transcript below); the apiserver started with it here,
# but a real admission-controller list is usually supplied.
ADMISSION_CONTROL=${4:-""}
# Environment file read by the unit via EnvironmentFile=; this unquoted heredoc
# expands the ${...} shell variables above at generation time.
cat <<EOF >/opt/kubernetes/cfg/kube-apiserver
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=4"
KUBE_ETCD_SERVERS="--etcd-servers=${ETCD_SERVERS}"
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--insecure-port=8080"
NODE_PORT="--kubelet-port=10250"
KUBE_ADVERTISE_ADDR="--advertise-address=${MASTER_ADDRESS}"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
KUBE_ADMISSION_CONTROL="--admission-control=${ADMISSION_CONTROL}"
EOF
# The \$ escapes keep the ${VAR} tokens literal in this string, and each \\
# becomes a literal backslash: the unit file below therefore contains
# backslash-continued ${KUBE_*} references that systemd itself expands from
# the environment file at service start.
KUBE_APISERVER_OPTS=" \${KUBE_LOGTOSTDERR} \\
\${KUBE_LOG_LEVEL} \\
\${KUBE_ETCD_SERVERS} \\
\${KUBE_API_ADDRESS} \\
\${KUBE_API_PORT} \\
\${NODE_PORT} \\
\${KUBE_ADVERTISE_ADDR} \\
\${KUBE_ALLOW_PRIV} \\
\${KUBE_SERVICE_ADDRESSES} \\
\${KUBE_ADMISSION_CONTROL}"
# systemd unit; ${KUBE_APISERVER_OPTS} is expanded NOW, embedding the literal
# ${...} tokens prepared above into ExecStart.
cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver ${KUBE_APISERVER_OPTS}
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
# Pick up the new unit, enable at boot, and (re)start it.
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver
## 执行shell
[root@dev-161 shell]# sh apiserver.sh
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
## 启动kube-apiserver
[root@dev-161 shell]# systemctl start kube-apiserver
## 查看状态
[root@dev-161 shell]# systemctl status kube-apiserver
● kube-apiserver.service - Kubernetes API Server
Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
Active: active (running) since Thu 2018-03-22 11:05:42 CST; 18s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 6460 (kube-apiserver)
Memory: 181.8M
CGroup: /system.slice/kube-apiserver.service
└─6460 /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=http://172.16.1.161:2379,http://172.16.1.162:2379,http://172.16.1.163:2379 --insecure-bind-address=0.0.0.0 --insecure-port=8080 --kubelet-port=10250 --advertise-address=172.16.1.161 --allow-privileged=false --service-cluster-ip-range=10.10.10.0/16 --admission-control=
# controller-manager.sh — writes the kube-controller-manager environment file
# and systemd unit, then enables and (re)starts the service.
# Usage: sh controller-manager.sh [MASTER_ADDRESS]
MASTER_ADDRESS=${1:-"172.16.1.161"}
# Environment file read by the unit via EnvironmentFile=; talks to the
# apiserver's insecure port 8080 on the master.
cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=4"
KUBE_MASTER="--master=${MASTER_ADDRESS}:8080"
KUBE_LEADER_ELECT="--leader-elect"
EOF
# \$ keeps the ${VAR} tokens literal so the unit file references the
# environment-file variables; systemd expands them and treats the trailing
# backslashes as line continuations.
KUBE_CONTROLLER_MANAGER_OPTS=" \${KUBE_LOGTOSTDERR} \\
\${KUBE_LOG_LEVEL} \\
\${KUBE_MASTER} \\
\${KUBE_LEADER_ELECT}"
# systemd unit; ${KUBE_CONTROLLER_MANAGER_OPTS} is expanded at generation time.
cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager ${KUBE_CONTROLLER_MANAGER_OPTS}
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
# Pick up the new unit, enable at boot, and (re)start it.
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager
[root@dev-161 shell]# sh controller-manager.sh
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
[root@dev-161 shell]# systemctl start kube-controller-manager
[root@dev-161 shell]# systemctl status kube-controller-manager
● kube-controller-manager.service - Kubernetes Controller Manager
Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
Active: active (running) since Thu 2018-03-22 11:17:34 CST; 13s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 6532 (kube-controller)
Memory: 15.2M
CGroup: /system.slice/kube-controller-manager.service
└─6532 /opt/kubernetes/bin/kube-controller-manager --logtostderr=true --v=4 --master=172.16.1.161:8080 --leader-elect
# scheduler.sh — writes the kube-scheduler environment file and systemd unit,
# then enables and (re)starts the service.
# Usage: sh scheduler.sh [MASTER_ADDRESS]
MASTER_ADDRESS=${1:-"172.16.1.161"}
# Environment file read by the unit via EnvironmentFile=; KUBE_SCHEDULER_ARGS
# is an empty hook for extra flags.
cat <<EOF >/opt/kubernetes/cfg/kube-scheduler
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=4"
KUBE_MASTER="--master=${MASTER_ADDRESS}:8080"
KUBE_LEADER_ELECT="--leader-elect"
KUBE_SCHEDULER_ARGS=""
EOF
# \$ keeps the ${VAR} tokens literal so the unit file references the
# environment-file variables; systemd expands them and treats the trailing
# backslashes as line continuations.
KUBE_SCHEDULER_OPTS=" \${KUBE_LOGTOSTDERR} \\
\${KUBE_LOG_LEVEL} \\
\${KUBE_MASTER} \\
\${KUBE_LEADER_ELECT} \\
\$KUBE_SCHEDULER_ARGS"
# systemd unit; ${KUBE_SCHEDULER_OPTS} is expanded at generation time.
cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler ${KUBE_SCHEDULER_OPTS}
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
# Pick up the new unit, enable at boot, and (re)start it.
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler
[root@dev-161 shell]# sh scheduler.sh
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
[root@dev-161 shell]# systemctl start kube-scheduler
[root@dev-161 shell]# systemctl status kube-scheduler
● kube-scheduler.service - Kubernetes Scheduler
Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled)
Active: active (running) since Thu 2018-03-22 11:22:28 CST; 11s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 6601 (kube-scheduler)
Memory: 6.6M
CGroup: /system.slice/kube-scheduler.service
└─6601 /opt/kubernetes/bin/kube-scheduler --logtostderr=true --v=4 --master=172.16.1.161:8080 --leader-elect
node节点部署(以dev-162为例)
[root@dev-162 bin]# ll
total 181948
-rwxr-xr-x 1 root root 138166656 Mar 13 00:47 kubelet
-rwxr-xr-x 1 root root 48144192 Mar 13 00:47 kube-proxy
[root@dev-162 bin]# cp kubelet kube-proxy /opt/kubernetes/bin/
[root@dev-162 bin]# ll /opt/kubernetes/bin/
total 181948
-rwxr-xr-x 1 root root 138166656 Mar 22 11:33 kubelet
-rwxr-xr-x 1 root root 48144192 Mar 22 11:33 kube-proxy
[root@dev-162 bin]# cd ../shell/
[root@dev-162 shell]# ll
total 12
-rwxr-xr-x 1 root root 2387 Mar 20 11:30 flannel.sh
-rwxr-xr-x 1 root root 3013 Mar 20 11:30 kubelet.sh
-rwxr-xr-x 1 root root 1719 Mar 20 11:30 proxy.sh
[root@dev-162 ~]#etcdctl mk /coreos.com/network/config "{\"Network\": \"10.10.0.0/16\", \"SubnetLen\": 24, \"Backend\": { \"Type\": \"vxlan\" } }"
{"Network": "10.10.0.0/16", "SubnetLen": 24, "Backend": { "Type": "vxlan" } }
[root@dev-161 shell]# etcdctl get /coreos.com/network/config
{"Network": "10.10.0.0/16", "SubnetLen": 24, "Backend": { "Type": "vxlan" } }
# /etc/sysconfig/flanneld — point flanneld at the etcd cluster and the key
# prefix where the network config was stored with `etcdctl mk` above.
FLANNEL_ETCD_ENDPOINTS="http://172.16.1.161:2379,http://172.16.1.162:2379,http://172.16.1.163:2379"
FLANNEL_ETCD_PREFIX="/coreos.com/network"
[root@dev-162 ~]# systemctl start flanneld
[root@dev-162 ~]# systemctl status flanneld
● flanneld.service - Flanneld overlay address etcd agent
Loaded: loaded (/usr/lib/systemd/system/flanneld.service; enabled; vendor preset: disabled)
Active: active (running) since Thu 2018-03-22 13:14:19 CST; 2min 52s ago
Process: 6493 ExecStartPost=/usr/libexec/flannel/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker (code=exited, status=0/SUCCESS)
Main PID: 6485 (flanneld)
Memory: 4.9M
CGroup: /system.slice/flanneld.service
└─6485 /usr/bin/flanneld -etcd-endpoints=http://172.16.1.161:2379,http://172.16.1.162:2379,http://172.16.1.163:2379 -etcd-prefix=/coreos.com/network
[root@dev-162 flannel]# cat /run/flannel/docker
DOCKER_OPT_BIP="--bip=10.10.63.1/24"
DOCKER_OPT_IPMASQ="--ip-masq=true"
DOCKER_OPT_MTU="--mtu=1450"
DOCKER_NETWORK_OPTIONS=" --bip=10.10.63.1/24 --ip-masq=true --mtu=1450"
[root@dev-162 flannel]# cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.10.0.0/16
FLANNEL_SUBNET=10.10.63.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=false
## Add --log-level=error and $DOCKER_NETWORK_OPTIONS (the flannel-generated
## bip/ip-masq/mtu options from /run/flannel/docker) to the docker unit's ExecStart
ExecStart=/usr/bin/dockerd --log-level=error $DOCKER_NETWORK_OPTIONS
[root@dev-162 flannel]# systemctl daemon-reload
[root@dev-162 flannel]# systemctl start docker
[root@dev-162 flannel]# systemctl status docker
● docker.service - Docker Application Container Engine
Loaded: loaded (/usr/lib/systemd/system/docker.service; disabled; vendor preset: disabled)
Drop-In: /usr/lib/systemd/system/docker.service.d
└─flannel.conf
Active: active (running) since Thu 2018-03-22 13:32:15 CST; 8s ago
Docs: https://docs.docker.com
Main PID: 9996 (dockerd)
Memory: 23.9M
CGroup: /system.slice/docker.service
├─ 9996 /usr/bin/dockerd --log-level=error --bip=10.10.63.1/24 --ip-masq=true --mtu=1450
└─10001 docker-containerd --config /var/run/docker/containerd/containerd.toml
# kubelet.sh — generates the kubelet kubeconfig, environment file and systemd
# unit, then enables and (re)starts the kubelet on this node.
# Usage: sh kubelet.sh [MASTER_ADDRESS] [NODE_ADDRESS] [DNS_SERVER_IP] [DNS_DOMAIN]
MASTER_ADDRESS=${1:-"172.16.1.161"}
NODE_ADDRESS=${2:-"172.16.1.162"}
DNS_SERVER_IP=${3:-"10.10.10.2"}
DNS_DOMAIN=${4:-"cluster.local"}
KUBECONFIG_DIR=${KUBECONFIG_DIR:-/opt/kubernetes/cfg}
# Generate a kubeconfig file pointing the kubelet at the apiserver's insecure
# port. Note the YAML indentation: cluster/context entries must be nested list
# items, otherwise the file does not parse as a valid kubeconfig.
cat <<EOF > "${KUBECONFIG_DIR}/kubelet.kubeconfig"
apiVersion: v1
kind: Config
clusters:
  - cluster:
      server: http://${MASTER_ADDRESS}:8080/
    name: local
contexts:
  - context:
      cluster: local
    name: local
current-context: local
EOF
# Environment file read by the unit via EnvironmentFile=.
# (Fixed: the DNS variable was previously misspelled KUBELET__DNS_IP with a
# double underscore; renamed consistently here and in the OPTS string below.)
cat <<EOF >/opt/kubernetes/cfg/kubelet
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=4"
NODE_ADDRESS="--address=${NODE_ADDRESS}"
NODE_PORT="--port=10250"
NODE_HOSTNAME="--hostname-override=${NODE_ADDRESS}"
KUBELET_KUBECONFIG="--kubeconfig=${KUBECONFIG_DIR}/kubelet.kubeconfig"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBELET_DNS_IP="--cluster-dns=${DNS_SERVER_IP}"
KUBELET_DNS_DOMAIN="--cluster-domain=${DNS_DOMAIN}"
KUBELET_ARGS=""
EOF
# \$ keeps the ${VAR} tokens literal so the unit file references the
# environment-file variables; systemd expands them and treats the trailing
# backslashes as line continuations.
KUBELET_OPTS=" \${KUBE_LOGTOSTDERR} \\
\${KUBE_LOG_LEVEL} \\
\${NODE_ADDRESS} \\
\${NODE_PORT} \\
\${NODE_HOSTNAME} \\
\${KUBELET_KUBECONFIG} \\
\${KUBE_ALLOW_PRIV} \\
\${KUBELET_DNS_IP} \\
\${KUBELET_DNS_DOMAIN} \\
\$KUBELET_ARGS"
# systemd unit; ${KUBELET_OPTS} is expanded at generation time. The kubelet
# requires docker, so order it after docker.service.
cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet ${KUBELET_OPTS}
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
# Pick up the new unit, enable at boot, and (re)start it.
systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
## File exists问题处理
[root@dev-163 shell]# sh kubelet.sh
Failed to execute operation: File exists
[root@dev-163 shell]# systemctl disable kubelet
Removed symlink /etc/systemd/system/multi-user.target.wants/kubelet.service.
[root@dev-162 shell]# sh kubelet.sh
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@dev-162 shell]# systemctl start kubelet
[root@dev-162 shell]# systemctl status kubelet
● kubelet.service - Kubernetes Kubelet
Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
Active: active (running) since Thu 2018-03-22 13:33:18 CST; 41s ago
Main PID: 10129 (kubelet)
Memory: 22.7M
CGroup: /system.slice/kubelet.service
└─10129 /opt/kubernetes/bin/kubelet --logtostderr=true --v=4 --address=172.16.1.162 --port=10250 --hostname-override=172.16.1.162 --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig --allow-privileged=false --cluster-dns=10.10.10.2 --cluster-domain=cluster.local
# proxy.sh — writes the kube-proxy environment file and systemd unit, then
# enables and (re)starts kube-proxy on this node.
# Usage: sh proxy.sh [MASTER_ADDRESS] [NODE_ADDRESS]
MASTER_ADDRESS=${1:-"172.16.1.161"}
NODE_ADDRESS=${2:-"172.16.1.162"}
# Environment file read by the unit via EnvironmentFile=.
cat <<EOF >/opt/kubernetes/cfg/kube-proxy
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=4"
NODE_HOSTNAME="--hostname-override=${NODE_ADDRESS}"
KUBE_MASTER="--master=http://${MASTER_ADDRESS}:8080"
EOF
# \$ keeps the ${VAR} tokens literal so the unit file references the
# environment-file variables; systemd expands them and treats the trailing
# backslashes as line continuations.
KUBE_PROXY_OPTS=" \${KUBE_LOGTOSTDERR} \\
\${KUBE_LOG_LEVEL} \\
\${NODE_HOSTNAME} \\
\${KUBE_MASTER}"
# systemd unit; ${KUBE_PROXY_OPTS} is expanded at generation time.
cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy ${KUBE_PROXY_OPTS}
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
# Pick up the new unit, enable at boot, and (re)start it.
systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
[root@dev-162 shell]# sh proxy.sh
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
[root@dev-162 shell]# systemctl start kube-proxy
[root@dev-162 shell]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Proxy
Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
Active: active (running) since Thu 2018-03-22 13:36:01 CST; 15s ago
Main PID: 10266 (kube-proxy)
Memory: 7.2M
CGroup: /system.slice/kube-proxy.service
‣ 10266 /opt/kubernetes/bin/kube-proxy --logtostderr=true --v=4 --hostname-override=172.16.1.162 --master=http://172.16.1.161:8080
最终效果
[root@dev-161 shell]# etcdctl ls /coreos.com/network/subnets
/coreos.com/network/subnets/10.10.63.0-24
/coreos.com/network/subnets/10.10.26.0-24
/coreos.com/network/subnets/10.10.44.0-24
/coreos.com/network/subnets/10.10.27.0-24
[root@dev-161 shell]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
172.16.1.162 Ready <none> 2h v1.8.9
172.16.1.163 Ready <none> 15m v1.8.9
172.16.1.164 Ready <none> 9m v1.8.9
172.16.1.165 Ready <none> 5m v1.8.9
[root@dev-162 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 172.16.1.1 0.0.0.0 UG 0 0 0 ens3
10.10.0.0 0.0.0.0 255.255.0.0 U 0 0 0 flannel.1
10.10.63.0 0.0.0.0 255.255.255.0 U 0 0 0 docker0
169.254.0.0 0.0.0.0 255.255.0.0 U 1002 0 0 ens3
172.16.0.0 0.0.0.0 255.255.0.0 U 0 0 0 ens3
Tips
## 由于国内网络环境的原因,pause镜像托管在google的仓库,需要作如下操作(所有node节点):
# Pull the pause image from the Aliyun mirror, then re-tag it with the
# gcr.io name the kubelet expects (gcr.io is unreachable from mainland China).
docker pull registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0
docker tag registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 gcr.io/google_containers/pause-amd64:3.0
[root@dev-161 nginx]# vim /usr/lib/systemd/system/docker.service
ExecStart=/usr/bin/dockerd --registry-mirror=https://registry.docker-cn.com