[关闭]
@nalan90 2018-03-22T15:19:34.000000Z 字数 15611 阅读 759

k8s集群部署

自动化运维


etcd环境部署请参考: etcd集群搭建

k8s架构

kubernetes.png-271.8kB


环境准备

软件及脚本

里面包括master、node节点所需运行的所有二进制包

image_1c95mikdl1ha6sqh1q8j1fihb7dm.png-185.2kB


目录及软件安装
  1. ## 批量创建工作目录
  2. work:~ ys$ for item in 161 162 163 164 165;do ssh root@dev-$item 'mkdir -p /opt/kubernetes/{cfg,bin}';done
  3. ## 安装docker-ce (使用阿里docker-ce镜像)
  4. # step 1: 安装必要的一些系统工具
  5. yum install -y yum-utils device-mapper-persistent-data lvm2
  6. # Step 2: 添加软件源信息
  7. yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
  8. # Step 3: 更新并安装 Docker-CE
  9. yum makecache fast
  10. yum -y install docker-ce
  11. work:~ ys$ for item in 161 162 163 164 165; do ssh root@dev-$item 'yum install -y yum-utils device-mapper-persistent-data lvm2 && yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo && yum makecache fast && yum install -y docker-ce && systemctl enable docker';done
  12. ## 安装flannel
  13. work:~ ys$ for item in 161 162 163 164 165; do ssh root@dev-$item 'yum install -y flannel && systemctl enable flanneld';done
  14. ## 关闭swap分区
  15. work:~ ys$ for item in 161 162 163 164 165; do ssh root@dev-$item 'swapoff -a';done

master节点部署
  1. [root@dev-161 bin]# ll
  2. total 418592
  3. -rwxr-xr-x 1 root root 193631216 Mar 13 00:47 kube-apiserver
  4. -rwxr-xr-x 1 root root 128511681 Mar 13 00:47 kube-controller-manager
  5. -rwxr-xr-x 1 root root 52496673 Mar 13 00:48 kubectl
  6. -rwxr-xr-x 1 root root 53989846 Mar 13 00:47 kube-scheduler
  7. [root@dev-161 bin]# cp kube-* /opt/kubernetes/bin/
  8. [root@dev-161 bin]# ll /opt/kubernetes/bin/
  9. total 367324
  10. -rwxr-xr-x 1 root root 193631216 Mar 22 11:00 kube-apiserver
  11. -rwxr-xr-x 1 root root 128511681 Mar 22 11:00 kube-controller-manager
  12. -rwxr-xr-x 1 root root 53989846 Mar 22 11:00 kube-scheduler
  1. MASTER_ADDRESS=${1:-"172.16.1.161"}
  2. ETCD_SERVERS=${2:-"http://172.16.1.161:2379,http://172.16.1.162:2379,http://172.16.1.163:2379"}
  3. SERVICE_CLUSTER_IP_RANGE=${3:-"10.10.10.0/16"}
  4. ADMISSION_CONTROL=${4:-""}
  5. cat <<EOF >/opt/kubernetes/cfg/kube-apiserver
  6. KUBE_LOGTOSTDERR="--logtostderr=true"
  7. KUBE_LOG_LEVEL="--v=4"
  8. KUBE_ETCD_SERVERS="--etcd-servers=${ETCD_SERVERS}"
  9. KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
  10. KUBE_API_PORT="--insecure-port=8080"
  11. NODE_PORT="--kubelet-port=10250"
  12. KUBE_ADVERTISE_ADDR="--advertise-address=${MASTER_ADDRESS}"
  13. KUBE_ALLOW_PRIV="--allow-privileged=false"
  14. KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
  15. KUBE_ADMISSION_CONTROL="--admission-control=${ADMISSION_CONTROL}"
  16. EOF
  17. KUBE_APISERVER_OPTS=" \${KUBE_LOGTOSTDERR} \\
  18. \${KUBE_LOG_LEVEL} \\
  19. \${KUBE_ETCD_SERVERS} \\
  20. \${KUBE_API_ADDRESS} \\
  21. \${KUBE_API_PORT} \\
  22. \${NODE_PORT} \\
  23. \${KUBE_ADVERTISE_ADDR} \\
  24. \${KUBE_ALLOW_PRIV} \\
  25. \${KUBE_SERVICE_ADDRESSES} \\
  26. \${KUBE_ADMISSION_CONTROL}"
  27. cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
  28. [Unit]
  29. Description=Kubernetes API Server
  30. Documentation=https://github.com/kubernetes/kubernetes
  31. [Service]
  32. EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
  33. ExecStart=/opt/kubernetes/bin/kube-apiserver ${KUBE_APISERVER_OPTS}
  34. Restart=on-failure
  35. [Install]
  36. WantedBy=multi-user.target
  37. EOF
  38. systemctl daemon-reload
  39. systemctl enable kube-apiserver
  40. systemctl restart kube-apiserver
  1. ## 执行shell
  2. [root@dev-161 shell]# sh apiserver.sh
  3. Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
  4. ## 启动kube-apiserver
  5. [root@dev-161 shell]# systemctl start kube-apiserver
  6. ## 查看状态
  7. [root@dev-161 shell]# systemctl status kube-apiserver
  8. kube-apiserver.service - Kubernetes API Server
  9. Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
  10. Active: active (running) since Thu 2018-03-22 11:05:42 CST; 18s ago
  11. Docs: https://github.com/kubernetes/kubernetes
  12. Main PID: 6460 (kube-apiserver)
  13. Memory: 181.8M
  14. CGroup: /system.slice/kube-apiserver.service
  15. └─6460 /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=http://172.16.1.161:2379,http://172.16.1.162:2379,http://172.16.1.163:2379 --insecure-bind-address=0.0.0.0 --insecure-port=8080 --kubelet-port=10250 --advertise-address=172.16.1.161 --allow-privileged=false --service-cluster-ip-range=10.10.10.0/16 --admission-control=
  1. MASTER_ADDRESS=${1:-"172.16.1.161"}
  2. cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager
  3. KUBE_LOGTOSTDERR="--logtostderr=true"
  4. KUBE_LOG_LEVEL="--v=4"
  5. KUBE_MASTER="--master=${MASTER_ADDRESS}:8080"
  6. KUBE_LEADER_ELECT="--leader-elect"
  7. EOF
  8. KUBE_CONTROLLER_MANAGER_OPTS=" \${KUBE_LOGTOSTDERR} \\
  9. \${KUBE_LOG_LEVEL} \\
  10. \${KUBE_MASTER} \\
  11. \${KUBE_LEADER_ELECT}"
  12. cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
  13. [Unit]
  14. Description=Kubernetes Controller Manager
  15. Documentation=https://github.com/kubernetes/kubernetes
  16. [Service]
  17. EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
  18. ExecStart=/opt/kubernetes/bin/kube-controller-manager ${KUBE_CONTROLLER_MANAGER_OPTS}
  19. Restart=on-failure
  20. [Install]
  21. WantedBy=multi-user.target
  22. EOF
  23. systemctl daemon-reload
  24. systemctl enable kube-controller-manager
  25. systemctl restart kube-controller-manager
  1. [root@dev-161 shell]# sh controller-manager.sh
  2. Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
  3. [root@dev-161 shell]# systemctl start kube-controller-manager
  4. [root@dev-161 shell]# systemctl status kube-controller-manager
  5. kube-controller-manager.service - Kubernetes Controller Manager
  6. Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
  7. Active: active (running) since Thu 2018-03-22 11:17:34 CST; 13s ago
  8. Docs: https://github.com/kubernetes/kubernetes
  9. Main PID: 6532 (kube-controller)
  10. Memory: 15.2M
  11. CGroup: /system.slice/kube-controller-manager.service
  12. └─6532 /opt/kubernetes/bin/kube-controller-manager --logtostderr=true --v=4 --master=172.16.1.161:8080 --leader-elect
  1. MASTER_ADDRESS=${1:-"172.16.1.161"}
  2. cat <<EOF >/opt/kubernetes/cfg/kube-scheduler
  3. KUBE_LOGTOSTDERR="--logtostderr=true"
  4. KUBE_LOG_LEVEL="--v=4"
  5. KUBE_MASTER="--master=${MASTER_ADDRESS}:8080"
  6. KUBE_LEADER_ELECT="--leader-elect"
  7. KUBE_SCHEDULER_ARGS=""
  8. EOF
  9. KUBE_SCHEDULER_OPTS=" \${KUBE_LOGTOSTDERR} \\
  10. \${KUBE_LOG_LEVEL} \\
  11. \${KUBE_MASTER} \\
  12. \${KUBE_LEADER_ELECT} \\
  13. \$KUBE_SCHEDULER_ARGS"
  14. cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
  15. [Unit]
  16. Description=Kubernetes Scheduler
  17. Documentation=https://github.com/kubernetes/kubernetes
  18. [Service]
  19. EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
  20. ExecStart=/opt/kubernetes/bin/kube-scheduler ${KUBE_SCHEDULER_OPTS}
  21. Restart=on-failure
  22. [Install]
  23. WantedBy=multi-user.target
  24. EOF
  25. systemctl daemon-reload
  26. systemctl enable kube-scheduler
  27. systemctl restart kube-scheduler
  1. [root@dev-161 shell]# sh scheduler.sh
  2. Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
  3. [root@dev-161 shell]# systemctl start kube-scheduler
  4. [root@dev-161 shell]# systemctl status kube-scheduler
  5. kube-scheduler.service - Kubernetes Scheduler
  6. Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled)
  7. Active: active (running) since Thu 2018-03-22 11:22:28 CST; 11s ago
  8. Docs: https://github.com/kubernetes/kubernetes
  9. Main PID: 6601 (kube-scheduler)
  10. Memory: 6.6M
  11. CGroup: /system.slice/kube-scheduler.service
  12. └─6601 /opt/kubernetes/bin/kube-scheduler --logtostderr=true --v=4 --master=172.16.1.161:8080 --leader-elect

node节点部署(以dev-162为例)
  1. [root@dev-162 bin]# ll
  2. total 181948
  3. -rwxr-xr-x 1 root root 138166656 Mar 13 00:47 kubelet
  4. -rwxr-xr-x 1 root root 48144192 Mar 13 00:47 kube-proxy
  5. [root@dev-162 bin]# cp kubelet kube-proxy /opt/kubernetes/bin/
  6. [root@dev-162 bin]# ll /opt/kubernetes/bin/
  7. total 181948
  8. -rwxr-xr-x 1 root root 138166656 Mar 22 11:33 kubelet
  9. -rwxr-xr-x 1 root root 48144192 Mar 22 11:33 kube-proxy
  10. [root@dev-162 bin]# cd ../shell/
  11. [root@dev-162 shell]# ll
  12. total 12
  13. -rwxr-xr-x 1 root root 2387 Mar 20 11:30 flannel.sh
  14. -rwxr-xr-x 1 root root 3013 Mar 20 11:30 kubelet.sh
  15. -rwxr-xr-x 1 root root 1719 Mar 20 11:30 proxy.sh
  1. [root@dev-162 ~]#etcdctl mk /coreos.com/network/config "{\"Network\": \"10.10.0.0/16\", \"SubnetLen\": 24, \"Backend\": { \"Type\": \"vxlan\" } }"
  2. {"Network": "10.10.0.0/16", "SubnetLen": 24, "Backend": { "Type": "vxlan" } }
  3. [root@dev-161 shell]# etcdctl get /coreos.com/network/config
  4. {"Network": "10.10.0.0/16", "SubnetLen": 24, "Backend": { "Type": "vxlan" } }
  1. FLANNEL_ETCD_ENDPOINTS="http://172.16.1.161:2379,http://172.16.1.162:2379,http://172.16.1.163:2379"
  2. FLANNEL_ETCD_PREFIX="/coreos.com/network"
  1. [root@dev-162 ~]# systemctl start flanneld
  2. [root@dev-162 ~]# systemctl status flanneld
  3. flanneld.service - Flanneld overlay address etcd agent
  4. Loaded: loaded (/usr/lib/systemd/system/flanneld.service; enabled; vendor preset: disabled)
  5. Active: active (running) since Thu 2018-03-22 13:14:19 CST; 2min 52s ago
  6. Process: 6493 ExecStartPost=/usr/libexec/flannel/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker (code=exited, status=0/SUCCESS)
  7. Main PID: 6485 (flanneld)
  8. Memory: 4.9M
  9. CGroup: /system.slice/flanneld.service
  10. └─6485 /usr/bin/flanneld -etcd-endpoints=http://172.16.1.161:2379,http://172.16.1.162:2379,http://172.16.1.163:2379 -etcd-prefix=/coreos.com/network
  1. [root@dev-162 flannel]# cat /run/flannel/docker
  2. DOCKER_OPT_BIP="--bip=10.10.63.1/24"
  3. DOCKER_OPT_IPMASQ="--ip-masq=true"
  4. DOCKER_OPT_MTU="--mtu=1450"
  5. DOCKER_NETWORK_OPTIONS=" --bip=10.10.63.1/24 --ip-masq=true --mtu=1450"
  6. [root@dev-162 flannel]# cat /run/flannel/subnet.env
  7. FLANNEL_NETWORK=10.10.0.0/16
  8. FLANNEL_SUBNET=10.10.63.1/24
  9. FLANNEL_MTU=1450
  10. FLANNEL_IPMASQ=false
  1. ## 添加--log-level=error $DOCKER_NETWORK_OPTIONS
  2. ExecStart=/usr/bin/dockerd --log-level=error $DOCKER_NETWORK_OPTIONS
  3. [root@dev-162 flannel]# systemctl daemon-reload
  4. [root@dev-162 flannel]# systemctl start docker
  5. [root@dev-162 flannel]# systemctl status docker
  6. docker.service - Docker Application Container Engine
  7. Loaded: loaded (/usr/lib/systemd/system/docker.service; disabled; vendor preset: disabled)
  8. Drop-In: /usr/lib/systemd/system/docker.service.d
  9. └─flannel.conf
  10. Active: active (running) since Thu 2018-03-22 13:32:15 CST; 8s ago
  11. Docs: https://docs.docker.com
  12. Main PID: 9996 (dockerd)
  13. Memory: 23.9M
  14. CGroup: /system.slice/docker.service
  15. ├─ 9996 /usr/bin/dockerd --log-level=error --bip=10.10.63.1/24 --ip-masq=true --mtu=1450
  16. └─10001 docker-containerd --config /var/run/docker/containerd/containerd.toml
  1. MASTER_ADDRESS=${1:-"172.16.1.161"}
  2. NODE_ADDRESS=${2:-"172.16.1.162"}
  3. DNS_SERVER_IP=${3:-"10.10.10.2"}
  4. DNS_DOMAIN=${4:-"cluster.local"}
  5. KUBECONFIG_DIR=${KUBECONFIG_DIR:-/opt/kubernetes/cfg}
  6. # Generate a kubeconfig file
  7. cat <<EOF > "${KUBECONFIG_DIR}/kubelet.kubeconfig"
  8. apiVersion: v1
  9. kind: Config
  10. clusters:
  11. - cluster:
  12. server: http://${MASTER_ADDRESS}:8080/
  13. name: local
  14. contexts:
  15. - context:
  16. cluster: local
  17. name: local
  18. current-context: local
  19. EOF
  20. cat <<EOF >/opt/kubernetes/cfg/kubelet
  21. KUBE_LOGTOSTDERR="--logtostderr=true"
  22. KUBE_LOG_LEVEL="--v=4"
  23. NODE_ADDRESS="--address=${NODE_ADDRESS}"
  24. NODE_PORT="--port=10250"
  25. NODE_HOSTNAME="--hostname-override=${NODE_ADDRESS}"
  26. KUBELET_KUBECONFIG="--kubeconfig=${KUBECONFIG_DIR}/kubelet.kubeconfig"
  27. KUBE_ALLOW_PRIV="--allow-privileged=false"
  28. KUBELET__DNS_IP="--cluster-dns=${DNS_SERVER_IP}"
  29. KUBELET_DNS_DOMAIN="--cluster-domain=${DNS_DOMAIN}"
  30. KUBELET_ARGS=""
  31. EOF
  32. KUBELET_OPTS=" \${KUBE_LOGTOSTDERR} \\
  33. \${KUBE_LOG_LEVEL} \\
  34. \${NODE_ADDRESS} \\
  35. \${NODE_PORT} \\
  36. \${NODE_HOSTNAME} \\
  37. \${KUBELET_KUBECONFIG} \\
  38. \${KUBE_ALLOW_PRIV} \\
  39. \${KUBELET__DNS_IP} \\
  40. \${KUBELET_DNS_DOMAIN} \\
  41. \$KUBELET_ARGS"
  42. cat <<EOF >/usr/lib/systemd/system/kubelet.service
  43. [Unit]
  44. Description=Kubernetes Kubelet
  45. After=docker.service
  46. Requires=docker.service
  47. [Service]
  48. EnvironmentFile=-/opt/kubernetes/cfg/kubelet
  49. ExecStart=/opt/kubernetes/bin/kubelet ${KUBELET_OPTS}
  50. Restart=on-failure
  51. KillMode=process
  52. [Install]
  53. WantedBy=multi-user.target
  54. EOF
  55. systemctl daemon-reload
  56. systemctl enable kubelet
  57. systemctl restart kubelet
  1. ## File exists问题处理
  2. [root@dev-163 shell]# sh kubelet.sh
  3. Failed to execute operation: File exists
  4. [root@dev-163 shell]# systemctl disable kubelet
  5. Removed symlink /etc/systemd/system/multi-user.target.wants/kubelet.service.
  6. [root@dev-162 shell]# sh kubelet.sh
  7. Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
  8. [root@dev-162 shell]# systemctl start kubelet
  9. [root@dev-162 shell]# systemctl status kubelet
  10. kubelet.service - Kubernetes Kubelet
  11. Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
  12. Active: active (running) since Thu 2018-03-22 13:33:18 CST; 41s ago
  13. Main PID: 10129 (kubelet)
  14. Memory: 22.7M
  15. CGroup: /system.slice/kubelet.service
  16. └─10129 /opt/kubernetes/bin/kubelet --logtostderr=true --v=4 --address=172.16.1.162 --port=10250 --hostname-override=172.16.1.162 --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig --allow-privileged=false --cluster-dns=10.10.10.2 --cluster-domain=cluster.local
  1. MASTER_ADDRESS=${1:-"172.16.1.161"}
  2. NODE_ADDRESS=${2:-"172.16.1.162"}
  3. cat <<EOF >/opt/kubernetes/cfg/kube-proxy
  4. KUBE_LOGTOSTDERR="--logtostderr=true"
  5. KUBE_LOG_LEVEL="--v=4"
  6. NODE_HOSTNAME="--hostname-override=${NODE_ADDRESS}"
  7. KUBE_MASTER="--master=http://${MASTER_ADDRESS}:8080"
  8. EOF
  9. KUBE_PROXY_OPTS=" \${KUBE_LOGTOSTDERR} \\
  10. \${KUBE_LOG_LEVEL} \\
  11. \${NODE_HOSTNAME} \\
  12. \${KUBE_MASTER}"
  13. cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
  14. [Unit]
  15. Description=Kubernetes Proxy
  16. After=network.target
  17. [Service]
  18. EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
  19. ExecStart=/opt/kubernetes/bin/kube-proxy ${KUBE_PROXY_OPTS}
  20. Restart=on-failure
  21. [Install]
  22. WantedBy=multi-user.target
  23. EOF
  24. systemctl daemon-reload
  25. systemctl enable kube-proxy
  26. systemctl restart kube-proxy
  1. [root@dev-162 shell]# sh proxy.sh
  2. Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
  3. [root@dev-162 shell]# systemctl start kube-proxy
  4. [root@dev-162 shell]# systemctl status kube-proxy
  5. kube-proxy.service - Kubernetes Proxy
  6. Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
  7. Active: active (running) since Thu 2018-03-22 13:36:01 CST; 15s ago
  8. Main PID: 10266 (kube-proxy)
  9. Memory: 7.2M
  10. CGroup: /system.slice/kube-proxy.service
  11. 10266 /opt/kubernetes/bin/kube-proxy --logtostderr=true --v=4 --hostname-override=172.16.1.162 --master=http://172.16.1.161:8080

最终效果
  1. [root@dev-161 shell]# etcdctl ls /coreos.com/network/subnets
  2. /coreos.com/network/subnets/10.10.63.0-24
  3. /coreos.com/network/subnets/10.10.26.0-24
  4. /coreos.com/network/subnets/10.10.44.0-24
  5. /coreos.com/network/subnets/10.10.27.0-24
  6. [root@dev-161 shell]# kubectl get nodes
  7. NAME STATUS ROLES AGE VERSION
  8. 172.16.1.162 Ready <none> 2h v1.8.9
  9. 172.16.1.163 Ready <none> 15m v1.8.9
  10. 172.16.1.164 Ready <none> 9m v1.8.9
  11. 172.16.1.165 Ready <none> 5m v1.8.9
  12. [root@dev-162 ~]# route -n
  13. Kernel IP routing table
  14. Destination Gateway Genmask Flags Metric Ref Use Iface
  15. 0.0.0.0 172.16.1.1 0.0.0.0 UG 0 0 0 ens3
  16. 10.10.0.0 0.0.0.0 255.255.0.0 U 0 0 0 flannel.1
  17. 10.10.63.0 0.0.0.0 255.255.255.0 U 0 0 0 docker0
  18. 169.254.0.0 0.0.0.0 255.255.0.0 U 1002 0 0 ens3
  19. 172.16.0.0 0.0.0.0 255.255.0.0 U 0 0 0 ens3

Tips
  1. ## 由于国内网络环境的原因,pause镜像托管在google的仓库,需要作如下操作(所有node节点):
  2. docker pull registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0
  3. docker tag registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 gcr.io/google_containers/pause-amd64:3.0
  1. [root@dev-161 nginx]# vim /usr/lib/systemd/system/docker.service
  2. ExecStart=/usr/bin/dockerd --registry-mirror=https://registry.docker-cn.com
添加新批注
在作者公开此批注前,只有你和作者可见。
回复批注