[关闭]
@chensiqi 2021-06-04T14:33:32.000000Z 字数 26556 阅读 1668

容器自动化(十):K8S容器云平台入门(下)

云计算专题之容器自动化

--私人课件,不公开,不出版,禁止传播

想做好运维工作,人先要学会勤快;
居安而思危,勤记而补拙,方可不断提高;
别人资料不论你用着再如何爽那也是别人的;
自己总结东西是你自身特有的一种思想与理念的展现;
精髓不是看出来的,精髓是记出来的;
请同学们在学习的过程中养成好的学习习惯;
勤于实践,抛弃教案,勤于动手,整理文档。

第二章 Kubernetes生产级高可用集群部署

角色 IP 组件 推荐配置
master01 192.168.200.207 kube-apiserver/kube-controller-manager/kube-scheduler/etcd CPU:2C+ 内存:4G+
master02 192.168.200.208 kube-apiserver/kube-controller-manager/kube-scheduler/etcd CPU:2C+ 内存:4G+
node01 192.168.200.209 kubelet/kube-proxy/docker/flannel/etcd CPU:2C+ 内存:4G+
node02 192.168.200.210 kubelet/kube-proxy/docker/flannel CPU:2C+ 内存:4G+
Load_Balancer_Master 192.168.200.205,VIP:192.168.200.100 Nginx L4 CPU:2C+ 内存:4G+
Load_Balancer_Backup 192.168.200.206 Nginx L4 CPU:2C+ 内存:4G+
Registry_Harbor 192.168.200.211 Harbor CPU:2C+ 内存:4G+

QQ截图20190322214408.png-82.6kB

2.11 多master集群-部署Master02组件

假如我们要在Master01的基础上创建Master02,只需要进行以下几步操作

2.11.1 拷贝Master01上/opt/kubernetes目录所有内容到Master02的/opt下

  1. [root@Master01 ~]# scp -r /opt/kubernetes/ root@192.168.200.208:/opt/
  2. root@192.168.200.208's password:
  3. kube-apiserver 100% 184MB 103.7MB/s 00:01
  4. kubectl 100% 55MB 100.9MB/s 00:00
  5. kube-controller-manager 100% 155MB 92.6MB/s 00:01
  6. kube-scheduler 100% 55MB 100.2MB/s 00:00
  7. token.csv 100% 84 67.0KB/s 00:00
  8. kube-apiserver 100% 958 818.7KB/s 00:00
  9. kube-controller-manager 100% 483 1.2MB/s 00:00
  10. kube-scheduler 100% 93 44.1KB/s 00:00
  11. ca.pem 100% 1359 3.3MB/s 00:00
  12. server-key.pem 100% 1675 3.6MB/s 00:00
  13. server.pem 100% 1643 3.1MB/s 00:00
  14. ca-key.pem 100% 1679 3.3MB/s 00:00

2.11.2 拷贝Master01上的systemd管理的启动脚本到Master02上

  1. [root@Master01 ~]# cd /usr/lib/systemd/system
  2. [root@Master01 system]# pwd
  3. /usr/lib/systemd/system
  4. [root@Master01 system]# ls kube-apiserver.service kube-scheduler.service kube-controller-manager.service
  5. kube-apiserver.service kube-controller-manager.service kube-scheduler.service
  6. [root@Master01 system]# scp kube-apiserver.service kube-controller-manager.service kube-scheduler.service root@192.168.200.208:/usr/lib/systemd/system/
  7. root@192.168.200.208's password:
  8. kube-apiserver.service 100% 282 645.3KB/s 00:00
  9. kube-controller-manager.service 100% 317 457.8KB/s 00:00
  10. kube-scheduler.service 100% 282 670.3KB/s 00:00

2.11.3 在Master02上修改刚才拷贝的kube-apiserver文件里涉及到IP地址部分的代码

  1. #修改kube-apiserver配置文件
  2. [root@Master02 ~]# cd /opt/kubernetes/
  3. [root@Master02 kubernetes]# cd cfg/
  4. [root@Master02 cfg]# sed -n '5p;7p' kube-apiserver
  5. --bind-address=192.168.200.207 \ #本行修改为本地IP
  6. --advertise-address=192.168.200.207 \ #本行修改为本地IP
  7. [root@Master02 cfg]# vim kube-apiserver +5
  8. [root@Master02 cfg]# sed -n '5p;7p' kube-apiserver
  9. --bind-address=192.168.200.208 \
  10. --advertise-address=192.168.200.208 \

2.11.4 启动Master02的各个组件服务,并设置开机自动启动

  1. #启动kube-apiserver服务
  2. [root@Master02 ~]# systemctl start kube-apiserver
  3. [root@Master02 ~]# systemctl enable kube-apiserver
  4. Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
  5. #验证apiserver服务是否启动成功
  6. [root@Master02 ~]# ps -ef | grep kube-apiserver | grep -v grep
  7. root 1702 1 26 19:51 ? 00:00:07 /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://192.168.200.207:2379,https://192.168.200.208:2379,https://192.168.200.209:2379 --bind-address=192.168.200.208 --secure-port=6443 --advertise-address=192.168.200.208 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem
  8. #启动kube-scheduler服务
  9. [root@Master02 ~]# systemctl start kube-scheduler
  10. [root@Master02 ~]# systemctl enable kube-scheduler
  11. Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
  12. #验证kube-scheduler服务是否启动成功
  13. [root@Master02 ~]# ps -ef | grep kube-scheduler | grep -v grep
  14. root 1739 1 0 19:53 ? 00:00:00 /opt/kubernetes/bin/kube-scheduler --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect
  15. #启动kube-controller-manager服务
  16. [root@Master02 ~]# systemctl start kube-controller-manager
  17. [root@Master02 ~]# systemctl enable kube-controller-manager
  18. Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
  19. #验证kube-controller-manager是否启动成功
  20. [root@Master02 ~]# ps -ef | grep kube-controller-manager | grep -v grep
  21. root 1777 1 5 19:54 ? 00:00:00 /opt/kubernetes/bin/kube-controller-manager --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect=true --address=127.0.0.1 --service-cluster-ip-range=10.10.10.0/24 --cluster-name=kubernetes --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem --root-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem --experimental-cluster-signing-duration=87600h0m0s

2.11.5 在Master02上进行集群状态检查

  1. [root@Master02 ~]# ln -s /opt/kubernetes/bin/kubectl /usr/local/bin/
  2. [root@Master02 ~]# which kubectl
  3. /usr/local/bin/kubectl
  4. [root@Master02 ~]# kubectl get cs
  5. NAME STATUS MESSAGE ERROR
  6. scheduler Healthy ok
  7. controller-manager Healthy ok
  8. etcd-1 Healthy {"health":"true"}
  9. etcd-0 Healthy {"health":"true"}
  10. etcd-2 Healthy {"health":"true"}
  11. [root@Master02 ~]# kubectl get node
  12. NAME STATUS ROLES AGE VERSION
  13. 192.168.200.209 Ready <none> 12d v1.12.1
  14. 192.168.200.210 Ready <none> 11d v1.12.1

2.12 多master集群-Nginx+keepalived(高可用)

Nginx很常用,而且现在的Yum安装版的配置很全,已经无需源码编译
而且,从1.9.0版本开始,Nginx在编译时只需要加上--with-stream,即可支持四层负载均衡
那么,我们如何从官网找到我们需要的yum安装源呢?
http://nginx.org

1.png-90.2kB

2.png-70.5kB

3.png-65.2kB

4.png-59.7kB

需要注意的是,Yum源配置里的$releasever变量需要修改成我们用的操作系统的版本,我们用CentOS 7那么就写7即可。

2.12.1 Yum安装Nginx

  1. #配置Nginx的Yum源文件
  2. [root@LB-Nginx-Master ~]# vim /etc/yum.repos.d/nginx.repo
  3. [root@LB-Nginx-Master ~]# cat /etc/yum.repos.d/nginx.repo
  4. [nginx]
  5. name=nginx repo
  6. baseurl=http://nginx.org/packages/centos/7/$basearch/
  7. gpgcheck=0
  8. enabled=1
  9. #Yum安装Nginx
  10. [root@LB-Nginx-Master ~]# yum -y install nginx

2.12.2 修改Nginx配置文件

  1. #修改一下配置文件的参数,添加stream负载均衡池
  2. #配置文件修改后的结果如下所示:
  3. [root@LB-Nginx-Master ~]# cat /etc/nginx/nginx.conf
  4. user nginx;
  5. worker_processes 4; #修改一下processes
  6. error_log /var/log/nginx/error.log warn;
  7. pid /var/run/nginx.pid;
  8. events {
  9. worker_connections 1024;
  10. }
  11. stream { #添加一个stream四层负载均衡池
  12. upstream k8s-apiserver {
  13. server 192.168.200.207:6443;
  14. server 192.168.200.208:6443;
  15. }
  16. server {
  17. listen 192.168.200.205:6443; #模拟监听的本地端口
  18. proxy_pass k8s-apiserver; #因为四层所以这里不是URL,请同学们注意
  19. }
  20. }
  21. http {
  22. include /etc/nginx/mime.types;
  23. default_type application/octet-stream;
  24. log_format main '$remote_addr - $remote_user [$time_local] "$request" '
  25. '$status $body_bytes_sent "$http_referer" '
  26. '"$http_user_agent" "$http_x_forwarded_for"';
  27. access_log /var/log/nginx/access.log main;
  28. sendfile on;
  29. #tcp_nopush on;
  30. keepalive_timeout 65;
  31. #gzip on;
  32. include /etc/nginx/conf.d/*.conf;
  33. }

2.12.3 启动Nginx

  1. [root@LB-Nginx-Master ~]# systemctl start nginx
  2. [root@LB-Nginx-Master ~]# ps -ef | grep nginx | grep -v grep
  3. root 21840 1 0 21:03 ? 00:00:00 nginx: master process /usr/sbin/nginx -c /etc/nginx/nginx.conf
  4. nginx 21841 21840 0 21:03 ? 00:00:00 nginx: worker process
  5. nginx 21842 21840 0 21:03 ? 00:00:00 nginx: worker process
  6. nginx 21843 21840 0 21:03 ? 00:00:00 nginx: worker process
  7. nginx 21844 21840 0 21:03 ? 00:00:00 nginx: worker process
  8. [root@LB-Nginx-Master ~]# netstat -antp | grep 6443
  9. tcp 0 0 192.168.200.205:6443 0.0.0.0:* LISTEN 21840/nginx: master

2.12.4 修改Node节点的组件配置文件,将指向Master的IP地址,指向LB-Nginx(目前没有做VIP)

  1. #修改Node01节点配置文件
  2. [root@node01 cfg]# pwd
  3. /opt/kubernetes/cfg
  4. [root@node01 cfg]# grep 207 *
  5. bootstrap.kubeconfig: server: https://192.168.200.207:6443 #修改这个文件
  6. flanneld:FLANNEL_OPTIONS="--etcd-endpoints=https://192.168.200.207:2379,https://192.168.200.208:2379,https://192.168.200.209:2379 -etcd-cafile=/opt/etcd/ssl/ca.pem -etcd-certfile=/opt/etcd/ssl/server.pem -etcd-keyfile=/opt/etcd/ssl/server-key.pem"
  7. kubelet.kubeconfig: server: https://192.168.200.207:6443 #修改这个文件
  8. kube-proxy.kubeconfig: server: https://192.168.200.207:6443 #修改这个文件
  9. #查看修改后的结果
  10. [root@node01 cfg]# vim bootstrap.kubeconfig +5
  11. [root@node01 cfg]# vim kubelet.kubeconfig +5
  12. [root@node01 cfg]# vim kube-proxy.kubeconfig +5
  13. [root@node01 cfg]# grep -n 205 *
  14. bootstrap.kubeconfig:5: server: https://192.168.200.205:6443
  15. kubelet.kubeconfig:5: server: https://192.168.200.205:6443
  16. kube-proxy.kubeconfig:5: server: https://192.168.200.205:6443
  17. #重启动
  18. [root@node01 cfg]# systemctl restart kubelet
  19. [root@node01 cfg]# systemctl restart kube-proxy

在其他Node节点重复上面的操作即可。

2.12.5 在Master节点上进行集群访问测试

  1. [root@Master01 system]# kubectl get node
  2. NAME STATUS ROLES AGE VERSION
  3. 192.168.200.209 Ready <none> 12d v1.12.1
  4. 192.168.200.210 Ready <none> 12d v1.12.1
  5. [root@Master01 system]# kubectl get svc
  6. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  7. kubernetes ClusterIP 10.0.0.1 <none> 443/TCP 23d
  8. nginx NodePort 10.0.0.192 <none> 80:31442/TCP 11d

2.12.6 给LB-Nginx添加一个访问日志(便于观察学习)

  1. #添加K8S访问日志
  2. [root@LB-Nginx-Master ~]# vim /etc/nginx/nginx.conf
  3. [root@LB-Nginx-Master ~]# sed -n '13,24p' /etc/nginx/nginx.conf
  4. stream {
  5. log_format main "$remote_addr $upstream_addr - $time_local $status"; #添加本行(注意行尾分号)
  6. access_log /var/log/nginx/k8s-access.log main; #添加本行
  7. upstream k8s-apiserver {
  8. server 192.168.200.207:6443;
  9. server 192.168.200.208:6443;
  10. }
  11. server {
  12. listen 192.168.200.205:6443;
  13. proxy_pass k8s-apiserver;
  14. }
  15. }
  16. 特别说明:
  17. remote_addr:请求的来源IP
  18. upstream_addr:负载均衡池的转发目标IP
  19. time_local:本地时间
  20. status:访问状态码

2.12.7 重启Nginx,并观察日志

  1. [root@LB-Nginx-Master ~]# systemctl reload nginx
  2. [root@LB-Nginx-Master ~]# ls /var/log/nginx/
  3. access.log error.log k8s-access.log
  4. #重启Node节点的kubelet服务,观察k8s-access.log
  5. [root@node01 cfg]# systemctl restart kubelet
  6. [root@node02 cfg]# systemctl restart kubelet
  7. #查看日志
  8. [root@LB-Nginx-Master ~]# cat /var/log/nginx/k8s-access.log
  9. 192.168.200.209 192.168.200.207:6443 - 21/Apr/2019:21:44:02 +0800 200
  10. 192.168.200.209 192.168.200.207:6443 - 21/Apr/2019:21:44:02 +0800 200
  11. 192.168.200.210 192.168.200.208:6443 - 21/Apr/2019:21:45:04 +0800 200
  12. 192.168.200.210 192.168.200.207:6443 - 21/Apr/2019:21:45:04 +0800 200

2.12.8 部署Nginx-Slave及keepalived高可用

Nginx+keepalived的高可用过程部署,由于同学们已经学过了,这里就不再描述
配置完高可用以后,同学们需要注意修改Node节点的指向IP地址,从LB-Nginx-Master上指向VIP地址即可

第三章 kubectl命令行管理工具

3.1 kubectl管理命令概要

QQ截图20190421223622.png-133.4kB

QQ截图20190421223633.png-135.3kB

  1. #kubectl create通过文件创建资源
  2. [root@Master01 system]# kubectl create --help | grep -A 2 Usage
  3. Usage:
  4. kubectl create -f FILENAME [options]
  5. #kubectl run在集群中运行一个指定的镜像
  6. [root@Master01 system]# kubectl run --help | grep -A 2 Usage
  7. Usage:
  8. kubectl run NAME --image=image [--env="key=value"] [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json] [--command] -- [COMMAND] [args...] [options]
  9. #kubectl expose将容器公开为一个服务
  10. [root@Master01 system]# kubectl expose --help | grep -A 2 Usage
  11. Usage:
  12. kubectl expose (-f FILENAME | TYPE NAME) [--port=port] [--protocol=TCP|UDP|SCTP] [--target-port=number-or-name] [--name=name] [--external-ip=external-ip-of-service] [--type=type] [options]
  13. #kubectl set在对象上设置指定的功能
  14. [root@Master01 system]# kubectl set --help | grep -A 2 Usage
  15. Usage:
  16. kubectl set SUBCOMMAND [options]
  17. #kubectl get显示一个或多个资源
  18. [root@Master01 system]# kubectl get --help | grep -A 2 Usage
  19. Usage:
  20. kubectl get [(-o|--output=)json|yaml|wide|custom-columns=...|custom-columns-file=...|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=...] (TYPE[.VERSION][.GROUP] [NAME | -l label] | TYPE[.VERSION][.GROUP]/NAME ...) [flags] [options]
  21. #kubectl edit使用默认编辑器编辑一个资源
  22. [root@Master01 system]# kubectl edit --help | grep -A 2 Usage
  23. Usage:
  24. kubectl edit (RESOURCE/NAME | -f FILENAME) [options]
  25. #kubectl delete通过文件名,资源名称等删除资源
  26. [root@Master01 system]# kubectl delete --help | grep -A 2 Usage
  27. Usage:
  28. kubectl delete ([-f FILENAME] | TYPE [(NAME | -l label | --all)]) [options]

实操演示:认识kubectl

  1. #获取集群中的pods资源信息
  2. [root@Master01 system]# kubectl get pod
  3. NAME READY STATUS RESTARTS AGE
  4. nginx-dbddb74b8-ggpxm 1/1 Running 3 12d
  5. #获取集群中所有资源信息
  6. [root@Master01 system]# kubectl get all
  7. NAME READY STATUS RESTARTS AGE
  8. pod/nginx-dbddb74b8-ggpxm 1/1 Running 3 12d
  9. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  10. service/kubernetes ClusterIP 10.0.0.1 <none> 443/TCP 23d
  11. service/nginx NodePort 10.0.0.192 <none> 80:31442/TCP 12d
  12. NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
  13. deployment.apps/nginx 1 1 1 1 12d
  14. NAME DESIRED CURRENT READY AGE
  15. replicaset.apps/nginx-dbddb74b8 1 1 1 12d
  16. #更加详细的展示集群中所有资源的信息
  17. [root@Master01 system]# kubectl get all -o wide
  18. NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
  19. pod/nginx-dbddb74b8-ggpxm 1/1 Running 3 12d 172.17.12.2 192.168.200.209 <none>
  20. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
  21. service/kubernetes ClusterIP 10.0.0.1 <none> 443/TCP 23d <none>
  22. service/nginx NodePort 10.0.0.192 <none> 80:31442/TCP 12d run=nginx
  23. NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
  24. deployment.apps/nginx 1 1 1 1 12d nginx nginx run=nginx
  25. NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
  26. replicaset.apps/nginx-dbddb74b8 1 1 1 12d nginx nginx pod-template-hash=dbddb74b8,run=nginx
  27. #更加详细的展示pods中资源的信息
  28. [root@Master01 system]# kubectl get pods -o wide
  29. NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
  30. nginx-dbddb74b8-ggpxm 1/1 Running 3 12d 172.17.12.2 192.168.200.209 <none>
  31. #查看指定命名空间的pods资源
  32. [root@Master01 system]# kubectl get pods -o wide -n kube-system
  33. NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
  34. kubernetes-dashboard-6bff7dc67d-jl7pg 1/1 Running 1 14h 172.17.12.3 192.168.200.209 <none>
  35. 特别说明:
  36. 命名空间就相当于K8S中的一个虚拟集群

3.2 kubectl工具管理集群应用生命周期

当我们用kubectl下指令去创建pods的时候
它会先创建一个deployment资源部署监控器来执行pods的创建任务
同时,deployment还会监控pods资源的健康状态,如果发现pods副本缺失,会自动创建pods来弥补

3.2.1 Pods创建

创建的作用就是生成一个deployment来执行pods资源的创建任务

  1. #查看默认命名空间的所有pods资源信息
  2. [root@Master01 ~]# kubectl get pods
  3. No resources found.
  4. #启动一个deploy名字叫做nginx
  5. [root@Master01 ~]# kubectl run nginx --replicas=3 --image=nginx:1.14 --port=80
  6. kubectl run --generator=deployment/apps.v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl create instead.
  7. deployment.apps/nginx created
  8. #查看刚刚创建的pods副本
  9. [root@Master01 ~]# kubectl get pods
  10. NAME READY STATUS RESTARTS AGE
  11. nginx-65868c9459-2pz44 1/1 Running 0 3s
  12. nginx-65868c9459-9tlzt 1/1 Running 0 3s
  13. nginx-65868c9459-rfdkd 1/1 Running 0 3s
  14. #查看pods资源及deployment资源部署监控器
  15. [root@Master01 ~]# kubectl get pods,deploy
  16. NAME READY STATUS RESTARTS AGE
  17. pod/nginx-65868c9459-2pz44 1/1 Running 0 75s
  18. pod/nginx-65868c9459-9tlzt 1/1 Running 0 75s
  19. pod/nginx-65868c9459-rfdkd 1/1 Running 0 75s
  20. NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
  21. deployment.extensions/nginx 3 3 3 3 75s

3.2.2 Pods发布

发布的作用就是打开pods的对外访问服务(暴露对外的访问端口)

QQ截图20190422214133.png-205.2kB

发布的目的是开启node集群内的Pods的ClusterIP访问端口的同时开启集群的外部访问服务端口

  1. #查看集群的service服务(对外访问服务)
  2. [root@Master01 ~]# kubectl get svc
  3. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  4. kubernetes ClusterIP 10.0.0.1 <none> 443/TCP 24d
  5. #将pods公开为一个服务,提供外部访问
  6. [root@Master01 ~]# kubectl expose deployment nginx --port=80 --type=NodePort --target-port=80 --name=nginx-service
  7. service/nginx-service exposed
  8. [root@Master01 ~]# kubectl get svc
  9. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  10. kubernetes ClusterIP 10.0.0.1 <none> 443/TCP 24d
  11. nginx-service NodePort 10.0.0.193 <none> 80:46889/TCP 8s
  12. 说明:
  13. 80:46889 表示集群内Node节点之间Pod业务的ClusterIP访问端口80,映射到集群的外部访问服务端口46889

3.2.3 Pods更新

更新的作用就是升级镜像的版本或者副本数

  1. #查看pods的资源信息
  2. [root@Master01 ~]# kubectl get pods -o wide
  3. NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
  4. nginx-7b67cfbf9f-6nxmb 1/1 Running 1 48m 172.17.3.2 192.168.200.209 <none>
  5. nginx-7b67cfbf9f-7fmgx 1/1 Running 1 48m 172.17.74.2 192.168.200.210 <none>
  6. nginx-7b67cfbf9f-h6hkv 1/1 Running 1 48m 172.17.3.3 192.168.200.209 <none>
  7. #查看某个pod的镜像具体描述信息里的版本信息
  8. [root@Master01 ~]# kubectl describe pod nginx-7b67cfbf9f-6nxmb | grep nginx:1.14
  9. Image: nginx:1.14
  10. Normal Pulled 49m kubelet, 192.168.200.209 Container image "nginx:1.14" already present on machine
  11. Normal Pulled 10m kubelet, 192.168.200.209 Container image "nginx:1.14" already present on machine
  12. #更新pods的镜像的版本
  13. [root@Master01 ~]# kubectl set image deploy/nginx nginx=nginx:1.15
  14. deployment.extensions/nginx image updated
  15. #查看Pods资源情况(ImagePullBackOff为正在pull镜像中)
  16. [root@Master01 ~]# kubectl get pods
  17. NAME READY STATUS RESTARTS AGE
  18. nginx-56fbc658b-wgrqj 0/1 ImagePullBackOff 0 50s
  19. nginx-7b67cfbf9f-6nxmb 1/1 Running 1 53m
  20. nginx-7b67cfbf9f-7fmgx 1/1 Running 1 53m
  21. nginx-7b67cfbf9f-h6hkv 1/1 Running 1 53m
  22. [root@Master01 ~]# kubectl get pods
  23. NAME READY STATUS RESTARTS AGE
  24. nginx-56fbc658b-wgrqj 0/1 ImagePullBackOff 0 101s
  25. nginx-7b67cfbf9f-6nxmb 1/1 Running 1 54m
  26. nginx-7b67cfbf9f-7fmgx 1/1 Running 1 54m
  27. nginx-7b67cfbf9f-h6hkv 1/1 Running 1 54m
  28. [root@Master01 ~]# kubectl get pods
  29. NAME READY STATUS RESTARTS AGE
  30. nginx-56fbc658b-sfzbh 0/1 ContainerCreating 0 23s
  31. nginx-56fbc658b-wgrqj 1/1 Running 0 2m24s
  32. nginx-7b67cfbf9f-6nxmb 1/1 Running 1 54m
  33. nginx-7b67cfbf9f-7fmgx 1/1 Running 1 54m
  34. [root@Master01 ~]# kubectl get pods
  35. NAME READY STATUS RESTARTS AGE
  36. nginx-56fbc658b-br59r 1/1 Running 0 15s
  37. nginx-56fbc658b-sfzbh 1/1 Running 0 43s
  38. nginx-56fbc658b-wgrqj 1/1 Running 0 2m44s
  39. #查看pods的镜像的具体描述信息里的版本信息
  40. [root@Master01 ~]# kubectl describe pod nginx-56fbc658b-br59r | grep nginx:1.15
  41. Image: nginx:1.15
  42. Normal Pulled 69s kubelet, 192.168.200.209 Container image "nginx:1.15" already present on machine

QQ截图20190422221728.png-11.3kB

3.2.4 Pods回滚

当我们更新的项目突然有了问题;
我们需要将Pods回滚到上一个版本

  1. #查看Pods发布过的版本情况
  2. [root@Master01 ~]# kubectl rollout history deploy/nginx
  3. deployment.extensions/nginx
  4. REVISION CHANGE-CAUSE
  5. 1 <none>
  6. 2 <none>
  7. #回滚到上一个版本
  8. [root@Master01 ~]# kubectl rollout undo deploy/nginx
  9. deployment.extensions/nginx
  10. #查看pods回滚情况
  11. [root@Master01 ~]# kubectl get pods -o wide
  12. NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
  13. nginx-56fbc658b-br59r 0/1 Terminating 0 30m 172.17.3.2 192.168.200.209 <none>
  14. nginx-56fbc658b-sfzbh 0/1 Terminating 0 31m 172.17.3.3 192.168.200.209 <none>
  15. nginx-56fbc658b-wgrqj 0/1 Terminating 0 33m 172.17.74.3 192.168.200.210 <none>
  16. nginx-7b67cfbf9f-j4jvr 1/1 Running 0 9s 172.17.74.2 192.168.200.210 <none>
  17. nginx-7b67cfbf9f-nxn8n 1/1 Running 0 8s 172.17.3.2 192.168.200.209 <none>
  18. nginx-7b67cfbf9f-tmlds 1/1 Running 0 6s 172.17.3.3 192.168.200.209 <none>
  19. [root@Master01 ~]# kubectl get pods -o wide
  20. NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
  21. nginx-56fbc658b-br59r 0/1 Terminating 0 30m 172.17.3.2 192.168.200.209 <none>
  22. nginx-56fbc658b-sfzbh 0/1 Terminating 0 31m 172.17.3.3 192.168.200.209 <none>
  23. nginx-56fbc658b-wgrqj 0/1 Terminating 0 33m 172.17.74.3 192.168.200.210 <none>
  24. nginx-7b67cfbf9f-j4jvr 1/1 Running 0 14s 172.17.74.2 192.168.200.210 <none>
  25. nginx-7b67cfbf9f-nxn8n 1/1 Running 0 13s 172.17.3.2 192.168.200.209 <none>
  26. nginx-7b67cfbf9f-tmlds 1/1 Running 0 11s 172.17.3.3 192.168.200.209 <none>
  27. [root@Master01 ~]# kubectl get pods -o wide
  28. NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
  29. nginx-7b67cfbf9f-j4jvr 1/1 Running 0 24s 172.17.74.2 192.168.200.210 <none>
  30. nginx-7b67cfbf9f-nxn8n 1/1 Running 0 23s 172.17.3.2 192.168.200.209 <none>
  31. nginx-7b67cfbf9f-tmlds 1/1 Running 0 21s 172.17.3.3 192.168.200.209 <none>

QQ截图20190422224308.png-13kB

3.2.5 Pods删除

当我们需要删除Pods时;
我们不能去删除Pods,而是要删除管理Pods的deployment;
如果Pods被发布了对外访问服务,我们还需要删除service

  1. #查看Pods和deploy
  2. [root@Master01 ~]# kubectl get pods,deploy -o wide
  3. NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
  4. pod/nginx-7b67cfbf9f-j4jvr 1/1 Running 0 7m21s 172.17.74.2 192.168.200.210 <none>
  5. pod/nginx-7b67cfbf9f-nxn8n 1/1 Running 0 7m20s 172.17.3.2 192.168.200.209 <none>
  6. pod/nginx-7b67cfbf9f-tmlds 1/1 Running 0 7m18s 172.17.3.3 192.168.200.209 <none>
  7. NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
  8. deployment.extensions/nginx 3 3 3 3 92m nginx nginx:1.14 run=nginx
  9. #删除指定的deploy
  10. [root@Master01 ~]# kubectl delete deploy/nginx
  11. deployment.extensions "nginx" deleted
  12. #查看删除的结果
  13. [root@Master01 ~]# kubectl get pods,deploy -o wide
  14. NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
  15. pod/nginx-7b67cfbf9f-nxn8n 0/1 Terminating 0 7m40s <none> 192.168.200.209 <none>
  16. pod/nginx-7b67cfbf9f-tmlds 0/1 Terminating 0 7m38s <none> 192.168.200.209 <none>
  17. [root@Master01 ~]# kubectl get pods,deploy -o wide
  18. No resources found.
  19. #查看发布的service
  20. [root@Master01 ~]# kubectl get svc
  21. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  22. kubernetes ClusterIP 10.0.0.1 <none> 443/TCP 24d
  23. nginx-service NodePort 10.0.0.193 <none> 80:46889/TCP 79m
  24. #删除指定的service
  25. [root@Master01 ~]# kubectl delete svc/nginx-service
  26. service "nginx-service" deleted
  27. #查看删除结果
  28. [root@Master01 ~]# kubectl get svc
  29. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  30. kubernetes ClusterIP 10.0.0.1 <none> 443/TCP 24d

3.3 kubectl工具远程连接K8S集群

想要管理kubernetes集群,必须在K8S的Master节点上;
要想远程来管理,那么需要进行如下操作

3.3.1 通过脚本和证书生成kubectl的配置文件

  1. #在Master01上,准备一个ssh_kubeconfig.sh脚本
  2. [root@Master01 scripts]# pwd
  3. /server/scripts
  4. [root@Master01 scripts]# ls ssh_kubeconfig.sh
  5. ssh_kubeconfig.sh
  6. [root@Master01 scripts]# cat ssh_kubeconfig.sh
  7. #!/bin/bash
  8. kubectl config set-cluster kubernetes \
  9. --server=https://192.168.200.205:6443 \ #负载均衡器VIP或Master01的IP和端口
  10. --embed-certs=true \
  11. --certificate-authority=ca.pem \
  12. --kubeconfig=config
  13. kubectl config set-credentials cluster-admin \
  14. --certificate-authority=ca.pem \
  15. --embed-certs=true \
  16. --client-key=admin-key.pem \
  17. --client-certificate=admin.pem \
  18. --kubeconfig=config
  19. kubectl config set-context default --cluster=kubernetes --user=cluster-admin --kubeconfig=config
  20. kubectl config use-context default --kubeconfig=config
  21. #创建一个目录将需要的证书和脚本拷贝进去
  22. [root@Master01 scripts]# mkdir -p /ssh_config
  23. [root@Master01 scripts]# cp ssh_kubeconfig.sh /ssh_config/
  24. [root@Master01 scripts]# ls k8s-cert/admin.pem k8s-cert/admin-key.pem k8s-cert/ca.pem
  25. k8s-cert/admin-key.pem k8s-cert/admin.pem k8s-cert/ca.pem
  26. [root@Master01 scripts]# cp k8s-cert/admin.pem k8s-cert/admin-key.pem k8s-cert/ca.pem /ssh_config/
  27. [root@Master01 scripts]# cd /ssh_config/
  28. [root@Master01 ssh_config]# ls
  29. admin-key.pem admin.pem ca.pem ssh_kubeconfig.sh
  30. #执行脚本生成config远程连接的配置文件
  31. [root@Master01 ssh_config]# ./ssh_kubeconfig.sh
  32. Cluster "kubernetes" set.
  33. User "cluster-admin" set.
  34. Context "default" created.
  35. Switched to context "default".
  36. [root@Master01 ssh_config]# ls
  37. admin-key.pem admin.pem ca.pem config ssh_kubeconfig.sh

3.3.2 kubectl的远程连接测试

  1. #将kubectl二进制命令文件拷贝到Harbor(192.168.200.211)上
  2. [root@Master01 ssh_config]# scp /usr/local/bin/kubectl root@192.168.200.211:/usr/local/bin/
  3. The authenticity of host '192.168.200.211 (192.168.200.211)' can't be established.
  4. ECDSA key fingerprint is SHA256:DbY5ZLFytaIrrM0hUUSYj12DHprd/boGy3Kim6rMrJA.
  5. ECDSA key fingerprint is MD5:59:39:e3:1a:6e:f8:66:4e:0d:de:08:80:cc:89:f4:20.
  6. Are you sure you want to continue connecting (yes/no)? yes
  7. Warning: Permanently added '192.168.200.211' (ECDSA) to the list of known hosts.
  8. root@192.168.200.211's password:
  9. kubectl 100% 55MB 81.5MB/s 00:00
  10. #在Harbor的机器上进行远程连接测试
  11. [root@Harbor ~]# hostname -I
  12. 192.168.200.211
  13. [root@Harbor ~]# which kubectl
  14. /usr/local/bin/kubectl
  15. [root@Harbor ~]# kubectl get node
  16. The connection to the server localhost:8080 was refused - did you specify the right host or port?
  17. #我们发现kubectl并不起作用。
  18. #现在我们将之前生成的config配置文件也拷贝过来
  19. [root@Master01 ssh_config]# scp config root@192.168.200.211:~/
  20. root@192.168.200.211's password:
  21. config 100% 6273 7.9MB/s 00:00
  22. #再次在Harbor服务器上进行kubectl的远程连接测试
  23. [root@Harbor ~]# ls
  24. anaconda-ks.cfg config
  25. [root@Harbor ~]# kubectl --kubeconfig=./config get node
  26. NAME STATUS ROLES AGE VERSION
  27. 192.168.200.209 Ready <none> 24d v1.12.1
  28. 192.168.200.210 Ready <none> 23d v1.12.1

第四章 YAML文件(资源编排)

YAML是"YAML不是一种标记语言"的外语缩写;
它是一种直观的能够被电脑识别的数据序列化格式,是一个可读性高并且容易被人类阅读,容易和脚本语言交互,用来表达资料序列的编程语言;
它是类似于标准通用标记语言的子集XML的数据描述语言,语法比XML简单很多。

4.1 YAML文件格式说明

因为在之前的学习中,Ansible那里已经多次练习过;
所以,这里对于格式不再多说

4.2 YAML文件创建资源对象

QQ截图20190503215421.png-119.7kB

  1. #创建一个目录,制作资源创建的YAML
  2. [root@Master01 ~]# mkdir demo
  3. [root@Master01 ~]# cd demo/
  4. [root@Master01 demo]# pwd
  5. /root/demo
  6. [root@Master01 demo]# vim nginx-deployment.yaml
  7. [root@Master01 demo]# cat nginx-deployment.yaml
  8. apiVersion: apps/v1
  9. kind: Deployment
  10. metadata:
  11. name: nginx-deployment
  12. labels:
  13. app: nginx
  14. spec:
  15. replicas: 3
  16. selector:
  17. matchLabels:
  18. app: nginx
  19. template:
  20. metadata:
  21. labels:
  22. app: nginx
  23. spec:
  24. containers:
  25. - name: nginx
  26. image: nginx:1.15.4
  27. ports:
  28. - containerPort: 80
  29. #根据YAML资源文件创建资源
  30. [root@Master01 demo]# kubectl create --help | grep -A 2 Usage
  31. Usage:
  32. kubectl create -f FILENAME [options]
  33. [root@Master01 demo]# kubectl create -f nginx-deployment.yaml
  34. deployment.apps/nginx-deployment created
  35. [root@Master01 demo]# kubectl get pods
  36. NAME READY STATUS RESTARTS AGE
  37. nginx-deployment-d55b94fd-2hhw5 0/1 ContainerCreating 0 69s
  38. nginx-deployment-d55b94fd-gtvtp 1/1 Running 0 69s
  39. nginx-deployment-d55b94fd-gvfzg 1/1 Running 0 69s
  40. [root@Master01 demo]# kubectl get pods -o wide
  41. NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
  42. nginx-deployment-d55b94fd-2hhw5 0/1 ContainerCreating 0 85s <none> 192.168.200.209 <none>
  43. nginx-deployment-d55b94fd-gtvtp 1/1 Running 0 85s 172.17.74.2 192.168.200.210 <none>
  44. nginx-deployment-d55b94fd-gvfzg 1/1 Running 0 85s 172.17.3.3 192.168.200.209 <none>
  45. [root@Master01 demo]# kubectl get pods -o wide
  46. NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
  47. nginx-deployment-d55b94fd-2hhw5 1/1 Running 0 86s 172.17.3.2 192.168.200.209 <none>
  48. nginx-deployment-d55b94fd-gtvtp 1/1 Running 0 86s 172.17.74.2 192.168.200.210 <none>
  49. nginx-deployment-d55b94fd-gvfzg 1/1 Running 0 86s 172.17.3.3 192.168.200.209 <none>

4.3 YAML文件发布资源对象

  1. [root@Master01 demo]# vim nginx-service.yaml
  2. [root@Master01 demo]# cat nginx-service.yaml
  3. apiVersion: v1
  4. kind: Service
  5. metadata:
  6. name: nginx-service
  7. labels:
  8. app: nginx
  9. spec:
  10. type: NodePort
  11. ports:
  12. - port: 80
  13. targetPort: 80
  14. selector:
  15. app: nginx
  16. [root@Master01 demo]# kubectl create -f nginx-service.yaml
  17. service/nginx-service created
  18. [root@Master01 demo]# kubectl get pods -o wide
  19. NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
  20. nginx-deployment-d55b94fd-2hhw5 1/1 Running 1 16m 172.17.7.3 192.168.200.209 <none>
  21. nginx-deployment-d55b94fd-gtvtp 1/1 Running 1 16m 172.17.13.2 192.168.200.210 <none>
  22. nginx-deployment-d55b94fd-gvfzg 1/1 Running 1 16m 172.17.7.2 192.168.200.209 <none>
  23. [root@Master01 demo]# kubectl get svc -o wide
  24. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
  25. kubernetes ClusterIP 10.0.0.1 <none> 443/TCP 35d <none>
  26. nginx-service NodePort 10.0.0.42 <none> 80:35920/TCP 8m13s app=nginx

QQ截图20190503215201.png-25.2kB

QQ截图20190503215151.png-25.4kB

4.4 YAML字段太多,记不住?

  1. #用run命令生成
  2. [root@Master01 demo]# kubectl run nginx --image=nginx:1.15.2 --port=80 --replicas=3 -o yaml --dry-run > my-deployment
  3. kubectl run --generator=deployment/apps.v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl create instead.
  4. [root@Master01 demo]# cat my-deployment
  5. apiVersion: apps/v1beta1
  6. kind: Deployment
  7. metadata:
  8. creationTimestamp: null
  9. labels:
  10. run: nginx
  11. name: nginx
  12. spec:
  13. replicas: 3
  14. selector:
  15. matchLabels:
  16. run: nginx
  17. strategy: {}
  18. template:
  19. metadata:
  20. creationTimestamp: null
  21. labels:
  22. run: nginx
  23. spec:
  24. containers:
  25. - image: nginx:1.15.2
  26. name: nginx
  27. ports:
  28. - containerPort: 80
  29. resources: {}
  30. status: {}
  31. #用get命令导出
  32. [root@Master01 demo]# kubectl get deploy/nginx-deployment --export
  33. NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
  34. nginx-deployment 3 0 0 0 <unknown>
  35. [root@Master01 demo]# kubectl get deploy/nginx-deployment --export -o yaml > my-deploy.yaml
  36. [root@Master01 demo]# cat my-deploy.yaml
  37. apiVersion: extensions/v1beta1
  38. kind: Deployment
  39. metadata:
  40. annotations:
  41. deployment.kubernetes.io/revision: "1"
  42. creationTimestamp: null
  43. generation: 1
  44. labels:
  45. app: nginx
  46. name: nginx-deployment
  47. selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/nginx-deployment
  48. spec:
  49. progressDeadlineSeconds: 600
  50. replicas: 3
  51. revisionHistoryLimit: 10
  52. selector:
  53. matchLabels:
  54. app: nginx
  55. strategy:
  56. rollingUpdate:
  57. maxSurge: 25%
  58. maxUnavailable: 25%
  59. type: RollingUpdate
  60. template:
  61. metadata:
  62. creationTimestamp: null
  63. labels:
  64. app: nginx
  65. spec:
  66. containers:
  67. - image: nginx:1.15.4
  68. imagePullPolicy: IfNotPresent
  69. name: nginx
  70. ports:
  71. - containerPort: 80
  72. protocol: TCP
  73. resources: {}
  74. terminationMessagePath: /dev/termination-log
  75. terminationMessagePolicy: File
  76. dnsPolicy: ClusterFirst
  77. restartPolicy: Always
  78. schedulerName: default-scheduler
  79. securityContext: {}
  80. terminationGracePeriodSeconds: 30
  81. status: {}
添加新批注
在作者公开此批注前,只有你和作者可见。
回复批注