[关闭]
@zhangyy 2021-11-26T15:39:25.000000Z 字数 25195 阅读 280

kubernetes 1.20.5 安装配置部署

kubernetes系列



一: 系统环境介绍

1.1 环境准备

  1. 在开始之前,部署Kubernetes集群机器需要满足以下几个条件:
  2. 操作系统: CentOS7.9-x86_64
  3. 硬件配置:2GB或更多RAM,2核或更多CPU,硬盘30GB或更多;集群中所有机器之间网络互通
  4. 可以访问外网,需要拉取镜像,如果服务器不能上网,需要提前下载镜像并导入节点
  5. 禁止swap分区

1.2 软件环境:

  1. 操作系统: CentOS7.9x64
  2. Docker: 20-ce
  3. Kubernetes: 1.20.2

1.3 环境规划

  1. 服务器整体规划:

image_1f34fq9r4e8h182b1e2qh9i1o519.png-98kB

1.4 单Master架构图:

image_1ett5ifnu1s0r1h98btc1eh5rbg9.png-1450.4kB

1.5 单Master服务器规划:

image_1ett5lrijvta19ok1b15gus1hih9.png-329.4kB

1.6 操作系统初始化配置

  1. # 关闭防火墙
  2. systemctl stop firewalld
  3. systemctl disable firewalld
  4. # 关闭selinux
  5. sed -i 's/enforcing/disabled/' /etc/selinux/config # 永久
  6. setenforce 0 # 临时
  7. # 关闭swap
  8. swapoff -a # 临时
  9. sed -ri 's/.*swap.*/#&/' /etc/fstab # 永久
  10. # 根据规划设置主机名
  11. hostnamectl set-hostname <hostname>
  12. # 在master添加hosts
  13. cat >> /etc/hosts << EOF
  14. 192.168.100.11 node01.flyfish
  15. 192.168.100.12 node02.flyfish
  16. 192.168.100.13 node03.flyfish
  17. EOF
  18. # 将桥接的IPv4流量传递到iptables的链
  19. cat > /etc/sysctl.d/k8s.conf << EOF
  20. net.bridge.bridge-nf-call-ip6tables = 1
  21. net.bridge.bridge-nf-call-iptables = 1
  22. EOF
  23. sysctl --system # 生效
  24. # 时间同步
  25. yum install -y chrony   # 注:软件包名为 chrony,服务名才是 chronyd
  26. # 在 /etc/chrony.conf 中添加一行: server ntp1.aliyun.com iburst,然后执行 systemctl restart chronyd && systemctl enable chronyd

二:ETCD 集群部署

2.1 ETCD 集群的概念

  1. Etcd 是一个分布式键值存储系统,Kubernetes使用Etcd进行数据存储,所以先准备一个Etcd数据库,为解决Etcd单点故障,应采用集群方式部署,这里使用3台组建集群,可容忍1台机器故障,当然,你也可以使用5台组建集群,可容忍2台机器故障

2.2准备cfssl证书生成工具

  1. cfssl是一个开源的证书管理工具,使用json文件生成证书,相比openssl更方便使用。
  2. 找任意一台服务器操作,这里用Master节点。
  3. ---
  4. wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
  5. wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
  6. wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
  7. chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
  8. mv cfssl_linux-amd64 /usr/bin/cfssl
  9. mv cfssljson_linux-amd64 /usr/bin/cfssljson
  10. mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo

image_1f34g7b7q1ukh1jpthcjk1d6n8m.png-232.6kB


2.3 生成Etcd证书

  1. 1. 自签证书颁发机构(CA)
  2. 创建工作目录:
  3. mkdir -p ~/TLS/{etcd,k8s}
  4. cd ~/TLS/etcd

  1. 自签CA
  2. cat > ca-config.json << EOF
  3. {
  4. "signing": {
  5. "default": {
  6. "expiry": "87600h"
  7. },
  8. "profiles": {
  9. "www": {
  10. "expiry": "87600h",
  11. "usages": [
  12. "signing",
  13. "key encipherment",
  14. "server auth",
  15. "client auth"
  16. ]
  17. }
  18. }
  19. }
  20. }
  21. EOF
  22. cat > ca-csr.json << EOF
  23. {
  24. "CN": "etcd CA",
  25. "key": {
  26. "algo": "rsa",
  27. "size": 2048
  28. },
  29. "names": [
  30. {
  31. "C": "CN",
  32. "L": "Beijing",
  33. "ST": "Beijing"
  34. }
  35. ]
  36. }
  37. EOF
  38. 生成证书
  39. cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
  40. ls *pem
  41. ca-key.pem ca.pem

image_1ett660gr1us2ach1hrv1h3f1u4j13.png-160.1kB


  1. 2. 使用自签CA签发Etcd HTTPS证书
  2. 创建证书申请文件:
  3. cat > server-csr.json << EOF
  4. {
  5. "CN": "etcd",
  6. "hosts": [
  7. "192.168.100.11",
  8. "192.168.100.12",
  9. "192.168.100.13",
  10. "192.168.100.14",
  11. "192.168.100.15",
  12. "192.168.100.16",
  13. "192.168.100.17",
  14. "192.168.100.100"
  15. ],
  16. "key": {
  17. "algo": "rsa",
  18. "size": 2048
  19. },
  20. "names": [
  21. {
  22. "C": "CN",
  23. "L": "BeiJing",
  24. "ST": "BeiJing"
  25. }
  26. ]
  27. }
  28. EOF
  29. 生成证书:
  30. cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
  31. ls server*pem
  32. server-key.pem server.pem

image_1f34g8b901bjp14ek5s61pti11r213.png-76.1kB

image_1f34g8ng1ie810d87a111o82g1g.png-64.9kB


2.4 从Github下载二进制文件

  1. 下载地址:https://github.com/etcd-io/etcd/releases/download/v3.4.14/etcd-v3.4.14-linux-amd64.tar.gz
  2. 以下在节点1上操作,为简化操作,待会将节点1生成的所有文件拷贝到节点2和节点3.
  3. 1. 创建工作目录并解压二进制包
  4. mkdir /opt/etcd/{bin,cfg,ssl} -p
  5. tar zxvf etcd-v3.4.14-linux-amd64.tar.gz
  6. mv etcd-v3.4.14-linux-amd64/{etcd,etcdctl} /opt/etcd/bin/

image_1f34gbl0k1k8frbj1cnd530cjq20.png-351.7kB

image_1f34gc0em198p1v081rq835k1rpc2d.png-238.5kB

image_1f34gcg041dpf1h801m341jsn1vmn2q.png-133.4kB

2.5 创建etcd配置文件

  1. cat > /opt/etcd/cfg/etcd.conf << EOF
  2. #[Member]
  3. ETCD_NAME="etcd-1"
  4. ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
  5. ETCD_LISTEN_PEER_URLS="https://192.168.100.11:2380"
  6. ETCD_LISTEN_CLIENT_URLS="https://192.168.100.11:2379"
  7. #[Clustering]
  8. ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.100.11:2380"
  9. ETCD_ADVERTISE_CLIENT_URLS="https://192.168.100.11:2379"
  10. ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.100.11:2380,etcd-2=https://192.168.100.12:2380,etcd-3=https://192.168.100.13:2380"
  11. ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
  12. ETCD_INITIAL_CLUSTER_STATE="new"
  13. EOF
  14. ---
  15. ETCD_NAME:节点名称,集群中唯一
  16. ETCD_DATA_DIR:数据目录
  17. ETCD_LISTEN_PEER_URLS:集群通信监听地址
  18. ETCD_LISTEN_CLIENT_URLS:客户端访问监听地址
  19. ETCD_INITIAL_ADVERTISE_PEER_URLS:集群通告地址
  20. ETCD_ADVERTISE_CLIENT_URLS:客户端通告地址
  21. ETCD_INITIAL_CLUSTER:集群节点地址
  22. ETCD_INITIAL_CLUSTER_TOKEN:集群Token
  23. ETCD_INITIAL_CLUSTER_STATE:加入集群的当前状态,new是新集群,existing表示加入已有集群

2.6. systemd管理etcd

  1. cat > /usr/lib/systemd/system/etcd.service << EOF
  2. [Unit]
  3. Description=Etcd Server
  4. After=network.target
  5. After=network-online.target
  6. Wants=network-online.target
  7. [Service]
  8. Type=notify
  9. EnvironmentFile=/opt/etcd/cfg/etcd.conf
  10. ExecStart=/opt/etcd/bin/etcd \
  11. --cert-file=/opt/etcd/ssl/server.pem \
  12. --key-file=/opt/etcd/ssl/server-key.pem \
  13. --peer-cert-file=/opt/etcd/ssl/server.pem \
  14. --peer-key-file=/opt/etcd/ssl/server-key.pem \
  15. --trusted-ca-file=/opt/etcd/ssl/ca.pem \
  16. --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem \
  17. --logger=zap
  18. Restart=on-failure
  19. LimitNOFILE=65536
  20. [Install]
  21. WantedBy=multi-user.target
  22. EOF

  1. 4. 拷贝刚才生成的证书
  2. 把刚才生成的证书拷贝到配置文件中的路径:
  3. cp ~/TLS/etcd/ca*pem ~/TLS/etcd/server*pem /opt/etcd/ssl/

  1. 5. 同步所有主机
  2. scp -r /opt/etcd/ root@192.168.100.12:/opt/
  3. scp -r /opt/etcd/ root@192.168.100.13:/opt/
  4. scp /usr/lib/systemd/system/etcd.service root@192.168.100.12:/usr/lib/systemd/system/
  5. scp /usr/lib/systemd/system/etcd.service root@192.168.100.13:/usr/lib/systemd/system/

image_1f34gh93n1msj1sq7tan1flt2fb37.png-237.7kB


  1. 修改192.168.100.12 etcd配置文件
  2. vim /opt/etcd/cfg/etcd.conf
  3. #[Member]
  4. ETCD_NAME="etcd-2"
  5. ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
  6. ETCD_LISTEN_PEER_URLS="https://192.168.100.12:2380"
  7. ETCD_LISTEN_CLIENT_URLS="https://192.168.100.12:2379"
  8. #[Clustering]
  9. ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.100.12:2380"
  10. ETCD_ADVERTISE_CLIENT_URLS="https://192.168.100.12:2379"
  11. ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.100.11:2380,etcd-2=https://192.168.100.12:2380,etcd-3=https://192.168.100.13:2380"
  12. ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
  13. ETCD_INITIAL_CLUSTER_STATE="new"

  1. 修改192.168.100.13 etcd 配置文件
  2. vim /opt/etcd/cfg/etcd.conf
  3. #[Member]
  4. ETCD_NAME="etcd-3"
  5. ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
  6. ETCD_LISTEN_PEER_URLS="https://192.168.100.13:2380"
  7. ETCD_LISTEN_CLIENT_URLS="https://192.168.100.13:2379"
  8. #[Clustering]
  9. ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.100.13:2380"
  10. ETCD_ADVERTISE_CLIENT_URLS="https://192.168.100.13:2379"
  11. ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.100.11:2380,etcd-2=https://192.168.100.12:2380,etcd-3=https://192.168.100.13:2380"
  12. ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
  13. ETCD_INITIAL_CLUSTER_STATE="new"

  1. 启动etcd 集群并设置开机自启(etcd.service 为 systemd 原生单元,应使用 systemctl 管理;首个节点启动时会等待其它节点加入,属正常现象)
  2. systemctl daemon-reload
  3. systemctl start etcd
  4. systemctl enable etcd

image_1f34gl2h91jnh465k3vjf1cv23k.png-60.4kB


  1. ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.100.11:2379,https://192.168.100.12:2379,https://192.168.100.13:2379" endpoint health

image_1f34glr6018ca1b4e9blhdvf3041.png-72.6kB

image_1f34gmke928jmfb1pro195p1osp4e.png-88.2kB

image_1f34gmv7b42g1phceei10se1caq4r.png-105kB


  1. 如果输出上面信息,就说明集群部署成功。如果有问题,第一步先看日志:/var/log/messages 或 journalctl -u etcd

三、安装Docker

  1. 1. 下载:
  2. 下载地址:https://download.docker.com/linux/static/stable/x86_64/docker-20.10.3.tgz
  3. 以下在所有节点操作。这里采用二进制安装,用yum安装也一样。
  4. 在 node01.flyfish.cn、node02.flyfish.cn、node03.flyfish.cn 节点上面安装

  1. 2. 部署docker
  2. tar -zxvf docker-20.10.3.tgz   # 下载的安装包为 .tgz 格式
  3. mv docker/* /usr/bin

image_1f34gt2r21gg11d0u55huss1ilp5l.png-89.6kB

  1. mkdir /etc/docker
  2. ---
  3. cat > /etc/docker/daemon.json << EOF
  4. {
  5. "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
  6. }
  7. EOF
  8. ----

image_1f34gvsgk3q36m5omo1lkq1cua62.png-91.9kB


  1. 3 systemd管理docker
  2. cat > /usr/lib/systemd/system/docker.service << EOF
  3. [Unit]
  4. Description=Docker Application Container Engine
  5. Documentation=https://docs.docker.com
  6. After=network-online.target firewalld.service
  7. Wants=network-online.target
  8. [Service]
  9. Type=notify
  10. ExecStart=/usr/bin/dockerd
  11. ExecReload=/bin/kill -s HUP $MAINPID
  12. LimitNOFILE=infinity
  13. LimitNPROC=infinity
  14. LimitCORE=infinity
  15. TimeoutStartSec=0
  16. Delegate=yes
  17. KillMode=process
  18. Restart=on-failure
  19. StartLimitBurst=3
  20. StartLimitInterval=60s
  21. [Install]
  22. WantedBy=multi-user.target
  23. EOF

  1. systemctl daemon-reload
  2. systemctl start docker
  3. systemctl enable docker

image_1f34h128tl2d1if6jj1dnl1oiv6f.png-219.9kB

四、部署Master Node

4.1 生成kube-apiserver证书

  1. 生成kube-apiserver证书
  2. 1. 自签证书颁发机构(CA)
  3. cd /root/TLS/k8s/
  4. ---
  5. cat > ca-config.json << EOF
  6. {
  7. "signing": {
  8. "default": {
  9. "expiry": "87600h"
  10. },
  11. "profiles": {
  12. "kubernetes": {
  13. "expiry": "87600h",
  14. "usages": [
  15. "signing",
  16. "key encipherment",
  17. "server auth",
  18. "client auth"
  19. ]
  20. }
  21. }
  22. }
  23. }
  24. EOF
  25. cat > ca-csr.json << EOF
  26. {
  27. "CN": "kubernetes",
  28. "key": {
  29. "algo": "rsa",
  30. "size": 2048
  31. },
  32. "names": [
  33. {
  34. "C": "CN",
  35. "L": "Beijing",
  36. "ST": "Beijing",
  37. "O": "k8s",
  38. "OU": "System"
  39. }
  40. ]
  41. }
  42. EOF
  43. 生成证书:
  44. cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
  45. ls *pem
  46. ca-key.pem ca.pem

image_1ett9us2t1fkpohatt1g3r1aaf7j.png-134.7kB


  1. 使用自签CA签发kube-apiserver HTTPS证书
  2. 创建证书申请文件:
  3. cat > server-csr.json << EOF
  4. {
  5. "CN": "kubernetes",
  6. "hosts": [
  7. "10.0.0.1",
  8. "127.0.0.1",
  9. "192.168.100.11",
  10. "192.168.100.12",
  11. "192.168.100.13",
  12. "192.168.100.14",
  13. "192.168.100.15",
  14. "192.168.100.16",
  15. "192.168.100.17",
  16. "192.168.100.18",
  17. "192.168.100.100",
  18. "kubernetes",
  19. "kubernetes.default",
  20. "kubernetes.default.svc",
  21. "kubernetes.default.svc.cluster",
  22. "kubernetes.default.svc.cluster.local"
  23. ],
  24. "key": {
  25. "algo": "rsa",
  26. "size": 2048
  27. },
  28. "names": [
  29. {
  30. "C": "CN",
  31. "L": "BeiJing",
  32. "ST": "BeiJing",
  33. "O": "k8s",
  34. "OU": "System"
  35. }
  36. ]
  37. }
  38. EOF
  1. 生成证书:
  2. cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
  3. ls server*pem
  4. server-key.pem server.pem

image_1f34hdf61qk514d51k1j1lj84356s.png-139.4kB

4.2 从Github下载二进制文件

  1. 下载地址: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.20.md#v1202
  2. 注:打开链接你会发现里面有很多包,下载一个server包就够了,包含了Master和Worker Node二进制文件。

4.3 解压二进制包

  1. mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}
  2. tar zxvf kubernetes-server-linux-amd64.tar.gz
  3. cd kubernetes/server/bin
  4. cp kube-apiserver kube-scheduler kube-controller-manager /opt/kubernetes/bin
  5. cp kubectl /usr/bin/
  6. cp kubectl /usr/local/bin/

image_1f34hku0ctua1vt6195l1k48fhe79.png-191.1kB

image_1f34hlasfe511e679c4qcdeam7m.png-77.9kB


  1. 4.4 部署kube-apiserver
  2. 1. 创建配置文件
  3. cat > /opt/kubernetes/cfg/kube-apiserver.conf << EOF
  4. KUBE_APISERVER_OPTS="--logtostderr=false \\
  5. --v=2 \\
  6. --log-dir=/opt/kubernetes/logs \\
  7. --etcd-servers=https://192.168.100.11:2379,https://192.168.100.12:2379,https://192.168.100.13:2379 \\
  8. --bind-address=192.168.100.11 \\
  9. --secure-port=6443 \\
  10. --advertise-address=192.168.100.11 \\
  11. --allow-privileged=true \\
  12. --service-cluster-ip-range=10.0.0.0/24 \\
  13. --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
  14. --authorization-mode=RBAC,Node \\
  15. --enable-bootstrap-token-auth=true \\
  16. --token-auth-file=/opt/kubernetes/cfg/token.csv \\
  17. --service-node-port-range=30000-32767 \\
  18. --kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \\
  19. --kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \\
  20. --tls-cert-file=/opt/kubernetes/ssl/server.pem \\
  21. --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
  22. --client-ca-file=/opt/kubernetes/ssl/ca.pem \\
  23. --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
  24. --service-account-issuer=api \\
  25. --service-account-signing-key-file=/opt/kubernetes/ssl/server-key.pem \\
  26. --etcd-cafile=/opt/etcd/ssl/ca.pem \\
  27. --etcd-certfile=/opt/etcd/ssl/server.pem \\
  28. --etcd-keyfile=/opt/etcd/ssl/server-key.pem \\
  29. --requestheader-client-ca-file=/opt/kubernetes/ssl/ca.pem \\
  30. --proxy-client-cert-file=/opt/kubernetes/ssl/server.pem \\
  31. --proxy-client-key-file=/opt/kubernetes/ssl/server-key.pem \\
  32. --requestheader-allowed-names=kubernetes \\
  33. --requestheader-extra-headers-prefix=X-Remote-Extra- \\
  34. --requestheader-group-headers=X-Remote-Group \\
  35. --requestheader-username-headers=X-Remote-User \\
  36. --enable-aggregator-routing=true \\
  37. --audit-log-maxage=30 \\
  38. --audit-log-maxbackup=3 \\
  39. --audit-log-maxsize=100 \\
  40. --audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
  41. EOF
  42. ---
  43. 注:上面每行末尾的两个反斜杠中,第一个 \ 是转义符,第二个 \ 是换行符,使用转义符是为了在EOF方式写入文件时保留换行符。
  44. logtostderr:启用日志
  45. v:日志等级
  46. log-dir:日志目录
  47. etcd-serversetcd集群地址
  48. bind-address:监听地址
  49. secure-porthttps安全端口
  50. advertise-address:集群通告地址
  51. allow-privileged:启用授权
  52. service-cluster-ip-rangeService虚拟IP地址段
  53. enable-admission-plugins:准入控制模块
  54. authorization-mode:认证授权,启用RBAC授权和节点自管理
  55. enable-bootstrap-token-auth:启用TLS bootstrap机制
  56. token-auth-filebootstrap token文件
  57. service-node-port-rangeService nodeport类型默认分配端口范围
  58. kubelet-client-xxxapiserver访问kubelet客户端证书
  59. tls-xxx-fileapiserver https证书
  60. etcd-xxxfile:连接Etcd集群证书
  61. audit-log-xxx:审计日志
  62. 1.20版本必须加的参数:--service-account-issuer,--service-account-signing-key-file
  63. •--etcd-xxxfile:连接Etcd集群证书
  64. •--audit-log-xxx:审计日志
  65. •启动聚合层相关配置:--requestheader-client-ca-file,--proxy-client-cert-file,--proxy-client-key-file,--requestheader-allowed-names,--requestheader-extra-headers-prefix,--requestheader-group-headers,--requestheader-username-headers,--enable-aggregator-routing

image_1f34rkkfu1c3jo1o1m4r13oi18ai3h.png-268.9kB

  1. 2. 拷贝刚才生成的证书
  2. 把刚才生成的证书拷贝到配置文件中的路径:
  3. cp ~/TLS/k8s/ca*pem ~/TLS/k8s/server*pem /opt/kubernetes/ssl/

image_1f34i4a8b14421ns81akv1uh1c5u9j.png-35.4kB

  1. 3. 启用 TLS Bootstrapping 机制
  2. TLS Bootstraping:Master apiserver启用TLS认证后,Node节点的kubelet和kube-proxy要与kube-apiserver进行通信,必须使用CA签发的有效证书才可以,当Node节点很多时,这种客户端证书颁发需要大量工作,同样也会增加集群扩展复杂度。为了简化流程,Kubernetes引入了TLS bootstraping机制来自动颁发客户端证书,kubelet会以一个低权限用户自动向apiserver申请证书,kubelet的证书由apiserver动态签署。所以强烈建议在Node上使用这种方式,目前主要用于kubelet;kube-proxy还是由我们统一颁发一个证书。
  3. TLS bootstraping 工作流程:

image_1ettejkig1l2batokrn1ecn7uoah.png-301.7kB


  1. 创建上述配置文件中token文件:
  2. cat > /opt/kubernetes/cfg/token.csv << EOF
  3. c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"
  4. EOF
  5. 格式:token,用户名,UID,用户组
  6. token也可自行生成替换:
  7. head -c 16 /dev/urandom | od -An -t x | tr -d ' '

4.4 systemd管理apiserver

  1. cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
  2. [Unit]
  3. Description=Kubernetes API Server
  4. Documentation=https://github.com/kubernetes/kubernetes
  5. [Service]
  6. EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf
  7. ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
  8. Restart=on-failure
  9. [Install]
  10. WantedBy=multi-user.target
  11. EOF

  1. 启动并设置开机启动
  2. systemctl daemon-reload
  3. systemctl start kube-apiserver
  4. systemctl enable kube-apiserver

image_1f34i5n6c1172q5t6hjk5r1mcead.png-277.5kB


4.5 部署kube-controller-manager

  1. 1. 创建配置文件
  2. cat > /opt/kubernetes/cfg/kube-controller-manager.conf << EOF
  3. KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
  4. --v=2 \\
  5. --log-dir=/opt/kubernetes/logs \\
  6. --leader-elect=true \\
  7. --kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig \\
  8. --bind-address=127.0.0.1 \\
  9. --allocate-node-cidrs=true \\
  10. --cluster-cidr=10.244.0.0/16 \\
  11. --service-cluster-ip-range=10.0.0.0/24 \\
  12. --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
  13. --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
  14. --root-ca-file=/opt/kubernetes/ssl/ca.pem \\
  15. --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
  16. --cluster-signing-duration=87600h0m0s"
  17. EOF

image_1f34ih2rr2hs430j0hmefhevba.png-145.6kB

  1. •--kubeconfig:连接apiserver配置文件
  2. •--leader-elect:当该组件启动多个时,自动选举(HA)
  3. •--cluster-signing-cert-file/--cluster-signing-key-file:自动为kubelet颁发证书的CA,与apiserver保持一致

  1. 2. 生成kubeconfig文件
  2. 生成kube-controller-manager证书:
  3. # 切换工作目录
  4. cd ~/TLS/k8s
  5. # 创建证书请求文件
  6. cat > kube-controller-manager-csr.json << EOF
  7. {
  8. "CN": "system:kube-controller-manager",
  9. "hosts": [],
  10. "key": {
  11. "algo": "rsa",
  12. "size": 2048
  13. },
  14. "names": [
  15. {
  16. "C": "CN",
  17. "L": "BeiJing",
  18. "ST": "BeiJing",
  19. "O": "system:masters",
  20. "OU": "System"
  21. }
  22. ]
  23. }
  24. EOF
  25. # 生成证书
  26. cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

image_1f34ikfbf1k45d31nulsbo5cic7.png-123.5kB

image_1f34io4ta1oam1h5c3ijlhjhi9ek.png-132.9kB

  1. 生成kubeconfig文件(以下是shell命令,直接在终端执行):
  2. KUBE_CONFIG="/opt/kubernetes/cfg/kube-controller-manager.kubeconfig"
  3. KUBE_APISERVER="https://192.168.100.11:6443"
  4. kubectl config set-cluster kubernetes \
  5. --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  6. --embed-certs=true \
  7. --server=${KUBE_APISERVER} \
  8. --kubeconfig=${KUBE_CONFIG}
  9. kubectl config set-credentials kube-controller-manager \
  10. --client-certificate=./kube-controller-manager.pem \
  11. --client-key=./kube-controller-manager-key.pem \
  12. --embed-certs=true \
  13. --kubeconfig=${KUBE_CONFIG}
  14. kubectl config set-context default \
  15. --cluster=kubernetes \
  16. --user=kube-controller-manager \
  17. --kubeconfig=${KUBE_CONFIG}
  18. kubectl config use-context default --kubeconfig=${KUBE_CONFIG}

image_1f34iptgt1p8ltl61p677kk1mk8f1.png-221.6kB


  1. 3. systemd管理controller-manager
  2. cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
  3. [Unit]
  4. Description=Kubernetes Controller Manager
  5. Documentation=https://github.com/kubernetes/kubernetes
  6. [Service]
  7. EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf
  8. ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
  9. Restart=on-failure
  10. [Install]
  11. WantedBy=multi-user.target
  12. EOF

image_1f34ir8h7dl0il9180j3lk1db1fe.png-95.5kB

  1. 4. 启动并设置开机启动
  2. systemctl daemon-reload
  3. systemctl start kube-controller-manager
  4. systemctl enable kube-controller-manager

image_1f34isnc31jm5bgnk6517qj112ffr.png-301.2kB

4.6 部署kube-scheduler

  1. 1. 创建配置文件
  2. cat > /opt/kubernetes/cfg/kube-scheduler.conf << EOF
  3. KUBE_SCHEDULER_OPTS="--logtostderr=false \\
  4. --v=2 \\
  5. --log-dir=/opt/kubernetes/logs \\
  6. --leader-elect \\
  7. --kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig \\
  8. --bind-address=127.0.0.1"
  9. EOF

  1. •--kubeconfig:连接apiserver配置文件
  2. •--leader-elect:当该组件启动多个时,自动选举(HA)

image_1f34iuvp31l8e1aio1gmq1inm1tdsg8.png-63.9kB

  1. 2. 生成kubeconfig文件
  2. 生成kube-scheduler证书:
  3. # 切换工作目录
  4. cd ~/TLS/k8s
  5. # 创建证书请求文件
  6. cat > kube-scheduler-csr.json << EOF
  7. {
  8. "CN": "system:kube-scheduler",
  9. "hosts": [],
  10. "key": {
  11. "algo": "rsa",
  12. "size": 2048
  13. },
  14. "names": [
  15. {
  16. "C": "CN",
  17. "L": "BeiJing",
  18. "ST": "BeiJing",
  19. "O": "system:masters",
  20. "OU": "System"
  21. }
  22. ]
  23. }
  24. EOF
  25. # 生成证书
  26. cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler

image_1f34j0to91u3g1msh128l1obd5nqgl.png-230.7kB

  1. 生成kubeconfig文件(以下是shell命令,直接在终端执行):
  2. KUBE_CONFIG="/opt/kubernetes/cfg/kube-scheduler.kubeconfig"
  3. KUBE_APISERVER="https://192.168.100.11:6443"
  4. kubectl config set-cluster kubernetes \
  5. --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  6. --embed-certs=true \
  7. --server=${KUBE_APISERVER} \
  8. --kubeconfig=${KUBE_CONFIG}
  9. kubectl config set-credentials kube-scheduler \
  10. --client-certificate=./kube-scheduler.pem \
  11. --client-key=./kube-scheduler-key.pem \
  12. --embed-certs=true \
  13. --kubeconfig=${KUBE_CONFIG}
  14. kubectl config set-context default \
  15. --cluster=kubernetes \
  16. --user=kube-scheduler \
  17. --kubeconfig=${KUBE_CONFIG}
  18. kubectl config use-context default --kubeconfig=${KUBE_CONFIG}

image_1f34j2ife1iccqsr1lgc199flc3h2.png-213.5kB

  1. 3. systemd管理scheduler
  2. cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
  3. [Unit]
  4. Description=Kubernetes Scheduler
  5. Documentation=https://github.com/kubernetes/kubernetes
  6. [Service]
  7. EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf
  8. ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
  9. Restart=on-failure
  10. [Install]
  11. WantedBy=multi-user.target
  12. EOF
  13. 4. 启动并设置开机启动
  14. systemctl daemon-reload
  15. systemctl start kube-scheduler
  16. systemctl enable kube-scheduler

image_1f34j41gp1gp8hm0okv1re8fbvhf.png-147.2kB

image_1f34j4ld32infp75g81e6gjrfhs.png-280.5kB

  1. 5. 查看集群状态
  2. 生成kubectl连接集群的证书:
  3. cat > admin-csr.json <<EOF
  4. {
  5. "CN": "admin",
  6. "hosts": [],
  7. "key": {
  8. "algo": "rsa",
  9. "size": 2048
  10. },
  11. "names": [
  12. {
  13. "C": "CN",
  14. "L": "BeiJing",
  15. "ST": "BeiJing",
  16. "O": "system:masters",
  17. "OU": "System"
  18. }
  19. ]
  20. }
  21. EOF
  22. cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

image_1f34j6ad713jkpla1g1sp3j5lri9.png-206kB

  1. 生成kubeconfig文件:
  2. mkdir /root/.kube
  3. KUBE_CONFIG="/root/.kube/config"
  4. KUBE_APISERVER="https://192.168.100.11:6443"
  5. kubectl config set-cluster kubernetes \
  6. --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  7. --embed-certs=true \
  8. --server=${KUBE_APISERVER} \
  9. --kubeconfig=${KUBE_CONFIG}
  10. kubectl config set-credentials cluster-admin \
  11. --client-certificate=./admin.pem \
  12. --client-key=./admin-key.pem \
  13. --embed-certs=true \
  14. --kubeconfig=${KUBE_CONFIG}
  15. kubectl config set-context default \
  16. --cluster=kubernetes \
  17. --user=cluster-admin \
  18. --kubeconfig=${KUBE_CONFIG}
  19. kubectl config use-context default --kubeconfig=${KUBE_CONFIG}

image_1f34j7ke11a8j9ns79qtd51uauim.png-213kB

  1. 通过kubectl工具查看当前集群组件状态:
  2. kubectl get cs

image_1f34j9ihvra41bvflpf104pbtqj3.png-66.1kB

  1. 如上输出说明Master节点组件运行正常。
  1. 6. 授权kubelet-bootstrap用户允许请求证书
  2. kubectl create clusterrolebinding kubelet-bootstrap \
  3. --clusterrole=system:node-bootstrapper \
  4. --user=kubelet-bootstrap

image_1f34jdli2bilb5m1njulm1hkbjg.png-50.9kB

五、部署Worker Node

5.1 创建工作目录并拷贝二进制文件

  1. 还是在Master Node上操作,即同时作为Worker Node
  2. 在所有worker node创建工作目录:
  3. mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}
  4. master节点拷贝:
  5. cd kubernetes/server/bin
  6. cp kubelet kube-proxy /opt/kubernetes/bin # 本地拷贝

image_1f34jh05r14hpt72bam7os4k1jt.png-37.1kB

5.2 部署kubelet

  1. 1. 创建配置文件
  2. cat > /opt/kubernetes/cfg/kubelet.conf << EOF
  3. KUBELET_OPTS="--logtostderr=false \\
  4. --v=2 \\
  5. --log-dir=/opt/kubernetes/logs \\
  6. --hostname-override=node01.flyfish.cn \\
  7. --network-plugin=cni \\
  8. --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
  9. --bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
  10. --config=/opt/kubernetes/cfg/kubelet-config.yml \\
  11. --cert-dir=/opt/kubernetes/ssl \\
  12. --pod-infra-container-image=lizhenliang/pause-amd64:3.0"
  13. EOF

image_1f34jkmk919di1vte1p661veo1a1dkn.png-99.5kB

  1. •--hostname-override:显示名称,集群中唯一
  2. •--network-plugin:启用CNI
  3. •--kubeconfig:空路径,会自动生成,后面用于连接apiserver
  4. •--bootstrap-kubeconfig:首次启动向apiserver申请证书
  5. •--config:配置参数文件
  6. •--cert-dirkubelet证书生成目录
  7. •--pod-infra-container-image:管理Pod网络容器的镜像

  1. 2. 配置参数文件
  2. cat > /opt/kubernetes/cfg/kubelet-config.yml << EOF
  3. kind: KubeletConfiguration
  4. apiVersion: kubelet.config.k8s.io/v1beta1
  5. address: 0.0.0.0
  6. port: 10250
  7. readOnlyPort: 10255
  8. cgroupDriver: cgroupfs
  9. clusterDNS:
  10. - 10.0.0.2
  11. clusterDomain: cluster.local
  12. failSwapOn: false
  13. authentication:
  14. anonymous:
  15. enabled: false
  16. webhook:
  17. cacheTTL: 2m0s
  18. enabled: true
  19. x509:
  20. clientCAFile: /opt/kubernetes/ssl/ca.pem
  21. authorization:
  22. mode: Webhook
  23. webhook:
  24. cacheAuthorizedTTL: 5m0s
  25. cacheUnauthorizedTTL: 30s
  26. evictionHard:
  27. imagefs.available: 15%
  28. memory.available: 100Mi
  29. nodefs.available: 10%
  30. nodefs.inodesFree: 5%
  31. maxOpenFiles: 1000000
  32. maxPods: 110
  33. EOF

image_1f34jmarb19un1ri0l0q105o1dhjl4.png-160.6kB


  1. 3. 生成kubelet初次加入集群引导kubeconfig文件
  2. KUBE_CONFIG="/opt/kubernetes/cfg/bootstrap.kubeconfig"
  3. KUBE_APISERVER="https://192.168.100.11:6443" # apiserver IP:PORT
  4. TOKEN="c47ffb939f5ca36231d9e3121a252940" # 与token.csv里保持一致
  5. # 生成 kubelet bootstrap kubeconfig 配置文件
  6. kubectl config set-cluster kubernetes \
  7. --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  8. --embed-certs=true \
  9. --server=${KUBE_APISERVER} \
  10. --kubeconfig=${KUBE_CONFIG}
  11. kubectl config set-credentials "kubelet-bootstrap" \
  12. --token=${TOKEN} \
  13. --kubeconfig=${KUBE_CONFIG}
  14. kubectl config set-context default \
  15. --cluster=kubernetes \
  16. --user="kubelet-bootstrap" \
  17. --kubeconfig=${KUBE_CONFIG}
  18. kubectl config use-context default --kubeconfig=${KUBE_CONFIG}

image_1f34jp6261ol11jjipe3h4m163ilh.png-273.4kB


  1. 4. systemd管理kubelet
  2. cat > /usr/lib/systemd/system/kubelet.service << EOF
  3. [Unit]
  4. Description=Kubernetes Kubelet
  5. After=docker.service
  6. [Service]
  7. EnvironmentFile=/opt/kubernetes/cfg/kubelet.conf
  8. ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
  9. Restart=on-failure
  10. LimitNOFILE=65536
  11. [Install]
  12. WantedBy=multi-user.target
  13. EOF

  1. 5. 启动并设置开机启动
  2. systemctl daemon-reload
  3. systemctl start kubelet
  4. systemctl enable kubelet

image_1f34jqm1untd1g0711qkkncjl3lu.png-180.3kB


5.3 批准kubelet证书申请并加入集群

  1. # 查看kubelet证书请求
  2. kubectl get csr

image_1f34jt61b1pkeidm1bt11lv21ermb.png-66.5kB

  1. # 批准申请
  2. kubectl certificate approve node-csr-4nuKddTQQ9zuio_m9w33lZtp-_FxQw7XYLBZWnAT3DM
  3. # 查看节点
  4. kubectl get node
  5. 注:由于网络插件还没有部署,节点会没有准备就绪 NotReady

image_1f34ju1d61mva1umj1usn1s2g1bg4mo.png-136.8kB


5.4 部署kube-proxy

  1. 1. 创建配置文件
  2. cat > /opt/kubernetes/cfg/kube-proxy.conf << EOF
  3. KUBE_PROXY_OPTS="--logtostderr=false \\
  4. --v=2 \\
  5. --log-dir=/opt/kubernetes/logs \\
  6. --config=/opt/kubernetes/cfg/kube-proxy-config.yml"
  7. EOF

  1. 2. 配置参数文件
  2. cat > /opt/kubernetes/cfg/kube-proxy-config.yml << EOF
  3. kind: KubeProxyConfiguration
  4. apiVersion: kubeproxy.config.k8s.io/v1alpha1
  5. bindAddress: 0.0.0.0
  6. metricsBindAddress: 0.0.0.0:10249
  7. clientConnection:
  8. kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig
  9. hostnameOverride: node01.flyfish.cn
  10. clusterCIDR: 10.0.0.0/24
  11. EOF

  1. 3. 生成kube-proxy.kubeconfig文件
  2. 生成kube-proxy证书:
  3. # 切换工作目录
  4. cd ~/TLS/k8s
  5. # 创建证书请求文件
  6. cat > kube-proxy-csr.json << EOF
  7. {
  8. "CN": "system:kube-proxy",
  9. "hosts": [],
  10. "key": {
  11. "algo": "rsa",
  12. "size": 2048
  13. },
  14. "names": [
  15. {
  16. "C": "CN",
  17. "L": "BeiJing",
  18. "ST": "BeiJing",
  19. "O": "k8s",
  20. "OU": "System"
  21. }
  22. ]
  23. }
  24. EOF
  25. # 生成证书
  26. cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

image_1f34k45gduffhbd1h32g9l1e6cni.png-231kB

  1. 生成kubeconfig文件:
  2. KUBE_CONFIG="/opt/kubernetes/cfg/kube-proxy.kubeconfig"
  3. KUBE_APISERVER="https://192.168.100.11:6443"
  4. kubectl config set-cluster kubernetes \
  5. --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  6. --embed-certs=true \
  7. --server=${KUBE_APISERVER} \
  8. --kubeconfig=${KUBE_CONFIG}
  9. kubectl config set-credentials kube-proxy \
  10. --client-certificate=./kube-proxy.pem \
  11. --client-key=./kube-proxy-key.pem \
  12. --embed-certs=true \
  13. --kubeconfig=${KUBE_CONFIG}
  14. kubectl config set-context default \
  15. --cluster=kubernetes \
  16. --user=kube-proxy \
  17. --kubeconfig=${KUBE_CONFIG}
  18. kubectl config use-context default --kubeconfig=${KUBE_CONFIG}

image_1f34k4tqv1voe1p5u1ihs1kp31i8qnv.png-217.2kB


  1. 4. systemd管理kube-proxy
  2. cat > /usr/lib/systemd/system/kube-proxy.service << EOF
  3. [Unit]
  4. Description=Kubernetes Proxy
  5. After=network.target
  6. [Service]
  7. EnvironmentFile=/opt/kubernetes/cfg/kube-proxy.conf
  8. ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
  9. Restart=on-failure
  10. LimitNOFILE=65536
  11. [Install]
  12. WantedBy=multi-user.target
  13. EOF
  14. 5. 启动并设置开机启动
  15. systemctl daemon-reload
  16. systemctl start kube-proxy
  17. systemctl enable kube-proxy

image_1f34k66m1db6lianjt1nri13r7oc.png-143.7kB

5.5 部署网络组件

  1. Calico是一个纯三层的数据中心网络方案,是目前Kubernetes主流的网络方案。
  2. 部署Calico
  3. wget https://docs.projectcalico.org/v3.14/manifests/calico.yaml
  4. k8s 1.22+ 版本请使用以下地址:
  5. wget https://docs.projectcalico.org/manifests/calico.yaml
  6. kubectl apply -f calico.yaml
  7. kubectl get pods -n kube-system
  8. 等Calico Pod都处于Running状态后,节点也会准备就绪:
  9. kubectl get node
  10. kubectl get pod -n kube-system

image_1f34knmv71d5kndpkt216ui1fdbp6.png-126.9kB

5.6 授权apiserver访问kubelet

  1. 应用场景:例如kubectl logs
  2. cat > apiserver-to-kubelet-rbac.yaml << EOF
  3. apiVersion: rbac.authorization.k8s.io/v1
  4. kind: ClusterRole
  5. metadata:
  6. annotations:
  7. rbac.authorization.kubernetes.io/autoupdate: "true"
  8. labels:
  9. kubernetes.io/bootstrapping: rbac-defaults
  10. name: system:kube-apiserver-to-kubelet
  11. rules:
  12. - apiGroups:
  13. - ""
  14. resources:
  15. - nodes/proxy
  16. - nodes/stats
  17. - nodes/log
  18. - nodes/spec
  19. - nodes/metrics
  20. - pods/log
  21. verbs:
  22. - "*"
  23. ---
  24. apiVersion: rbac.authorization.k8s.io/v1
  25. kind: ClusterRoleBinding
  26. metadata:
  27. name: system:kube-apiserver
  28. namespace: ""
  29. roleRef:
  30. apiGroup: rbac.authorization.k8s.io
  31. kind: ClusterRole
  32. name: system:kube-apiserver-to-kubelet
  33. subjects:
  34. - apiGroup: rbac.authorization.k8s.io
  35. kind: User
  36. name: kubernetes
  37. EOF

  1. kubectl apply -f apiserver-to-kubelet-rbac.yaml

image_1f34krmfg15km2e2pm9qbr491pj.png-102.1kB

5.7 新增加Worker Node

  1. 1. 拷贝已部署好的Node相关文件到新节点
  2. Master节点将Worker Node涉及文件拷贝到新节点192.168.100.12/13
  3. scp -r /opt/kubernetes root@192.168.100.12:/opt/
  4. scp -r /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@192.168.100.12:/usr/lib/systemd/system
  5. scp /opt/kubernetes/ssl/ca.pem root@192.168.100.12:/opt/kubernetes/ssl/

image_1f34l1o0k2uq1lc214sc1q741m7sqg.png-160.6kB

  1. 2. 删除kubelet证书和kubeconfig文件
  2. rm -f /opt/kubernetes/cfg/kubelet.kubeconfig
  3. rm -f /opt/kubernetes/ssl/kubelet*
  4. 注:这几个文件是证书申请审批后自动生成的,每个Node不同,必须删除

image_1f34l2jrh21l6mh1bggci0hr3qt.png-37.8kB

  1. 3. 修改主机名
  2. vi /opt/kubernetes/cfg/kubelet.conf
  3. --hostname-override=node02.flyfish.cn
  4. vi /opt/kubernetes/cfg/kube-proxy-config.yml
  5. hostnameOverride: node02.flyfish.cn

image_1f34l5puj1ndl1p9c12k4p5m8mrn.png-100.1kB

image_1f34l6gl12vphq916l81fhp1grns4.png-83.2kB

  1. 4. 启动并设置开机启动
  2. systemctl daemon-reload
  3. systemctl start kubelet kube-proxy
  4. systemctl enable kubelet kube-proxy

image_1f34l583l1f2c1rm7vds1tn6oupra.png-141kB

  1. 5. Master上批准新Node kubelet证书申请
  2. # 查看证书请求
  3. kubectl get csr

image_1f34l7n9g12koukf1qcj1c711hnfsh.png-83.5kB

  1. # 授权请求
  2. kubectl certificate approve node-csr-t0mDd0h5y6T_L0F4vV6UHemyc95NoDhzbXhdGAtjlwg

image_1f34l8lti1kso15oh1ic41m2613vhsu.png-132.7kB

  1. kubectl get node
  2. kubectl get pod -n kube-system

image_1f34lfcb07eh1mj1a6tha412dctr.png-127kB


  1. 同上配置node03.flyfish.cn 节点
  2. kubectl get node

image_1f34q06qubn1bdf120n5n1100vu8.png-75.1kB

六、部署Dashboard和CoreDNS

  1. 参考官网:
  2. https://github.com/kubernetes/dashboard/releases/tag/v2.3.1
  3. 6.1 部署Dashboard
  4. $ wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta8/aio/deploy/recommended.yaml
  5. 默认Dashboard只能集群内部访问,修改ServiceNodePort类型,暴露到外部:
  6. vim recommended.yaml
  7. ----
  8. kind: Service
  9. apiVersion: v1
  10. metadata:
  11. labels:
  12. k8s-app: kubernetes-dashboard
  13. name: kubernetes-dashboard
  14. namespace: kubernetes-dashboard
  15. spec:
  16. ports:
  17. - port: 443
  18. targetPort: 8443
  19. nodePort: 30001
  20. type: NodePort
  21. selector:
  22. k8s-app: kubernetes-dashboard
  23. ----

  1. kubectl apply -f recommended.yaml
  2. kubectl get pods,svc -n kubernetes-dashboard

image_1f34qg0oj199g11s7krj1e911a939.png-123.3kB

image_1f34qgh09149j16j57ao1lialutm.png-174.2kB


  1. 创建service account并绑定默认cluster-admin管理员集群角色:
  2. kubectl create serviceaccount dashboard-admin -n kube-system
  3. kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
  4. kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')

image_1f34qhk55e0eab31qfl1lol1bug13.png-257.2kB

image_1f34qlan19p01i6fd13c281gtl1g.png-288.4kB

image_1f34qmfqp19u04quunp1sih1b8g1t.png-396.4kB

  1. kubectl apply -f coredns.yaml
  2. kubectl run -it --rm dns-test --image=busybox:1.28.4 sh

image_1f34qpnn0rmp3786he7du2a62n.png-160.7kB

image_1f34qr9kj1lia18khdg1iqlp334.png-73.5kB

添加新批注
在作者公开此批注前,只有你和作者可见。
回复批注