@zhangyy 2020-12-21

KubeSphere 3.0 installation and deployment: pitfalls and fixes

Kubernetes series



1: System environment

1.1 System environment initialization

OS: CentOS 7.8 x64
cat /etc/hosts
-----
192.168.100.11 node01.flyfish.cn
192.168.100.12 node02.flyfish.cn
192.168.100.13 node03.flyfish.cn
192.168.100.14 node04.flyfish.cn
192.168.100.15 node05.flyfish.cn
192.168.100.16 node06.flyfish.cn
192.168.100.17 node07.flyfish.cn
192.168.100.18 node08.flyfish.cn
-----
Only the first three nodes are used for this installation.
Kubernetes deployment plan (see the figure below; a hostname setup sketch follows it):

image_1eq1hbnj55tcn2nvf51juculo9.png-39.8kB
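The hosts file above assumes every machine already carries its node name. A minimal sketch for setting that up (not part of the original notes; the node names and passwordless root SSH between nodes are assumptions):

hostnamectl set-hostname node01.flyfish.cn                                      # repeat on each node with its own name
for n in node02 node03; do scp /etc/hosts root@${n}.flyfish.cn:/etc/hosts; done # push the same hosts file to the other nodes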


1.2 System configuration initialization

Install the basic tools:
yum install -y wget vim lsof net-tools

image_1eq1hfpkp1a1bmpp1tm2b3k5drm.png-258.4kB

Disable the firewall (or, on Alibaba Cloud, open the required ports in the security group):
systemctl stop firewalld.service
systemctl disable firewalld.service
systemctl status firewalld.service   # confirm it is stopped and disabled

Disable SELinux:
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
cat /etc/selinux/config

image_1eq1hk3a91ejq1oj6np093njqe13.png-86.5kB


Disable swap:
swapoff -a                            # temporary
sed -ri 's/.*swap.*/#&/' /etc/fstab   # permanent
free -l -h

image_1eq1hml7heuu193b1u3h6hn14fr1g.png-82.6kB


Pass bridged IPv4 traffic to the iptables chains. If /etc/sysctl.conf does not exist yet, just run:
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.default.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.lo.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.forwarding = 1" >> /etc/sysctl.conf

image_1eq1hr62sbmrg2918i21lgb9qt1t.png-174.3kB
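The echo lines only write the keys into /etc/sysctl.conf; loading them can be done as below (a sketch, assuming the br_netfilter module is available, which is the usual case on CentOS 7):

modprobe br_netfilter          # the bridge-nf-call-* keys require this kernel module
sysctl -p /etc/sysctl.conf     # apply the settings without a reboot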

1.3 Deploy Docker

Download: https://download.docker.com/linux/static/stable/x86_64/docker-19.03.9.tgz
Do the following on all nodes. A binary install is used here; installing with yum works just as well.
Install on node01.flyfish.cn, node02.flyfish.cn and node03.flyfish.cn.

3.1 Unpack the binary package
tar zxvf docker-19.03.9.tgz
mv docker/* /usr/bin

image_1eq1i8mmbr901l11v9j1haptl9.png-65.9kB

3.2 Manage Docker with systemd
cat > /usr/lib/systemd/system/docker.service << EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP \$MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target
EOF

image_1eq1iog7f1o5a144hi4blc01k7lm.png-135.3kB

3.3 Create the configuration file
mkdir /etc/docker
cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
EOF
# registry-mirrors: Alibaba Cloud registry mirror (image pull accelerator)

3.4 Start Docker and enable it at boot
systemctl daemon-reload
systemctl start docker
systemctl enable docker

image_1eq1iovdi1i2rkmp9it618rs13.png-162kB
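A quick sanity check after enabling Docker (a hedged sketch, not in the original notes):

systemctl status docker --no-pager          # should be active (running)
docker info | grep -A1 "Registry Mirrors"   # confirms the Aliyun mirror from daemon.json was picked up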

2: Install the Kubernetes cluster

Install kubelet, kubeadm and kubectl (all nodes).
Configure the Kubernetes yum repository:
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Install kubelet, kubeadm and kubectl:
yum install -y kubelet-1.17.3 kubeadm-1.17.3 kubectl-1.17.3

image_1eq1k04tlkfb1qfo1dc6aiv19pg1g.png-186.8kB

systemctl enable kubelet && systemctl start kubelet

image_1eq1k19611h64tb61kj31nq31g8o1t.png-104.7kB

Pre-pull the images on all nodes. Image download script (a usage sketch follows the figure below):
vim image.sh
----
#!/bin/bash
images=(
  kube-apiserver:v1.17.3
  kube-proxy:v1.17.3
  kube-controller-manager:v1.17.3
  kube-scheduler:v1.17.3
  coredns:1.6.5
  etcd:3.4.3-0
  pause:3.1
)
for imageName in ${images[@]} ; do
  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
done
----

image_1eq1kaan6hccof3d5c1h6m9jh2a.png-167.5kB
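Usage sketch for the script above (an assumption on my part; the original only shows the script body). Run it on node01, node02 and node03 so kubeadm init/join do not have to pull images during bootstrap:

chmod +x image.sh
./image.sh
docker images | grep google_containers   # all seven images should be listed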


Initialize the master node.
Note: run this step on the master node only.
kubeadm init \
  --apiserver-advertise-address=192.168.100.11 \
  --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \
  --kubernetes-version v1.17.3 \
  --service-cidr=10.96.0.0/16 \
  --pod-network-cidr=10.244.0.0/16

image_1eq1kg65g1o879g82u710vp1jhr2n.png-223.4kB


mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

image_1eq1khr70qbsfcep0910mcncr34.png-64.2kB


Deploy the network plug-in (Calico):
kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml

image_1eq1kn6aq1mt3ttkahjd6p5mp3h.png-299.8kB

image_1eq1kptkk19tg15uu8s64ar19qd3u.png-51.4kB


image_1eq1ktf6k6cd1clhus25bj1ir44b.png-209.2kB
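Before joining the workers it is worth waiting for the CNI to settle; a minimal check (an assumption, not shown in the original):

kubectl get pods -n kube-system -w   # wait until the calico-* and coredns pods are Running
kubectl get nodes                    # the master should now report Ready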

Join the other nodes, using the command printed by kubeadm init (a token-refresh sketch follows the figures below):
kubeadm join 192.168.100.11:6443 --token y28jw9.gxstbcar3m4n5p1a \
    --discovery-token-ca-cert-hash sha256:769528577607a4024ead671ae01b694744dba16e0806e57ed1b099eb6c6c9350

image_1eq1kvda81844o7g1edm1sac1s1c4o.png-204.2kB

image_1eq1kvp7g1ds21sflrhf1eph1sqt55.png-215.5kB

image_1eq1l0j2h131pavp8chp6k16m15i.png-67.3kB
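The token printed by kubeadm init expires after roughly 24 hours. If the join command above is rejected later, a fresh one can be generated on the master (standard kubeadm behaviour, not shown in the original notes):

kubeadm token create --print-join-command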


3: Deploy the NFS server

yum install -y nfs-utils
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports

image_1eq1l6vo2135v1q0o1drlm4d4rt5v.png-115.9kB

mkdir -p /nfs/data
systemctl enable rpcbind
systemctl enable nfs-server
systemctl start rpcbind
systemctl start nfs-server
exportfs -r

image_1eq1la8jqiev1hkg350or0f966p.png-156.2kB

image_1eq1l8d8h8m97b2tddb6o1esu6c.png-47.4kB
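A quick way to confirm the export is live before wiring it into Kubernetes (hedged sketch, not from the original notes):

exportfs -v                   # /nfs/data should appear with rw,sync,no_root_squash
showmount -e 192.168.100.11   # what NFS clients will see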


Test a Pod mounting NFS directly (run on the master node).
Create a file named nginx.yaml under /opt:
vim nginx.yaml
----
apiVersion: v1
kind: Pod
metadata:
  name: vol-nfs
  namespace: default
spec:
  volumes:
  - name: html
    nfs:
      path: /nfs/data          # 1000G
      server: 192.168.100.11   # your own NFS server address
  containers:
  - name: myapp
    image: nginx
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html/
----
kubectl apply -f nginx.yaml
cd /nfs/data/
echo "11111" >> index.html

image_1eq1lv1q7np81rin14vs1i812t776.png-53.5kB
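To confirm the Pod really serves the file written into /nfs/data, something like the following works (a sketch; the Pod IP placeholder is whatever kubectl reports):

kubectl get pod vol-nfs -o wide   # note the Pod IP
curl http://<pod-ip>/             # should return the 11111 line from index.html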


Install the client tools (on the worker nodes), e.g. on node02.flyfish.cn:
showmount -e 192.168.100.11

image_1eq1mc39bf7sdsvs8pvgaien7j.png-39.5kB


Create the mount directory:
mkdir /root/nfsmount

Mount the server's /nfs/data/ onto the client's /root/nfsmount (on the worker node; a persistence sketch follows the figures below):
mount -t nfs 192.168.100.11:/nfs/data/ /root/nfsmount

image_1eq1popta5o1ucj1evbq6p1nav8d.png-296.2kB

image_1eq1ps3341s041a3ba3gbjmhej8q.png-51.5kB

image_1eq1pskkn1f7c2g81cr95ie6n097.png-36kB
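If the client mount should survive reboots, an /etc/fstab entry is the usual approach (an assumption; the original only mounts it manually):

echo "192.168.100.11:/nfs/data /root/nfsmount nfs defaults,_netdev 0 0" >> /etc/fstab
mount -a && df -h /root/nfsmount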


4: Set up a StorageClass for dynamic provisioning

image_1eq1q1tutp21monrlt1hf99839k.png-587.9kB

vim nfs-rbac.yaml
----
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get", "create", "list", "watch", "update"]
  - apiGroups: ["extensions"]
    resources: ["podsecuritypolicies"]
    resourceNames: ["nfs-provisioner"]
    verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccount: nfs-provisioner
      containers:
        - name: nfs-client-provisioner
          image: lizhenliang/nfs-client-provisioner
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: storage.pri/nfs
            - name: NFS_SERVER
              value: 192.168.100.11
            - name: NFS_PATH
              value: /nfs/data
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.100.11
            path: /nfs/data
----
kubectl apply -f nfs-rbac.yaml
kubectl get pod

image_1eq1qekdf190m16581uu1oqr1m2sa1.png-65kB

image_1eq1qghjjqpl1oq6c001nk7159mae.png-72kB

Create the StorageClass:
vi storageclass-nfs.yaml
----
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: storage-nfs
provisioner: storage.pri/nfs
reclaimPolicy: Delete
----
kubectl apply -f storageclass-nfs.yaml

image_1eq1qm3001a9ne6c195319go6srar.png-47kB

# Background: a PV's reclaim policy can be Retain, Recycle or Delete.
Retain
  The released PV and its data are kept; the PV status becomes "Released" and it will not be bound by another PVC.
  The cluster administrator reclaims the storage manually:
  - delete the PV (the backend volume, e.g. AWS EBS, GCE PD, Azure Disk or Cinder, still exists);
  - wipe the data on the backend volume;
  - delete the backend volume, or reuse it by creating a new PV for it.
Delete
  The released PV and its backend volume are both deleted. For dynamically provisioned PVs the reclaim policy is inherited from the
  StorageClass and defaults to Delete. The administrator should set the StorageClass reclaim policy as desired; otherwise users have
  to edit the reclaim policy of each dynamically created PV by hand.
Recycle
  The PV is kept but its data is wiped. This policy is deprecated.

kubectl get storageclass

image_1eq1qttcv17i3vaf17br1de02i1b8.png-56.2kB


Change the default StorageClass (a quick verification follows):
https://kubernetes.io/zh/docs/tasks/administer-cluster/change-default-storage-class/#%e4%b8%ba%e4%bb%80%e4%b9%88%e8%a6%81%e6%94%b9%e5%8f%98%e9%bb%98%e8%ae%a4-storage-class
kubectl patch storageclass storage-nfs -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
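Verification sketch for the patch above:

kubectl get storageclass   # storage-nfs should now be printed as "storage-nfs (default)"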

Verify NFS dynamic provisioning.
Create a PVC:
vim pvc.yaml
-----
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-claim-01
  #annotations:
  #  volume.beta.kubernetes.io/storage-class: "storage-nfs"
spec:
  storageClassName: storage-nfs   # must exactly match the StorageClass name
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
-----
kubectl apply -f pvc.yaml

image_1eq1rcgqqqgp1mie11921f5a1u0pc2.png-41.3kB

Use the PVC:
vi testpod.yaml
----
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
  - name: test-pod
    image: busybox
    command:
      - "/bin/sh"
    args:
      - "-c"
      - "touch /mnt/SUCCESS && exit 0 || exit 1"
    volumeMounts:
      - name: nfs-pvc
        mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: pvc-claim-01
-----
kubectl apply -f testpod.yaml

image_1eq1rg5601t481hk9jki1paq115mcf.png-39.6kB
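A sketch for confirming dynamic provisioning end to end (the exact directory name is generated by the provisioner, so treat it as an assumption):

kubectl get pvc pvc-claim-01   # STATUS should be Bound
kubectl get pv                 # a PV provisioned by storage.pri/nfs backs the claim
ls /nfs/data/                  # a default-pvc-claim-01-pvc-... directory should contain the SUCCESS file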

5: Install metrics-server

1. First install metrics-server (the YAML below already has the image and configuration adjusted and can be used directly), so the resource usage of Pods and Nodes can be monitored (by default only CPU and memory metrics are collected; for anything more advanced we will hook up Prometheus later).
vim 2222.yaml
----
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:aggregated-metrics-reader
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
  resources: ["pods", "nodes"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      containers:
      - name: metrics-server
        image: mirrorgooglecontainers/metrics-server-amd64:v0.3.6
        imagePullPolicy: IfNotPresent
        args:
          - --cert-dir=/tmp
          - --secure-port=4443
          - --kubelet-insecure-tls
          - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        ports:
        - name: main-port
          containerPort: 4443
          protocol: TCP
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
      nodeSelector:
        kubernetes.io/os: linux
        kubernetes.io/arch: "amd64"
---
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: main-port
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
----
kubectl apply -f 2222.yaml

image_1eq1s4c0fvq01bvg18kgqi6g9rcs.png-118.3kB
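kubectl top only works once the aggregated metrics API is available; a minimal check (not in the original notes):

kubectl get apiservice v1beta1.metrics.k8s.io         # AVAILABLE should turn True
kubectl get pod -n kube-system | grep metrics-server  # the Deployment pod should be Running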


kubectl top nodes

image_1eq1s9mrl1i7e1lj2puh1v681c7ddm.png-156.9kB

image_1eq1sai2015malp21pgn11i718hse3.png-94.5kB


6: Install KubeSphere

https://kubesphere.com.cn/docs/quick-start/minimal-kubesphere-on-k8s/
wget https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
wget https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml

image_1eq1sh9uictr14gdneh17fr1p0meg.png-472.3kB


vim cluster-configuration.yaml
----
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.0.0
spec:
  persistence:
    storageClass: ""        # If there is not a default StorageClass in your cluster, you need to specify an existing StorageClass here.
  authentication:
    jwtSecret: ""           # Keep the jwtSecret consistent with the host cluster. Retrieve the jwtSecret by executing "kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret" on the host cluster.
  etcd:
    monitoring: true        # Whether to enable etcd monitoring dashboard installation. You have to create a secret for etcd before you enable it.
    endpointIps: 192.168.100.11  # etcd cluster EndpointIps, it can be a bunch of IPs here.
    port: 2379              # etcd port
    tlsEnable: true
  common:
    mysqlVolumeSize: 20Gi   # MySQL PVC size.
    minioVolumeSize: 20Gi   # Minio PVC size.
    etcdVolumeSize: 20Gi    # etcd PVC size.
    openldapVolumeSize: 2Gi # openldap PVC size.
    redisVolumSize: 2Gi     # Redis PVC size.
    es:                     # Storage backend for logging, events and auditing.
      # elasticsearchMasterReplicas: 1   # total number of master nodes, it's not allowed to use even number
      # elasticsearchDataReplicas: 1     # total number of data nodes.
      elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes.
      elasticsearchDataVolumeSize: 20Gi  # Volume size of Elasticsearch data nodes.
      logMaxAge: 7          # Log retention time in built-in Elasticsearch, it is 7 days by default.
      elkPrefix: logstash   # The string making up index names. The index name will be formatted as ks-<elk_prefix>-log.
  console:
    enableMultiLogin: true  # enable/disable multiple sign-on, it allows an account to be used by different users at the same time.
    port: 30880
  alerting:                 # (CPU: 0.3 Core, Memory: 300 MiB) Whether to install KubeSphere alerting system. It enables users to customize alerting policies to send messages to receivers in time with different time intervals and alerting levels to choose from.
    enabled: true
  auditing:                 # Whether to install KubeSphere audit log system. It provides a security-relevant chronological set of records, recording the sequence of activities happening on the platform, initiated by different tenants.
    enabled: true
  devops:                   # (CPU: 0.47 Core, Memory: 8.6 G) Whether to install KubeSphere DevOps System. It provides an out-of-box CI/CD system based on Jenkins, and automated workflow tools including Source-to-Image & Binary-to-Image.
    enabled: true
    jenkinsMemoryLim: 2Gi      # Jenkins memory limit.
    jenkinsMemoryReq: 1500Mi   # Jenkins memory request.
    jenkinsVolumeSize: 8Gi     # Jenkins volume size.
    jenkinsJavaOpts_Xms: 512m  # The following three fields are JVM parameters.
    jenkinsJavaOpts_Xmx: 512m
    jenkinsJavaOpts_MaxRAM: 2g
  events:                   # Whether to install KubeSphere events system. It provides a graphical web console for Kubernetes Events exporting, filtering and alerting in multi-tenant Kubernetes clusters.
    enabled: true
    ruler:
      enabled: true
      replicas: 2
  logging:                  # (CPU: 57 m, Memory: 2.76 G) Whether to install KubeSphere logging system. Flexible logging functions are provided for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd.
    enabled: true
    logsidecarReplicas: 2
  metrics_server:           # (CPU: 56 m, Memory: 44.35 MiB) Whether to install metrics-server. It enables HPA (Horizontal Pod Autoscaler).
    enabled: false
  monitoring:
    # prometheusReplicas: 1 # Prometheus replicas are responsible for monitoring different segments of data source and provide high availability as well.
    prometheusMemoryRequest: 400Mi   # Prometheus request memory.
    prometheusVolumeSize: 20Gi       # Prometheus PVC size.
    # alertmanagerReplicas: 1        # AlertManager Replicas.
  multicluster:
    clusterRole: none       # host | member | none  # You can install a solo cluster, or specify it as the role of a host or member cluster.
  networkpolicy:            # Network policies allow network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods).
    # Make sure that the CNI network plugin used by the cluster supports NetworkPolicy. There are a number of CNI network plugins that support NetworkPolicy, including Calico, Cilium, Kube-router, Romana and Weave Net.
    enabled: true
  notification:             # Email Notification support for the legacy alerting system, should be enabled/disabled together with the above alerting option.
    enabled: true
  openpitrix:               # (2 Core, 3.6 G) Whether to install KubeSphere Application Store. It provides an application store for Helm-based applications, and offers application lifecycle management.
    enabled: true
  servicemesh:              # (0.3 Core, 300 MiB) Whether to install KubeSphere Service Mesh (Istio-based). It provides fine-grained traffic management, observability and tracing, and offers visualization for traffic topology.
    enabled: true
----
kubectl apply -f kubesphere-installer.yaml
kubectl apply -f cluster-configuration.yaml

image_1eq1t3e60udqdui1k16agtftqet.png-125.2kB


Check the installation progress:
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f

image_1eq1vjdj2b5jam1c9q3lfdltlt.png-355.6kB

kubectl get pod -A

image_1eq1u209o1njh1v1v85i1vf11r0pg7.png-381.3kB

image_1eq1u2tel7g916pf1ln214hqcjogk.png-413.5kB

kubesphere-monitoring-system   prometheus-k8s-0   0/3   ContainerCreating   0   7m20s
kubesphere-monitoring-system   prometheus-k8s-1   0/3   ContainerCreating   0   7m20s
The prometheus-k8s pods stay stuck in the ContainerCreating state.

image_1eq1u9dlvbfjsg8f0kpvnhl9he.png-180.6kB


kubectl describe pod prometheus-k8s-0 -n kubesphere-monitoring-system
The events show that the kube-etcd-client-certs secret cannot be found:

image_1eq1uc2rl8v81p22s598uo15eehr.png-299.3kB

kubectl -n kubesphere-monitoring-system create secret generic kube-etcd-client-certs --from-file=etcd-client-ca.crt=/etc/kubernetes/pki/etcd/ca.crt --from-file=etcd-client.crt=/etc/kubernetes/pki/apiserver-etcd-client.crt --from-file=etcd-client.key=/etc/kubernetes/pki/apiserver-etcd-client.key
kubectl get secret -A | grep etcd

image_1eq1uj35o18qkrmd1lo41o501hiki8.png-53.1kB
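If the prometheus-k8s pods are still stuck after the secret exists, deleting them so the StatefulSet recreates them usually clears the stale mount error (a judgment call on my part, not from the original notes):

kubectl delete pod -n kubesphere-monitoring-system prometheus-k8s-0 prometheus-k8s-1
kubectl get pod -n kubesphere-monitoring-system -w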

kubectl get pod -n kubesphere-monitoring-system
The prometheus-k8s pods now switch to the Running state.

image_1eq1um7aj198vskr1jg714481icbil.png-167.8kB


Now open the KubeSphere web console as indicated at the end of the installer log:
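Per the KubeSphere 3.0 quick-start documentation, the console is exposed as a NodePort and the installer log ends with the address and default account (change the password on first login):

# http://192.168.100.11:30880      (any node IP works)
# account: admin    password: P@88w0rd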

image_1eq1uq2jvasb12pg1pbtg1bqbj2.png-119.5kB

image_1eq1uselj1mtln5p1afg1fl1p2rjf.png-37.6kB

image_1eq1ut25r15mi1guoo2d1qr41nf3js.png-55.9kB

image_1eq1v2ma51fq51021n7q1e92e4uk9.png-161.4kB

image_1eq1v590r3bk151c1v88m51o6akm.png-134.3kB

image_1eq1v69tn1ukh1di91l5q1f3vec6l3.png-190.9kB
