@zhangyy
2020-11-30T18:11:47.000000Z
字数 7839
阅读 375
kubernetes系列
一:系统初始化
二:kubernetes 安装
192.168.100.11 node01.flyfish
192.168.100.12 node02.flyfish
192.168.100.13 node03.flyfish
192.168.100.14 node04.flyfish
192.168.100.15 node05.flyfish
192.168.100.16 node06.flyfish
192.168.100.17 node07.flyfish
系统节点全部执行:
systemctl stop firewalld && systemctl disable firewalld && yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save
关闭 SELINUX与swap 内存
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
全部节点安装
yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git
所有节点都执行
# Write the kernel parameters Kubernetes needs (bridge traffic visible to
# iptables, IP forwarding, conservative swap/OOM behaviour, raised fd/inotify
# and conntrack limits), install them under /etc/sysctl.d and apply them.
#
# Fix: the original fused "vm.swappiness=0" and "vm.overcommit_memory=1" onto
# one line, leaving the overcommit setting inside a comment so it was never
# applied. Each key now sits on its own line; comments are on their own lines,
# since sysctl does not reliably parse trailing inline comments.
cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# Never swap; the kubelet refuses to run with swap enabled.
vm.swappiness=0
# Do not check available physical memory before allowing overcommit.
vm.overcommit_memory=1
# On OOM invoke the OOM killer instead of panicking the kernel.
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf
# 设置系统时区为 中国/上海
timedatectl set-timezone Asia/Shanghai
# 将当前的 UTC 时间写入硬件时钟
timedatectl set-local-rtc 0
# 重启依赖于系统时间的服务
systemctl restart rsyslog && systemctl restart crond
systemctl stop postfix && systemctl disable postfix
# Persist systemd journal logs to disk with size/retention caps so node logs
# survive reboots without exhausting disk space, then restart journald to
# pick up the new configuration.
# Fix: use `mkdir -p` so re-running this setup does not abort when the
# directories already exist.
mkdir -p /var/log/journal # journald switches to persistent storage when this directory exists
mkdir -p /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# 持久化保存到磁盘
Storage=persistent
# 压缩历史日志
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# 最大占用空间 10G
SystemMaxUse=10G
# 单日志文件最大 200M
SystemMaxFileSize=200M
# 日志保存时间 2 周
MaxRetentionSec=2week
# 不将日志转发到 syslog
ForwardToSyslog=no
EOF
systemctl restart systemd-journald
CentOS 7.x 系统自带的 3.10.x 内核存在一些 Bugs,导致运行的 Docker、Kubernetes 不稳定。先安装 ELRepo 仓库:
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# 安装完成后检查 /boot/grub2/grub.cfg 中对应内核 menuentry 中是否包含 initrd16 配置,如果没有,再安装 一次!
yum --enablerepo=elrepo-kernel install -y kernel-lt
# 设置开机从新内核启动
grub2-set-default "CentOS Linux (4.4.182-1.el7.elrepo.x86_64) 7 (Core)"
reboot
# 重启后安装内核源文件
yum --enablerepo=elrepo-kernel install kernel-lt-devel-$(uname -r) kernel-lt-headers-$(uname -r)
-----------
或者:
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org && \
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm && \
yum --disablerepo=\* --enablerepo=elrepo-kernel repolist && \
yum --disablerepo=\* --enablerepo=elrepo-kernel install -y kernel-ml.x86_64 && \
yum remove -y kernel-tools-libs.x86_64 kernel-tools.x86_64 && \
yum --disablerepo=\* --enablerepo=elrepo-kernel install -y kernel-ml-tools.x86_64 && \
grub2-set-default 0
# Load br_netfilter so the net.bridge.bridge-nf-call-* sysctls set earlier
# take effect, then load the kernel modules kube-proxy needs for IPVS mode.
modprobe br_netfilter
# Drop the module list under /etc/sysconfig/modules/ so it is re-loaded on
# every boot (the systemd-modules-load compatibility path on CentOS 7).
# NOTE(review): nf_conntrack_ipv4 was renamed to nf_conntrack in kernel
# 4.19+ — confirm against the kernel version installed above (kernel-lt 4.4
# is fine; kernel-ml may be newer).
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# Make the boot-time script executable and run it once now.
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
# Verify the modules are actually loaded.
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
机器节点都执行:
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum update -y && yum install docker-ce-18.09.9 docker-ce-cli-18.09.9 containerd.io -y
重启机器: reboot
查看内核版本: uname -r
在加载: grub2-set-default "CentOS Linux (4.4.182-1.el7.elrepo.x86_64) 7 (Core)" && reboot
如果还不行
就改 文件 : vim /etc/grub2.cfg 注释掉 3.10 的 内核
保证 内核的版本 为 4.4
service docker start
chkconfig docker on
# Create /etc/docker and configure the Docker daemon: use the systemd cgroup
# driver (required so kubelet and Docker agree on cgroup management) and cap
# json-file logs at 100m per container.
# Fix: `mkdir -p` so a re-run does not abort when /etc/docker already exists.
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{ "exec-opts": ["native.cgroupdriver=systemd"], "log-driver": "json-file", "log-opts": { "max-size": "100m" } }
EOF
# Drop-in directory for docker.service unit overrides.
mkdir -p /etc/systemd/system/docker.service.d
# Reload unit files, restart Docker with the new daemon.json, enable on boot.
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
# Configure the Aliyun mirror of the Kubernetes yum repository and install
# the v1.15.1 toolchain (kubeadm/kubectl/kubelet), enabling kubelet on boot.
# Fixes: truncate with '>' instead of '>>' so re-running does not append a
# duplicate [kubernetes] section; put both gpgkey URLs space-separated on one
# line, because the original unindented continuation line is not attached to
# the gpgkey value by yum's INI parser.
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
# Enable now; kubelet will crash-loop until `kubeadm init/join` runs — expected.
systemctl enable kubelet.service
节点全部配置:
上传导入文件
kubeadm-basic.images.tar.gz
tar -zxvf kubeadm-basic.images.tar.gz
### 写一个导入脚本
vim load-image.sh
---
#!/bin/bash
# Load every saved Docker image archive under /root/kubeadm-basic.images
# into the local Docker image store.
# Fixes vs. the original: no round-trip of an `ls` listing through
# /tmp/image-list.txt (parsing ls breaks on unusual filenames), quoted
# expansions, and strict mode so a failed `docker load` aborts the run.
set -euo pipefail

cd /root/kubeadm-basic.images
for archive in *; do
  # An empty directory leaves the literal '*' pattern; skip it.
  [ -e "$archive" ] || continue
  docker load -i "$archive"
done
---
chmod +x load-image.sh
./load-image.sh
scp kubeadm-basic.images.tar.gz load-image.sh root@192.168.100.12:/root
scp kubeadm-basic.images.tar.gz load-image.sh root@192.168.100.13:/root
login: 192.168.100.12 (node02.flyfish)
tar -zxvf kubeadm-basic.images.tar.gz
./load-image.sh
login: 192.168.100.13 (node03.flyfish)
tar -zxvf kubeadm-basic.images.tar.gz
./load-image.sh
kubeadm config print init-defaults > kubeadm-config.yaml
vim kubeadm-config.yaml
---
advertiseAddress: 192.168.100.11 (改为主节点IP地址)
增加
podSubnet: "10.244.0.0/16"
---
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
SupportIPVSProxyMode: true
mode: "ipvs"
---
kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get node
cd /etc/kubernetes/manifests/
vim kube-controller-manager.yaml
---
增加:
- --allocate-node-cidrs=true
- --cluster-cidr=10.244.0.0/16
---
mkdir -p k8s-install/{core,plugin}
mv kubeadm-init.log k8s-install/core
mv kubeadm-config.yaml k8s-install/core
cd k8s-install/plugin
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
vim kube-flannel.yml
-----
因为quay.io/coreos/flannel:v0.11.0-amd64 这个 quay.io 这个 在国外需要翻墙,才能下载到flannel:v0.11.0-amd64 资源 , 所以要改掉 改成:quay-mirror.qiniu.com
-----
kubectl create -f kube-flannel.yml
kubectl get pod -n kube-system
node01.flyfish
cd /root/k8s-install/
cat kubeadm-init.log
---
到最后一行有加入命令
kubeadm join 192.168.100.11:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:1e93d61321b0fee1993134f1379ccb73cb9bd79f5cb4af965be1abb77dae295b
---
node02.flyfish:
kubeadm join 192.168.100.11:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:1e93d61321b0fee1993134f1379ccb73cb9bd79f5cb4af965be1abb77dae295b
node03.flyfish:
kubeadm join 192.168.100.11:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:1e93d61321b0fee1993134f1379ccb73cb9bd79f5cb4af965be1abb77dae295b
node01.flyfish:
kubectl get node
kubectl get pods -n kube-system
至此k8s 三节点集群完成安装:
注:在node01.flyfish节点上进行如下操作
1.创建Dashboard的yaml文件
wget https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
----
使用如下命令或直接手动编辑kubernetes-dashboard.yaml文件
# Rewrite the dashboard image to the Aliyun mirror. Use '#' as the s///
# delimiter: the replacement text contains '/', so the original command
# failed with "sed: unknown option to `s'".
sed -i 's#k8s.gcr.io#registry.cn-hangzhou.aliyuncs.com/kuberneters#g' kubernetes-dashboard.yaml
# Expose the Service on NodePort 30001 for external access.
sed -i '/targetPort:/a\ \ \ \ \ \ nodePort: 30001\n\ \ type: NodePort' kubernetes-dashboard.yaml
---
手动编辑kubernetes-dashboard.yaml文件时,需要修改两处内容,首先在Dashboard Deployment部分修改Dashboard镜像下载链接,由于默认从官方社区下载,而不“科学上网”是无法下载的,因此修改为:image: registry.cn-hangzhou.aliyuncs.com/kuberneters/kubernetes-dashboard-amd64:v1.10.1 修改后内容如图:
此外,需要在Dashboard Service内容加入nodePort: 30001和type: NodePort两项内容,将Dashboard访问端口映射为节点端口,以供外部访问,编辑完成后,状态如图
部署Dashboard
kubectl create -f kubernetes-dashboard.yaml
创建完成后,检查相关服务运行状态
kubectl get deployment kubernetes-dashboard -n kube-system
kubectl get pods -n kube-system -o wide
kubectl get services -n kube-system
netstat -ntlp|grep 30001
在Firefox浏览器输入Dashboard访问地址:
https://192.168.100.12:30001
授权令牌
kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
使用输出的token登录Dashboard
认证通过后,登录Dashboard首页如图