@zhaikun
2017-07-20T15:50:15.000000Z
[root@zk-k8s-master01 ~]# vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.16.138.120 zk-k8s-master01
172.16.138.121 zk-k8s-node01
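The same two entries should also be present in /etc/hosts on the node, since both hosts refer to each other by name below. A quick sanity check:
getent hosts zk-k8s-master01 zk-k8s-node01   # both names should resolve to the 172.16.138.x addresses above
ping -c 1 zk-k8s-node01                      # basic reachability test from the master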
[root@zk-k8s-master01 ~]# yum install -y kubernetes etcd flannel
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
* epel: mirrors.aliyun.com
Resolving Dependencies
--> Running transaction check
---> Package etcd.x86_64 0:3.1.9-1.el7 will be installed
---> Package flannel.x86_64 0:0.7.1-1.el7 will be installed
---> Package kubernetes.x86_64 0:1.5.2-0.7.git269f928.el7 will be installed
--> Processing Dependency: kubernetes-node = 1.5.2-0.7.git269f928.el7 for package: kubernetes-1.5.2-0.7.git269f928.el7.x86_64
--> Processing Dependency: kubernetes-master = 1.5.2-0.7.git269f928.el7 for package: kubernetes-1.5.2-0.7.git269f928.el7.x86_64
--> Running transaction check
---> Package kubernetes-master.x86_64 0:1.5.2-0.7.git269f928.el7 will be installed
......
[root@zk-k8s-master01 ~]# vim /etc/kubernetes/config
###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
# kube-apiserver.service
# kube-controller-manager.service
# kube-scheduler.service
# kubelet.service
# kube-proxy.service
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"
# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"
# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"
# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://zk-k8s-master01:8080" # change to the master's address
[root@zk-k8s-master01 ~]# vim /etc/etcd/etcd.conf
# [member]
ETCD_NAME=default
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
#ETCD_SNAPSHOT_COUNT="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_LISTEN_PEER_URLS="http://localhost:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379" # change to this host's address or 0.0.0.0
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#
#[cluster]
#ETCD_INITIAL_ADVERTISE_PEER_URLS="http://localhost:2380"
# if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
#ETCD_INITIAL_CLUSTER="default=http://localhost:2380"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://0.0.0.0:2379" # change to this host's address or 0.0.0.0
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_SRV=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
[root@zk-k8s-master01 ~]# vim /etc/kubernetes/apiserver
###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver
#
# The address on the local server to listen to.
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
# The port on the local server to listen on.
KUBE_API_PORT="--port=8080"
# Port minions listen on
KUBELET_PORT="--kubelet-port=10250"
# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=http://127.0.0.1:2379"
# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
# default admission control policies
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
# Add your own!
KUBE_API_ARGS=""
......
[root@zk-k8s-master01 ~]# service etcd start
[root@zk-k8s-master01 ~]# etcdctl mkdir /kube-centos/network
[root@zk-k8s-master01 ~]# etcdctl mk /kube-centos/network/config "{ \"Network\": \"172.30.0.0/16\", \"SubnetLen\": 24, \"Backend\": { \"Type\": \"vxlan\" } }"
{ "Network": "172.30.0.0/16", "SubnetLen": 24, "Backend": { "Type": "vxlan" } }
[root@zk-k8s-master01 ~]#
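To confirm the flannel network configuration landed in etcd (flannel 0.7 reads it over the v2 API, which is what etcdctl uses by default here), read the key back:
etcdctl get /kube-centos/network/config   # should print the JSON document written above
etcdctl cluster-health                    # the single-member cluster should report "cluster is healthy"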
# Configure flannel
[root@zk-k8s-master01 ~]# vim /etc/sysconfig/flanneld
# Flanneld configuration options
# etcd url location. Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://127.0.0.1:2379"
# etcd config key. This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/kube-centos/network"
# Any additional options that you want to pass
#FLANNEL_OPTIONS=""
[root@zk-k8s-master01 ~]# for SERVICES in etcd kube-apiserver kube-controller-manager kube-scheduler flanneld; do
>     systemctl restart $SERVICES
>     systemctl enable $SERVICES
>     systemctl status $SERVICES
> done
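With the master services restarted, a minimal health check against the local insecure port looks like this (assuming kubectl on the master is pointed at http://127.0.0.1:8080):
kubectl -s http://127.0.0.1:8080 get componentstatuses   # scheduler, controller-manager and etcd-0 should report Healthy
curl http://127.0.0.1:8080/version                       # the apiserver should answer with its v1.5.2 build info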
[root@zk-k8s-node01 ~]# yum -y install kubernetes-node flannel
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
* epel: mirrors.aliyun.com
Resolving Dependencies
--> Running transaction check
---> Package flannel.x86_64 0:0.7.1-1.el7 will be installed
---> Package kubernetes-node.x86_64 0:1.5.2-0.7.git269f928.el7 will be installed
--> Processing Dependency: kubernetes-client = 1.5.2-0.7.git269f928.el7 for package: kubernetes-node-1.5.2-0.7.git269f928.el7.x86_64
--> Processing Dependency: socat for package: kubernetes-node-1.5.2-0.7.git269f928.el7.x86_64
--> Processing Dependency: conntrack-tools for package: kubernetes-node-1.5.2-0.7.git269f928.el7.x86_64
--> Running transaction check
---> Package conntrack-tools.x86_64 0:1.4.4-3.el7_3 will be installed
--> Processing Dependency: libnetfilter_conntrack >= 1.0.6 for package: conntrack-tools-1.4.4-3.el7_3.x86_64
--> Processing Dependency: libnetfilter_cttimeout.so.1(LIBNETFILTER_CTTIMEOUT_1.1)(64bit) for package: conntrack-tools-1.4.4-3.el7_3.x86_64
--> Processing Dependency: libnetfilter_cttimeout.so.1(LIBNETFILTER_CTTIMEOUT_1.0)(64bit) for package: conntrack-tools-1.4.4-3.el7_3.x86_64
--> Processing Dependency: libnetfilter_cthelper.so.0(LIBNETFILTER_CTHELPER_1.0)(64bit) for package: conntrack-tools-1.4.4-3.el7_3.x86_64
--> Processing Dependency: libnetfilter_queue.so.1()(64bit) for package: conntrack-tools-1.4.4-3.el7_3.x86_64
--> Processing Dependency: libnetfilter_cttimeout.so.1()(64bit) for package: conntrack-tools-1.4.4-3.el7_3.x86_64
--> Processing Dependency: libnetfilter_cthelper.so.0()(64bit) for package: conntrack-tools-1.4.4-3.el7_3.x86_64
---> Package kubernetes-client.x86_64 0:1.5.2-0.7.git269f928.el7 will be installed
---> Package socat.x86_64 0:1.7.2.2-5.el7 will be installed
--> Running transaction check
......
5. Modify the configuration on the node
[root@zk-k8s-node01 ~]# vim /etc/kubernetes/config
###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
# kube-apiserver.service
# kube-controller-manager.service
# kube-scheduler.service
# kubelet.service
# kube-proxy.service
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"
# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"
# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"
# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://zk-k8s-master01:8080"
[root@zk-k8s-node01 ~]# vim /etc/kubernetes/kubelet
###
# kubernetes kubelet (minion) config
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address=0.0.0.0"
# The port for the info server to serve on
# KUBELET_PORT="--port=10250"
# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override=zk-k8s-node01"
# location of the api-server
KUBELET_API_SERVER="--api-servers=http://zk-k8s-master01:8080"
# pod infrastructure container
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=harbor.suixingpay.com/kube/pause-amd64:3.0"
# Add your own!
KUBELET_ARGS="--cluster_dns=10.254.0.100 --cluster_domain=cluster.local"
[root@zk-k8s-node01 ~]# vim /etc/sysconfig/flanneld
# Flanneld configuration options
# etcd url location. Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://zk-k8s-master01:2379"
# etcd config key. This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/kube-centos/network"
# Any additional options that you want to pass
#FLANNEL_OPTIONS=""
[root@zk-k8s-node01 ~]# for SERVICES in kube-proxy kubelet flanneld docker; do
>     systemctl restart $SERVICES
>     systemctl enable $SERVICES
>     systemctl status $SERVICES
> done
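If everything came up, the node registers itself with the master, and flanneld on the node has leased a subnet from the 172.30.0.0/16 range defined earlier; the docker daemon picks that subnet up when it restarts. A rough check:
kubectl get nodes             # on the master: zk-k8s-node01 should show STATUS Ready
cat /run/flannel/subnet.env   # on the node: the leased 172.30.x.0/24 subnet written by flanneld
ip addr show flannel.1        # on the node: the vxlan interface created for the overlay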
[root@zk-k8s-master01 docker]# vim kubernetes-dashboard.yaml
.....
spec:
  containers:
  - name: kubernetes-dashboard
    image: harbor.suixingpay.com/kube/k8s-dashboard:1.6.0   # changed to the local registry address
    imagePullPolicy: IfNotPresent   # changed to IfNotPresent: pull only if the image is not already present locally
    ports:
    - containerPort: 9090
      protocol: TCP
    args:
      # Uncomment the following line to manually specify Kubernetes API server Host
      # If not specified, Dashboard will attempt to auto discover the API server and connect
      # to it. Uncomment only if the default does not work.
      - --apiserver-host=http://172.16.138.120:8080   # changed to the master's address
    livenessProbe:
      httpGet:
        path: /
        port: 9090
      initialDelaySeconds: 30
      timeoutSeconds: 30
......
[root@zk-k8s-master01 docker]# kubectl create -f kubernetes-dashboard.yaml
deployment "kubernetes-dashboard" created
service "kubernetes-dashboard" created
[root@zk-k8s-master01 docker]#
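The stock kubernetes-dashboard.yaml puts its Deployment and Service in the kube-system namespace; assuming that was left unchanged here, the rollout can be checked with:
kubectl get pods --namespace=kube-system -o wide   # the kubernetes-dashboard pod should reach Running on zk-k8s-node01
kubectl get svc --namespace=kube-system            # shows the cluster IP / NodePort assigned to kubernetes-dashboard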
Note: on the node, before Docker starts, point it at the private registry in /etc/sysconfig/docker:
ADD_REGISTRY='--add-registry harbor.suixingpay.com'
INSECURE_REGISTRY='--insecure-registry harbor.suixingpay.com'
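Docker has to be restarted after editing /etc/sysconfig/docker for the registry options to take effect; pulling the pause image referenced by the kubelet is an easy way to confirm the node can reach the private registry:
systemctl restart docker
docker pull harbor.suixingpay.com/kube/pause-amd64:3.0   # should succeed without TLS errors once --insecure-registry is active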
[root@zk-k8s-master01 docker]# kubectl create -f skydns-rc.yaml
deployment "kube-dns" created
[root@zk-k8s-master01 docker]# kubectl create -f skydns-svc.yaml
service "kube-dns" created
[root@zk-k8s-master01 docker]#
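The cluster IP of the kube-dns service must match the --cluster_dns=10.254.0.100 value set in the node's kubelet config. Assuming the skydns manifests use the kube-system namespace and a busybox image can be pulled through the configured registry, DNS can be verified roughly like this:
kubectl get svc kube-dns --namespace=kube-system   # CLUSTER-IP should be 10.254.0.100
kubectl run -i -t dns-test --image=busybox --restart=Never -- nslookup kubernetes.default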
1) Pull the component images:
harbor.suixingpay.com/kube/heapster:canary
harbor.suixingpay.com/kube/heapster_influxdb:v0.5
harbor.suixingpay.com/kube/heapster_grafana:v2.6.0-2
2) Edit heapster's configuration files; after extracting heapster-1.2.0.tar.gz they live under:
heapster-1.2.0/deploy/kube-config/influxdb/
3) In heapster-controller.yaml, update the image references accordingly, then change the last two arguments to:
- --source=kubernetes:http://172.16.138.120:8080?inClusterConfig=false
- --sink=influxdb:http://monitoring-influxdb:8086
4) Update the image addresses in influxdb-grafana-controller.yaml as well.
5) Create the resources with kubectl create -f . (run from the heapster-1.2.0/deploy/kube-config/influxdb/ directory), then check the pods as sketched below.
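A rough check afterwards, assuming the manifests keep their default kube-system namespace:
kubectl get pods --namespace=kube-system   # the heapster and influxdb/grafana pods should reach Running
kubectl cluster-info                       # lists the monitoring endpoints exposed through the apiserver proxy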