High-Availability Binary Kubernetes Deployment
Posted by 醉舞斜陽
Installing Kubernetes from binaries
| Node name     | IP          |
|---------------|-------------|
| k8s-master-01 | 172.16.1.71 |
| k8s-master-02 | 172.16.1.72 |
| k8s-master-03 | 172.16.1.73 |
| k8s-node-01   | 172.16.1.74 |
| k8s-node-02   | 172.16.1.75 |
# Set the IP address and hostname on each machine
# Disable the firewall and SELinux
# Add hosts entries (run on all five machines; not repeated below)
[root@k8s-master-01 ~]# vim /etc/hosts
----------------------------------------------------------------------
172.16.1.71 k8s-master-01 m1
172.16.1.72 k8s-master-02 m2
172.16.1.73 k8s-master-03 m3
172.16.1.74 k8s-node-01 n1
172.16.1.75 k8s-node-02 n2
----------------------------------------------------------------------
# Disable the swap partition (run on all five machines; not repeated below)
[root@k8s-master-01 ~]# vim /etc/fstab
----------------------------------------------------------------------
# UUID=43e1bca3-991b-4cbf-bf73-e198e975f24e swap swap defaults 0 0
----------------------------------------------------------------------
# Tell kubelet to ignore swap (run on all five machines; not repeated below)
[root@k8s-master-01 ~]# echo 'KUBELET_EXTRA_ARGS="--fail-swap-on=false"' > /etc/sysconfig/kubelet
# Disable SELinux (run on all five machines; not repeated below)
[root@k8s-master-01 ~]# sed -i 's#enforcing#disabled#g' /etc/selinux/config   # permanent, after reboot
[root@k8s-master-01 ~]# setenforce 0   # immediate, current boot only
# Refresh the yum cache (run on all five machines; not repeated below)
[root@k8s-master-01 ~]# yum makecache
# Update the system (run on all five machines; not repeated below)
[root@k8s-master-01 ~]# yum update -y --exclude=kernel*
# Upgrade the kernel
[root@k8s-master-01 ~]# ll   # the kernel packages were uploaded beforehand
-r-xr-xr-x 1 root root 41857400 2021-01-18 09:20 kernel-lt-4.4.245-1.el7.elrepo.x86_64.rpm
-r-xr-xr-x 1 root root 10731836 2021-01-18 09:19 kernel-lt-devel-4.4.245-1.el7.elrepo.x86_64.rpm
# Set up passwordless SSH to all five machines
[root@k8s-master-01 ~]# ssh-keygen
[root@k8s-master-01 ~]# ssh-copy-id 192.168.13.71
[root@k8s-master-01 ~]# ssh-copy-id 192.168.13.72
[root@k8s-master-01 ~]# ssh-copy-id 192.168.13.73
[root@k8s-master-01 ~]# ssh-copy-id 192.168.13.74
[root@k8s-master-01 ~]# ssh-copy-id 192.168.13.75
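The five ssh-copy-id commands can also be wrapped in a loop. A minimal sketch, assuming interactive password entry for each host and that the 192.168.13.x addresses are the ones reachable over SSH (as in the commands above):

for ip in 192.168.13.{71..75}; do
    ssh-copy-id -o StrictHostKeyChecking=no root@$ip
done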
# Copy the kernel packages from /root to the other four machines
[root@k8s-master-01 ~]# for i in m2 m3 n1 n2; do scp kernel* $i:/root; done
# Install the new kernel (run on all five machines; not repeated below)
[root@k8s-master-01 ~]# yum localinstall -y kernel-lt*
[root@k8s-master-01 ~]# grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg   # boot the new kernel first
[root@k8s-master-01 ~]# grubby --default-kernel   # confirm the default kernel
# Install ipvs tooling (run on all five machines; not repeated below)
[root@k8s-master-01 ~]# yum install -y conntrack-tools ipvsadm ipset conntrack libseccomp
# Load the IPVS modules (run on all five machines; not repeated below)
[root@k8s-master-01 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
    /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
    if [ \$? -eq 0 ]; then
        /sbin/modprobe \${kernel_module}
    fi
done
EOF
# Verify the modules loaded (run on all five machines; not repeated below)
[root@k8s-master-01 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
# Tune kernel parameters (run on all five machines; not repeated below)
[root@k8s-master-01 ~]# cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.netfilter.nf_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
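The reboot in the next step loads these values, but they can also be applied immediately. Note the net.bridge.* keys only exist once the br_netfilter module is loaded, so load it first (a small extra step, not in the original flow):

modprobe br_netfilter
sysctl --system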
# Reboot (run on all five machines; not repeated below)
[root@k8s-master-01 ~]# reboot
# Confirm the kernel version (run on all five machines; not repeated below)
[root@k8s-master-01 ~]# uname -a
# Install base packages (run on all five machines; not repeated below)
[root@k8s-master-01 ~]# yum install wget expect vim net-tools ntp bash-completion ipvsadm ipset jq iptables conntrack sysstat libseccomp -y
# Install docker (run on all five machines; not repeated below)
[root@k8s-master-01 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@k8s-master-01 ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@k8s-master-01 ~]# yum install docker-ce -y
[root@k8s-master-01 ~]# sudo mkdir -p /etc/docker
[root@k8s-master-01 ~]# sudo tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://8mh75mhz.mirror.aliyuncs.com"]
}
EOF
[root@k8s-master-01 ~]# sudo systemctl daemon-reload; systemctl restart docker; systemctl enable --now docker.service
[root@k8s-master-01 ~]# docker info   # verify the installation
# Keep time in sync (run on all five machines; not repeated below)
[root@k8s-master-01 ~]# crontab -e
--------------------------------------------------------------------
* * * * * /usr/sbin/ntpdate ntp.aliyun.com &> /dev/null
--------------------------------------------------------------------
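The cron job only fires at the next minute boundary; to sync the clock immediately, run the same command once by hand:

/usr/sbin/ntpdate ntp.aliyun.com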
Installing the cfssl certificate tooling
# Download
[root@k8s-master-01 ~]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@k8s-master-01 ~]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
# Make the binaries executable
[root@k8s-master-01 ~]# chmod +x cfssljson_linux-amd64
[root@k8s-master-01 ~]# chmod +x cfssl_linux-amd64
# Move them into /usr/local/bin
[root@k8s-master-01 ~]# mv cfssljson_linux-amd64 cfssljson
[root@k8s-master-01 ~]# mv cfssl_linux-amd64 cfssl
[root@k8s-master-01 ~]# mv cfssljson cfssl /usr/local/bin
# Verify
[root@k8s-master-01 ~]# cfssl version
Version: 1.2.0
Revision: dev
Runtime: go1.6
Creating the cluster root CA
[root@k8s-master-01 ~]# mkdir -p /opt/cert/ca
[root@k8s-master-01 ~]# cat > /opt/cert/ca/ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "8760h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "8760h"
}
}
}
}
EOF
Create the root CA certificate signing request (CSR)
[root@k8s-master-01 ~]# cat > /opt/cert/ca/ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names":[{
"C": "CN",
"ST": "ShangHai",
"L": "ShangHai"
}]
}
EOF
Generate the root certificate
[root@k8s-master-01 ~]# cd /opt/cert/ca/
[root@k8s-master-01 /opt/cert/ca]# ll
-rw-r--r-- 1 root root 285 2021-01-19 15:28 ca-config.json
-rw-r--r-- 1 root root 153 2021-01-19 15:29 ca-csr.json
[root@k8s-master-01 /opt/cert/ca]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
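cfssl gencert -initca writes three files next to the JSON inputs: ca.pem (the root certificate), ca-key.pem (its private key), and ca.csr. A quick sanity check with openssl (installed by default on CentOS):

openssl x509 -in ca.pem -noout -subject -dates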
Deploying etcd
[root@k8s-master-01 /opt/cert/ca]# mkdir /opt/data
[root@k8s-master-01 /opt/cert/ca]# cd /opt/data
[root@k8s-master-01 /opt/data]# wget https://mirrors.huaweicloud.com/etcd/v3.3.24/etcd-v3.3.24-linux-amd64.tar.gz
[root@k8s-master-01 /opt/data]# tar xf etcd-v3.3.24-linux-amd64.tar.gz
[root@k8s-master-01 /opt/data]# cd etcd-v3.3.24-linux-amd64/
[root@k8s-master-01 /opt/data/etcd-v3.3.24-linux-amd64]# for i in m1 m2 m3; do scp etc* $i:/usr/local/bin; done
# Verify
[root@k8s-master-01 /opt/data/etcd-v3.3.24-linux-amd64]# etcd --version
etcd Version: 3.3.24
Git SHA: bdd57848d
Go Version: go1.12.17
Go OS/Arch: linux/amd64
Creating the etcd certificate
[root@k8s-master-01 ~]# mkdir -p /opt/cert/etcd
[root@k8s-master-01 ~]# cd /opt/cert/etcd
[root@k8s-master-01 /opt/cert/etcd]# cat > etcd-csr.json << EOF
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
"172.16.1.71",
"172.16.1.72",
"172.16.1.73",
"172.16.1.74",
"172.16.1.75"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "ShangHai",
"L": "ShangHai"
}
]
}
EOF
[root@k8s-master-01 /opt/cert/etcd]# ll
-rw-r--r-- 1 root root 335 2021-01-19 15:40 etcd-csr.json
Generate the certificate
[root@k8s-master-01 /opt/cert/etcd]# cfssl gencert -ca=../ca/ca.pem -ca-key=../ca/ca-key.pem -config=../ca/ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
[root@k8s-master-01 /opt/cert/etcd]# ll
total 16
-rw-r--r-- 1 root root 1041 2021-01-19 15:41 etcd.csr
-rw-r--r-- 1 root root 335 2021-01-19 15:40 etcd-csr.json
-rw------- 1 root root 1675 2021-01-19 15:41 etcd-key.pem
-rw-r--r-- 1 root root 1371 2021-01-19 15:41 etcd.pem
Distribute the certificates
[root@k8s-master-01 /opt/cert/etcd]# for ip in m1 m2 m3 n1 n2;do
ssh root@${ip} "mkdir -pv /etc/etcd/ssl"
scp ../ca/ca*.pem root@${ip}:/etc/etcd/ssl
scp ./etcd*.pem root@${ip}:/etc/etcd/ssl
done
Register the etcd service (run on all three masters; not repeated below)
[root@k8s-master-01 /opt/cert/etcd]# cd
[root@k8s-master-01 ~]# ETCD_NAME=`hostname`
[root@k8s-master-01 ~]# INTERNAL_IP=`hostname -i`
[root@k8s-master-01 ~]# INITIAL_CLUSTER=k8s-master-01=https://172.16.1.71:2380,k8s-master-02=https://172.16.1.72:2380,k8s-master-03=https://172.16.1.73:2380
[root@k8s-master-01 ~]# cat << EOF | sudo tee /usr/lib/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos
[Service]
ExecStart=/usr/local/bin/etcd \\
  --name ${ETCD_NAME} \\
  --cert-file=/etc/etcd/ssl/etcd.pem \\
  --key-file=/etc/etcd/ssl/etcd-key.pem \\
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \\
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \\
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-client-cert-auth \\
  --client-cert-auth \\
  --initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-client-urls https://${INTERNAL_IP}:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls https://${INTERNAL_IP}:2379 \\
  --initial-cluster-token etcd-cluster \\
  --initial-cluster ${INITIAL_CLUSTER} \\
  --initial-cluster-state new \\
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
Start etcd (run on all three masters; not repeated below)
[root@k8s-master-01 ~]# systemctl enable --now etcd
[root@k8s-master-01 ~]# systemctl status etcd
Testing the etcd cluster
[root@k8s-master-01 ~]# ETCDCTL_API=3 etcdctl \
--cacert=/etc/etcd/ssl/ca.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://172.16.1.71:2379,https://172.16.1.72:2379,https://172.16.1.73:2379" \
endpoint status --write-out='table'
+--------------------------+------------------+---------+---------+-----------+-----------+------------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
+--------------------------+------------------+---------+---------+-----------+-----------+------------+
| https://172.16.1.71:2379 | 80d0ace027643b4e | 3.3.24 | 20 kB | true | 7 | 9 |
| https://172.16.1.72:2379 | 9a7cf2dc57ec669f | 3.3.24 | 20 kB | false | 7 | 9 |
| https://172.16.1.73:2379 | 54f8db1a175b9c73 | 3.3.24 | 20 kB | false | 7 | 9 |
+--------------------------+------------------+---------+---------+-----------+-----------+------------+
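endpoint status shows leadership and raft state; for a plain pass/fail check, endpoint health works with the same flags:

ETCDCTL_API=3 etcdctl \
  --cacert=/etc/etcd/ssl/ca.pem \
  --cert=/etc/etcd/ssl/etcd.pem \
  --key=/etc/etcd/ssl/etcd-key.pem \
  --endpoints="https://172.16.1.71:2379,https://172.16.1.72:2379,https://172.16.1.73:2379" \
  endpoint health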
Creating the Kubernetes master CA certificate
[root@k8s-master-01 ~]# mkdir /opt/cert/k8s
[root@k8s-master-01 ~]# cd /opt/cert/k8s/
[root@k8s-master-01 /opt/cert/k8s]# cat > ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
[root@k8s-master-01 /opt/cert/k8s]# cat > ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "ShangHai",
"ST": "ShangHai"
}
]
}
EOF
[root@k8s-master-01 /opt/cert/k8s]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
Creating the kube-apiserver certificate
[root@k8s-master-01 /opt/cert/k8s]# cat > server-csr.json << EOF
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"172.16.1.71",
"172.16.1.72",
"172.16.1.73",
"172.16.1.80",
"10.96.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "ShangHai",
"ST": "ShangHai"
}
]
}
EOF
[root@k8s-master-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
Creating the kube-controller-manager certificate
[root@k8s-master-01 /opt/cert/k8s]# cat > kube-controller-manager-csr.json << EOF
{
"CN": "system:kube-controller-manager",
"hosts": [
"127.0.0.1",
"172.16.1.71",
"172.16.1.72",
"172.16.1.73",
"172.16.1.80"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:kube-controller-manager",
"OU": "System"
}
]
}
EOF
[root@k8s-master-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
Creating the kube-scheduler certificate
[root@k8s-master-01 /opt/cert/k8s]# cat > kube-scheduler-csr.json << EOF
{
"CN": "system:kube-scheduler",
"hosts": [
"127.0.0.1",
"172.16.1.71",
"172.16.1.72",
"172.16.1.73",
"172.16.1.80"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:kube-scheduler",
"OU": "System"
}
]
}
EOF
[root@k8s-master-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
Creating the kube-proxy certificate
[root@k8s-master-01 /opt/cert/k8s]# cat > kube-proxy-csr.json << EOF
{
"CN":"system:kube-proxy",
"hosts":[],
"key":{
"algo":"rsa",
"size":2048
},
"names":[
{
"C":"CN",
"L":"BeiJing",
"ST":"BeiJing",
"O":"system:kube-proxy",
"OU":"System"
}
]
}
EOF
[root@k8s-master-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
Issuing the cluster administrator (admin) certificate
[root@k8s-master-01 /opt/cert/k8s]# cat > admin-csr.json << EOF
{
"CN":"admin",
"key":{
"algo":"rsa",
"size":2048
},
"names":[
{
"C":"CN",
"L":"BeiJing",
"ST":"BeiJing",
"O":"system:masters",
"OU":"System"
}
]
}
EOF
[root@k8s-master-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
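Before copying the certificates into place, it can be worth confirming that each one was issued with the expected subject and lifetime. A small verification loop sketch (run in /opt/cert/k8s):

for c in ca server kube-controller-manager kube-scheduler kube-proxy admin; do
    echo "== ${c}"
    openssl x509 -in ${c}.pem -noout -subject -enddate
done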
Copy the certificates into place
[root@k8s-master-01 /opt/cert/k8s]# mkdir /etc/kubernetes/ssl
[root@k8s-master-01 /opt/cert/k8s]# cp -p ./{ca*pem,server*pem,kube-controller-manager*pem,kube-scheduler*.pem,kube-proxy*pem,admin*.pem} /etc/kubernetes/ssl
[root@k8s-master-01 /opt/cert/k8s]# ll /etc/kubernetes/ssl/
total 48
-rw------- 1 root root 1679 2021-01-19 16:11 admin-key.pem
-rw-r--r-- 1 root root 1363 2021-01-19 16:11 admin.pem
-rw------- 1 root root 1675 2021-01-19 16:04 ca-key.pem
-rw-r--r-- 1 root root 1281 2021-01-19 16:04 ca.pem
-rw------- 1 root root 1679 2021-01-19 16:06 kube-controller-manager-key.pem
-rw-r--r-- 1 root root 1476 2021-01-19 16:06 kube-controller-manager.pem
-rw------- 1 root root 1679 2021-01-19 16:07 kube-proxy-key.pem
-rw-r--r-- 1 root root 1383 2021-01-19 16:07 kube-proxy.pem
-rw------- 1 root root 1679 2021-01-19 16:06 kube-scheduler-key.pem
-rw-r--r-- 1 root root 1452 2021-01-19 16:06 kube-scheduler.pem
-rw------- 1 root root 1679 2021-01-19 16:05 server-key.pem
-rw-r--r-- 1 root root 1558 2021-01-19 16:05 server.pem
[root@k8s-master-01 /opt/cert/k8s]# for i in m2 m3 ;do
ssh root@$i "mkdir -pv /etc/kubernetes/ssl"
scp /etc/kubernetes/ssl/* root@$i:/etc/kubernetes/ssl
done
Downloading the server binaries
[root@k8s-master-01 ~]# cd /opt/data
# Download the server tarball (the direct download will probably fail; use the container fallback below)
[root@k8s-master-01 /opt/data]# wget https://dl.k8s.io/v1.18.8/kubernetes-server-linux-amd64.tar.gz
[root@k8s-master-01 /opt/data]# docker run -dit registry.cn-hangzhou.aliyuncs.com/k8sos/k8s:v1.18.8.1 bash
[root@k8s-master-01 /opt/data]# docker ps   # note the container ID
[root@k8s-master-01 /opt/data]# docker exec fdeed1e0b5a1 ls
[root@k8s-master-01 /opt/data]# docker cp fdeed1e0b5a1:kubernetes-server-linux-amd64.tar.gz .
[root@k8s-master-01 /opt/data]# tar xf kubernetes-server-linux-amd64.tar.gz
[root@k8s-master-01 /opt/data]# cd kubernetes/server/bin/
[root@k8s-master-01 /opt/data/kubernetes/server/bin]# for i in m1 m2 m3;do scp kube-apiserver kube-controller-manager kubectl kubelet kube-proxy kube-scheduler $i:/usr/local/bin ; done
# Verify (on all three masters)
[root@k8s-master-01 /opt/data/kubernetes/server/bin]# kube-apiserver --version
Kubernetes v1.18.8
Creating the kube-controller-manager kubeconfig
[root@k8s-master-01 /opt/data/kubernetes/server/bin]# cd /opt/cert/k8s/
# Set cluster parameters
[root@k8s-master-01 /opt/cert/k8s]# export KUBE_APISERVER="https://172.16.1.80:8443"
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-controller-manager.kubeconfig
# Set client credentials
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-credentials "kube-controller-manager" \
--client-certificate=/etc/kubernetes/ssl/kube-controller-manager.pem \
--client-key=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=kube-controller-manager.kubeconfig
# Set the context (ties the cluster parameters to the user)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-context default \
--cluster=kubernetes \
--user="kube-controller-manager" \
--kubeconfig=kube-controller-manager.kubeconfig
# Switch to the default context
[root@k8s-master-01 /opt/cert/k8s]# kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
[root@k8s-master-01 /opt/cert/k8s]# ll
-rw------- 1 root root 6289 2021-01-19 16:32 kube-controller-manager.kubeconfig   # one of the files listed
Creating the kube-scheduler kubeconfig
[root@k8s-master-01 /opt/cert/k8s]# export KUBE_APISERVER="https://172.16.1.80:8443"
# Set cluster parameters
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-scheduler.kubeconfig
# Set client credentials
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-credentials "kube-scheduler" \
--client-certificate=/etc/kubernetes/ssl/kube-scheduler.pem \
--client-key=/etc/kubernetes/ssl/kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=kube-scheduler.kubeconfig
# Set the context (ties the cluster parameters to the user)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-context default \
--cluster=kubernetes \
--user="kube-scheduler" \
--kubeconfig=kube-scheduler.kubeconfig
# Switch to the default context
[root@k8s-master-01 /opt/cert/k8s]# kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
[root@k8s-master-01 /opt/cert/k8s]# ll
-rw------- 1 root root 6239 2021-01-19 16:36 kube-scheduler.kubeconfig   # one of the files listed
Creating the kube-proxy kubeconfig
[root@k8s-master-01 /opt/cert/k8s]# export KUBE_APISERVER="https://172.16.1.80:8443"
# Set cluster parameters
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig
# Set client credentials
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-credentials "kube-proxy" \
--client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
--client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
# Set the context (ties the cluster parameters to the user)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-context default \
--cluster=kubernetes \
--user="kube-proxy" \
--kubeconfig=kube-proxy.kubeconfig
# Switch to the default context
[root@k8s-master-01 /opt/cert/k8s]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
[root@k8s-master-01 /opt/cert/k8s]# ll
-rw------- 1 root root 6139 2021-01-19 16:38 kube-proxy.kubeconfig   # one of the files listed
Creating the cluster administrator kubeconfig
[root@k8s-master-01 /opt/cert/k8s]# export KUBE_APISERVER="https://172.16.1.80:8443"
# Set cluster parameters
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=admin.kubeconfig
# Set client credentials
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-credentials "admin" \
--client-certificate=/etc/kubernetes/ssl/admin.pem \
--client-key=/etc/kubernetes/ssl/admin-key.pem \
--embed-certs=true \
--kubeconfig=admin.kubeconfig
# Set the context (ties the cluster parameters to the user)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-context default \
--cluster=kubernetes \
--user="admin" \
--kubeconfig=admin.kubeconfig
# Switch to the default context
[root@k8s-master-01 /opt/cert/k8s]# kubectl config use-context default --kubeconfig=admin.kubeconfig
[root@k8s-master-01 /opt/cert/k8s]# ll
-rw------- 1 root root 6105 2021-01-19 16:41 admin.kubeconfig   # one of the files listed
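All four kubeconfigs above follow the same four kubectl config steps, varying only the user name and certificate pair. A minimal helper sketch that captures the pattern (make_kubeconfig is a hypothetical name, not part of the original steps; it assumes KUBE_APISERVER is exported as above):

make_kubeconfig() {   # usage: make_kubeconfig <user> <cert-prefix>
    local user=$1 prefix=$2
    kubectl config set-cluster kubernetes \
        --certificate-authority=/etc/kubernetes/ssl/ca.pem \
        --embed-certs=true \
        --server=${KUBE_APISERVER} \
        --kubeconfig=${user}.kubeconfig
    kubectl config set-credentials "${user}" \
        --client-certificate=/etc/kubernetes/ssl/${prefix}.pem \
        --client-key=/etc/kubernetes/ssl/${prefix}-key.pem \
        --embed-certs=true \
        --kubeconfig=${user}.kubeconfig
    kubectl config set-context default --cluster=kubernetes \
        --user="${user}" --kubeconfig=${user}.kubeconfig
    kubectl config use-context default --kubeconfig=${user}.kubeconfig
}
# e.g. make_kubeconfig kube-scheduler kube-scheduler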
Configuring TLS bootstrapping
[root@k8s-master-01 /opt/cert/k8s]# TLS_BOOTSTRAPPING_TOKEN=`head -c 16 /dev/urandom | od -An -t x | tr -d ' '`
[root@k8s-master-01 /opt/cert/k8s]# cat > token.csv << EOF
${TLS_BOOTSTRAPPING_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
[root@k8s-master-01 /opt/cert/k8s]# ll
-rw-r--r-- 1 root root 84 2021-01-19 16:51 token.csv   # one of the files listed
[root@k8s-master-01 /opt/cert/k8s]# cat token.csv
3358c2b56753366ebf7d02bb00eeb3fc,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
Creating the bootstrap kubeconfig
[root@k8s-master-01 /opt/cert/k8s]# export KUBE_APISERVER="https://172.16.1.80:8443"
# Set cluster parameters
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kubelet-bootstrap.kubeconfig
# Set client credentials; the token must be the one generated into token.csv above (3358c2b56753366ebf7d02bb00eeb3fc)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-credentials "kubelet-bootstrap" \
--token=3358c2b56753366ebf7d02bb00eeb3fc \
--kubeconfig=kubelet-bootstrap.kubeconfig
# Set the context (ties the cluster parameters to the user)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-context default \
--cluster=kubernetes \
--user="kubelet-bootstrap" \
--kubeconfig=kubelet-bootstrap.kubeconfig
# Switch to the default context
[root@k8s-master-01 /opt/cert/k8s]# kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig
[root@k8s-master-01 /opt/cert/k8s]# ll
-rw------- 1 root root 2061 2021-01-19 16:55 kubelet-bootstrap.kubeconfig   # one of the files listed
Distributing the kubeconfigs
[root@k8s-master-01 /opt/cert/k8s]# for i in m1 m2 m3; do
ssh root@$i "mkdir -p /etc/kubernetes/cfg";
scp token.csv kube-scheduler.kubeconfig kube-controller-manager.kubeconfig admin.kubeconfig kube-proxy.kubeconfig kubelet-bootstrap.kubeconfig root@$i:/etc/kubernetes/cfg;
done
Deploying kube-apiserver (run on all three masters)
[root@k8s-master-01 /opt/cert/k8s]# KUBE_APISERVER_IP=`hostname -i`
[root@k8s-master-01 /opt/cert/k8s]# cat > /etc/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--advertise-address=${KUBE_APISERVER_IP} \\
--default-not-ready-toleration-seconds=360 \\
--default-unreachable-toleration-seconds=360 \\
--max-mutating-requests-inflight=2000 \\
--max-requests-inflight=4000 \\
--default-watch-cache-size=200 \\
--delete-collection-workers=2 \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.96.0.0/16 \\
--service-node-port-range=10-52767 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/etc/kubernetes/cfg/token.csv \\
--kubelet-client-certificate=/etc/kubernetes/ssl/server.pem \\
--kubelet-client-key=/etc/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/etc/kubernetes/ssl/server.pem \\
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \\
--client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/kubernetes/k8s-audit.log \\
--etcd-servers=https://172.16.1.71:2379,https://172.16.1.72:2379,https://172.16.1.73:2379 \\
--etcd-cafile=/etc/etcd/ssl/ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem"
EOF
# If hostname -i returned the external IP, rewrite it to the internal one:
[root@k8s-master-01 /opt/cert/k8s]# sed -i 's#192.168.13#172.16.1#g' /etc/kubernetes/cfg/kube-apiserver.conf
Registering the kube-apiserver service
[root@k8s-master-01 /opt/cert/k8s]# cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=10
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
[root@k8s-master-01 /opt/cert/k8s]# for i in m2 m3;do scp /usr/lib/systemd/system/kube-apiserver.service $i:/usr/lib/systemd/system/kube-apiserver.service;done
[root@k8s-master-01 /opt/cert/k8s]# mkdir -p /var/log/kubernetes/
[root@k8s-master-01 /opt/cert/k8s]# systemctl daemon-reload
[root@k8s-master-01 /opt/cert/k8s]# systemctl enable --now kube-apiserver
# Confirm it is running (check on all three masters)
[root@k8s-master-01 /opt/cert/k8s]# systemctl status kube-apiserver
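A quick liveness probe against the local listener; on v1.18 the /healthz and /version paths should be readable even unauthenticated (via the system:public-info-viewer role), and even a JSON permission error would still prove the port is serving TLS:

curl -k https://127.0.0.1:6443/healthz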
Making kube-apiserver highly available
[root@k8s-master-01 /opt/cert/k8s]# yum install -y keepalived haproxy
[root@k8s-master-02 /opt/cert/k8s]# yum install -y keepalived haproxy
[root@k8s-master-03 /opt/cert/k8s]# yum install -y keepalived haproxy
[root@k8s-master-01 /opt/cert/k8s]# cat > /etc/haproxy/haproxy.cfg <<EOF
global
  maxconn 2000
  ulimit-n 16384
  log 127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode http
  option httplog
  timeout connect 5000
  timeout client 50000
  timeout server 50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

listen stats
  bind *:8006
  mode http
  stats enable
  stats hide-version
  stats uri /stats
  stats refresh 30s
  stats realm Haproxy\ Statistics
  stats auth admin:admin

frontend k8s-master
  bind 0.0.0.0:8443
  bind 127.0.0.1:8443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server k8s-master-01 172.16.1.71:6443 check inter 2000 fall 2 rise 2 weight 100
  server k8s-master-02 172.16.1.72:6443 check inter 2000 fall 2 rise 2 weight 100
  server k8s-master-03 172.16.1.73:6443 check inter 2000 fall 2 rise 2 weight 100
EOF
[root@k8s-master-01 /opt/cert/k8s]# for i in m2 m3;do scp /etc/haproxy/haproxy.cfg $i:/etc/haproxy/haproxy.cfg;done
[root@k8s-master-01 /opt/cert/k8s]# systemctl enable --now haproxy.service
[root@k8s-master-02 /opt/cert/k8s]# systemctl enable --now haproxy.service
[root@k8s-master-03 /opt/cert/k8s]# systemctl enable --now haproxy.service
[root@k8s-master-01 /opt/cert/k8s]# systemctl status haproxy.service
[root@k8s-master-02 /opt/cert/k8s]# systemctl status haproxy.service
[root@k8s-master-03 /opt/cert/k8s]# systemctl status haproxy.service
[root@k8s-master-01 /opt/cert/k8s]# mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf_bak
[root@k8s-master-01 /opt/cert/k8s]# cd /etc/keepalived
[root@k8s-master-01 /opt/cert/k8s]# cat > /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_kubernetes {
    script "/etc/keepalived/check_kubernetes.sh"
    interval 2
    weight -5
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface eth1
    mcast_src_ip 172.16.1.71
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        172.16.1.80
    }
    # track_script {
    #     chk_kubernetes
    # }
}
EOF
[root@k8s-master-01 /etc/keepalived]# for i in m2 m3;do scp keepalived.conf $i:/etc/keepalived/keepalived.conf;done
## On k8s-master-02
[root@k8s-master-02 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_kubernetes {
    script "/etc/keepalived/check_kubernetes.sh"
    interval 2
    weight -5
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth1
    mcast_src_ip 172.16.1.72
    virtual_router_id 51
    priority 90
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        172.16.1.80
    }
    # track_script {
    #     chk_kubernetes
    # }
}
## On k8s-master-03
[root@k8s-master-03 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_kubernetes {
    script "/etc/keepalived/check_kubernetes.sh"
    interval 2
    weight -5
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth1
    mcast_src_ip 172.16.1.73
    virtual_router_id 51
    priority 80
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        172.16.1.80
    }
    # track_script {
    #     chk_kubernetes
    # }
}
Setting up the health-check script
[root@k8s-master-01 ~]# cat > /etc/keepalived/check_kubernetes.sh << 'EOF'
#!/bin/bash
function check_kubernetes() {
    for ((i=0;i<5;i++));do
        apiserver_pid_id=$(pgrep kube-apiserver)
        if [[ ! -z $apiserver_pid_id ]];then
            return
        else
            sleep 2
        fi
        apiserver_pid_id=0
    done
}
# 1: running  0: stopped
check_kubernetes
if [[ $apiserver_pid_id -eq 0 ]];then
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
EOF
[root@k8s-master-01 /etc/keepalived]# for i in m2 m3;do scp /etc/keepalived/check_kubernetes.sh $i:/etc/keepalived/check_kubernetes.sh; done
# Make the script executable (on all three masters)
[root@k8s-master-01 /etc/keepalived]# chmod +x /etc/keepalived/check_kubernetes.sh
[root@k8s-master-02 ~]# chmod +x /etc/keepalived/check_kubernetes.sh
[root@k8s-master-03 ~]# chmod +x /etc/keepalived/check_kubernetes.sh
# Start keepalived and haproxy (on all three masters)
[root@k8s-master-01 /etc/keepalived]# systemctl enable --now keepalived haproxy
[root@k8s-master-02 ~]# systemctl enable --now keepalived haproxy
[root@k8s-master-03 ~]# systemctl enable --now keepalived haproxy
# Confirm they are running (on all three masters)
[root@k8s-master-01 /etc/keepalived]# systemctl status keepalived.service
[root@k8s-master-02 ~]# systemctl status keepalived.service
[root@k8s-master-03 ~]# systemctl status keepalived.service
# Check which node holds the VIP (on all three masters)
[root@k8s-master-01 /etc/keepalived]# ip a | grep 172.16.1.80   # present
inet 172.16.1.80/32 scope global eth1
[root@k8s-master-02 ~]# ip a | grep 172.16.1.80   # absent
[root@k8s-master-03 ~]# ip a | grep 172.16.1.80   # absent
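A simple failover check, based on the priorities configured above (100 > 90 > 80): stopping keepalived on the MASTER node should move the VIP to k8s-master-02, and starting it again should move the VIP back:

# on k8s-master-01
systemctl stop keepalived
# on k8s-master-02 the VIP should now appear
ip a | grep 172.16.1.80
# on k8s-master-01, restore the original owner
systemctl start keepalived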
Authorizing TLS bootstrapping requests
[root@k8s-master-01 /etc/keepalived]# kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap
Creating the kube-controller-manager configuration
[root@k8s-master-01 /etc/keepalived]# cat > /etc/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--leader-elect=true \\
--cluster-name=kubernetes \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/12 \\
--service-cluster-ip-range=10.96.0.0/16 \\
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--kubeconfig=/etc/kubernetes/cfg/kube-controller-manager.kubeconfig \\
--tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \\
--tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s \\
--controllers=*,bootstrapsigner,tokencleaner \\
--use-service-account-credentials=true \\
--node-monitor-grace-period=10s \\
--horizontal-pod-autoscaler-use-rest-clients=true"
EOF
[root@k8s-master-01 /etc/keepalived]# cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
[root@k8s-master-01 /etc/keepalived]# for i in m1 m2 m3; do scp /etc/kubernetes/cfg/kube-controller-manager.conf root@$i:/etc/kubernetes/cfg; scp /usr/lib/systemd/system/kube-controller-manager.service root@$i:/usr/lib/systemd/system/kube-controller-manager.service; done
# Start kube-controller-manager.service (on all three masters)
[root@k8s-master-01 /etc/keepalived]# systemctl enable --now kube-controller-manager.service
[root@k8s-master-02 ~]# systemctl enable --now kube-controller-manager.service
[root@k8s-master-03 ~]# systemctl enable --now kube-controller-manager.service
# Check kube-controller-manager.service (on all three masters)
[root@k8s-master-01 /etc/keepalived]# systemctl status kube-controller-manager.service
[root@k8s-master-02 ~]# systemctl status kube-controller-manager.service
[root@k8s-master-03 ~]# systemctl status kube-controller-manager.service
Creating the kube-scheduler configuration
[root@k8s-master-01 /etc/keepalived]# cat > /etc/kubernetes/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--kubeconfig=/etc/kubernetes/cfg/kube-scheduler.kubeconfig \\
--leader-elect=true \\
--master=http://127.0.0.1:8080 \\
--bind-address=127.0.0.1"
EOF
[root@k8s-master-01 /etc/keepalived]# cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
[root@k8s-master-01 /etc/keepalived]# for ip in m2 m3; do scp /usr/lib/systemd/system/kube-scheduler.service root@${ip}:/usr/lib/systemd/system; scp /etc/kubernetes/cfg/kube-scheduler.conf root@${ip}:/etc/kubernetes/cfg; done
[root@k8s-master-01 /etc/keepalived]# systemctl daemon-reload
[root@k8s-master-02 ~]# systemctl daemon-reload
[root@k8s-master-03 ~]# systemctl daemon-reload
# Start kube-scheduler.service (on all three masters)
[root@k8s-master-01 /etc/keepalived]# systemctl enable --now kube-scheduler.service
[root@k8s-master-02 ~]# systemctl enable --now kube-scheduler.service
[root@k8s-master-03 ~]# systemctl enable --now kube-scheduler.service
# Check kube-scheduler.service (on all three masters)
[root@k8s-master-01 /etc/keepalived]# systemctl status kube-scheduler.service
[root@k8s-master-02 ~]# systemctl status kube-scheduler.service
[root@k8s-master-03 ~]# systemctl status kube-scheduler.service
Checking cluster status
[root@k8s-master-01 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-0 Healthy {"health":"true"}
etcd-2 Healthy {"health":"true"}
etcd-1 Healthy {"health":"true"}
Deploying the kubelet service
[root@k8s-master-01 ~]# KUBE_HOSTNAME=`hostname`
[root@k8s-master-01 ~]# cat > /etc/kubernetes/cfg/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--hostname-override=${KUBE_HOSTNAME} \\
--container-runtime=docker \\
--kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/etc/kubernetes/cfg/kubelet-bootstrap.kubeconfig \\
--config=/etc/kubernetes/cfg/kubelet-config.yml \\
--cert-dir=/etc/kubernetes/ssl \\
--image-pull-progress-deadline=15m \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/k8sos/pause:3.2"
EOF
[root@k8s-master-01 ~]# cat > /etc/kubernetes/cfg/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 172.16.1.71
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
  - 10.96.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF
[root@k8s-master-01 ~]# cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service
[Service]
EnvironmentFile=/etc/kubernetes/cfg/kubelet.conf
ExecStart=/usr/local/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# Distribute the kubelet config and service files
[root@k8s-master-01 ~]# for ip in m2 m3;do scp /etc/kubernetes/cfg/{kubelet-config.yml,kubelet.conf} root@${ip}:/etc/kubernetes/cfg; scp /usr/lib/systemd/system/kubelet.service root@${ip}:/usr/lib/systemd/system; done
# On master-02
[root@k8s-master-02 ~]# sed -i 's#master-01#master-02#g' /etc/kubernetes/cfg/kubelet.conf
[root@k8s-master-02 ~]# sed -i 's#172.16.1.71#172.16.1.72#g' /etc/kubernetes/cfg/kubelet-config.yml
# On master-03
[root@k8s-master-03 ~]# sed -i 's#master-01#master-03#g' /etc/kubernetes/cfg/kubelet.conf
[root@k8s-master-03 ~]# sed -i 's#172.16.1.71#172.16.1.73#g' /etc/kubernetes/cfg/kubelet-config.yml
# Start the kubelet service
[root@k8s-master-01 ~]# systemctl daemon-reload; systemctl enable --now kubelet; systemctl status kubelet.service
[root@k8s-master-02 ~]# systemctl daemon-reload; systemctl enable --now kubelet; systemctl status kubelet.service
[root@k8s-master-03 ~]# systemctl daemon-reload; systemctl enable --now kubelet; systemctl status kubelet.service
Deploying the kube-proxy service
[root@k8s-master-01 ~]# cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-proxy.conf
ExecStart=/usr/local/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
[root@k8s-master-01 ~]# cat > /etc/kubernetes/cfg/kube-proxy-config.yml << EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 172.16.1.71
healthzBindAddress: 172.16.1.71:10256
metricsBindAddress: 172.16.1.71:10249
clientConnection:
  burst: 200
  kubeconfig: /etc/kubernetes/cfg/kube-proxy.kubeconfig
  qps: 100
hostnameOverride: k8s-master-01
clusterCIDR: 10.244.0.0/12
enableProfiling: true
mode: "ipvs"
iptables:
  masqueradeAll: false
ipvs:
  scheduler: rr
  excludeCIDRs: []
EOF
[root@k8s-master-01 ~]# cat > /etc/kubernetes/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--config=/etc/kubernetes/cfg/kube-proxy-config.yml"
EOF
[root@k8s-master-01 ~]# for ip in m2 m3;do
scp /etc/kubernetes/cfg/{kube-proxy-config.yml,kube-proxy.conf} root@${ip}:/etc/kubernetes/cfg/
scp /usr/lib/systemd/system/kube-proxy.service root@${ip}:/usr/lib/systemd/system/
done
# On master-02
[root@k8s-master-02 ~]# sed -i 's#172.16.1.71#172.16.1.72#g' /etc/kubernetes/cfg/kube-proxy-config.yml
[root@k8s-master-02 ~]# sed -i 's#master-01#master-02#g' /etc/kubernetes/cfg/kube-proxy-config.yml
# On master-03
[root@k8s-master-03 ~]# sed -i 's#172.16.1.71#172.16.1.73#g' /etc/kubernetes/cfg/kube-proxy-config.yml
[root@k8s-master-03 ~]# sed -i 's#master-01#master-03#g' /etc/kubernetes/cfg/kube-proxy-config.yml
# Run on all three masters
[root@k8s-master-01 ~]# systemctl daemon-reload; systemctl enable --now kube-proxy; systemctl status kube-proxy
[root@k8s-master-02 ~]# systemctl daemon-reload; systemctl enable --now kube-proxy; systemctl status kube-proxy
[root@k8s-master-03 ~]# systemctl daemon-reload; systemctl enable --now kube-proxy; systemctl status kube-proxy
# Check the pending CSRs
[root@k8s-master-01 /opt/cert/k8s]# kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
node-csr-GUPj9U-cv5F5WtBMlWJByKsCa5OkV--6nPPSneRkxU0 15s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
node-csr-U3uVDsdmGlnv7vQD-Rieui3YQpW5pK6sZ56BtsOpLgQ 18s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
node-csr-zECR2kT2FCF6ZkIN9mwyNfyn_zPyzGE54SQF47enU08 38s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
Approve the join requests
[root@k8s-master-01 /opt/cert/k8s]# kubectl certificate approve `kubectl get csr | grep "Pending" | awk '{print $1}'`
certificatesigningrequest.certificates.k8s.io/node-csr-GUPj9U-cv5F5WtBMlWJByKsCa5OkV--6nPPSneRkxU0 approved
certificatesigningrequest.certificates.k8s.io/node-csr-U3uVDsdmGlnv7vQD-Rieui3YQpW5pK6sZ56BtsOpLgQ approved
certificatesigningrequest.certificates.k8s.io/node-csr-zECR2kT2FCF6ZkIN9mwyNfyn_zPyzGE54SQF47enU08 approved
[root@k8s-master-01 /opt/cert/k8s]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master-01 Ready <none> 43s v1.18.8
k8s-master-02 Ready <none> 14s v1.18.8
k8s-master-03 NotReady <none> 1s v1.18.8
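The ROLES column is read from node-role.kubernetes.io/<role> labels, which a binary install never sets, hence <none>. Labelling the masters is purely cosmetic but makes the listing easier to read:

kubectl label node k8s-master-01 k8s-master-02 k8s-master-03 node-role.kubernetes.io/master=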
Deploying the network plugin
# Option 1 (if this download fails, use option 2):
[root@k8s-master-01 /opt/cert/k8s]# cd /opt/data/
[root@k8s-master-01 /opt/data/]# wget https://github.com/coreos/flannel/releases/download/v0.13.1-rc1/flannel-v0.13.1-rc1-linux-amd64.tar.gz
# Option 2:
[root@k8s-master-01 /opt/data]# docker run -dit registry.cn-hangzhou.aliyuncs.com/k8sos/k8s:v1.18.8.1 bash
[root@k8s-master-01 /opt/data]# docker ps   # note the container ID
[root@k8s-master-01 /opt/data]# docker exec 6e678a83701c ls   # find the package name
[root@k8s-master-01 /opt/data]# docker cp 6e678a83701c:flannel-v0.11.0-linux-amd64.tar.gz .
[root@k8s-master-01 /opt/data]# tar xf flannel-v0.11.0-linux-amd64.tar.gz
[root@k8s-master-01 /opt/data]# ll
-rwxr-xr-x 1 root root 35249016 Jan 29 2019 flanneld
-rw-r--r-- 1 root root 9565743 Jan 29 2019 flannel-v0.11.0-linux-amd64.tar.gz
-rwxr-xr-x 1 root root 2139 Oct 23 2018 mk-docker-opts.sh
[root@k8s-master-01 /opt/data]# for i in m1 m2 m3;do scp flanneld mk-docker-opts.sh root@$i:/usr/local/bin; done
[root@k8s-master-01 /opt/data]# etcdctl \
--ca-file=/etc/etcd/ssl/ca.pem \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://172.16.1.71:2379,https://172.16.1.72:2379,https://172.16.1.73:2379" \
mk /coreos.com/network/config '{"Network":"10.244.0.0/12", "SubnetLen": 21, "Backend": {"Type": "vxlan", "DirectRouting": true}}'
# Read the value back with get
[root@k8s-master-01 /opt/data]# etcdctl \
--ca-file=/etc/etcd/ssl/ca.pem \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://172.16.1.71:2379,https://172.16.1.72:2379,https://172.16.1.73:2379" \
get /coreos.com/network/config
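If the mk succeeded, get simply echoes the stored JSON back:

{"Network":"10.244.0.0/12", "SubnetLen": 21, "Backend": {"Type": "vxlan", "DirectRouting": true}}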
Registering the flannel service
[root@k8s-master-01 /opt/data]# cat > /usr/lib/systemd/system/flanneld.service << EOF
[Unit]
Description=Flanneld address
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service
[Service]
Type=notify
ExecStart=/usr/local/bin/flanneld \\
  -etcd-cafile=/etc/etcd/ssl/ca.pem \\
  -etcd-certfile=/etc/etcd/ssl/etcd.pem \\
  -etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
  -etcd-endpoints=https://172.16.1.71:2379,https://172.16.1.72:2379,https://172.16.1.73:2379 \\
  -etcd-prefix=/coreos.com/network \\
  -ip-masq
ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=always
RestartSec=5
StartLimitInterval=0
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF
[root@k8s-master-01 /opt/data]# for i in m2 m3;do scp /usr/lib/systemd/system/flanneld.service root@$i:/usr/lib/systemd/system;done
[root@k8s-master-01 /opt/data]# sed -i '/ExecStart/s/\(.*\)/#\1/' /usr/lib/systemd/system/docker.service
[root@k8s-master-01 /opt/data]# sed -i '/ExecReload/a ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock' /usr/lib/systemd/system/docker.service
[root@k8s-master-01 /opt/data]# sed -i '/ExecReload/a EnvironmentFile=-/run/flannel/subnet.env' /usr/lib/systemd/system/docker.service
[root@k8s-master-01 /opt/data]# for ip in m2 m3;do scp /usr/lib/systemd/system/docker.service root@${ip}:/usr/lib/systemd/system; done
[root@k8s-master-01 /opt/data]# systemctl daemon-reload
[root@k8s-master-01 /opt/data]# systemctl start flanneld
[root@k8s-master-01 /opt/data]# systemctl enable --now flanneld.service
[root@k8s-master-01 /opt/data]# systemctl restart docker
[root@k8s-master-02 ~]# systemctl daemon-reload
[root@k8s-master-02 ~]# systemctl start flanneld
[root@k8s-master-02 ~]# systemctl enable --now flanneld.service
[root@k8s-master-02 ~]# systemctl restart docker
[root@k8s-master-03 ~]# systemctl daemon-reload
[root@k8s-master-03 ~]# systemctl start flanneld
[root@k8s-master-03 ~]# systemctl enable --now flanneld.service
[root@k8s-master-03 ~]# systemctl restart docker
[root@k8s-master-02 ~]# ip a   # check the interfaces
4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
inet 10.241.200.1/21 brd 10.241.207.255 scope global docker0
7: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
inet 10.241.200.0/32 scope global flannel.1
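Each node receives its own /21 from the flannel network, recorded in /run/flannel/subnet.env; the subnets must differ between nodes, and the vxlan path can be checked by pinging another node's flannel.1 address (e.g. the 10.241.200.0 shown above for master-02):

cat /run/flannel/subnet.env
ping -c 3 10.241.200.0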
Installing the cluster DNS
[root@k8s-master-01 /opt/data]# yum install git -y
[root@k8s-master-01 /opt/data]# git clone https://github.com/coredns/deployment.git
[root@k8s-master-01 /opt/data]# cd /opt/data/deployment/kubernetes
[root@k8s-master-01 /opt/data/deployment/kubernetes]# sed -i 's#coredns/coredns#registry.cn-hangzhou.aliyuncs.com/k8sos/coredns#g' coredns.yaml.sed
[root@k8s-master-01 /opt/data/deployment/kubernetes]# ./deploy.sh -i 10.96.0.2 -s | kubectl apply -f -
# Verify:
[root@k8s-master-01 /opt/data/deployment/kubernetes]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-6cb6557c66-7cx62 1/1 Running 0 60s
Binding cluster-admin privileges
[root@k8s-master-01 /opt/data/deployment/kubernetes]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubernetes
clusterrolebinding.rbac.authorization.k8s.io/cluster-system-anonymous created
Testing the cluster DNS
[root@k8s-master-01 /opt/data/deployment/kubernetes]# kubectl run test -it --rm --image=busybox:1.28.3
If you don't see a command prompt, try pressing enter.
/ # nslookup kubernetes
Server: 10.96.0.2
Address 1: 10.96.0.2 kube-dns.kube-system.svc.cluster.local
Name: kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
Joining the worker nodes
# Switch docker on the nodes to the cgroupfs driver
[root@k8s-node-01 /etc/kubernetes/cfg]# cat /etc/docker/daemon.json
{
"registry-mirrors": ["https://8mh75mhz.mirror.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=cgroupfs"]
}
[root@k8s-node-02 /etc/kubernetes/cfg]# cat /etc/docker/daemon.json
{
"registry-mirrors": ["https://8mh75mhz.mirror.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=cgroupfs"]
}
# Restart docker on both nodes
[root@k8s-node-01 /etc/kubernetes/cfg]# systemctl daemon-reload
[root@k8s-node-02 /etc/kubernetes/cfg]# systemctl daemon-reload
[root@k8s-node-01 /etc/kubernetes/cfg]# systemctl restart docker
[root@k8s-node-02 /etc/kubernetes/cfg]# systemctl restart docker
# Distribute the binaries (run on master-01)
[root@k8s-master-01 /opt/data/deployment/kubernetes]# cd /opt/data/kubernetes/server/bin
[root@k8s-master-01 /opt/data/kubernetes/server/bin]# for i in n1 n2;do
scp kubelet kube-proxy $i:/usr/local/bin
done
# Distribute the certificates (run on master-01)
[root@k8s-master-01 /opt/data/kubernetes/server/bin]# cd /opt/cert/k8s
[root@k8s-master-01 /opt/cert/k8s]# for i in n1 n2; do ssh root@$i "mkdir -pv /etc/kubernetes/ssl"; scp -pr ./{ca*.pem,admin*pem,kube-proxy*pem} root@$i:/etc/kubernetes/ssl; done
# Distribute the network plugin package