#yyds干货盘点#--kubernetes集群搭建
Posted 旺仔呀旺仔
tags:
篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了#yyds干货盘点#--kubernetes集群搭建相关的知识,希望对你有一定的参考价值。
一、环境说明
系统版本 | 主机地址 | 主机名 |
redhat 7.2 | 10.131.100.92 | k8s-master01 |
redhat 7.2 | 10.131.100.93 | k8s-master02 |
redhat 7.2 | 10.131.100.94 | k8s-master03 |
redhat 7.2 | 10.131.100.95 | k8s-node01 |
二、系统初始化
2.1 配置主机名添加hosts解析
# Append name resolution for every cluster node to /etc/hosts.
tee -a /etc/hosts >/dev/null <<'HOSTS'
10.131.100.92 k8s-master01
10.131.100.93 k8s-master02
10.131.100.94 k8s-master03
10.131.100.95 k8s-node01
HOSTS
2.2 关闭防火墙、selinux
# Stop firewalld now and keep it off across reboots, then switch SELinux
# to permissive immediately and disable it permanently in the config file.
systemctl disable --now firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
2.3 配置内核参数,将桥接的IPv4流量传递到iptables的链
# Kernel parameters required by Kubernetes: pass bridged IPv4/IPv6 traffic
# through the iptables chains, allow binding non-local IPs (needed for the
# keepalived VIP), enable IP forwarding, and minimize swapping.
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
# Reload every sysctl configuration file so the settings apply immediately.
sysctl --system
2.4 配置国内yum源
# Back up the stock repo files and switch to a domestic (Tencent) mirror.
yum install -y wget
# -p makes the step re-runnable: plain mkdir fails if bak already exists
# and the && then skips the mv entirely.
mkdir -p /etc/yum.repos.d/bak && mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.cloud.tencent.com/repo/centos7_base.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.cloud.tencent.com/repo/epel-7.repo
2.5 配置国内kubernetes源
# Add the Aliyun-hosted Kubernetes yum repository with GPG verification
# of both packages and repo metadata.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
2.6 配置docker源
# Add the Aliyun Docker CE repository, then rebuild the yum metadata cache.
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
yum clean all && yum makecache
注:如果是redhat系统,将以上所有yum源里的$releasever替换成7,然后再执行 yum clean all && yum makecache
2.7 三台master节点做ssh互信
[root@k8s-master03 ~]# cat fenfa.sh
#!/bin/bash
# Distribute this host's SSH public key to all three masters so they trust
# each other. NOTE(review): the password is hardcoded for sshpass — fine for
# a lab walk-through, but should come from the environment in real use.
# -f: don't abort when no previous key exists; also drop the stale .pub.
rm -f /root/.ssh/id_dsa /root/.ssh/id_dsa.pub
ssh-keygen -t dsa -f /root/.ssh/id_dsa -N ""
for ip in 92 93 94
do
    # ssh options (-o) must precede the destination, or ssh-copy-id
    # treats them as extra arguments and ignores them.
    sshpass -p 123456 ssh-copy-id -i /root/.ssh/id_dsa.pub -o StrictHostKeyChecking=no root@10.131.100.$ip
done
三、安装docker并配置
3.1 升级内核
执行uname -a #查看内核版本,低于3.10.0-514版本需要升级内核
yum install -y kernel  # upgrade the kernel directly via yum (installs the newest packaged version)
3.2 安装docker18版本和其他相关组件
yum -y install docker-ce-18.06.3*  # pin Docker CE to the 18.06.3 release series
3.3 Docker 更换 存储方式 Docker切换OverLay(2)——提高性能,加快速度
官方地址:https://docs.docker.com/storage/storagedriver/overlayfs-driver/#how-the-overlay2-driver-works
- 将存储改成xfs类型
xfs_info /u03 # if ftype=0 the filesystem lacks the ftype support overlay2 requires
umount /u03
mkfs.xfs -f /dev/mapper/rhel-u03 # if "device or resource busy", run: fuser -km /u03
mount /dev/mapper/rhel-u03 /u03
xfs_info /u03/ |grep ftype # ftype=1 means the filesystem is ready for overlay2
- 创建/etc/docker/daemon.json文件,并配置
# Write a VALID daemon.json. The original snippet was missing the surrounding
# braces, had a malformed "log-opts" value, and carried inline '#' comments —
# none of which is legal JSON, so dockerd would refuse to start.
#   insecure-registries : private registry reached over plain HTTP
#   registry-mirrors    : domestic mirror to speed up image pulls
#   graph               : Docker data root (images/containers) — not a log path
cat >/etc/docker/daemon.json<<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "insecure-registries": ["192.168.220.84"],
  "registry-mirrors": ["https://registry.docker-cn.com"],
  "log-driver": "json-file",
  "log-opts": {"max-size": "200m", "max-file": "5"},
  "graph": "/u03/docker",
  "storage-driver": "overlay2",
  "storage-opts": ["overlay2.override_kernel_check=true"]
}
EOF
3) 重启docker并加入到开机自启动
# Restart Docker, enable it at boot, then verify the service is running.
systemctl restart docker && systemctl enable docker
# Original read "docker status docker" — not a docker subcommand; the
# systemd status check is what was intended.
systemctl status docker
四、安装etcd集群
4.1 下载二进制包并安装并安装etcd(三个master节点都执行)
etcd下载地址:https://github.com/etcd-io/etcd/
# Unpack the etcd release and (re)link the binaries into the PATH.
cd /u01/etcd || exit 1
tar zxf etcd-v3.3.10-linux-amd64.tar.gz -C /u01/
# ln -sf replaces any stale links in one step (no separate rm needed).
ln -sf /u01/etcd-v3.3.10-linux-amd64/etcd /usr/bin/etcd
ln -sf /u01/etcd-v3.3.10-linux-amd64/etcdctl /usr/bin/etcdctl
# Directory for the etcd TLS material generated in the next section.
mkdir -p /etc/etcd/etcdssl
4.2 生成etcd配置文件和系统启动文件(三个master节点都执行)
[root@k8s-master01 ~]# cat etcd.sh
#!/bin/bash
# Generate /etc/etcd/etcd.conf and the etcd systemd unit for this node.
# The first non-loopback, non-172.1* IPv4 address is used as the listen
# address and to select the member name.

# NOTE: the original used `awk print $2` (an awk syntax error), so local_ip
# was always empty; the awk program must be quoted.
local_ip=$(/sbin/ifconfig | grep inet | grep -v 127.0.0.1 | grep -v "172.1*" | grep -v inet6 | cut -d: -f2 | awk '{print $2}' | head -1)
etcd01="10.131.100.92"
etcd02="10.131.100.93"
etcd03="10.131.100.94"

# etcd refuses to start when --name does not match a key inside
# --initial-cluster, so map the local IP to its member name instead of
# writing the raw IP (the original set ETCD_NAME="$local_ip", which can
# never match etcd01/etcd02/etcd03 below).
case "$local_ip" in
    "$etcd01") etcd_name="etcd01" ;;
    "$etcd02") etcd_name="etcd02" ;;
    "$etcd03") etcd_name="etcd03" ;;
    *) echo "local ip '$local_ip' is not one of the etcd members" >&2; exit 1 ;;
esac

cat > /etc/etcd/etcd.conf <<EOF
ETCD_NAME="$etcd_name"
ETCD_DATA_DIR="/u03/etcddata"
ETCD_LISTEN_PEER_URLS="https://$local_ip:2380"
ETCD_LISTEN_CLIENT_URLS="https://$local_ip:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://$local_ip:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://$local_ip:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://$etcd01:2380,etcd02=https://$etcd02:2380,etcd03=https://$etcd03:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
# Load the values just written so they can be expanded into the unit file.
source /etc/etcd/etcd.conf
if [ -e /etc/etcd/etcd.conf ]; then
cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/etc/etcd/etcd.conf
ExecStart=/usr/bin/etcd \\
--name=$ETCD_NAME \\
--data-dir=$ETCD_DATA_DIR \\
--listen-peer-urls=$ETCD_LISTEN_PEER_URLS \\
--listen-client-urls=$ETCD_LISTEN_CLIENT_URLS,http://127.0.0.1:2379 \\
--advertise-client-urls=$ETCD_ADVERTISE_CLIENT_URLS \\
--initial-advertise-peer-urls=$ETCD_INITIAL_ADVERTISE_PEER_URLS \\
--initial-cluster=$ETCD_INITIAL_CLUSTER \\
--initial-cluster-token=$ETCD_INITIAL_CLUSTER_TOKEN \\
--initial-cluster-state=new \\
--cert-file=/etc/etcd/etcdssl/etcd.pem \\
--key-file=/etc/etcd/etcdssl/etcd-key.pem \\
--peer-cert-file=/etc/etcd/etcdssl/etcd.pem \\
--peer-key-file=/etc/etcd/etcdssl/etcd-key.pem \\
--trusted-ca-file=/etc/etcd/etcdssl/ca.pem \\
--peer-trusted-ca-file=/etc/etcd/etcdssl/ca.pem
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
else
# Original message said "Please check cert!" but the test above is for the
# config file, not the certificates.
echo "Please check /etc/etcd/etcd.conf!"
fi
systemctl daemon-reload  # pick up the freshly written etcd.service unit
sh etcd.sh               # generate /etc/etcd/etcd.conf and the unit on this node
4.3 配置etcd证书文件生成证书和私钥(在master01上操作)
1)配置证书生成文件
cd /etc/etcd/etcdssl
# cfssl CA signing policy: the "etcd" profile signs certificates usable for
# both server and client auth, valid ~100 years. Braces restored — the
# original snippet had lost all of them and was not valid JSON, so cfssl
# would reject it.
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "etcd": {
        "expiry": "876000h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
# CA certificate signing request (self-signed root for the etcd cluster).
# Braces restored — the original snippet was not valid JSON.
cat > ca-csr.json <<EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Shanghai",
      "ST": "Shanghai",
      "O": "etcd",
      "OU": "System"
    }
  ]
}
EOF
# etcd server/peer certificate request. "hosts" must list every address the
# certificate will be served from, i.e. all three masters. Braces restored;
# the 10.191.100.93/94 entries were typos for the 10.131.100.0/24 machines
# listed in the environment table.
cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "10.131.100.92",
    "10.131.100.93",
    "10.131.100.94"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Shanghai",
      "ST": "Shanghai",
      "O": "etcd",
      "OU": "System"
    }
  ]
}
EOF
4.4 下载cfssl 证书生成工具并生成证书然后拷贝至另外两个节点
# Fetch the cfssl toolchain and generate the CA and etcd certificates.
cd /u01/ || exit 1
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
# \cp bypasses any "cp -i" alias. The original "\\cp" would try to run a
# command literally named "\cp" and fail. All three tools are now installed
# to the same directory for consistency (certinfo previously went to /usr/bin).
\cp -rp cfssl_linux-amd64 /usr/local/bin/cfssl
\cp -rp cfssljson_linux-amd64 /usr/local/bin/cfssljson
\cp -rp cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
cd /etc/etcd/etcdssl || exit 1
## Generate the CA certificate and private key (ca.pem / ca-key.pem).
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
### Generate the etcd certificate and private key, signed by the CA above.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=etcd etcd-csr.json | cfssljson -bare etcd
[root@k8s-master01 etcdssl]# ll
总用量 36
-rw-r--r-- 1 root root 290 12月 3 09:15 ca-config.json
-rw-r--r-- 1 root root 997 12月 3 09:23 ca.csr
-rw-r--r-- 1 root root 245 12月 3 09:15 ca-csr.json
-rw------- 1 root root 1679 12月 3 09:23 ca-key.pem
-rw-r--r-- 1 root root 1350 12月 3 09:23 ca.pem
-rw-r--r-- 1 root root 1058 12月 3 09:23 etcd.csr
-rw-r--r-- 1 root root 345 12月 3 09:23 etcd-csr.json
-rw------- 1 root root 1679 12月 3 09:23 etcd-key.pem
-rw-r--r-- 1 root root 1428 12月 3 09:23 etcd.pem
### Copy the whole certificate directory to the other two masters.
scp -r /etc/etcd/etcdssl 10.131.100.93:/etc/etcd/
scp -r /etc/etcd/etcdssl 10.131.100.94:/etc/etcd/
4.5 加入开机启动并启动所有节点的etcd并验证集群
systemctl enable etcd && systemctl restart etcd  # run on all three masters: enable at boot, then (re)start
[root@k8s-master01 etcdssl]# etcdctl --ca-file=/etc/etcd/etcdssl/ca.pem --cert-file=/etc/etcd/etcdssl/etcd.pem --key-file=/etc/etcd/etcdssl/etcd-key.pem cluster-health
member 3b08f83c3b351284 is healthy: got healthy result from https://10.131.100.93:2379
member b276b92973c348d8 is healthy: got healthy result from https://10.131.100.92:2379
member ee911b7a6ae0c43a is healthy: got healthy result from https://10.131.100.94:2379
cluster is healthy
提示:如果有报错可以通过journalctl -xefu etcd 或 tailf /var/log/messages进行排查
五、Haproxy_docker部署(做负载均衡)
5.1 配置ipvs 做内部负载均衡(三台master都执行)
# Persist the IPVS kernel modules required by kube-proxy in ipvs mode;
# scripts under /etc/sysconfig/modules/ are executed at boot on RHEL/CentOS.
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# Load the modules now and confirm they are present in lsmod output.
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
5.2 生成ha配置文件(三台master都执行)
# HAProxy configuration for the load-balancer container:
#   - stats page on :1080 at /haproxy-status (admin/admin)
#   - TCP frontend on :8443 round-robining to the three apiservers on :6443
# NOTE(review): in the unquoted heredoc, "\\ " is written to the file as
# "\ " (an escaped space), which is what haproxy expects in "stats realm".
cat >/etc/haproxy/haproxy.cfg<<EOF
global
log 127.0.0.1 local0 err
maxconn 50000
uid 99
gid 99
#daemon
nbproc 1
pidfile haproxy.pid
defaults
mode http
log 127.0.0.1 local0 err
maxconn 50000
retries 3
timeout connect 5s
timeout client 30s
timeout server 30s
timeout check 2s
listen admin_stats
mode http
bind 0.0.0.0:1080
log 127.0.0.1 local0 err
stats refresh 30s
stats uri /haproxy-status
stats realm Haproxy\\ Statistics
stats auth admin:admin
stats hide-version
stats admin if TRUE
frontend k8s-https
bind 0.0.0.0:8443
mode tcp
#maxconn 50000
default_backend k8s-https
backend k8s-https
mode tcp
balance roundrobin
server k8s220210 10.131.100.92:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
server k8s220211 10.131.100.93:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
server gaia-22048 10.131.100.94:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
EOF
5.3 启动ha容器(三台master都执行)
# Run HAProxy from the private registry, mounting the config read-only.
# A trailing "\\" in shell is an escaped literal backslash, NOT a line
# continuation — the original was split into separate broken commands.
# Single backslashes restore one command.
docker run -d --name my-haproxy \
  -v /etc/haproxy:/usr/local/etc/haproxy:ro \
  -p 8443:8443 \
  -p 1080:1080 \
  --restart always \
  192.168.220.84/kubernetes/haproxy:1.7.8-alpine
提示:部署完成后可通过 http://10.131.100.92:1080/haproxy-status admin admin 进行验证
六、部署keepalived_docker(做高可用)
6.1 直接启动keepalived容器(三台master都执行)
# Run keepalived with host networking so it can manage the VIP directly.
# Two fixes: "\\" is a literal backslash, not a continuation; and inline
# "#" comments after a continuation terminate the command, so the
# annotations are moved up here:
#   KEEPALIVED_VIRTUAL_IPS   - the VIP address
#   KEEPALIVED_UNICAST_PEERS - the three master addresses
docker run --net=host --cap-add=NET_ADMIN \
  -e KEEPALIVED_INTERFACE=eth0 \
  -e KEEPALIVED_VIRTUAL_IPS="#PYTHON2BASH:[10.0.0.253]" \
  -e KEEPALIVED_CHECK_PORT=8443 \
  -e KEEPALIVED_UNICAST_PEERS="#PYTHON2BASH:[10.131.100.92,10.131.100.93,10.131.100.94]" \
  -e KEEPALIVED_PASSWORD=admin \
  --name k8s-keepalived \
  --restart always \
  -d 192.168.220.84/kubernetes/osixia/keepalived:1.4.4
七、初始化集群
1) 安装其他相关组件
yum install kubelet-1.14.3-0 kubeadm-1.14.3-0 kubectl-1.14.3-0 kubernetes-cni-0.7.5  # pin every component to the 1.14.3 toolchain
提示:kubectl为apiserver的客户端,node节点可不用安装
2) 编辑初始化配置文件
[root@k8s-master01 k8s]# cat config.yaml
# NOTE(review): YAML indentation was lost when this post was extracted —
# nested keys (e.g. SupportIPVSProxyMode under featureGates, caFile under
# etcd.external) must be re-indented before feeding this file to kubeadm.
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
SupportIPVSProxyMode: true
mode: ipvs
---
apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: n95wls.h1ifw0ln1mzlmfhu
ttl: "0"
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 10.131.100.92 # address this node's apiserver advertises
bindPort: 6443
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: k8s-master01 # node name shown by "kubectl get nodes"
taints:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "10.131.100.253:8443" # keepalived VIP — NOTE(review): elsewhere the VIP is given as 10.0.0.253; confirm which is correct
controllerManager:
dns:
type: CoreDNS
etcd:
external:
endpoints:
- https://10.131.100.92:2379
- https://10.131.100.93:2379
- https://10.131.100.94:2379
caFile: /etc/kubernetes/pki/etcd/ca.crt
certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt
keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key
# local:
# dataDir: /u03/etcd_docker
imageRepository: 192.168.220.84/kubernetes # private Harbor registry address
kind: ClusterConfiguration
kubernetesVersion: v1.14.3
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16" # pod network CIDR
serviceSubnet: 10.96.0.0/12 # service network CIDR
scheduler:
- 将etcd的证书文件拷贝到对应的目录下
# Place the externally generated etcd certs where the kubeadm config
# (etcd.external.caFile/certFile/keyFile) expects them. The target directory
# does not exist before the first "kubeadm init", so create it first.
mkdir -p /etc/kubernetes/pki/etcd
cd /etc/etcd/etcdssl || exit 1
# \cp bypasses any "cp -i" alias; the original "\\cp" is not a valid command.
\cp -r ca.pem /etc/kubernetes/pki/etcd/ca.crt
\cp -r etcd.pem /etc/kubernetes/pki/apiserver-etcd-client.crt
\cp -r etcd-key.pem /etc/kubernetes/pki/apiserver-etcd-client.key
- 初始化集群
kubeadm init --config=/u01/k8s/config.yaml --experimental-upload-certs | tee /u01/kubeadm-init.log  # keep the log: it contains the join commands used below
提示:如果初始化不成功可用kubeadm reset 进行还原方便重新初始化
- 检查apiserver和etcd证书有效时间是否为100年
cd /etc/kubernetes/pki/
openssl x509 -in apiserver.crt -noout -text |grep Not
Not Before: Dec 11 15:06:23 2019 GMT
Not After : Nov 17 15:06:24 2119 GMT
openssl x509 -in apiserver-etcd-client.crt -noout -text |grep Not
Not Before: Dec 11 14:33:00 2019 GMT
Not After : Nov 17 14:33:00 2119 GMT
- 配置kubelet访问apiservers
# Point kubectl at the admin kubeconfig for this and all future shells.
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile
- 安装网络插件
kubectl apply -f calico.yaml
- 另外两台master节点加入集群(通过下边的kubeadm join 在另外两个master上运行,初始化成master节点)
[root@k8s-master01 ~]# grep -A 2 "kubeadm join" /u01/kubeadm-init.log |head -3
kubeadm join 10.0.0.253:8443 --token n95wls.h1ifw0ln1mzlmfhu \
    --discovery-token-ca-cert-hash sha256:197b877bf7b05df875ee49e324d77ebdfac6977026cc6d58376af94112b450a6 \
    --experimental-control-plane --certificate-key 258f15e292378bf6ac1d8a2a1de7c81d2a698e802624e3fac51b711cdfcc0f13
- 将node节点加入集群(通过一下kubeadm join 在node节点上执行,初始化成集群node节点)
[root@k8s-master01 ~]# grep -A 2 "kubeadm join" /u01/kubeadm-init.log | tail -2
kubeadm join 10.0.0.253:8443 --token n95wls.h1ifw0ln1mzlmfhu \
    --discovery-token-ca-cert-hash sha256:197b877bf7b05df875ee49e324d77ebdfac6977026cc6d58376af94112b450a6
- 验证集群状态(所有节点状态是ready表示集群安装成功)
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready master 158m v1.14.3
k8s-master02 Ready master 121m v1.14.3
k8s-master03 Ready master 117m v1.14.3
k8s-node01 Ready <none> 113m v1.14.3
八、 遇到的问题
8.1 忘记加入机器的证书
# Regenerate join credentials when the originals have expired.
kubeadm token create --print-join-command          # prints a fresh worker "kubeadm join ..." line
kubeadm init phase upload-certs --upload-certs     # re-uploads control-plane certs and prints a new certificate key
# Splice the two outputs together to join a new MASTER. Comments must not
# sit between continuation lines — in the original they terminated the
# command early, silently dropping --control-plane.
kubeadm join apiserver.com:6443 --token n3oixu.n0tz4q9k0p3qgbpf \
  --discovery-token-ca-cert-hash sha256:a412994b2bb087e9970ebe7d83c45ef81b6b0e30c72e2a8a94e174bdd4b23b82 \
  --control-plane --certificate-key a288aff17ed60a2541febb74f3cdc662f0f64922273cda2ee127f42b48ec912c
以上是关于#yyds干货盘点#--kubernetes集群搭建的主要内容,如果未能解决你的问题,请参考以下文章
#yyds干货盘点# 如何保障你的 Kubernetes 集群资源不会被打爆(18)
#yyds干货盘点# 怎样对 Kubernetes 集群进行灾备和恢复?(22)
#yyds干货盘点# Kubernetes 是如何搞定“不可变基础设施”的?(04)