k8s安装(yum安装)

Posted givenchy_yzl

tags:

篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了k8s安装(yum安装)相关的知识,希望对你有一定的参考价值。

kubeadm安装

服务器配置至少是2G2核的。如果不是则可以在集群初始化后面增加 --ignore-preflight-errors=NumCPU

1、克隆机器

192.168.1.55  m01
192.168.1.56  n1
192.168.1.57  n2

2、修改主机名称

[root@k8s1 ~]# hostnamectl set-hostname m01
[root@k8s2 ~]# hostnamectl set-hostname n1
[root@k8s3 ~]# hostnamectl set-hostname n2

3、系统优化(所有机器全做)

# 关闭selinux(临时关闭)
[root@m01 ~]# setenforce 0
# 关闭防火墙
systemctl disable --now firewalld

# 临时关闭swap分区
swapoff -a
#永久禁用
sed -i.bak '/swap/s/^/#/' /etc/fstab

修改/etc/fstab 让kubelet忽略swap
echo 'KUBELET_EXTRA_ARGS="--fail-swap-on=false"' > /etc/sysconfig/kubelet   

# 修改hosts文件
[root@m01 ~]# vim /etc/hosts
192.168.1.55  m01
192.168.1.56  n1
192.168.1.57  n2

# 做免密登录
[root@m01 ~]# ssh-keygen -t rsa
[root@m01 ~]# for i in m01 n1 n2;do  ssh-copy-id -i ~/.ssh/id_rsa.pub root@$i; done

# 配置镜像源
[root@m01 ~]# curl  -o /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-7-reg.repo
[root@m01 ~]# yum clean all
[root@m01 ~]# yum makecache

# 更新系统
[root@m01 ~]# yum update -y --exclude=kernel*

# 安装基础常用软件
[root@m01 ~]# yum install wget expect vim net-tools ntp bash-completion ipvsadm ipset jq iptables conntrack sysstat libseccomp -y

# 同步集群时间
[root@m01 ~]# ntpdate ntp1.aliyun.com
[root@m01 ~]# hwclock --systohc

# 更新系统内核(docker 对系统内核要求比较高,最好使用4.4+)
[root@m01 ~]# wget https://elrepo.org/linux/kernel/el7/x86_64/RPMS/kernel-lt-5.4.128-1.el7.elrepo.x86_64.rpm
[root@m01 ~]# wget https://elrepo.org/linux/kernel/el7/x86_64/RPMS/kernel-lt-devel-5.4.128-1.el7.elrepo.x86_64.rpm

## 安装系统内核
[root@m01 ~]# yum localinstall -y kernel-lt*
    ## 调到默认启动
[root@m01 ~]# grub2-set-default  0 && grub2-mkconfig -o /etc/grub2.cfg
    ## 查看当前默认启动的内核
[root@m01 ~]# grubby --default-kernel
    ## 重启
[root@m01 ~]# reboot

# 安装IPVS
    yum install -y conntrack-tools ipvsadm ipset conntrack libseccomp

   	## 加载IPVS模块
# Generate a boot-time script that loads every IPVS-related kernel module.
# IMPORTANT: '\$' keeps ${...} literal in the unquoted here-doc, so the
# expansion happens when the generated script RUNS, not when it is written.
# (The original used '\\$', which wrote a stray backslash plus an empty
# expansion, and an unescaped '$?' that expanded at write time.)
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
  # Only modprobe modules that exist for the running kernel.
  if /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1; then
    /sbin/modprobe \${kernel_module}
  fi
done
EOF
[root@m01 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs

# 修改内核启动参数
# Kernel parameters required by Kubernetes/Docker.
# NOTE(review): the net.bridge.* keys only exist after the br_netfilter
# module is loaded (modprobe br_netfilter); otherwise sysctl --system warns.
# Fixed invalid keys from the original ('.' vs '_' typos), replaced the
# obsolete net.ipv4.ip_conntrack_max with net.netfilter.nf_conntrack_max
# (the valid key on the 4.x/5.x kernel installed above), and dropped a
# duplicated tcp_max_syn_backlog line.
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.netfilter.nf_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF

# 立即生效
sysctl --system

4、安装docker(所有机器都要做)

# 卸载之前安装过得docker(若没有安装直接跳过此步)
[root@m01 ~]# sudo yum remove docker docker-common docker-selinux docker-engine

# 安装docker需要的依赖包
[root@m01 ~]# sudo yum install -y yum-utils device-mapper-persistent-data lvm2

# 安装dockeryum源
[root@m01 ~]# wget -O /etc/yum.repos.d/docker-ce.repo https://repo.huaweicloud.com/docker-ce/linux/centos/docker-ce.repo

# 安装docker
[root@m01 ~]# yum install docker-ce -y

# 设置开机自启动
[root@m01 ~]# systemctl enable --now docker.service

5、安装kubelet(所有机器都要装)

# 安装kubernetes yum 源
[root@m01 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# 安装kubelet
[root@m01 ~]# yum install -y kubelet kubeadm kubectl 
[root@m01 ~]# systemctl enable --now kubelet

# 加入集群
kubeadm join 192.168.1.55:6443 --token klwsff.4quj2w2qas99743z --discovery-token-ca-cert-hash sha256:abc0eb615809e7a19e32e72fc9f8f441f27da61547a1bbf33b647a82e3c7bf6a 

注意:此命令必须在master节点完成初始化集群后,且运行kubeadm token create --print-join-command 此条命令后,将命令结果在node节点上执行即可。以上命令是将工作节点加入集群,默认token的有效期为24小时,当过期之后,该token就不可用了。

6、初始化master节点(仅在master节点上执行)

[root@k8s-m-01 ~]# kubeadm init \
--image-repository=registry.cn-hangzhou.aliyuncs.com/k8sos \
--kubernetes-version=v1.21.2 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16

# 若配置不够可以在以上命令后面加上--ignore-preflight-errors=NumCPU

7、初始化后续(仅在master节点上执行)

# 建立用户集群权限
[root@k8s-m-01 ~]# mkdir -p $HOME/.kube
[root@k8s-m-01 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-m-01 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

# 如果是root用户,则可以使用:export KUBECONFIG=/etc/kubernetes/admin.conf

# 安装集群网络插件(flannel.yaml见附件)
[root@k8s-m-01 ~]# kubectl apply -f flannel.yaml

# 将工作节点加入集群
[root@k8s-m-01 ~]# kubeadm token create    --print-join-command 
kubeadm join 192.168.15.31:6443 --token s6svmh.lw88lchyl6m24tts     --discovery-token-ca-cert-hash sha256:4d7e3e37e73176a97322e26fe501d2c27830a7bf3550df56f3a55b68395b507b 
## 注:将上方生成的token复制到node节点上执行。

[root@k8s-m-01 ~]# kubectl get nodes
NAME       STATUS   ROLES                  AGE   VERSION
k8s-m-01   Ready    control-plane,master   13m   v1.20.5
k8s-n-01   Ready    <none>                 35s   v1.20.5
k8s-n-02   Ready    <none>                 39s   v1.20.5


# 检查集群状态
## 第一种方式
[root@k8s-m-01 ~]# kubectl get nodes
NAME       STATUS   ROLES                  AGE     VERSION
k8s-m-01   Ready    control-plane,master   5m56s   v1.20.5

# 第二种方式
[root@k8s-m-01 ~]# kubectl get pods -n kube-system
NAME                               READY   STATUS    RESTARTS   AGE
coredns-f68b4c98f-5t7wm            1/1     Running   0          5m54s
coredns-f68b4c98f-5xqjs            1/1     Running   0          5m54s
etcd-k8s-m-01                      1/1     Running   0          6m3s
kube-apiserver-k8s-m-01            1/1     Running   0          6m3s
kube-controller-manager-k8s-m-01   1/1     Running   0          6m3s
kube-flannel-ds-7bcwl              1/1     Running   0          104s
kube-proxy-ntpjx                   1/1     Running   0          5m54s
kube-scheduler-k8s-m-01            1/1     Running   0          6m3s

# 第三种方式:直接验证集群DNS
[root@k8s-m-01 ~]# kubectl run test -it --rm --image=busybox:1.28.3
If you don't see a command prompt, try pressing enter.
/ # nslookup kubernetes
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local

报错及解决方案

问题一:
出现**The connection to the server localhost:8080 was refused - did you specify the right host or port?**问题
问题分析:
环境变量
原因:kubernetes master没有与本机绑定,集群初始化的时候没有绑定,此时设置在本机的环境变量即可解决问题

解决方案如下:

步骤一:加入环境变量
具体根据情况,此处记录linux设置该环境变量
方式一:编辑文件设置
[root@m01 ~]#  vim /etc/profile
在底部增加新的环境变量 export KUBECONFIG=/etc/kubernetes/admin.conf

方式二:直接追加文件内容
[root@m01 ~]# echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /etc/profile

步骤二:重载配置文件
[root@m01 ~]# source /etc/profile

问题二:
部署完master节点以后,执行kubectl get cs命令来检测组件的运行状态时,报如下错误:
在这里插入图片描述
原因分析:
出现这种情况,是/etc/kubernetes/manifests/下的kube-controller-manager.yaml和kube-scheduler.yaml设置的默认端口是0导致的,解决方式是注释掉对应的port即可,操作如下

步骤一:
kube-controller-manager.yaml文件修改: - --port=0如下图:
在这里插入图片描述
步骤二:
kube-scheduler.yaml文件修改: - --port=0如下图:
在这里插入图片描述
步骤三:然后在master节点上重启kubelet,systemctl restart kubelet.service,然后重新查看就正常了

附件:

---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: registry.cn-hangzhou.aliyuncs.com/alvinos/flanned:v0.13.1-rc1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: registry.cn-hangzhou.aliyuncs.com/alvinos/flanned:v0.13.1-rc1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg

以上是关于k8s安装(yum安装)的主要内容,如果未能解决你的问题,请参考以下文章

使用sealos4安装k8s集群

k8s安装部署

用kubeadm安装k8s集群

k8s1.4.3安装实践记录-etcddockerflannel安装配置

K8S 图形化管理,Easy 到爆

ARM架构服务器(飞腾平台)centos7.5上yum安装k8s教程