Adding a New Node to KubeSphere


Log in to the KS-Master machine, edit /etc/hosts, and add the new node's internal IP and hostname to it.
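For example (the IP address and hostname below are placeholders for your new node):

echo "192.168.0.100  new-node01" >> /etc/hosts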

Enter the root password of the new node when prompted.

If the new node has a data disk, mount that disk at /var/lib/docker as well.
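A minimal sketch, assuming the data disk shows up as /dev/vdb (check with lsblk first; formatting erases any existing data on the disk):

mkfs.xfs /dev/vdb
mkdir -p /var/lib/docker
mount /dev/vdb /var/lib/docker
echo "/dev/vdb /var/lib/docker xfs defaults 0 0" >> /etc/fstab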

Install the dependency packages required by KubeSphere and Kubernetes on the new node.
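The exact package list depends on the installer version; on CentOS a typical set (an assumption, not taken from the original steps) is:

yum install -y socat conntrack ebtables ipset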

Run the add-node command on the master node.
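With a KubeKey-managed cluster, for example, the command typically looks like this (an assumption; config-sample.yaml stands for your cluster configuration file with the new node appended to its hosts and roleGroups sections):

./kk add nodes -f config-sample.yaml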

Once the preflight check passes, type yes to proceed with adding the node.

After the node is added successfully, the installer prints a completion message.

Log in to the KubeSphere console and confirm that the new ECS instance appears under Cluster Nodes.
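You can also confirm from the command line that the new node has joined the cluster and is Ready:

kubectl get nodes -owide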

Deploying KubeSphere on Kubernetes


I. Check the local Kubernetes environment

[root@k8s-master ~]# kubectl get nodes -owide
NAME         STATUS   ROLES                  AGE   VERSION   INTERNAL-IP     EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION          CONTAINER-RUNTIME
k8s-master   Ready    control-plane,master   16h   v1.23.1   192.168.3.201   <none>        CentOS Linux 7 (Core)   3.10.0-957.el7.x86_64   containerd://1.6.6
k8s-node01   Ready    <none>                 16h   v1.23.1   192.168.3.202   <none>        CentOS Linux 7 (Core)   3.10.0-957.el7.x86_64   containerd://1.6.6
k8s-node02   Ready    <none>                 16h   v1.23.1   192.168.3.203   <none>        CentOS Linux 7 (Core)   3.10.0-957.el7.x86_64   containerd://1.6.6

II. Install NFS

1. Install the NFS packages (on the master and on the worker nodes, since the workers mount the share later)

yum install -y nfs-utils

2. Create the shared directory


mkdir -p /nfs/data

3. Configure the export

echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports

4. Enable and start the related services

systemctl enable rpcbind
systemctl enable nfs-server
systemctl start rpcbind
systemctl start nfs-server

5. Reload the export configuration

exportfs -r

6. Verify the exports

[root@k8s-master nfs]# exportfs
/nfs/data     	<world>

7. Mount the NFS share on the two worker nodes

① Check the NFS export from a worker node

[root@k8s-node01 ~]# showmount -e 192.168.3.201
Export list for 192.168.3.201:
/nfs/data *


② Create the mount point

mkdir -p /nfs/data

③ Mount the NFS share

mount 192.168.3.201:/nfs/data /nfs/data

④ Check the mount

[root@k8s-node01 ~]# df -hT |grep nfs
192.168.3.201:/nfs/data nfs4       18G   12G  6.3G  65% /nfs/data
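The mount above does not persist across reboots; to make it permanent, append an fstab entry on each worker (using this cluster's NFS server address) and re-mount:

echo "192.168.3.201:/nfs/data /nfs/data nfs defaults 0 0" >> /etc/fstab
mount -a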

8. Troubleshooting showmount errors

If showmount -e fails on a worker with an RPC error (for example "clnt_create: RPC: Program not registered"), restarting rpcbind and NFS on the server in the order below usually clears it:

service rpcbind stop
service nfs stop
service rpcbind start
service nfs start

III. Configure the default StorageClass

1. Create the sc.yaml file

[root@k8s-master kubersphere]# cat sc.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
 name: nfs-storage
 annotations:
   storageclass.kubernetes.io/is-default-class: "true"
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
 archiveOnDelete: "true"  ## whether to archive (keep a copy of) the PV's data when the PV is deleted

---
apiVersion: apps/v1
kind: Deployment
metadata:
 name: nfs-client-provisioner
 labels:
   app: nfs-client-provisioner
 # replace with namespace where provisioner is deployed
 namespace: default
spec:
 replicas: 1
 strategy:
   type: Recreate
 selector:
   matchLabels:
     app: nfs-client-provisioner
 template:
   metadata:
     labels:
       app: nfs-client-provisioner
   spec:
     serviceAccountName: nfs-client-provisioner
     containers:
       - name: nfs-client-provisioner
         image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/nfs-subdir-external-provisioner:v4.0.2
         # resources:
         #    limits:
         #      cpu: 10m
         #    requests:
         #      cpu: 10m
         volumeMounts:
           - name: nfs-client-root
             mountPath: /persistentvolumes
         env:
           - name: PROVISIONER_NAME
             value: k8s-sigs.io/nfs-subdir-external-provisioner
           - name: NFS_SERVER
              value: 192.168.3.201 ## set this to your own NFS server address
           - name: NFS_PATH  
              value: /nfs/data  ## the directory exported by the NFS server
     volumes:
       - name: nfs-client-root
         nfs:
           server: 192.168.3.201
           path: /nfs/data
---
apiVersion: v1
kind: ServiceAccount
metadata:
 name: nfs-client-provisioner
 # replace with namespace where provisioner is deployed
 namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
 name: nfs-client-provisioner-runner
rules:
 - apiGroups: [""]
   resources: ["nodes"]
   verbs: ["get", "list", "watch"]
 - apiGroups: [""]
   resources: ["persistentvolumes"]
   verbs: ["get", "list", "watch", "create", "delete"]
 - apiGroups: [""]
   resources: ["persistentvolumeclaims"]
   verbs: ["get", "list", "watch", "update"]
 - apiGroups: ["storage.k8s.io"]
   resources: ["storageclasses"]
   verbs: ["get", "list", "watch"]
 - apiGroups: [""]
   resources: ["events"]
   verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
 name: run-nfs-client-provisioner
subjects:
 - kind: ServiceAccount
   name: nfs-client-provisioner
   # replace with namespace where provisioner is deployed
   namespace: default
roleRef:
 kind: ClusterRole
 name: nfs-client-provisioner-runner
 apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
 name: leader-locking-nfs-client-provisioner
 # replace with namespace where provisioner is deployed
 namespace: default
rules:
 - apiGroups: [""]
   resources: ["endpoints"]
   verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
 name: leader-locking-nfs-client-provisioner
 # replace with namespace where provisioner is deployed
 namespace: default
subjects:
 - kind: ServiceAccount
   name: nfs-client-provisioner
   # replace with namespace where provisioner is deployed
   namespace: default
roleRef:
 kind: Role
 name: leader-locking-nfs-client-provisioner
 apiGroup: rbac.authorization.k8s.io

2. Apply sc.yaml

[root@k8s-master kubersphere]# kubectl apply -f sc.yaml
storageclass.storage.k8s.io/nfs-storage created
deployment.apps/nfs-client-provisioner created
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created

3. Check the StorageClass

[root@k8s-master kubersphere]# kubectl get sc
NAME                    PROVISIONER                                   RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-storage (default)   k8s-sigs.io/nfs-subdir-external-provisioner   Delete          Immediate           false                  43s

4. Test dynamic provisioning with a PVC

① Write pv.yaml (a PVC manifest)

[root@k8s-master kubersphere]# cat pv.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
 name: nginx-pvc
spec:
 accessModes:
   - ReadWriteMany
 resources:
   requests:
     storage: 200Mi

② Apply the PVC

[root@k8s-master kubersphere]# kubectl apply -f pv.yaml 
persistentvolumeclaim/nginx-pvc created

③ Check the PVC and PV

[root@k8s-master kubersphere]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM               STORAGECLASS   REASON   AGE
pvc-92610d7d-9d70-4a57-bddb-7fffab9a5ee4   200Mi      RWX            Delete           Bound    default/nginx-pvc   nfs-storage             73s
[root@k8s-master kubersphere]# kubectl get pvc
NAME        STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
nginx-pvc   Bound    pvc-92610d7d-9d70-4a57-bddb-7fffab9a5ee4   200Mi      RWX            nfs-storage    78s
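As an extra check that the provisioned volume is actually usable, you can run a throwaway pod that mounts the claim; this is a sketch (the pod name pvc-test and the nginx image are illustrative, not part of the original setup):

kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: pvc-test
spec:
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: data
      mountPath: /usr/share/nginx/html
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: nginx-pvc
EOF

Files written under /usr/share/nginx/html inside the pod should appear under the corresponding subdirectory of /nfs/data on the NFS server; clean up with kubectl delete pod pvc-test when done.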

IV. Install the metrics-server component

1. Create the metrics.yaml file

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        # skip TLS verification of the kubelet serving certificates
        - --kubelet-insecure-tls
        image: jjzz/metrics-server:v0.4.1
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 4443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          periodSeconds: 10
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100



2. Apply the metrics.yaml file

[root@k8s-master kubersphere]# kubectl apply -f metrics.yaml 
serviceaccount/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
service/metrics-server created
deployment.apps/metrics-server created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created

3. Check the metrics-server pod

[root@k8s-master kubersphere]# kubectl get pod -A |grep metric
kube-system   metrics-server-6988f7c646-rt6bz           1/1     Running   0              86s


4. Verify that metrics are collected

[root@k8s-master kubersphere]# kubectl top nodes
NAME         CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
k8s-master   221m         5%     1360Mi          17%       
k8s-node01   72m          1%     767Mi           9%        
k8s-node02   66m          1%     778Mi           9%    
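If kubectl top returns numbers as above, the metrics pipeline is working end to end. You can also confirm that the aggregated API registered by the manifest is available:

kubectl get apiservice v1beta1.metrics.k8s.io

The AVAILABLE column should show True.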

V. Install KubeSphere

1. Download the KubeSphere installer manifests


wget https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml

wget https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml

2. Run the installation (minimal install)

[root@k8s-master kubersphere]# kubectl apply -f kubesphere-installer.yaml 
customresourcedefinition.apiextensions.k8s.io/clusterconfigurations.installer.kubesphere.io created
namespace/kubesphere-system created
serviceaccount/ks-installer created
clusterrole.rbac.authorization.k8s.io/ks-installer created
clusterrolebinding.rbac.authorization.k8s.io/ks-installer created
deployment.apps/ks-installer created
[root@k8s-master kubersphere]# kubectl apply -f cluster-configuration.yaml 
clusterconfiguration.installer.kubesphere.io/ks-installer created

3. Check the installer pod

[root@k8s-master kubersphere]# kubectl get pods -A |grep ks
kubesphere-system   ks-installer-5cd4648bcb-vxrqf             1/1     Running   0             37s


4. Watch the installation progress

kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
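Once the installer log ends with the "Welcome to KubeSphere" banner, the web console is ready. With the default configuration it is exposed as a NodePort service (30880) and the initial account is admin / P@88w0rd; you can confirm the actual port with:

kubectl get svc ks-console -n kubesphere-system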
