k8s 1.10 cluster setup walkthrough

Posted by blog-lhong


##### k8s 1.10 on CentOS 7


###### Base configuration (all nodes)
#####################################################
#####################################################
########### Add /etc/hosts entries

hostnamectl --static set-hostname node$(ip addr |grep global |grep "$(route |grep default |awk '{print $NF}')" |head -n1 |awk '{print $2}' |cut -d / -f1 |cut -d . -f4)


for ip in 192.168.3.{223,224,225} ;do node_str="node$(echo ${ip}|cut -d '.' -f4)";[ $(grep -c "${node_str}" /etc/hosts) -eq 0 ] && echo "${ip} ${node_str}" >>/etc/hosts;done

[ $(grep -c  k8smaster /etc/hosts) -eq 0 ] && echo "192.168.3.207 k.meilele.com k8smaster" >>/etc/hosts
tail /etc/hosts 
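
### Quick sanity check: the names just added should resolve on every node
getent hosts node223 node224 node225 k8smaster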


#####################################################
##################################################### Disable firewall and SELinux (all nodes)
systemctl stop firewalld
systemctl disable firewalld
setenforce  0 
#sed -i "s/^SELINUX=.*/SELINUX=disabled/g" /etc/sysconfig/selinux 
sed -i "s/^SELINUX=.*/SELINUX=disabled/g" /etc/selinux/config 

####### Disable swap
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
free -g


#####################################################
##################################################### Bridge netfilter sysctls (all nodes)

modprobe br_netfilter

cat >/etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

sysctl -p /etc/sysctl.d/k8s.conf

ls /proc/sys/net/bridge
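
### br_netfilter is not reloaded automatically after a reboot; a minimal way to
### persist it on CentOS 7 (systemd-modules-load picks this file up at boot):
echo br_netfilter >/etc/modules-load.d/br_netfilter.conf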


##################################################### Time sync (all nodes)
yum install ntpdate -y

#systemctl enable ntpdate.service
#systemctl start ntpdate.service

echo "*/30 * * * * /usr/sbin/ntpdate cn.ntp.org.cn >/dev/null 2>&1" > /tmp/crontab2.tmp
crontab /tmp/crontab2.tmp
crontab -l
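
### Sync the clocks once right away so TLS and etcd are not tripped up by skew
/usr/sbin/ntpdate cn.ntp.org.cn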

##################################################### Resource limits tuning (all nodes)
ulimit -HSn 65535
ulimit -u 81920
sed -i '/# End of file/a * - nofile 65535' /etc/security/limits.conf
sed -i '/# End of file/a * - nproc 81920' /etc/security/limits.conf
sed -i '/# End of file/a * - memlock unlimited' /etc/security/limits.conf

tail /etc/security/limits.conf
ulimit -a



#####################################################
#####################################################
################################ keepalived (run on all nodes)

yum install -y keepalived

echo "
global_defs {
   router_id LVS_DEVEL
}
vrrp_script check_k8s {
    script curl -sk https://127.0.0.1:6443
    interval 3
    weight -2
    fall 10
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0

    virtual_router_id 207
    priority $(ip addr |grep global |grep $(route  |grep default |awk {print $NF}) |head -n1 |awk {print $2} |cut -d / -f1 |awk -F . {print (256-$4)})
    advert_int 1
    authentication {
    auth_type PASS
    auth_pass 2829
    }

    virtual_ipaddress {
    192.168.3.207/22
    }
    track_script {
        #check_k8s
    }
}
" >/etc/keepalived/keepalived.conf 


systemctl enable keepalived.service 
systemctl restart keepalived.service
systemctl status keepalived.service 

sleep 1
ip address show eth0
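
### Priority is 256 minus the host's last octet, so node223 wins the election and
### should hold the VIP now; a rough check plus a hedged failover test (run on the holder):
ip addr show eth0 | grep -q 192.168.3.207 && echo "this node holds the VIP"
###systemctl stop keepalived    # the VIP should move to the next node within a few seconds
###systemctl start keepalived   # and return once the higher priority comes back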


#####################################################
##################################################### Passwordless SSH (run on the master node only)
wget https://files.cnblogs.com/files/blog-lhong/sshkey_tool.sh -O sshkey_tool.sh
###bash sshkey_tool.sh hostname username userpwd 
bash sshkey_tool.sh node223 root 123456
bash sshkey_tool.sh node224 root 123456
bash sshkey_tool.sh node225 root 123456
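
### If the helper script is unreachable, plain OpenSSH does the same job
### (you will be prompted for the root password of each node):
###ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
###for h in node223 node224 node225; do ssh-copy-id root@$h; done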

#####################################################
#####################################################
#yum install -y epel-release
#yum install -y yum-utils device-mapper-persistent-data lvm2 net-tools conntrack-tools wget vim libseccomp libtool-ltdl 

#####################################################
##################################################### Generate certificates with cfssl (master node only)


wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssljson
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/local/bin/cfssl-certinfo

chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
export PATH=/usr/local/bin:$PATH


### Create the CA config files (the IPs below belong to the etcd nodes) ### master node only

mkdir /root/ssl
cd /root/ssl

cat >ca-config.json <<EOF
{
"signing": {
"default": {
  "expiry": "8760h"
},
"profiles": {
  "kubernetes-Soulmate": {
    "usages": [
        "signing",
        "key encipherment",
        "server auth",
        "client auth"
    ],
    "expiry": "8760h"
  }
}
}
}
EOF

cat >ca-csr.json <<EOF
{
"CN": "kubernetes-Soulmate",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
  "C": "CN",
  "ST": "shanghai",
  "L": "shanghai",
  "O": "k8s",
  "OU": "System"
}
]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca

cat >etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.3.223",
    "192.168.3.224",
    "192.168.3.225"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "shanghai",
      "L": "shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem  -ca-key=ca-key.pem  -config=ca-config.json -profile=kubernetes-Soulmate etcd-csr.json | cfssljson -bare etcd
  
mkdir -p /etc/etcd/ssl
cp etcd.pem etcd-key.pem ca.pem /etc/etcd/ssl/
cd ~
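
### Optional check: the etcd cert must list all three node IPs in its SANs
cfssl-certinfo -cert /etc/etcd/ssl/etcd.pem | grep -A6 '"sans"'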



#####################################################
##################################################### Install and configure etcd (all nodes)


yum install etcd -y

systemctl enable etcd

cp /etc/etcd/etcd.conf{,.bak}
echo "
# [member]
ETCD_NAME="$(hostname)"
ETCD_DATA_DIR="/var/lib/etcd/"
ETCD_LISTEN_PEER_URLS="https://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="https://0.0.0.0:2379"

#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://$(ip addr |grep global |grep $(route  |grep default |awk ‘{print $NF}‘) |head -n1 |awk ‘{print $2}‘ |cut -d ‘/‘ -f1):2380"
ETCD_ADVERTISE_CLIENT_URLS="https://$(ip addr |grep global |grep $(route  |grep default |awk ‘{print $NF}‘) |head -n1 |awk ‘{print $2}‘ |cut -d ‘/‘ -f1):2379"


ETCD_INITIAL_CLUSTER="node223=https://192.168.3.223:2380,node224=https://192.168.3.224:2380,node225=https://192.168.3.225:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="0"

#[security]
ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_CLIENT_CERT_AUTH="true"

ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/ca.pem"
ETCD_AUTO_TLS="true"

ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_PEER_CLIENT_CERT_AUTH="true"

ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/ca.pem"
ETCD_PEER_AUTO_TLS="true"
" >/etc/etcd/etcd.conf



#####################################################
##################################################### Copy the certs to every node's /etc/etcd and start etcd (members must start together; run from the master node only)

scp -rp /etc/etcd/ssl root@node223:/etc/etcd/
scp -rp /etc/etcd/ssl root@node224:/etc/etcd/
scp -rp /etc/etcd/ssl root@node225:/etc/etcd/

ssh root@node223 chown -R etcd:etcd /etc/etcd
ssh root@node224 chown -R etcd:etcd /etc/etcd
ssh root@node225 chown -R etcd:etcd /etc/etcd

ssh root@node223 systemctl restart etcd &
ssh root@node224 systemctl restart etcd &
ssh root@node225 systemctl restart etcd &


#####################################################
##################################################### Verify that etcd is healthy
etcdctl --endpoints=https://192.168.3.223:2379,https://192.168.3.224:2379,https://192.168.3.225:2379   --ca-file=/etc/etcd/ssl/ca.pem   --cert-file=/etc/etcd/ssl/etcd.pem   --key-file=/etc/etcd/ssl/etcd-key.pem  cluster-health
etcdctl --endpoints=https://192.168.3.223:2379,https://192.168.3.224:2379,https://192.168.3.225:2379     --ca-file=/etc/etcd/ssl/ca.pem   --cert-file=/etc/etcd/ssl/etcd.pem   --key-file=/etc/etcd/ssl/etcd-key.pem  member list
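### The etcd packaged for CentOS 7 is 3.x, so the v3 API works too; an equivalent
### health check (note the flag names differ from the v2 commands above):
ETCDCTL_API=3 etcdctl --endpoints=https://192.168.3.223:2379,https://192.168.3.224:2379,https://192.168.3.225:2379 --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint health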



#####################################################
##################################################### Install Docker (run on all nodes)
curl -s -o /etc/yum.repos.d/docker-ce.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
#yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# List the available versions
yum list docker-ce --showduplicates |grep 17
# Install the pinned version
yum install -y --setopt=obsoletes=0 docker-ce-17.03.2.ce-1.el7.centos.x86_64 docker-ce-selinux-17.03.2.ce-1.el7.centos.noarch
  
systemctl enable docker

## To customize daemon flags, edit /usr/lib/systemd/system/docker.service
#ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=https://ms3cfraz.mirror.aliyuncs.com
# Start docker
cp /usr/lib/systemd/system/docker.service{,.bak}

###sed -i 's#^ExecStart=.*#ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=https://ms3cfraz.mirror.aliyuncs.com#g' /usr/lib/systemd/system/docker.service
grep ExecStart /usr/lib/systemd/system/docker.service
#

systemctl daemon-reload
systemctl restart docker
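
### The kubelet below is pinned to --cgroup-driver=cgroupfs; Docker must agree
docker info 2>/dev/null | grep -i 'cgroup driver'   # expect: cgroupfs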


#####################################################
##################################################### Install kubelet, kubeadm, kubectl (run on all nodes)

    
    
    
####################################################
####################################################

cat >/etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes Repo
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
enabled=1
EOF

[ ! -e rpm-package-key.gpg ] && wget https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
rpm --import rpm-package-key.gpg


ver=1.10.0
yum list kubeadm --showduplicates |grep "${ver}"

yum install -y kubernetes-cni-0.6.0 kubeadm-${ver} kubectl-${ver} kubelet-${ver} --disableexcludes=kubernetes

systemctl enable kubelet 

cp /etc/systemd/system/kubelet.service.d/10-kubeadm.conf{,.bak}

grep Environment /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
###
#### Change this line so kubelet's cgroup driver matches Docker's (cgroupfs)
sed -i 's#^Environment="KUBELET_CGROUP_ARGS=.*#Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs"#g' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

#### Append this line
echo 'Environment="KUBELET_EXTRA_ARGS=--v=2 --fail-swap-on=false --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/k8sth/pause-amd64:3.0"' >>/etc/systemd/system/kubelet.service.d/10-kubeadm.conf
grep Environment /etc/systemd/system/kubelet.service.d/10-kubeadm.conf



#####################################################
##################################################### kubectl command completion (optional)
###yum install -y bash-completion
###source /usr/share/bash-completion/bash_completion
###source <(kubectl completion bash)
###echo "source <(kubectl completion bash)" >> ~/.bashrc

systemctl daemon-reload && systemctl restart kubelet

#####################################################
#####################################################

##docker pull registry.cn-hangzhou.aliyuncs.com/k8sth/pause-amd64:3.0
docker pull registry.cn-hangzhou.aliyuncs.com/k8sth/kube-apiserver-amd64:v1.10.0
docker pull registry.cn-hangzhou.aliyuncs.com/k8sth/kube-controller-manager-amd64:v1.10.0
docker pull registry.cn-hangzhou.aliyuncs.com/k8sth/kube-scheduler-amd64:v1.10.0
docker pull registry.cn-hangzhou.aliyuncs.com/k8sth/etcd-amd64:3.1.12
##docker tag registry.cn-hangzhou.aliyuncs.com/k8sth/pause-amd64:3.0 gcr.io/google_containers/pause-amd64:3.0
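
### Confirm the control-plane images are now available locally
docker images | grep registry.cn-hangzhou.aliyuncs.com/k8sth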


#####################################################
##################################################### Initialize the cluster (master node only)

cat >config.yaml <<EOF
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
etcd:
  endpoints:
  - https://192.168.3.223:2379
  - https://192.168.3.224:2379
  - https://192.168.3.225:2379
  caFile: /etc/etcd/ssl/ca.pem
  certFile: /etc/etcd/ssl/etcd.pem
  keyFile: /etc/etcd/ssl/etcd-key.pem
  dataDir: /var/lib/etcd
networking:
  podSubnet: 10.244.0.0/16
kubernetesVersion: 1.10.0
api:
  advertiseAddress: "192.168.3.207"
token: "b99a00.a144ef80536d4344"
tokenTTL: "0s"
apiServerCertSANs:
- node223
- node224
- node225
- 192.168.3.223
- 192.168.3.224
- 192.168.3.225
- 192.168.3.207
featureGates:
  CoreDNS: true
imageRepository: "registry.cn-hangzhou.aliyuncs.com/k8sth"
EOF


kubeadm init --config config.yaml  #|grep "kubeadm join" >join_node.sh

mkdir -p $HOME/.kube
cp  /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

##--ignore-preflight-errors=Swap
###yes |kubeadm reset
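
### Hedged sketch for joining a worker node later (this walkthrough has none):
### the token is the fixed one from config.yaml; derive the CA cert hash first.
###openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
###kubeadm join 192.168.3.207:6443 --token b99a00.a144ef80536d4344 --discovery-token-ca-cert-hash sha256:<hash-from-above>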

#####################################################
##################################################### Add the remaining control-plane nodes (run from the master node)

scp -rp /etc/kubernetes/pki  node224:/etc/kubernetes/
scp -rp /etc/kubernetes/pki  node225:/etc/kubernetes/

scp -rp ~/config.yaml  node224:~/
scp -rp ~/config.yaml  node225:~/


ssh node224 kubeadm init --config ~/config.yaml
ssh node225 kubeadm init --config ~/config.yaml

ssh node224 'mkdir -p $HOME/.kube && cp /etc/kubernetes/admin.conf $HOME/.kube/config && chown $(id -u):$(id -g) $HOME/.kube/config'
ssh node225 'mkdir -p $HOME/.kube && cp /etc/kubernetes/admin.conf $HOME/.kube/config && chown $(id -u):$(id -g) $HOME/.kube/config'


kubectl get nodes 
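
### Nodes stay NotReady until the flannel CNI (next section) is applied;
### the core components can be checked right away:
kubectl get cs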



#####################################################
##################################################### Deploy the flannel network plugin (master node only)
#wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Image version used below: quay.io/coreos/flannel:v0.11.0

cat >kube-flannel.yml <<EOF
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: [NET_ADMIN]
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: RunAsAny
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups: [extensions]
    resources: [podsecuritypolicies]
    verbs: [use]
    resourceNames: [psp.flannel.unprivileged]
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: beta.kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - amd64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: beta.kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - arm64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0-arm64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0-arm64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: beta.kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - arm
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0-arm
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0-arm
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-ppc64le
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: beta.kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - ppc64le
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0-ppc64le
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0-ppc64le
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-s390x
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: beta.kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - s390x
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0-s390x
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0-s390x
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
EOF

kubectl create -f  kube-flannel.yml
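
### One flannel pod per node should come up, after which the nodes turn Ready
kubectl -n kube-system get pods -l app=flannel -o wide
kubectl get nodes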


#####################################################
##################################################### Deploy the dashboard (master node only)

cat >kubernetes-dashboard.yaml <<EOF
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>

# ------------------- Dashboard Secret ------------------- #

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---
# ------------------- Dashboard Service Account ------------------- #

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Role & Role Binding ------------------- #

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create kubernetes-dashboard-key-holder secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create kubernetes-dashboard-settings config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update kubernetes-dashboard-settings config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Deployment ------------------- #

kind: Deployment
apiVersion: apps/v1beta2
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      nodeSelector:
        node-role.kubernetes.io/master: ""
      containers:
      - name: kubernetes-dashboard
        image: registry.cn-hangzhou.aliyuncs.com/k8sth/kubernetes-dashboard-amd64:v1.8.3
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

---
# ------------------- Dashboard Service ------------------- #

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30000
  selector:
    k8s-app: kubernetes-dashboard

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF

kubectl apply -f kubernetes-dashboard.yaml

kubectl get svc,pod --all-namespaces
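
### Smoke test: the NodePort answers on any node (self-signed cert, hence -k; expect 200)
curl -sk -o /dev/null -w '%{http_code}\n' https://127.0.0.1:30000/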

###kubectl describe pod $(kubectl get pods -n kube-system |grep kubernetes-dashboard |awk '{print $1}') -n kube-system

#####################################################
##################################################### Create an admin token (master node only)
cat >admin-token.yaml <<EOF
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: admin
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: admin
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
EOF
kubectl apply -f admin-token.yaml

## Fetch the token used to log in
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-token | awk '{print $1}')

## Open the dashboard in Firefox and log in with the token: https://192.168.3.207:30000/#!/login
## The same page shows the monitoring graphs once heapster (next section) is deployed



#####################################################
##################################################### Deploy heapster (master node only)
mkdir -p kube-heapster/{influxdb,rbac}
cat >kube-heapster/influxdb/grafana.yaml<<EOF
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: monitoring-grafana
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: grafana
    spec:
      nodeSelector:
        node-role.kubernetes.io/master: ""
      containers:
      - name: grafana
        image: registry.cn-hangzhou.aliyuncs.com/k8sth/heapster-grafana-amd64:v4.4.3
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 3000
          protocol: TCP
        volumeMounts:
        - mountPath: /etc/ssl/certs
          name: ca-certificates
          readOnly: true
        - mountPath: /var
          name: grafana-storage
        env:
        - name: INFLUXDB_HOST
          value: monitoring-influxdb
        - name: GF_SERVER_HTTP_PORT
          value: "3000"
          # The following env variables are required to make Grafana accessible via
          # the kubernetes api-server proxy. On production clusters, we recommend
          # removing these env variables, setup auth for grafana, and expose the grafana
          # service using a LoadBalancer or a public IP.
        - name: GF_AUTH_BASIC_ENABLED
          value: "false"
        - name: GF_AUTH_ANONYMOUS_ENABLED
          value: "true"
        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
          value: Admin
        - name: GF_SERVER_ROOT_URL
          # If you're only using the API Server proxy, set this value instead:
          # value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy
          value: /
      volumes:
      - name: ca-certificates
        hostPath:
          path: /etc/ssl/certs
      - name: grafana-storage
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: monitoring-grafana
  name: monitoring-grafana
  namespace: kube-system
spec:
  # In a production setup, we recommend accessing Grafana through an external Loadbalancer
  # or through a public IP.
  # type: LoadBalancer
  # You could also use NodePort to expose the service at a randomly-generated port
  # type: NodePort
  ports:
  - port: 80
    targetPort: 3000
  selector:
    k8s-app: grafana
EOF


cat >kube-heapster/influxdb/heapster.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: heapster
    spec:
      serviceAccountName: heapster
      nodeSelector:
        node-role.kubernetes.io/master: ""
      containers:
      - name: heapster
        image: registry.cn-hangzhou.aliyuncs.com/k8sth/heapster-amd64:v1.4.2
        imagePullPolicy: IfNotPresent
        command:
        - /heapster
        - --source=kubernetes:https://kubernetes.default
        - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086
---
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: Heapster
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster
EOF

cat >kube-heapster/influxdb/influxdb.yaml<<EOF
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: monitoring-influxdb
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: influxdb
    spec:
      nodeSelector:
        node-role.kubernetes.io/master: ""
      containers:
      - name: influxdb
        image: registry.cn-hangzhou.aliyuncs.com/k8sth/heapster-influxdb-amd64:v1.3.3
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /data
          name: influxdb-storage
      volumes:
      - name: influxdb-storage
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: monitoring-influxdb
  name: monitoring-influxdb
  namespace: kube-system
spec:
  ports:
  - port: 8086
    targetPort: 8086
  selector:
    k8s-app: influxdb
EOF

cat >kube-heapster/rbac/heapster-rbac.yaml<<EOF
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: heapster
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:heapster
subjects:
- kind: ServiceAccount
  name: heapster
  namespace: kube-system
EOF


kubectl create -f kube-heapster/influxdb/

kubectl create -f kube-heapster/rbac/


kubectl get pods --all-namespaces
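
### Once the heapster pod is Running, the heapster-backed metrics should answer
### (it may take a minute or two to collect the first samples):
kubectl top nodes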


#####################################################
#####################################################

 
