k8s-DNS

Posted by 小怪獣55


For deploying K8S with Ansible, see: https://blog.51cto.com/taowenwu/5222088

The two DNS components in common use are kube-dns and CoreDNS; this post deploys both (only one of them should be active in a cluster at a time).
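
Whichever component is deployed, Pods reach it through the kube-dns Service on the cluster DNS address (10.20.254.254 in this environment). Once the component is running, a quick way to confirm that address is the Service listing below; the output is what this cluster is expected to show, not a captured run:

kubectl get svc kube-dns -n kube-system
---------------------------------------------------------------------
NAME       TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
kube-dns   ClusterIP   10.20.254.254   <none>        53/UDP,53/TCP   22h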

1. Deploy CoreDNS

docker pull gcr.io/google-containers/coredns:1.2.6
docker tag gcr.io/google-containers/coredns:1.2.6 harbor.gesila.com/k8s/coredns:1.2.6
docker push harbor.gesila.com/k8s/coredns:1.2.6
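
The coredns.yaml applied in the next step is not reproduced in this post. As a rough sketch only (a complete manifest also needs RBAC objects, a Deployment pointing at harbor.gesila.com/k8s/coredns:1.2.6, and the kube-dns Service), the Corefile ConfigMap it would carry for this cluster domain looks roughly like this:

kubectl apply -f - <<'EOF'
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    # serve the cluster domain linux36.local, forward everything else upstream
    .:53 {
        errors
        health
        kubernetes linux36.local in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
EOF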

1.1. DNS test (using the busybox Pod created in section 2.1 below)

vim coredns.yaml
kubectl apply -f coredns.yaml
kubectl exec busybox nslookup kubernetes
---------------------------------------------------------------------
Server: 10.20.254.254
Address 1: 10.20.254.254 kube-dns.kube-system.svc.linux36.local
Name: kubernetes
Address 1: 10.20.0.1 kubernetes.default.svc.linux36.local

kubectl exec busybox nslookup kubernetes.default.svc.linux36.local
---------------------------------------------------------------------
Server: 10.20.254.254
Address 1: 10.20.254.254 kube-dns.kube-system.svc.linux36.local
Name: kubernetes.default.svc.linux36.local
Address 1: 10.20.0.1 kubernetes.default.svc.linux36.local
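
Services in other namespaces resolve as <service>.<namespace> through the search domains; for example, the DNS Service itself (expected output for this cluster, same addresses as above):

kubectl exec busybox nslookup kube-dns.kube-system
---------------------------------------------------------------------
Server: 10.20.254.254
Address 1: 10.20.254.254 kube-dns.kube-system.svc.linux36.local
Name: kube-dns.kube-system
Address 1: 10.20.254.254 kube-dns.kube-system.svc.linux36.local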

2. Deploy kube-dns

2.1. busybox

2.1.1. Prepare the directory and files

root@k8s-master:~# mkdir /etc/ansible/manifests/dns
root@k8s-master:~# cd /etc/ansible/manifests/dns
root@k8s-master:/etc/ansible/manifests/dns# mkdir kube-dns coredns
root@k8s-master:/etc/ansible/manifests/dns# cd kube-dns/
root@k8s-master:/etc/ansible/manifests/dns/kube-dns# rz -E
rz waiting to receive.
root@k8s-master:/etc/ansible/manifests/dns/kube-dns# ls
busybox-online.tar.gz busybox.yaml kube-dns.yaml


2.1.2. Load the image and push it to Harbor

root@k8s-master:/etc/ansible/manifests/dns/kube-dns# docker load -i busybox-online.tar.gz
docker tag 747e1d7f6665 harbor.gesila.com/k8s/busybox:latest
docker push harbor.gesila.com/k8s/busybox:latest

2.1.3. Change the image reference to the local Harbor

root@k8s-master:/etc/ansible/manifests/dns/kube-dns# vim busybox.yaml
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default    # test DNS resolution from the default namespace
spec:
  containers:
  - image: harbor.gesila.com/k8s/busybox:latest
    command:
    - sleep
    - "3600"
    imagePullPolicy: Always
    name: busybox
  restartPolicy: Always

2.1.4. Create the Pod

root@k8s-master:/etc/ansible/manifests/dns/kube-dns# kubectl apply -f busybox.yaml
pod/busybox created
root@k8s-master:/etc/ansible/manifests/dns/kube-dns# kubectl get pods
NAME READY STATUS RESTARTS AGE
busybox 1/1 Running 0 67m
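
Before running the resolution tests it can help to confirm that the Pod's /etc/resolv.conf points at the cluster DNS Service; the nameserver and search lines below are what this cluster is expected to show, not a captured run:

kubectl exec busybox cat /etc/resolv.conf
---------------------------------------------------------------------
nameserver 10.20.254.254
search default.svc.linux36.local svc.linux36.local linux36.local
options ndots:5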

2.2. kube-dns

2.2.1. Load the images and push them to the Harbor registry


docker load -i k8s-dns-kube-dns-amd64_1.14.13.tar.gz && \
docker load -i k8s-dns-dnsmasq-nanny-amd64_1.14.13.tar.gz && \
docker load -i k8s-dns-sidecar-amd64_1.14.13.tar.gz

docker tag 82f954458b31 harbor.gesila.com/k8s/kube-dns-amd64:1.14.13 && \
docker tag 7b15476a7228 harbor.gesila.com/k8s/dnsmasq-nanny-amd64:1.14.13 && \
docker tag 333fb0833870 harbor.gesila.com/k8s/sidecar-amd64:1.14.13

docker push harbor.gesila.com/k8s/kube-dns-amd64:1.14.13 && \
docker push harbor.gesila.com/k8s/dnsmasq-nanny-amd64:1.14.13 && \
docker push harbor.gesila.com/k8s/sidecar-amd64:1.14.13

2.2.2. Edit the configuration file

root@master:/etc/ansible/manifests/dns/kube-dns# vim kube-dns.yaml  
----------------------------------------------------------------------------------------
clusterIP: 10.20.254.254                    # must match CLUSTER_DNS_SVC_IP in the ansible hosts file
- --domain=linux36.local.                   # must match CLUSTER_DNS_DOMAIN in the ansible hosts file
- --server=/linux36.local/127.0.0.1#10053   # which local server/port answers queries for the cluster domain
- --server=/magedu.net/172.20.100.23#53     # forward our own external domain to its DNS server
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.linux36.local,5,SRV
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.linux36.local,5,SRV

- name: kubedns
  image: harbor.gesila.com/k8s/kube-dns-amd64:1.14.13

- name: dnsmasq
  image: harbor.gesila.com/k8s/dnsmasq-nanny-amd64:1.14.13

- name: sidecar
  image: harbor.gesila.com/k8s/sidecar-amd64:1.14.13
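
After editing, a quick grep can confirm that the cluster IP, domain, forwarders and the three Harbor image references were all changed consistently (the exact matches and line numbers depend on your copy of the file):

grep -nE 'clusterIP|--domain=|--server=|image:' kube-dns.yaml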

2.2.3. Create the kube-dns resources

root@k8s-master:/etc/ansible/manifests/dns/kube-dns# kubectl create -f kube-dns.yaml
service/kube-dns created
serviceaccount/kube-dns created
configmap/kube-dns created
deployment.extensions/kube-dns created

root@k8s-master:/etc/ansible/manifests/dns/kube-dns# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-64dfd5bf4c-q94ls 1/1 Running 0 153m
calico-node-9ccdk 2/2 Running 2 22h
calico-node-k297m 2/2 Running 4 22h
calico-node-w6m6p 2/2 Running 10 22h
kube-dns-5744cc9dff-rxgjk 3/3 Running 0 30s
kubernetes-dashboard-7b5f5b777c-s7djw 1/1 Running 0 153m
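
Besides the in-cluster lookups from section 1.1, the --server=/magedu.net/172.20.100.23#53 forwarder can be checked from the busybox Pod. This assumes 172.20.100.23 really serves that zone, so only the Server lines below are predictable:

kubectl exec busybox nslookup www.magedu.net
---------------------------------------------------------------------
Server: 10.20.254.254
Address 1: 10.20.254.254 kube-dns.kube-system.svc.linux36.local
(the Name/Address answers depend on the records held by 172.20.100.23)
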
2.2.3.1. kube-dns.yaml
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
# in sync with this file.

# __MACHINE_GENERATED_WARNING__

apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.20.254.254
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: docker/default
    spec:
      priorityClassName: system-cluster-critical
      securityContext:
        supplementalGroups: [ 65534 ]
        fsGroup: 65534
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: harbor.gesila.com/k8s/kube-dns-amd64:1.14.13
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=linux36.local.
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: harbor.gesila.com/k8s/dnsmasq-nanny-amd64:1.14.13
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --no-negcache
        - --dns-loop-detect
        - --log-facility=-
        - --server=/magedu.net/172.20.100.23#53
        - --server=/linux36.local/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: harbor.gesila.com/k8s/sidecar-amd64:1.14.13
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.linux36.local,5,SRV
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.linux36.local,5,SRV
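
If the kube-dns Pod does not reach 3/3 Running, each of the three containers can be inspected separately; the Pod name here is taken from the kubectl get pods output in section 2.2.3:

kubectl logs -n kube-system kube-dns-5744cc9dff-rxgjk -c kubedns
kubectl logs -n kube-system kube-dns-5744cc9dff-rxgjk -c dnsmasq
kubectl logs -n kube-system kube-dns-5744cc9dff-rxgjk -c sidecar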
