Deploying Harbor on k8s with Helm 3
Posted by fan-gx
GitHub repository for harbor-helm: https://github.com/goharbor/harbor-helm
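If the chart is not already on disk, it can be fetched either by cloning the repo or from the official chart repository (standard Helm 3 usage; the harbor-helm-master directory used in the install command later corresponds to the unpacked GitHub zip of the master branch):
git clone https://github.com/goharbor/harbor-helm
## or:
helm repo add harbor https://helm.goharbor.io
helm pull harbor/harbor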
Edit values.yaml
expose:
  type: ingress
  tls:
    enabled: false   ### changed to false (disable TLS)
    secretName: ""
    notarySecretName: ""
    commonName: ""
  ingress:
    hosts:
      core: fana.harbor       ## changed: domain of the Harbor core service
      notary: notary.harbor   ## changed: domain of the Notary service
    controller: default
    annotations:
      ingress.kubernetes.io/ssl-redirect: "false"         ## changed to false
      ingress.kubernetes.io/proxy-body-size: "0"
      nginx.ingress.kubernetes.io/ssl-redirect: "false"   ## changed to false
      nginx.ingress.kubernetes.io/proxy-body-size: "0"
  clusterIP:
    name: harbor
    ports:
      httpPort: 80
      httpsPort: 443
      notaryPort: 4443
  nodePort:
    name: harbor
    ports:
      http:
        port: 80
        nodePort: 30002
      https:
        port: 443
        nodePort: 30003
      notary:
        port: 4443
        nodePort: 30004
  loadBalancer:
    name: harbor
    IP: ""
    ports:
      httpPort: 80
      httpsPort: 443
      notaryPort: 4443
    annotations: {}
    sourceRanges: []
externalURL: http://fana.harbor   ## changed: must match the ingress core host
persistence:
  enabled: true
  resourcePolicy: "keep"
  persistentVolumeClaim:
    registry:
      existingClaim: "harbor-pvc"   ## changed: the pre-created PVC (see below)
      storageClass: ""
      subPath: "registry"           ## changed: one subPath per component
      accessMode: ReadWriteOnce
      size: 5Gi
    chartmuseum:
      existingClaim: "harbor-pvc"   ## changed
      storageClass: ""
      subPath: "chartmuseum"        ## changed
      accessMode: ReadWriteOnce
      size: 5Gi
    jobservice:
      existingClaim: "harbor-pvc"   ## changed
      storageClass: ""
      subPath: "jobservice"         ## changed
      accessMode: ReadWriteOnce
      size: 1Gi
    database:
      existingClaim: "harbor-pvc"   ## changed
      storageClass: ""
      subPath: "database"           ## changed
      accessMode: ReadWriteOnce
      size: 1Gi
    redis:
      existingClaim: "harbor-pvc"   ## changed
      storageClass: ""
      subPath: "redis"              ## changed
      accessMode: ReadWriteOnce
      size: 1Gi
  imageChartStorage:
    disableredirect: false
    type: filesystem
    filesystem:
      rootdirectory: /storage
    azure:
      accountname: accountname
      accountkey: base64encodedaccountkey
      container: containername
    gcs:
      bucket: bucketname
      encodedkey: base64-encoded-json-key-file
    s3:
      region: us-west-1
      bucket: bucketname
    swift:
      authurl: https://storage.myprovider.com/v3/auth
      username: username
      password: password
      container: containername
    oss:
      accesskeyid: accesskeyid
      accesskeysecret: accesskeysecret
      region: regionname
      bucket: bucketname
imagePullPolicy: IfNotPresent
imagePullSecrets:
updateStrategy:
  type: RollingUpdate
logLevel: info
harborAdminPassword: "Harbor12345"   ## initial admin password
secretKey: "not-a-secure-key"
proxy:
  httpProxy:
  httpsProxy:
  noProxy: 127.0.0.1,localhost,.local,.internal
  components:
    - core
    - jobservice
    - clair
nginx:
  image:
    repository: goharbor/nginx-photon
    tag: v1.9.3
  replicas: 1
  nodeSelector: {}
  tolerations: []
  affinity: {}
  podAnnotations: {}
portal:
  image:
    repository: goharbor/harbor-portal
    tag: v1.9.3
  replicas: 1
  nodeSelector: {}
  tolerations: []
  affinity: {}
  podAnnotations: {}
core:
  image:
    repository: goharbor/harbor-core
    tag: v1.9.3
  replicas: 1
  livenessProbe:
    initialDelaySeconds: 300
  nodeSelector: {}
  tolerations: []
  affinity: {}
  podAnnotations: {}
  secret: ""
  secretName: ""
  xsrfKey: ""
jobservice:
  image:
    repository: goharbor/harbor-jobservice
    tag: v1.9.3
  replicas: 1
  maxJobWorkers: 10
  jobLogger: file
  nodeSelector: {}
  tolerations: []
  affinity: {}
  podAnnotations: {}
  secret: ""
registry:
  registry:
    image:
      repository: goharbor/registry-photon
      tag: v2.7.1-patch-2819-2553-v1.9.3
  controller:
    image:
      repository: goharbor/harbor-registryctl
      tag: v1.9.3
  replicas: 1
  nodeSelector: {}
  tolerations: []
  affinity: {}
  podAnnotations: {}
  secret: ""
  relativeurls: false
  middleware:
    enabled: false
    type: cloudFront
    cloudFront:
      baseurl: example.cloudfront.net
      keypairid: KEYPAIRID
      duration: 3000s
      ipfilteredby: none
      privateKeySecret: "my-secret"
chartmuseum:
  enabled: true
  absoluteUrl: false
  image:
    repository: goharbor/chartmuseum-photon
    tag: v0.9.0-v1.9.3
  replicas: 1
  nodeSelector: {}
  tolerations: []
  affinity: {}
  podAnnotations: {}
clair:
  enabled: true
  clair:
    image:
      repository: goharbor/clair-photon
      tag: v2.1.0-v1.9.3
  adapter:
    image:
      repository: goharbor/clair-adapter-photon
      tag: dev
  replicas: 1
  updatersInterval: 12
  nodeSelector: {}
  tolerations: []
  affinity: {}
  podAnnotations: {}
notary:
  enabled: true
  server:
    image:
      repository: goharbor/notary-server-photon
      tag: v0.6.1-v1.9.3
    replicas: 1
  signer:
    image:
      repository: goharbor/notary-signer-photon
      tag: v0.6.1-v1.9.3
    replicas: 1
  nodeSelector: {}
  tolerations: []
  affinity: {}
  podAnnotations: {}
  secretName: ""
database:
  type: internal
  internal:
    image:
      repository: goharbor/harbor-db
      tag: v1.9.3
    initContainerImage:
      repository: busybox
      tag: latest
    password: "changeit"
    nodeSelector: {}
    tolerations: []
    affinity: {}
  external:
    host: "192.168.0.1"
    port: "5432"
    username: "user"
    password: "password"
    coreDatabase: "registry"
    clairDatabase: "clair"
    notaryServerDatabase: "notary_server"
    notarySignerDatabase: "notary_signer"
    sslmode: "disable"
  maxIdleConns: 50
  maxOpenConns: 100
  podAnnotations: {}
redis:
  type: internal
  internal:
    image:
      repository: goharbor/redis-photon
      tag: v1.9.3
    nodeSelector: {}
    tolerations: []
    affinity: {}
  external:
    host: "192.168.0.2"
    port: "6379"
    coreDatabaseIndex: "0"
    jobserviceDatabaseIndex: "1"
    registryDatabaseIndex: "2"
    chartmuseumDatabaseIndex: "3"
    clairAdapterIndex: "4"
    password: ""
  podAnnotations: {}
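Before installing, the templates can be rendered locally to confirm the edits took effect (harbor-helm-master is the chart directory used in the install step below):
helm template paas harbor-helm-master -n devops | grep -B2 -A4 "kind: Ingress"
## or do a dry run against the cluster:
helm install paas harbor-helm-master -n devops --dry-run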
Create the PVC
cat <<EOF> harbor-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-pvc
  namespace: devops
  labels:
    app: harbor
spec:
  storageClassName: glusterfs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 20Gi
EOF
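The claim above assumes a glusterfs StorageClass already exists in the cluster; worth confirming before applying:
kubectl get storageclass glusterfs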
## kubectl apply -f harbor-pvc.yaml #create the PVC
## kubectl get pvc -n devops
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
harbor-pvc Bound pvc-ef9c2335-2dd3-11ea-afe5-000c29f27a97 20Gi RWX glusterfs 1m
## helm install paas harbor-helm-master -n devops #install
## helm uninstall paas -n devops #####uninstall#####
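If values.yaml changes later, the release can be updated in place rather than reinstalled; release status and history are also useful checks:
helm upgrade paas harbor-helm-master -n devops
helm status paas -n devops
helm history paas -n devops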
## kubectl get pod -n devops
NAME                                         READY   STATUS    RESTARTS   AGE
paas-harbor-chartmuseum-68b6858c7f-rkddt     1/1     Running   0          4m
paas-harbor-clair-68d5d45b9d-d4m9n           2/2     Running   0          4m
paas-harbor-core-76fbcbff9d-rksng            1/1     Running   1          4m
paas-harbor-database-0                       1/1     Running   1          4m
paas-harbor-jobservice-869bcb9659-6sq8p      1/1     Running   1          4m
paas-harbor-notary-server-5d87b9f64-s9n6m    1/1     Running   1          4m
paas-harbor-notary-signer-5c9c95b4-kkxv2     1/1     Running   1          4m
paas-harbor-portal-5c8664f669-8nbnw          1/1     Running   0          4m
paas-harbor-redis-0                          1/1     Running   1          4m
paas-harbor-registry-9465f5556-5s47k         2/2     Running   0          4m
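Since expose.type is ingress, an Ingress object should also exist with the hosts set in values.yaml; the services are worth a glance too:
kubectl get ingress -n devops
kubectl get svc -n devops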
Verify
## Add a hosts entry on every node
cat <<EOF>> /etc/hosts
192.168.10.11 fana.harbor ## use the node's own IP address
EOF
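A quick check that the name resolves and the ingress answers (assuming 192.168.10.11 runs the ingress controller):
curl -I http://fana.harbor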
## Open http://fana.harbor in a browser, log in, and create a project
## Configure /etc/docker/daemon.json so Docker trusts the Harbor address
## (JSON allows no comments, so note here: the insecure-registries entries
## "fana.harbor:80" and "fana.harbor" are the Harbor ingress address)
cat <<EOF> /etc/docker/daemon.json
{
  "log-driver": "journald",
  "log-opts": {
    "mode": "non-blocking",
    "max-buffer-size": "8m"
  },
  "data-root": "/data/docker/containerd",
  "insecure-registries": [
    "fana.harbor:80",
    "fana.harbor"
  ]
}
EOF
## Variant with the overlay2 storage driver (note that storage-opts must be a JSON array)
cat <<EOF> /etc/docker/daemon.json
{
  "storage-driver": "overlay2",
  "storage-opts": ["overlay2.override_kernel_check=true"],
  "log-driver": "journald",
  "log-opts": {
    "mode": "non-blocking",
    "max-buffer-size": "8m"
  },
  "data-root": "/data/docker/containerd",
  "insecure-registries": [
    "fana.harbor:80",
    "fana.harbor"
  ]
}
EOF
## Restart Docker
systemctl restart docker
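After the restart, the daemon should report the registry as insecure; a quick check:
docker info | grep -A2 "Insecure Registries"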
## Log in to Harbor
#### docker login -u admin -p Harbor12345 fana.harbor
WARNING! Using --password via the CLI is insecure. Use --password-stdin.
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
Login Succeeded
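The warning above can be avoided by piping the password in instead of passing it on the command line:
echo "Harbor12345" | docker login -u admin --password-stdin fana.harbor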
#### docker login -u admin -p Harbor12345 fana.harbor:80
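The push below assumes a local image has already been tagged for the registry, for example (the source image name here is illustrative, and the base project must already exist in Harbor):
docker tag k8s.gcr.io/pause-amd64:3.1 fana.harbor/base/pause-amd64:3.1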
## Push the image
#### docker push fana.harbor/base/pause-amd64:3.1
The push refers to repository [fana.harbor/base/pause-amd64]
e17133b79956: Layer already exists
3.1: digest: sha256:113e218ad463746a4b7608d3f7cef72e6ab01d0c06bad2ab7265497fba92cf9c size: 527
## Pull the image
#### docker pull fana.harbor/base/pause-amd64:3.1
3.1: Pulling from base/pause-amd64
Digest: sha256:113e218ad463746a4b7608d3f7cef72e6ab01d0c06bad2ab7265497fba92cf9c
Status: Downloaded newer image for fana.harbor/base/pause-amd64:3.1
fana.harbor/base/pause-amd64:3.1
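For pods in the cluster to pull from this registry, an image pull secret can be created and referenced from imagePullSecrets in pod specs (a sketch; the secret name harbor-secret is arbitrary):
kubectl create secret docker-registry harbor-secret \
  --docker-server=fana.harbor \
  --docker-username=admin \
  --docker-password=Harbor12345 \
  -n devops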