linux12k8s --> 15 Discuz + MySQL master-slave replication



I. Deploying Discuz

  • Requirements: use StorageClass, ConfigMap, Secret, HTTPS, and probes
  • Plan
1. Build the images
	alvinos/nginx:discuz-v1
	alvinos/php:discuz-v2
	mysql:5.7

2. Network path
	ingress ---> svc ---> pod ---> mysql svc ---> mysql pod

3. Write the manifests
	1. mysql
		1. mysql namespace
		2. mysql storage
		3. MySQL workload
		4. backup

	2. discuz
		1. discuz namespace
		2. storage
		3. workload (Deployment and svc)

4. Access test
II. Docker-based MySQL master-slave replication

# 1. Start the master and the slave (they must be on the same bridge network)
# 2. Create the replication account on the master
# 3. Join the slave to the master
# 1. Create a working directory
[root@k8s-m-01 ~]# mkdir devops
[root@k8s-m-01 ~]# cd devops
# 2. Prepare the master configuration file
[root@k8s-m-01 devops]# vim mysql-master.cnf 
[mysqld]
datadir=/var/lib/mysql
server-id=1  # must differ between master and slave
binlog_format=row
log-bin=mysql-bin
skip-name-resolve
gtid-mode=on
enforce-gtid-consistency=true
log-slave-updates=1
relay_log_purge=0
symbolic-links=0
sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES
[mysqld_safe]
log-error=/var/log/mysqld.log
# 3. Prepare the slave configuration file
[root@k8s-m-01 devops]# vim mysql-node.cnf 
[mysqld]
datadir=/var/lib/mysql
server-id=2  # must differ from the master
binlog_format=row
log-bin=mysql-bin
skip-name-resolve
gtid-mode=on
enforce-gtid-consistency=true
log-slave-updates=1
relay_log_purge=0
symbolic-links=0
sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES
[mysqld_safe]
log-error=/var/log/mysqld.log
# 4. Create the bridge network
[root@k8s-m-01 devops]# docker network create mm
789d90b65dfda342471035f3110175140162d9bd0aec8de6c765634bebcbc546
[root@k8s-m-01 devops]# docker network ls
NETWORK ID     NAME      DRIVER    SCOPE
30bda6313154   bridge    bridge    local
e97dc4abf036   host      host      local
789d90b65dfd   mm        bridge    local
894fc1c5a6a2   none      null      local
# 5. Check the files
[root@k8s-m-01 devops]# ll
total 8
-rw-r--r-- 1 root root 292 Aug 11 19:32 mysql-master.cnf
-rw-r--r-- 1 root root 292 Aug 11 19:36 mysql-node.cnf

# 6. Start the master and slave containers
[root@k8s-m-01 devops]# docker run -d --name mysql-master-01 -p 3306:3306 --network=mm -e MYSQL_ROOT_PASSWORD=123 -v /root/devops/mysql-master.cnf:/etc/mysql/my.cnf mysql:5.7

[root@k8s-m-01 devops]# docker run -d --name mysql-node-01  --network=mm -e MYSQL_ROOT_PASSWORD=123 -v /root/devops/mysql-node.cnf:/etc/mysql/my.cnf mysql:5.7
# 7. Check the containers
[root@k8s-m-01 devops]# docker ps |grep 5.7
6f4014374011   mysql:5.7                                             "docker-entrypoint.s…"   About a minute ago   Up About a minute   3306/tcp, 33060/tcp                                    mysql-node-01
43b2c840a0da   mysql:5.7                                             "docker-entrypoint.s…"   About a minute ago   Up About a minute   0.0.0.0:3306->3306/tcp, :::3306->3306/tcp, 33060/tcp   mysql-master-01
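
Before creating the replication account, it is worth a quick check that both containers really landed on the mm bridge (a minimal sketch using the container names above):

# List the containers attached to the mm bridge; both MySQL containers should appear
docker network inspect mm --format '{{range .Containers}}{{.Name}} {{end}}'
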
# 8. Create the replication account on the master
[root@k8s-m-01 devops]# docker exec -it mysql-master-01 bash
root@43b2c840a0da:/# mysql -uroot -p123 -hmysql-master-01
# Run on the master
mysql> grant replication slave on *.* to 'slave'@'%' identified by '123';
mysql> flush privileges;
mysql> show master status;
+------------------+----------+--------------+------------------+------------------------------------------+
| File             | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set                        |
+------------------+----------+--------------+------------------+------------------------------------------+
| mysql-bin.000003 |      622 |              |                  | ed757e7a-fa98-11eb-b088-0242ac120002:1-7 |
+------------------+----------+--------------+------------------+------------------------------------------+
# 9. Connect to the slave database
[root@k8s-m-01 devops]# docker exec -it mysql-node-01 bash
root@6f4014374011:/# mysql -uroot -p123 -h mysql-node-01

# Run on the slave to join it to the master
mysql> change master to master_host='mysql-master-01',master_port=3306,master_user='slave',master_password='123',master_log_file='mysql-bin.000003',master_log_pos=622;
mysql> start slave;
# Check the replication status
mysql> show slave status\G
*************************** 1. row ***************************
               Slave_IO_State: Waiting for master to send event
                  Master_Host: mysql-master-01
                  Master_User: slave
                  Master_Port: 3306
                Connect_Retry: 60
              Master_Log_File: mysql-bin.000003
          Read_Master_Log_Pos: 622
               Relay_Log_File: 6f4014374011-relay-bin.000002
                Relay_Log_Pos: 320
        Relay_Master_Log_File: mysql-bin.000003
             Slave_IO_Running: Yes
            Slave_SQL_Running: Yes
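
To confirm replication end to end, a quick sketch (the test_repl database name is arbitrary): create a database on the master and check that it shows up on the slave.

# On the master: create a throwaway database
docker exec mysql-master-01 mysql -uroot -p123 -e "create database test_repl;"
# On the slave: the new database should appear almost immediately
docker exec mysql-node-01 mysql -uroot -p123 -e "show databases like 'test_repl';"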


III. Deploying MySQL master-slave replication on Kubernetes

Docker Hub: https://hub.docker.com/repositories

  • Requirement: one master with multiple slaves; additional slave nodes can be added at will

Deploying the Discuz forum + MySQL master-slave replication

1. Prepare the Discuz package

1. Download the Discuz package
[root@k8s-m-01 discuz]# wget http://www.mmin.xyz:81/package/blog/Discuz_X3.4_SC_UTF8_20210320.zip
2. Unpack it	# the package ships as a zip; no renaming needed
[root@k8s-m-01 discuz]# unzip Discuz_X3.4_SC_UTF8_20210320.zip

3. Make it writable and pack it	# the archive is reused later when building the images
[root@k8s-m-01 discuz]# chmod -R 777 upload/

4. Create the site directories
[root@k8s-m-01 discuz]# mkdir nginx 
[root@k8s-m-01 discuz]# mkdir php
[root@k8s-m-01 discuz]# cp -rp upload/ nginx/
[root@k8s-m-01 discuz]# cp -rp upload/ php
[root@k8s-m-01 discuz]# ll
total 4
drwxr-xr-x  2 root root  272 Aug 11 20:45 nginx
drwxr-xr-x  2 root root    6 Aug 11 20:45 php
drwxrwxrwx 13 root root 4096 Mar 22 19:44 upload
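
The Dockerfiles in the next step ADD a discuz.gz archive into the images, but the packaging command itself is not shown in the transcript. One way to produce it (an assumed step; the archive layout is a guess based on the ADD destination):

# Pack the Discuz code for the nginx and php builds (assumed, not part of the original transcript)
cd /root/discuz
tar -czf nginx/discuz.gz upload/
cp nginx/discuz.gz php/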

2. Build the images

  • Build and push the images (the base images are public, so they can be pushed directly without extra packaging)
1) Build the Discuz PHP image
# Write the Dockerfile
[root@k8s-m-01 ~]# cd /root/discuz/php
[root@k8s-m-01 php]# cat > Dockerfile <<EOF
FROM 18954354671/lnmp-php-wp:v3
ADD discuz.gz /usr/share/nginx/html
EXPOSE 9000
EOF

# Check the files
[root@k8s-m-01 php]# ll
total 10092
-rw-r--r-- 1 root root 10328804 Apr 12 22:16 discuz.gz
-rw-r--r-- 1 root root       80 Apr 12 23:55 Dockerfile

# Build and push the image
[root@k8s-m-01 php]# docker build -t 18954354671/lnmp-php-discuz:v2 .
[root@k8s-m-01 php]# docker push 18954354671/lnmp-php-discuz:v2
2) Build the Discuz nginx image
# Write the Dockerfile
[root@k8s-m-01 php]# cd /root/discuz/nginx
[root@k8s-m-01 nginx]# cat > Dockerfile <<EOF
FROM 18954354671/lnmp-nginx-wp:v3
ADD discuz.gz /usr/share/nginx/html
EXPOSE 80 443
CMD ["nginx","-g","daemon off;"]
EOF

# Check the files
[root@k8s-m-01 nginx]# ll
total 10092
-rw-r--r-- 1 root root 10328804 Apr 12 22:35 discuz.gz
-rw-r--r-- 1 root root      112 Apr 12 22:37 Dockerfile

# Build and push the image
[root@k8s-m-01 nginx]# docker build -t 18954354671/lnmp-nginx-discuz:v2 .
[root@k8s-m-01 nginx]# docker push 18954354671/lnmp-nginx-discuz:v2
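
Before wiring these images into Kubernetes, a quick sanity check that the site code actually landed where the Dockerfile put it:

# The unpacked Discuz files should be visible inside the freshly built image
docker run --rm 18954354671/lnmp-nginx-discuz:v2 ls /usr/share/nginx/html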

3) Build the MySQL master and slave images
  • Create the image repositories: create the mysql-master and mysql-slave repositories in the Aliyun container registry
  • Create the build directories for master and slave
[root@k8s-m-01 ~]# mkdir -pv /Project/mysql/{master,slave}
1> Master
[root@k8s-m-01 ~]# cd /Project/mysql/master
# Write the Dockerfile
[root@k8s-m-01 master]# vim Dockerfile
FROM mysql:5.7
ADD my.cnf /etc/mysql/my.cnf

# Write the MySQL configuration file
[root@k8s-m-01 master]# vim my.cnf
[mysql]
socket=/var/lib/mysql/mysql.sock
[mysqld]
user=mysql
port=3306
binlog_format=mixed
log_bin=mysql-bin
socket=/var/lib/mysql/mysql.sock
server_id=1
sync_binlog=1
log-slave-updates=on

# Build the master image
[root@k8s-m-01 master]# docker build -t registry.cn-hangzhou.aliyuncs.com/slave/mysql/mysql-master:v1 .

# Log in to the registry
[root@k8s-m-01 master]# docker login --username=zzp247364 registry.cn-hangzhou.aliyuncs.com

# Push
[root@k8s-m-01 master]# docker push registry.cn-hangzhou.aliyuncs.com/slave/mysql/mysql-master:v1
2> Slave
[root@k8s-m-01 ~]# cd /Project/mysql/slave
# Write the Dockerfile
[root@k8s-m-01 slave]# vim Dockerfile
FROM mysql:5.7
ADD my.cnf /etc/mysql/my.cnf

# Write the MySQL configuration file
[root@k8s-m-01 slave]# vim my.cnf
[mysql]
socket=/var/lib/mysql/mysql.sock
[mysqld]
user=mysql
port=3306
binlog_format=mixed
log_bin=mysql-bin
socket=/var/lib/mysql/mysql.sock
server_id=2
read-only=1

# Build the slave image
[root@k8s-m-01 slave]# docker build -t registry.cn-hangzhou.aliyuncs.com/slave/mysql/mysql-slave:v1 .

# Push
[root@k8s-m-01 slave]# docker push registry.cn-hangzhou.aliyuncs.com/slave/mysql/mysql-slave:v1
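
A quick check that each image really carries its own my.cnf (server_id=1 for the master, server_id=2 plus read-only for the slave):

# Print the configuration baked into each image
docker run --rm registry.cn-hangzhou.aliyuncs.com/slave/mysql/mysql-master:v1 cat /etc/mysql/my.cnf
docker run --rm registry.cn-hangzhou.aliyuncs.com/slave/mysql/mysql-slave:v1 cat /etc/mysql/my.cnf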

3. Prepare the environment

  • Wipe the cluster to get a clean environment
  • Deploy the base components in order: flannel.yaml, deploy.yaml, ingress-nginx.yaml
[root@k8s-m-01 ~]# kubectl apply -f flannel.yaml
[root@k8s-m-01 ~]# kubectl apply -f deploy.yaml
[root@k8s-m-01 ~]# kubectl apply -f ingress-nginx.yaml

4. Deploy NFS dynamic storage

# Reference: https://github.com/helm/helm
1. Install helm	# helm is to Kubernetes roughly what yum is to a Linux host
[root@k8s-m-01 ~]# wget https://get.helm.sh/helm-v3.5.3-linux-amd64.tar.gz

[root@k8s-m-01 ~]# tar -xf helm-v3.5.3-linux-amd64.tar.gz 
[root@k8s-m-01 ~]# cd linux-amd64/
[root@k8s-m-01 ~]# for i in m1 m2 m3;do scp helm root@$i:/usr/local/bin/; done

# Test the installation; if the usage text below appears, helm works
[root@k8s-m-01 ~]# helm 
The Kubernetes package manager

Common actions for Helm:

- helm search:    search for charts
- helm pull:      download a chart to your local directory to view
- helm install:   upload the chart to Kubernetes
- helm list:      list releases of charts

2. Install the storage class
## Add a helm chart repository
[root@k8s-m-01 ~]# helm repo add ckotzbauer https://ckotzbauer.github.io/helm-charts
"ckotzbauer" has been added to your repositories
[root@k8s-m-01 ~]# helm repo list
NAME       URL                                     
ckotzbauer https://ckotzbauer.github.io/helm-charts

# Option 1: install the NFS client provisioner and storage class directly
[root@k8s-m-01 ~]# helm install nfs-client --set nfs.server=172.16.1.51 --set nfs.path=/nfs/v6  ckotzbauer/nfs-client-provisioner
NAME: nfs-client
LAST DEPLOYED: Fri Apr  9 09:33:23 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None

## Check the result
[root@k8s-m-01 ~]# kubectl get pods 
NAME                                                 READY   STATUS        RESTARTS   AGE
nfs-client-nfs-client-provisioner-56dddf479f-h9qqb   1/1     Running       0          41s

[root@k8s-m-01 ~]# kubectl get storageclasses.storage.k8s.io 
NAME         PROVISIONER                                       RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-client   cluster.local/nfs-client-nfs-client-provisioner   Delete          Immediate           true                   61s
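
Optionally, nfs-client can be marked as the default StorageClass so that PVCs without an explicit storageClassName still bind (not strictly needed here, since the manifests below set storageClassName: nfs-client explicitly):

kubectl patch storageclass nfs-client -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'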

# Option 2: pull the chart and edit its values (recommended), e.g. to set
#   accessModes: ReadWriteMany

### Pull the chart
[root@k8s-m-01 ~]# helm pull ckotzbauer/nfs-client-provisioner

### Unpack it
[root@k8s-m-01 ~]# tar -xf nfs-client-provisioner-1.0.2.tgz 

### Edit values.yaml
[root@k8s-m-01 ~]# cd nfs-client-provisioner/

# The values that need changing:
[root@k8s-m-01 /opt/nfs-client-provisioner]# vim values.yaml 
nfs:
  server: 192.168.12.11
  path: /nfs/v6
  reclaimPolicy: Retain
  accessModes: ReadWriteMany
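
With values.yaml edited, the release can then be installed from the local chart directory (assuming the chart was unpacked to /opt/nfs-client-provisioner as in the prompt above):

# Install the provisioner from the modified local chart and confirm the StorageClass exists
helm install nfs-client /opt/nfs-client-provisioner
kubectl get storageclasses.storage.k8s.io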

5. Delete all PVs and PVCs

# Check the current PVCs
[root@k8s-m-01 ~]# kubectl get pvc
No resources found in default namespace.

# List the PV names
[root@k8s-m-01 ~]# kubectl get pv | awk '{print $1}'
NAME
pvc-406c3f25-f857-491a-b8b9-a79f7b3262ac
pvc-7b1083e7-58e1-4984-b61c-f130d332a26f
pvc-d6ed3bd3-4a1a-4032-aec3-909d4a2aca3f

# Delete all PVs
[root@k8s-m-01 ~]# kubectl get pv | awk '{print $1}' | xargs -I {} kubectl delete pv {}
Error from server (NotFound): persistentvolumes "NAME" not found
persistentvolume "pvc-406c3f25-f857-491a-b8b9-a79f7b3262ac" deleted
persistentvolume "pvc-7b1083e7-58e1-4984-b61c-f130d332a26f" deleted
persistentvolume "pvc-d6ed3bd3-4a1a-4032-aec3-909d4a2aca3f" deleted

# All PVs are gone
[root@k8s-m-01 ~]# kubectl get pv
No resources found

# Clear the old NFS data directory
[root@k8s-m-01 ~]# rm -rf /nfs/v3/*

6. Prepare the manifests

  • The manifests needed for Discuz
  • Prepare and apply them in order
[root@k8s-m-01 discuz-store-mysql]# ll
total 32
-rw-r--r-- 1 root root  364 Apr 12 19:15 1-mysql-storage.yaml
-rw-r--r-- 1 root root 1927 Apr 13 20:22 2-mysql-deployment.yaml
-rw-r--r-- 1 root root  115 Apr 13 21:23 3-discuz-namespace.yaml
-rw-r--r-- 1 root root  440 Apr 12 23:02 4-discuz-storage.yaml
-rw-r--r-- 1 root root  531 Apr 13 22:32 5-discuz-configmap.yaml
-rw-r--r-- 1 root root 3527 Apr 13 23:42 6-discuz-deployment.yaml

1)mysql-storage.yaml
  • Create the PVC for the MySQL data
cat > 1-mysql-storage.yaml <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  namespace: discuz-mysql
  name: mysql-data
spec:
  storageClassName: nfs-client
  accessModes:
    - "ReadWriteMany"                 # read-write access from multiple nodes
  resources:
    requests:
      storage: "1Gi"                  # a production database usually needs far more (e.g. 500Gi)
EOF

2)mysql-deployment.yaml
  • Define the discuz-mysql namespace, the MySQL Service and the Deployment
cat > 2-mysql-deployment.yaml <<EOF
kind: Namespace
apiVersion: v1
metadata:
  name: discuz-mysql
---
kind: Service
apiVersion: v1
metadata:
  name: mysql-cluster-svc
  namespace: discuz-mysql
spec:
  ports:
    - port: 3306
      targetPort: 3306
      name: mysql-tcp
  type: ClusterIP
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: discuz
  namespace: discuz-mysql
spec:
  selector:
    matchLabels:
      app: discuz
      deploy: mysql-cluster
  template:
    metadata:
      labels:
        app: discuz
        deploy: mysql-cluster
    spec:
      containers:
        - name: mysql
          image: mysql:5.7
          livenessProbe:              # liveness probe
            tcpSocket:
              port: 3306              # TCP port to probe
            initialDelaySeconds: 30   # allow time for MySQL initialization; tune to the machine
            successThreshold: 1       # consecutive successes to count as healthy
            failureThreshold: 3       # consecutive failures to count as unhealthy
            timeoutSeconds: 1         # probe timeout; one second is plenty for a local connection
            periodSeconds: 2          # probe interval in seconds (default is 10)
          readinessProbe:             # readiness probe, same settings
            tcpSocket:
              port: 3306
            initialDelaySeconds: 30
            successThreshold: 1
            failureThreshold: 3
            timeoutSeconds: 1
            periodSeconds: 2
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123"
            - name: MYSQL_DATABASE
              value: "discuz"
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: mysql-data        # must match the volume name defined below
      volumes:
        - name: mysql-data
          persistentVolumeClaim:
            claimName: mysql-data     # the PVC created in 1-mysql-storage.yaml
EOF
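
Once these first two manifests are applied (section 8 below), the MySQL half can be verified before moving on (names as defined above):

# The pod should become Ready, the Service should exist, and the PVC should be Bound
kubectl -n discuz-mysql get pods,svc,pvc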

3)discuz-namespace.yaml
  • A dedicated discuz namespace, kept separate to prevent accidental deletion
cat > 3-discuz-namespace.yaml <<EOF
kind: Namespace
apiVersion: v1
metadata:
  name: discuz
EOF

4)discuz-storage.yaml
  • A separate PVC for the Discuz upload data
cat > 4-discuz-storage.yaml <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  namespace: discuz               # the discuz namespace
  name: upload-data               # the PVC name referenced by the discuz Deployment
spec:
  storageClassName: nfs-client
  accessModes:
    - "ReadWriteMany"             # read-write access from multiple nodes
  resources:
    requests:
      storage: "1Gi"              # size this to the expected upload volume
EOF

5)discuz-configmap.yaml
  • ConfigMap && Secret are Kubernetes' configuration center for applications
  • The application is packaged as a container image, and its configuration is injected at container creation time via environment variables or mounted files
  • This cleanly solves configuration mounting, and supports encryption (Secret) and hot reloading
cat > 5-discuz-configmap.yaml <<'EOF'    # quote EOF so the nginx $ variables below are kept literally
kind: ConfigMap
apiVersion: v1
metadata:
  namespace: discuz
  name: discuz-configmap
data:
  default.conf: |
    server {
        listen 80;
        server_name linux.discuz.com;
        root /opt/upload/;

        location / {
            index index.php;
        }

        location ~* \.php$ {
            fastcgi_pass  127.0.0.1:9000;
            fastcgi_index index.php;
            fastcgi_param SCRIPT_FILENAME $document_root/$fastcgi_script_name;
            include fastcgi_params;
        }
    }
EOF

6)discuz-deployment.yaml
  • Deploy Discuz: PHP + nginx containers, Service and Ingress
cat > 6-discuz-deployment.yaml <<EOF
# Discuz Deployment
kind: Deployment
apiVersion: apps/v1
metadata:
  name: discuz
  namespace: discuz
spec:
  selector:
    matchLabels:
      app: discuz
      deploy: discuz
  template:
    metadata:
      labels:
        app: discuz
        deploy: discuz
    spec:
      containers:
        - name: php
          image: 18954354671/lnmp-php-discuz:v2  # the Discuz PHP image built and pushed in section 2 above
          livenessProbe:              # liveness probe
            tcpSocket:                # probe a TCP connection
              port: 9000
            initialDelaySeconds: 30   # allow time for startup; tune to the machine
            successThreshold: 1       # consecutive successes to count as healthy
            failureThreshold: 3       # consecutive failures to count as unhealthy
            timeoutSeconds: 1         # probe timeout; one second is plenty locally
            periodSeconds: 2          # probe interval in seconds (default is 10)
          readinessProbe:             # readiness probe, same settings
            tcpSocket:
              port: 9000
            initialDelaySeconds: 30
            successThreshold: 1
            failureThreshold: 3
            timeoutSeconds: 1
            periodSeconds: 2
        - name: nginx
          image: 18954354671/lnmp-nginx-discuz:v2
          command: ["/bin/bash","-c","--"]			# these two lines work around the nginx container
          args: ["while true;do sleep 30;done;"]	 # failing to come up (with no error logs)
          livenessProbe:              # liveness probe
            tcpSocket:
              port: 80                # TCP port to probe
            initialDelaySeconds: 30   # allow time for startup; tune to the machine
            successThreshold: 1       # consecutive successes to count as healthy
            failureThreshold: 3       # consecutive failures to count as unhealthy
            timeoutSeconds: 1         # probe timeout; one second is plenty locally
            periodSeconds: 2          # probe interval in seconds (default is 10)
          readinessProbe:             # readiness probe, same settings
            tcpSocket:
              port: 80
            initialDelaySeconds: 30
            successThreshold: 1
            failureThreshold: 3
            timeoutSeconds: 1
            periodSeconds: 2
          volumeMounts:
            - mountPath: /usr/share/nginx/html/upload
              name: upload-data
            - mountPath: /etc/nginx/conf.d
              name: discuz-configmap
      volumes:                # the Deployment needs a PVC, created separately in 4-discuz-storage.yaml so it cannot be modified or deleted by accident
        - name: upload-data   # the PVC must live in the discuz namespace
          persistentVolumeClaim:
            claimName: upload-data
        - name: discuz-configmap
          configMap:
            name: discuz-configmap
            items:
              - key: default.conf
                path: default.conf
---
# Discuz Service
kind: Service
apiVersion: v1
metadata:
  name: discuz-svc
  namespace: discuz
spec:
  ports:
    - port: 80
      targetPort: 80
      name: http
  selector:
    app: discuz
    deploy: discuz
  clusterIP: None
---
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: discuz
  namespace: discuz
spec:
  rules:
    - host: linux.discuz.com
      http:
        paths:
          - backend:
              serviceName: discuz-svc
              servicePort: 80
EOF

7. Copy the nginx configuration

# Run the freshly built nginx image and copy its nginx config into the ConfigMap above
[root@k8s-m-01 nginx]# docker run --rm -it 18954354671/lnmp-nginx-discuz:v2 bash
root@dcadb97a6799:/usr/share/nginx/html# cat /etc/nginx/conf.d/linux.wp.com.conf 
server {
    listen 80;
    server_name linux.discuz.com;
    root /opt/upload/;

    location / {
        index index.php;
    }

    location ~* \.php$ {
        fastcgi_pass 127.0.0.1:9000;
        fastcgi_param SCRIPT_FILENAME $document_root/$fastcgi_script_name;
        include fastcgi_params;
    }
}

8. Deploy the services

# The discuz-mysql namespace is defined in 2-mysql-deployment.yaml, so apply it before the PVC in 1-mysql-storage.yaml
kubectl apply -f 2-mysql-deployment.yaml
kubectl apply -f 1-mysql-storage.yaml
kubectl apply -f 3-discuz-namespace.yaml
kubectl apply -f 4-discuz-storage.yaml
kubectl apply -f 5-discuz-configmap.yaml
kubectl apply -f 6-discuz-deployment.yaml
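
Once everything is applied, a quick verification pass (the node IP and hosts entry are environment-specific assumptions):

# Workloads, Service and Ingress
kubectl -n discuz-mysql get pods
kubectl -n discuz get pods,svc,ingress
# Point linux.discuz.com at an ingress node, then test, e.g.:
# echo "192.168.12.11 linux.discuz.com" >> /etc/hosts
curl -I -H 'Host: linux.discuz.com' http://192.168.12.11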

9. Permission issue with automatic PV provisioning on newer Kubernetes versions

  • Adding this flag to the kube-apiserver manifest is enough
[root@k8s-m-01 ~]# vim /etc/kubernetes/manifests/kube-apiserver.yaml
- --feature-gates=RemoveSelfLink=false

# kube-apiserver runs as a static pod, so kubelet restarts it automatically once the manifest is saved
[root@k8s-m-01 ~]# kubectl -n kube-system get pod -l component=kube-apiserver

IV. Complete Discuz + MySQL master-slave setup (final manifests)

# 1. Prepare the files below
# 2. Apply namespace.yaml first
# 3. Then apply the remaining *.yaml files (see the sketch after this list)
# 4. Access the site
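
A minimal sketch of that apply order, using the file names listed below:

# Namespaces first, then everything else in this directory
kubectl apply -f namespace.yaml
kubectl apply -f config.yaml -f volume.yaml -f service.yaml -f mysql-master.yaml -f mysql-node.yaml -f discuz.yaml -f job.yaml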

[root@k8s-m-01 discuz]# cat config.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  name: master-my-cnf
  namespace: mysql-master
data:
  master-my-conf: |
    [mysqld]
    datadir=/var/lib/mysql
    server-id=1
    binlog_format=row
    log-bin=mysql-bin
    skip-name-resolve
    gtid-mode=on
    enforce-gtid-consistency=true
    log-slave-updates=1
    relay_log_purge=0
    symbolic-links=0
    sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES


    [mysqld_safe]
    log-error=/var/log/mysqld.log
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: node-my-cnf
  namespace: mysql-node
data:
  node-my-cnf: |
    [mysqld]
    datadir=/var/lib/mysql
    server-id=2
    binlog_format=row
    log-bin=mysql-bin
    skip-name-resolve
    gtid-mode=on
    enforce-gtid-consistency=true
    log-slave-updates=1
    relay_log_purge=0
    symbolic-links=0
    sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES

    relay-log=mysql-relay-bin
    replicate-wild-ignore-table=mysql.%
    replicate-wild-ignore-table=test.%
    replicate-wild-ignore-table=information_schema.%
    replicate-wild-ignore-table=sys.%

    [mysqld_safe]
    log-error=/var/log/mysqld.log
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx
  namespace: discuz
data:
  nginx.conf: |
    user  root;
    worker_processes  auto;
    worker_cpu_affinity auto;
    error_log  /var/log/nginx/error.log notice;
    pid        /var/run/nginx.pid;
    worker_rlimit_nofile 35535;
    events {
        use epoll;
        worker_connections  10240;
    }
    http {
        include       /etc/nginx/mime.types;
        default_type  application/octet-stream;
        log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                          '$status $body_bytes_sent "$http_referer" '
                          '"$http_user_agent" "$http_x_forwarded_for"';
        log_format json_access '{"@timestamp":"$time_iso8601",'
                          '"host":"$server_addr",'
                          '"clientip":"$remote_addr",'
                          '"size":$body_bytes_sent,'
                          '"responsetime":$request_time,'
                          '"upstreamtime":"$upstream_response_time",'
                          '"upstreamhost":"$upstream_addr",'
                          '"http_host":"$host",'
                          '"url":"$uri",'
                          '"domain":"$host",'
                          '"xff":"$http_x_forwarded_for",'
                          '"referer":"$http_referer",'
                          '"status":"$status"}';
        access_log  /var/log/nginx/access.log  json_access;
        server_tokens off;
        client_max_body_size 200m;
        sendfile        on;
        tcp_nopush     on;
        keepalive_timeout  65;
        gzip on;
        gzip_disable "MSIE [1-6]\.";
        gzip_http_version 1.1;
        gzip_comp_level 2;
        gzip_buffers 16 8k;
        gzip_min_length 1024;
        gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript image/jpeg;
        include /etc/nginx/conf.d/*.conf;
    }
  default.conf: |
    server {
            listen 80;
            server_name _;
            root /usr/share/nginx/html;
            location / {
            index index.php;
            }
            location ~* \.php$ {
                    fastcgi_pass 127.0.0.1:9000;
                    fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
                    fastcgi_param HTTPS ON;
                    include fastcgi_params;
            }
    }
[root@k8s-m-01 discuz]# cat discuz.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: discuz
  namespace: discuz
spec:
  selector:
    matchLabels:
      app: discuz
  template:
    metadata:
      labels:
        app: discuz
    spec:
      containers:
        - name: php
          image: registry.cn-shanghai.aliyuncs.com/aliyun_mm/discuz:php-v2
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: discuz
        - name: nginx
          image: nginx
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: discuz

            - mountPath: /etc/nginx/nginx.conf
              name: nginx-conf
              subPath: nginx.conf

            - mountPath: /etc/nginx/conf.d
              name: default-conf

      volumes:
        - name: discuz
          persistentVolumeClaim:
            claimName: discuz

        - name: nginx-conf
          configMap:
            name: nginx
            items:
              - key: nginx.conf
                path: nginx.conf

        - name: default-conf
          configMap:
            name: nginx
            items:
              - key: default.conf
                path: default.conf
[root@k8s-m-01 discuz]# cat job.yaml 
apiVersion: batch/v1
kind: Job
metadata:
  name: job-master
  namespace: mysql-master
spec:
  template:
    spec:
      restartPolicy: OnFailure
      containers:
        - name: master-create-user
          image: mysql:5.7
          imagePullPolicy: IfNotPresent
          command:
            - "/bin/bash"
            - "-c"
            - |
              MYSQL_MASTER_SVC_NAME=mysql-master.mysql-master.svc.cluster.local
              MYSQL_MASTER_PASSWORD=123
              MYSQL_MASTER_USERNAME=root

              while true
              do
                mysql -u${MYSQL_MASTER_USERNAME} -p${MYSQL_MASTER_PASSWORD} -h${MYSQL_MASTER_SVC_NAME} -e 'show databases;' >/dev/null 2>&1
                if [ $? -eq 0 ];then

                    mysql -u${MYSQL_MASTER_USERNAME} -p${MYSQL_MASTER_PASSWORD} -h${MYSQL_MASTER_SVC_NAME} -e "grant replication slave on *.* to 'slave'@'%' identified by '123'; flush privileges;"

                    break;
                fi
                sleep 1;
              done
---
apiVersion: batch/v1
kind: Job
metadata:
  name: job-node
  namespace: mysql-node
spec:
  template:
    spec:
      restartPolicy: OnFailure
      containers:
        - name: mysql-node
          image: mysql:5.7
          imagePullPolicy: IfNotPresent
          command:
            - "/bin/bash"
            - "-c"
            - |
              MYSQL_NODE_SVC_NAME=mysql-node.mysql-node.svc.cluster.local
              MYSQL_MASTER_SVC_NAME=mysql-master.mysql-master.svc.cluster.local
              MYSQL_NODE_PASSWORD=123
              MYSQL_NODE_USERNAME=root

              while true
              do
                mysql -u${MYSQL_NODE_USERNAME} -p${MYSQL_NODE_PASSWORD} -h${MYSQL_NODE_SVC_NAME} -e 'show databases;' >/dev/null 2>&1
                if [ $? -eq 0 ];then

                    # read the current binlog file and position from the master (not from the node itself)
                    mysql -u${MYSQL_NODE_USERNAME} -p${MYSQL_NODE_PASSWORD} -h${MYSQL_MASTER_SVC_NAME} -e "show master status\G" > /tmp/log

                    MASTER_LOG_FILE=`/bin/cat /tmp/log | /usr/bin/awk -F: 'NR==2{print $2}' | /usr/bin/tr -d " "`

                    MYSQL_LOG_POS=`/bin/cat /tmp/log | /usr/bin/awk -F: 'NR==3{print $2}' | /usr/bin/tr -d " "`

                    mysql -u${MYSQL_NODE_USERNAME} -p${MYSQL_NODE_PASSWORD} -h${MYSQL_NODE_SVC_NAME} -e "change master to master_host='mysql-master.mysql-master.svc.cluster.local',master_port=3306,master_user='slave',master_password='123',master_log_file='"${MASTER_LOG_FILE}"',master_log_pos="${MYSQL_LOG_POS}";"

                    mysql -u${MYSQL_NODE_USERNAME} -p${MYSQL_NODE_PASSWORD} -h${MYSQL_NODE_SVC_NAME} -e "start slave;"
                    break;
                fi
                sleep 1;
              done
[root@k8s-m-01 discuz]# cat mysql-master.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql-master
  namespace: mysql-master
spec:
  selector:
    matchLabels:
      app: mysql-master
  template:
    metadata:
      labels:
        app: mysql-master
    spec:
      containers:
        - name: mysql-master
          image: mysql:5.7
          imagePullPolicy: IfNotPresent
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123"
          volumeMounts:
            - mountPath: /etc/mysql/my.cnf
              name: master-my-cnf
              subPath: my.cnf
      volumes:
        - name: master-my-cnf
          configMap:
            name: master-my-cnf
            items:
              - key: master-my-conf
                path: my.cnf
[root@k8s-m-01 discuz]# cat mysql-node.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql-node
  namespace: mysql-node
spec:
  selector:
    matchLabels:
      app: mysql-node
  template:
    metadata:
      labels:
        app: mysql-node
    spec:
      containers:
        - name: mysql-node
          image: mysql:5.7
          imagePullPolicy: IfNotPresent
          env:
            - name: MYSQL_ROOT_PASSWORD
              value: "123"
          volumeMounts:
            - mountPath: /etc/mysql/my.cnf
              name: node-my-cnf
              subPath: my.cnf
      volumes:
        - name: node-my-cnf
          configMap:
            name: node-my-cnf
            items:
              - key: node-my-cnf
                path: my.cnf
[root@k8s-m-01 discuz]# cat namespace.yaml 
apiVersion: v1
kind: Namespace
metadata:
  name: mysql-master
---
apiVersion: v1
kind: Namespace
metadata:
  name: mysql-node
---
apiVersion: v1
kind: Namespace
metadata:
  name: discuz
[root@k8s-m-01 discuz]# cat service.yaml 
apiVersion: v1
kind: Service
metadata:
  name: mysql-master
  namespace: mysql-master
spec:
  selector:
    app: mysql-master
  ports:
    - port: 3306
      targetPort: 3306
      name: mysql
      protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-node
  namespace: mysql-node
spec:
  selector:
    app: mysql-node
  ports:
    - port: 3306
      targetPort: 3306
      name: mysql
      protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: discuz
  namespace: discuz
spec:
  selector:
    app: discuz
  ports:
    - port: 80
      targetPort: 80
      name: http
      protocol: TCP

    - port: 443
      targetPort: 443
      name: https
      protocol: TCP
  type: NodePort
[root@k8s-m-01 discuz]# cat volume.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: discuz
  namespace: discuz
spec:
  storageClassName: nfs-client
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "8Gi"
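
Finally, replication state inside the cluster can be checked from the node Deployment (a sketch assuming the labels and credentials defined above):

# Find the mysql-node pod and confirm both replication threads are running
NODE_POD=$(kubectl -n mysql-node get pod -l app=mysql-node -o jsonpath='{.items[0].metadata.name}')
kubectl -n mysql-node exec "$NODE_POD" -- mysql -uroot -p123 -e "show slave status\G" | grep -E "Slave_IO_Running|Slave_SQL_Running"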
