MySQL Master-Slave Cluster Deployment and Maintenance
- High availability: 1 master + 1 slave plus a sentinel tier. The master handles writes, the slave handles reads, and the sentinels monitor master/slave health and perform automatic failover.
- Persistence: PVs are provisioned dynamically through a StorageClass (Ceph RBD in production).
- Networking: Headless Services give each Pod a fixed DNS name.
Environment:
| Image/Hostname | Memory/Disk | CPU (count/cores) |
|------------------------|-------------|-------------------|
| CentOS-Stream-8/master | 4GB/40GB | 4P/2C |
| CentOS-Stream-8/node1 | 4GB/40GB | 4P/2C |
| CentOS-Stream-8/node2 | 4GB/40GB | 4P/2C |
Deployment steps:
1. Create the namespace and Secret
bash
# Create the namespace
[root@master ~]# kubectl create ns middleware
# Store the MySQL passwords (avoids hardcoding them in manifests)
[root@master ~]# kubectl create secret generic mysql-secret -n middleware \
--from-literal=root-password=Admin@123 \
--from-literal=replica-password=Replica@123
# secret generic: creates a generic (Opaque) Secret, suitable for passwords, tokens and other arbitrary sensitive data
# --from-literal: defines Secret entries directly as key=value pairs (--from-file reads them from files instead)
[root@master ~]# kubectl get secret -n middleware
NAME TYPE DATA AGE
mysql-secret Opaque 2 25s
[root@master ~]# kubectl describe secret mysql-secret -n middleware
Name: mysql-secret
Namespace: middleware
Labels: <none>
Annotations: <none>
Type: Opaque
Data
====
replica-password: 11 bytes
root-password: 9 bytes
# A Secret only Base64-encodes its data (encoding, not encryption), but it keeps sensitive values out of manifests, and K8s RBAC controls who may read it.
# To change a password, update the Secret (kubectl edit secret mysql-secret -n middleware) and restart the dependent Pods for the change to take effect.
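A sketch of that rotation flow (the new password value is only an example; the StatefulSets named here are the ones deployed later in this article):
bash
# Confirm the stored value (Base64 decoding, not decryption)
[root@master ~]# kubectl get secret mysql-secret -n middleware -o jsonpath='{.data.root-password}' | base64 -d
# Rotate by re-rendering the Secret, re-applying it, then restarting the dependent Pods
[root@master ~]# kubectl create secret generic mysql-secret -n middleware \
    --from-literal=root-password=NewAdmin@123 \
    --from-literal=replica-password=Replica@123 \
    --dry-run=client -o yaml | kubectl apply -f -
[root@master ~]# kubectl rollout restart statefulset/mysql-master statefulset/mysql-slave -n middleware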
2. Create the MinIO access-credential Secret (used for backups)
bash
[root@master feng]# kubectl create secret generic minio-secret -n middleware \
> --from-literal=access-key=minioadmin \
> --from-literal=secret-key=minioadmin
secret/minio-secret created
3. Configure the ConfigMaps
MySQL master/slave configuration (mysql-config)
bash
[root@master feng]# cat mysql-configmap.yaml
# mysql-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql-config
  namespace: middleware
data:
  # Master configuration
  master.cnf: |
    [mysqld]
    server-id=1
    log_bin=mysql-bin
    binlog_do_db=ecommerce            # Business database to replicate
    binlog_ignore_db=mysql            # Skip the system database
    binlog_expire_logs_seconds=86400  # Keep binary logs for 1 day
  # Slave configuration
  slave.cnf: |
    [mysqld]
    server-id=2
    log_bin=mysql-bin
    relay_log=mysql-relay-bin
    read_only=1                       # Slave is read-only
    replicate_do_db=ecommerce         # Business database to replicate
Apply it:
bash
[root@master feng]# kubectl apply -f mysql-configmap.yaml
configmap/mysql-config created
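The rendered keys can be inspected before the Pods mount them (a sketch):
bash
[root@master feng]# kubectl describe configmap mysql-config -n middleware
# Dump a single key to confirm the my.cnf fragment survived intact
[root@master feng]# kubectl get configmap mysql-config -n middleware -o jsonpath='{.data.master\.cnf}'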
Sentinel configuration (mysql-sentinel-config)
bash
[root@master feng]# cat sentinel-configmap.yaml
# sentinel-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql-sentinel-config
  namespace: middleware
data:
  sentinel.conf: |
    port 26379
    dir /tmp
    # Monitor the master (via its Headless Service domain), name it mymaster, quorum 2
    sentinel monitor mymaster mysql-master.middleware.svc.cluster.local 3306 2
    # Trigger failover after the master has been unresponsive for 30 seconds
    sentinel down-after-milliseconds mymaster 30000
    # Failover timeout
    sentinel failover-timeout mymaster 180000
Apply it:
bash
[root@master feng]# kubectl apply -f sentinel-configmap.yaml
configmap/mysql-sentinel-config created
4. Deploy the Headless Services
Master Service
bash
[root@master feng]# cat mysql-master-svc.yaml
# mysql-master-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: mysql-master
  namespace: middleware
spec:
  selector:
    app: mysql-master
  ports:
    - port: 3306
      targetPort: 3306
  clusterIP: None  # Headless Service (no cluster IP; accessed by DNS name)
Slave Service
bash
[root@master feng]# cat mysql-slave-svc.yaml
# mysql-slave-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: mysql-slave
  namespace: middleware
spec:
  selector:
    app: mysql-slave
  ports:
    - port: 3306
      targetPort: 3306
  clusterIP: None
Sentinel Service
bash
[root@master feng]# cat mysql-sentinel-svc.yaml
# mysql-sentinel-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: mysql-sentinel
  namespace: middleware
spec:
  selector:
    app: mysql-sentinel
  ports:
    - port: 26379
      targetPort: 26379
  clusterIP: None
Apply them:
bash
[root@master feng]# kubectl apply -f mysql-master-svc.yaml -f mysql-slave-svc.yaml -f mysql-sentinel-svc.yaml
service/mysql-master created
service/mysql-slave created
service/mysql-sentinel created
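A quick verification that all three Services really are headless (a sketch):
bash
# All three CLUSTER-IP values should read None
[root@master feng]# kubectl get svc -n middleware
# Endpoints stay empty until the StatefulSets in the next steps create matching Pods
[root@master feng]# kubectl get endpoints -n middleware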
5. Deploy the master (StatefulSet)
A StatefulSet is the controller designed for stateful applications: workloads that need a fixed identity, persistent storage, and ordered deployment/scaling/updates (databases, distributed clusters, message queues, and so on). Compared with a Deployment for stateless applications, its core strength is precise management of "state" (a quick check of the fixed identity follows the comparison table). It guarantees that the application gets:
- a fixed, unique network identity (hostname and DNS record);
- a stable binding to persistent storage (each instance gets its own PV/PVC);
- an ordered lifecycle (deployment, scaling, updates and deletion happen in sequence).
| Aspect | StatefulSet | Deployment |
|---|---|---|
| Application type | Stateful (needs fixed identity and storage) | Stateless (no fixed identity, Pods are interchangeable) |
| Pod names | Fixed (name-ordinal) | Random (e.g. nginx-7f987d65c4-2xqzl) |
| Storage binding | One PVC per Pod | All Pods share a PVC (or no persistence) |
| Operation order | Strictly ordered (0→N or N→0) | Parallel (no ordering) |
| Network identity | Fixed DNS name | Accessed through a Service, no stable per-Pod name |
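Concretely, for the manifests in this article the master StatefulSet's single replica is always named mysql-master-0, and together with the Headless Service it gets a stable per-Pod DNS record (a sketch; only meaningful once step 5 below has been applied):
bash
# Fixed Pod name: <statefulset-name>-<ordinal>
[root@master feng]# kubectl get pods -n middleware -l app=mysql-master
# Fixed per-Pod DNS record: <pod-name>.<service-name>.<namespace>.svc.cluster.local,
# e.g. mysql-master-0.mysql-master.middleware.svc.cluster.local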
bash
[root@master feng]# cat mysql-master-sts.yaml
# mysql-master-sts.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql-master
  namespace: middleware
spec:
  serviceName: mysql-master  # Bind to the Headless Service
  replicas: 1
  selector:
    matchLabels:
      app: mysql-master
  template:
    metadata:
      labels:
        app: mysql-master
    spec:
      containers:
        - name: mysql
          image: mysql:8.0.36
          ports:
            - containerPort: 3306
          env:
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mysql-secret
                  key: root-password
            - name: MYSQL_REPLICATION_USER
              value: "replica"
            - name: MYSQL_REPLICATION_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mysql-secret
                  key: replica-password
          volumeMounts:
            - name: mysql-data
              mountPath: /var/lib/mysql
            - name: mysql-config
              mountPath: /etc/mysql/conf.d/master.cnf
              subPath: master.cnf  # Mount only the master configuration file
      volumes:
        - name: mysql-config
          configMap:
            name: mysql-config
  volumeClaimTemplates:  # Creates one PVC per Pod so each Pod gets its own persistent storage
    - metadata:
        name: mysql-data
      spec:
        accessModes: [ "ReadWriteOnce" ]  # Single-node read-write access
        storageClassName: "ceph-rbd"      # Provision PVs dynamically from the ceph-rbd StorageClass
        resources:
          requests:
            storage: 20Gi
PV (PersistentVolume): a storage resource provisioned in the Kubernetes cluster (for example a disk or a Ceph RBD block device). It is a cluster-level resource that exists independently of any Pod and provides the actual persistent storage.
PVC (PersistentVolumeClaim): a Pod's "request" for storage. Through a PVC the Pod asks the cluster for storage of a given size and access mode, and Kubernetes binds the PVC to a matching PV (or creates one dynamically via a StorageClass).
In short: the PV is the storage resource, the PVC is the request for it; the Pod consumes the PV through the PVC, which decouples storage from the application.
Core relationships:
Pod /var/lib/mysql ←→ PVC (mysql-data-mysql-master-0) ←→ PV (dynamically created via ceph-rbd) ←→ Ceph RBD block device
Pod /etc/mysql/conf.d ←→ ConfigMap (mysql-config) ←→ configuration data stored in Kubernetes etcd
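Once the StatefulSet below is applied, that chain can be walked end to end (a sketch; the PVC name follows the <volumeClaimTemplate>-<pod> pattern):
bash
# PVC created from the template, and the PV it bound to
[root@master feng]# kubectl get pvc mysql-data-mysql-master-0 -n middleware
[root@master feng]# kubectl get pv | grep mysql-data-mysql-master-0
# StorageClass that performed the dynamic provisioning
[root@master feng]# kubectl get storageclass ceph-rbd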
Apply it:
bash
[root@master feng]# kubectl apply -f mysql-master-sts.yaml
statefulset.apps/mysql-master created
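After the Pod reports Running, make sure the replication account exists on the master. The stock mysql image does not create a user from the MYSQL_REPLICATION_USER/MYSQL_REPLICATION_PASSWORD variables on its own, so as a sketch it can be created manually with the passwords stored in mysql-secret:
bash
[root@master feng]# kubectl get pods -n middleware -l app=mysql-master
# Create the account the slave will use in the next step
[root@master feng]# kubectl exec -it mysql-master-0 -n middleware -- \
    mysql -uroot -pAdmin@123 -e "CREATE USER IF NOT EXISTS 'replica'@'%' IDENTIFIED BY 'Replica@123'; GRANT REPLICATION SLAVE ON *.* TO 'replica'@'%'; FLUSH PRIVILEGES;"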
6. Deploy the slave (StatefulSet)
bash
[root@master feng]# cat mysql-slave-sts.yaml
# mysql-slave-sts.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql-slave
  namespace: middleware
spec:
  serviceName: mysql-slave
  replicas: 1
  selector:
    matchLabels:
      app: mysql-slave
  template:
    metadata:
      labels:
        app: mysql-slave
    spec:
      containers:
        - name: mysql
          image: mysql:8.0.36
          ports:
            - containerPort: 3306
          env:
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mysql-secret
                  key: root-password
            - name: MYSQL_REPLICATION_USER
              value: "replica"
            - name: MYSQL_REPLICATION_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mysql-secret
                  key: replica-password
          volumeMounts:
            - name: mysql-data
              mountPath: /var/lib/mysql
            - name: mysql-config
              mountPath: /etc/mysql/conf.d/slave.cnf
              subPath: slave.cnf  # Mount only the slave configuration file
          # Initialize replication on first start: boot mysqld through the image
          # entrypoint, read the master's binlog coordinates, then configure and
          # start the replica threads.
          command:
            - /bin/sh
            - -c
            - |
              docker-entrypoint.sh mysqld &
              until mysqladmin ping -h127.0.0.1 -uroot -p"$MYSQL_ROOT_PASSWORD" --silent; do sleep 2; done
              LOG_FILE=$(mysql -h mysql-master.middleware.svc.cluster.local -uroot -p"$MYSQL_ROOT_PASSWORD" -e 'SHOW MASTER STATUS\G' | awk '/File:/ {print $2}')
              LOG_POS=$(mysql -h mysql-master.middleware.svc.cluster.local -uroot -p"$MYSQL_ROOT_PASSWORD" -e 'SHOW MASTER STATUS\G' | awk '/Position:/ {print $2}')
              mysql -uroot -p"$MYSQL_ROOT_PASSWORD" -e "CHANGE MASTER TO
                MASTER_HOST='mysql-master.middleware.svc.cluster.local',
                MASTER_USER='$MYSQL_REPLICATION_USER',
                MASTER_PASSWORD='$MYSQL_REPLICATION_PASSWORD',
                MASTER_LOG_FILE='$LOG_FILE',
                MASTER_LOG_POS=$LOG_POS;
                START SLAVE;"
              wait
      volumes:
        - name: mysql-config
          configMap:
            name: mysql-config
  volumeClaimTemplates:
    - metadata:
        name: mysql-data
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: "ceph-rbd"
        resources:
          requests:
            storage: 20Gi
Apply it:
bash
[root@master feng]# kubectl apply -f mysql-slave-sts.yaml
statefulset.apps/mysql-slave created
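Replication health can then be checked from the slave; both threads should report Yes (a sketch):
bash
[root@master feng]# kubectl exec -it mysql-slave-0 -n middleware -- \
    mysql -uroot -pAdmin@123 -e "SHOW SLAVE STATUS\G" | grep -E "Slave_IO_Running|Slave_SQL_Running|Seconds_Behind_Master"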
7. Deploy the sentinels (StatefulSet)
bash
[root@master feng]# cat mysql-sentinel-sts.yaml
# mysql-sentinel-sts.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql-sentinel
  namespace: middleware
spec:
  serviceName: mysql-sentinel
  replicas: 3
  selector:
    matchLabels:
      app: mysql-sentinel
  template:
    metadata:
      labels:
        app: mysql-sentinel
    spec:
      containers:
        - name: sentinel
          image: redis:7.0
          ports:
            - containerPort: 26379
          command:
            - "redis-sentinel"
            - "/etc/sentinel/sentinel.conf"
          volumeMounts:
            - name: sentinel-config
              mountPath: /etc/sentinel
      volumes:
        - name: sentinel-config
          configMap:
            name: mysql-sentinel-config
Apply it:
bash
[root@master feng]# kubectl apply -f mysql-sentinel-sts.yaml
statefulset.apps/mysql-sentinel created
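A quick check that the three sentinel Pods started in order and agree on which instance is the monitored master (a sketch):
bash
[root@master feng]# kubectl get pods -n middleware -l app=mysql-sentinel
# Ask one sentinel for the address it currently considers the master
[root@master feng]# kubectl exec -it mysql-sentinel-0 -n middleware -- \
    redis-cli -p 26379 sentinel get-master-addr-by-name mymaster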
Maintenance operations
Data backup
bash
# mysql-backup-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: mysql-backup
  namespace: middleware
spec:
  schedule: "0 2 * * *"  # Run daily at 02:00
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: backup
              image: mysql:8.0.36
              command:
                - "/bin/sh"
                - "-c"
                - |
                  # Install the MinIO client
                  wget https://dl.min.io/client/mc/release/linux-amd64/mc -O /usr/local/bin/mc && chmod +x /usr/local/bin/mc
                  # Configure the MinIO connection
                  mc config host add minio http://minio-service.middleware.svc.cluster.local:9000 $MINIO_ACCESS_KEY $MINIO_SECRET_KEY
                  # Dump and upload (mc pipe streams stdin to the object)
                  mysqldump -h mysql-master -uroot -p${ROOT_PASSWORD} --all-databases | mc pipe minio/mysql-backup/$(date +%Y%m%d).sql
              env:
                - name: ROOT_PASSWORD
                  valueFrom:
                    secretKeyRef:
                      name: mysql-secret
                      key: root-password
                - name: MINIO_ACCESS_KEY
                  valueFrom:
                    secretKeyRef:
                      name: minio-secret
                      key: access-key
                - name: MINIO_SECRET_KEY
                  valueFrom:
                    secretKeyRef:
                      name: minio-secret
                      key: secret-key
          restartPolicy: OnFailure
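The CronJob can be applied and exercised right away instead of waiting for the 02:00 schedule (a sketch; the one-off job name is arbitrary):
bash
[root@master feng]# kubectl apply -f mysql-backup-cronjob.yaml
# Trigger a one-off run from the CronJob template and follow its logs
[root@master feng]# kubectl create job mysql-backup-manual --from=cronjob/mysql-backup -n middleware
[root@master feng]# kubectl logs -n middleware job/mysql-backup-manual -f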
Failure recovery
bash
# 1. Enter the master Pod
kubectl exec -it mysql-master-0 -n middleware -- bash
# 2. Install the MinIO client
wget https://dl.min.io/client/mc/release/linux-amd64/mc -O /usr/local/bin/mc && chmod +x /usr/local/bin/mc
# 3. Configure the MinIO connection
mc config host add minio http://minio-service.middleware.svc.cluster.local:9000 minioadmin minioadmin
# 4. Download the backup file
mc cp minio/mysql-backup/20240520.sql /tmp/
# 5. Restore the data
mysql -uroot -pAdmin@123 < /tmp/20240520.sql
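After the restore, confirm the data is back and that the slave has caught up again (a sketch; ecommerce is the business database replicated above):
bash
# Still inside the master Pod: the restored databases should be listed
mysql -uroot -pAdmin@123 -e "SHOW DATABASES;"
# Back on the control node: the slave should have re-applied the restored data
kubectl exec -it mysql-slave-0 -n middleware -- \
    mysql -uroot -pAdmin@123 -e "SHOW SLAVE STATUS\G" | grep Seconds_Behind_Master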