MySQL主从复制与读写分离
一、主从复制概述
1. 什么是主从复制
MySQL主从复制是指将主数据库的DDL和DML操作通过二进制日志传输到从数据库,并在从库上重新执行,使从库的数据与主库保持同步。
text
主从复制模型:
主库(Master) ──┬── 从库(Slave1)
├── 从库(Slave2)
└── 从库(Slave3)
特点:
├── 数据冗余备份
├── 读写分离
├── 负载均衡
├── 高可用性
└── 数据实时同步
2. 主从复制原理
text
主从复制工作流程:
主库(Master) 从库(Slave)
│ │
│ 1. 写入数据 │
↓ │
┌──────────────┐ │
│ 二进制日志 │ │
│ (Binary Log) │ │
└──────────────┘ │
│ │
│ 2. 发送日志 │
│ ────────────────────────────→ │
│ ↓
│ ┌──────────────┐
│ │ I/O线程 │
│ │ 读取日志 │
│ └──────────────┘
│ │
│ ┌──────↓───────┐
│ │ 中继日志 │
│ │ (Relay Log) │
│ └──────────────┘
│ │
│ ┌──────↓───────┐
│ │ SQL线程 │
│ │ 执行日志 │
│ └──────────────┘
│ ↓
│ ┌──────────────┐
│ │ 从库数据 │
└────────────────────────→│ 同步完成 │
└──────────────┘
核心组件:
text
1. 主库:
└── Binlog Dump线程:将二进制日志发送给从库
2. 从库:
├── I/O线程:接收主库的二进制日志,写入中继日志
└── SQL线程:读取中继日志,在从库上执行SQL
二、主从复制环境准备
1. 环境规划
text
角色 IP地址 主机名 说明
─────────────────────────────────────────────────────────
Master 192.168.1.10 mysql-master 主库
Slave1 192.168.1.11 mysql-slave1 从库1
Slave2 192.168.1.12 mysql-slave2 从库2
操作系统:CentOS 7/8 或 Ubuntu 20.04/22.04
MySQL版本:5.7 或 8.0
2. 准备工作
bash
# 1. 关闭防火墙(或开放3306端口)
systemctl stop firewalld
systemctl disable firewalld
# 2. 关闭SELinux
setenforce 0
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# 3. 设置主机名
hostnamectl set-hostname mysql-master
# 从库
hostnamectl set-hostname mysql-slave1
hostnamectl set-hostname mysql-slave2
# 4. 配置hosts文件
cat >> /etc/hosts << 'EOF'
192.168.1.10 mysql-master
192.168.1.11 mysql-slave1
192.168.1.12 mysql-slave2
EOF
# 5. 确保各节点时间同步
yum install -y ntpdate
ntpdate pool.ntp.org
三、主库配置
1. 修改主库配置文件
bash
# CentOS/RHEL: /etc/my.cnf
# Ubuntu: /etc/mysql/my.cnf 或 /etc/mysql/mysql.conf.d/mysqld.cnf
vim /etc/my.cnf
# 添加以下配置
ini
[mysqld]
# 服务器ID(主从各节点必须唯一)
server-id = 1
# 开启二进制日志
log_bin = /var/lib/mysql/mysql-bin
log_bin_index = /var/lib/mysql/mysql-bin.index
# 二进制日志格式(推荐ROW)
binlog_format = ROW
# 二进制日志过期时间(天)
expire_logs_days = 7
# 最大二进制日志大小
max_binlog_size = 100M
# 同步的数据库(多个数据库重复写多行)
binlog_do_db = testdb
binlog_do_db = appdb
# 忽略的数据库(不同步)
binlog_ignore_db = mysql
binlog_ignore_db = information_schema
binlog_ignore_db = performance_schema
binlog_ignore_db = sys
# 安全设置
log_bin_trust_function_creators = 1
# 开启GTID(可选,MySQL 5.6+)
gtid_mode = ON
enforce_gtid_consistency = ON
# 使用ROW格式时的记录方式
binlog_row_image = full
2. 创建同步用户
sql
-- 登录MySQL
mysql -u root -p
-- 创建复制用户
CREATE USER 'repl'@'%' IDENTIFIED BY 'Repl@123456';
-- 授予复制权限
GRANT REPLICATION SLAVE ON *.* TO 'repl'@'%';
-- 可选:授予其他必要权限
GRANT REPLICATION CLIENT ON *.* TO 'repl'@'%';
-- 刷新权限
FLUSH PRIVILEGES;
-- 查看用户权限
SHOW GRANTS FOR 'repl'@'%';
3. 查看主库状态
sql
-- 查看主库状态
SHOW MASTER STATUS\G
-- 输出示例
*************************** 1. row ***************************
File: mysql-bin.000001
Position: 154
Binlog_Do_DB: testdb,appdb
Binlog_Ignore_DB: mysql,information_schema,performance_schema,sys
Executed_Gtid_Set:
-- 记录File和Position值,配置从库时需要
4. 备份主库数据
bash
# 1. 一致性备份(--single-transaction,不锁表方式,适用于InnoDB,推荐)
mysqldump -u root -p --all-databases --master-data=2 --single-transaction > master_backup.sql
# 2. 或者锁定表(锁表方式)
mysql -u root -p -e "FLUSH TABLES WITH READ LOCK;"
mysqldump -u root -p --all-databases > master_backup.sql
mysql -u root -p -e "UNLOCK TABLES;"
# 3. 传输备份文件到从库
scp master_backup.sql root@192.168.1.11:/root/
scp master_backup.sql root@192.168.1.12:/root/
四、从库配置
1. 修改从库配置文件
bash
# 从库1配置 (192.168.1.11)
vim /etc/my.cnf
ini
[mysqld]
# 服务器ID(必须与主库不同)
server-id = 2
# 开启中继日志
relay_log = /var/lib/mysql/mysql-relay-bin
relay_log_index = /var/lib/mysql/mysql-relay-bin.index
# 开启二进制日志(可选,如果从库还需要作为其他从库的主库)
log_bin = /var/lib/mysql/mysql-bin
log_slave_updates = 1
# 只读模式(防止从库被写入)
read_only = 1
super_read_only = 1
# 复制过滤
replicate_do_db = testdb
replicate_do_db = appdb
replicate_ignore_db = mysql
replicate_ignore_db = information_schema
replicate_ignore_db = performance_schema
replicate_ignore_db = sys
# 跳过错误(可选,谨慎使用)
slave_skip_errors = 1062,1032
# 开启GTID(与主库一致)
gtid_mode = ON
enforce_gtid_consistency = ON
# 中继日志恢复
relay_log_recovery = 1
bash
# 从库2配置 (192.168.1.12)
# server-id = 3
# 其他配置同从库1
2. 导入主库备份
bash
# 1. 停止从库复制(如果有)
mysql -u root -p -e "STOP SLAVE;"
# 2. 导入备份
mysql -u root -p < /root/master_backup.sql
# 3. 查看GTID或二进制日志位置
# 如果使用GTID,备份中已包含GTID信息
3. 配置主从复制
sql
-- 在从库上执行(使用二进制日志位置方式)
-- 1. 停止从库
STOP SLAVE;
-- 2. 配置主库信息
CHANGE MASTER TO
MASTER_HOST = '192.168.1.10',
MASTER_PORT = 3306,
MASTER_USER = 'repl',
MASTER_PASSWORD = 'Repl@123456',
MASTER_LOG_FILE = 'mysql-bin.000001', -- 从SHOW MASTER STATUS获取
MASTER_LOG_POS = 154; -- 从SHOW MASTER STATUS获取
-- 3. 启动从库
START SLAVE;
-- 4. 查看从库状态
SHOW SLAVE STATUS\G
sql
-- 使用GTID方式配置(MySQL 5.6+)
-- 1. 停止从库
STOP SLAVE;
-- 2. 配置GTID复制
CHANGE MASTER TO
MASTER_HOST = '192.168.1.10',
MASTER_PORT = 3306,
MASTER_USER = 'repl',
MASTER_PASSWORD = 'Repl@123456',
MASTER_AUTO_POSITION = 1;
-- 3. 启动从库
START SLAVE;
-- 4. 查看从库状态
SHOW SLAVE STATUS\G
4. 验证复制状态
sql
-- 查看从库状态关键字段
SHOW SLAVE STATUS\G
-- 关键字段检查:
-- Slave_IO_Running: Yes # I/O线程状态
-- Slave_SQL_Running: Yes # SQL线程状态
-- Seconds_Behind_Master: 0 # 延迟时间
-- Last_IO_Error: # I/O错误信息
-- Last_SQL_Error: # SQL错误信息
-- 查看主从连接状态
SHOW PROCESSLIST;
-- 应该能看到"Waiting for master to send event"状态的线程
五、主从复制管理
1. 复制状态监控
sql
-- 1. 查看从库状态
SHOW SLAVE STATUS\G
-- 2. 查看主库状态
SHOW MASTER STATUS\G
SHOW BINARY LOGS;
-- 3. 查看复制线程
SHOW PROCESSLIST;
-- 主库应看到"Binlog Dump"线程
-- 从库应看到"Waiting for master to send event"和"Slave has read all relay log"
-- 4. 查看GTID状态(如果使用GTID)
SHOW VARIABLES LIKE 'gtid%';
SELECT @@GLOBAL.gtid_executed;
SELECT @@GLOBAL.gtid_purged;
-- 5. 查看复制延迟
-- 最直接的方式:在从库执行 SHOW SLAVE STATUS\G,查看 Seconds_Behind_Master 字段
-- MySQL 8.0+ 可基于事务原始提交时间计算更精确的延迟:
SELECT
    TIMESTAMPDIFF(SECOND,
        MAX(APPLYING_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP),
        NOW()) AS delay_seconds
FROM performance_schema.replication_applier_status_by_worker;
-- 6. 查看复制过滤规则
SHOW VARIABLES LIKE 'replicate%';
2. 常用管理命令
sql
-- 1. 启动/停止复制
START SLAVE;
STOP SLAVE;
START SLAVE IO_THREAD; -- 只启动I/O线程
STOP SLAVE SQL_THREAD; -- 只停止SQL线程
-- 2. 重置复制
RESET SLAVE; -- 删除从库复制信息
RESET SLAVE ALL; -- 完全重置从库
RESET MASTER; -- 删除主库所有二进制日志
-- 3. 跳过错误
-- 临时跳过错误(跳过1个错误)
STOP SLAVE;
SET GLOBAL SQL_SLAVE_SKIP_COUNTER = 1;
START SLAVE;
-- 4. 变更主库配置
STOP SLAVE;
CHANGE MASTER TO ...;
START SLAVE;
-- 5. 查看二进制日志
SHOW BINARY LOGS;
SHOW BINLOG EVENTS IN 'mysql-bin.000001' LIMIT 10;
-- 6. 清除二进制日志
PURGE BINARY LOGS BEFORE NOW();
PURGE BINARY LOGS TO 'mysql-bin.000010';
3. 监控脚本
bash
#!/bin/bash
# mysql_replication_monitor.sh - MySQL master/slave replication monitor.
# Checks slave thread health, replication lag and last recorded errors,
# and mails an alert when something is wrong.
# --- Configuration (edit for your environment) ---
MASTER_HOST="192.168.1.10"
SLAVE_HOST="192.168.1.11"   # NOTE(review): declared but never used below — confirm intent
MYSQL_USER="root"
MYSQL_PASS="your_password"  # NOTE(review): CLI passwords are visible in `ps`; prefer ~/.my.cnf
ALERT_EMAIL="admin@example.com"
# ANSI color codes for terminal output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
# Check replication health on the local slave: I/O and SQL thread state,
# lag (Seconds_Behind_Master) and the last recorded replication errors.
# Calls send_alert for any failed thread or lag >= 10 seconds.
check_slave_status() {
    local status
    # Split declaration from assignment so a mysql failure is not masked.
    status=$(mysql -u"$MYSQL_USER" -p"$MYSQL_PASS" -e "SHOW SLAVE STATUS\G")
    IO_RUNNING=$(echo "$status" | grep "Slave_IO_Running:" | awk '{print $2}')
    SQL_RUNNING=$(echo "$status" | grep "Slave_SQL_Running:" | awk '{print $2}')
    SECONDS_BEHIND=$(echo "$status" | grep "Seconds_Behind_Master:" | awk '{print $2}')
    echo "=========================================="
    echo "主从复制监控报告 - $(date)"
    echo "=========================================="
    # I/O thread
    if [ "$IO_RUNNING" = "Yes" ]; then
        echo -e "${GREEN}✓ I/O线程: 正常${NC}"
    else
        echo -e "${RED}✗ I/O线程: 异常${NC}"
        send_alert "I/O线程异常"
    fi
    # SQL thread
    if [ "$SQL_RUNNING" = "Yes" ]; then
        echo -e "${GREEN}✓ SQL线程: 正常${NC}"
    else
        echo -e "${RED}✗ SQL线程: 异常${NC}"
        send_alert "SQL线程异常"
    fi
    # Lag: value is "NULL" while the SQL thread is down, and empty when the
    # mysql call itself failed — guard before the numeric tests (the original
    # unquoted [ $SECONDS_BEHIND -eq 0 ] errored out on an empty value).
    if [ -z "$SECONDS_BEHIND" ] || [ "$SECONDS_BEHIND" = "NULL" ]; then
        echo "延迟: N/A"
    elif [ "$SECONDS_BEHIND" -eq 0 ]; then
        echo -e "${GREEN}✓ 延迟: 0 秒${NC}"
    elif [ "$SECONDS_BEHIND" -lt 10 ]; then
        echo -e "${YELLOW}⚠ 延迟: ${SECONDS_BEHIND} 秒${NC}"
    else
        echo -e "${RED}✗ 延迟: ${SECONDS_BEHIND} 秒${NC}"
        send_alert "复制延迟: ${SECONDS_BEHIND}秒"
    fi
    # Last errors: strip surrounding whitespace so "no error" compares empty
    # (the original compared against a single space, which is fragile).
    LAST_IO_ERROR=$(echo "$status" | grep "Last_IO_Error:" | cut -d':' -f2- | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
    LAST_SQL_ERROR=$(echo "$status" | grep "Last_SQL_Error:" | cut -d':' -f2- | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
    if [ -n "$LAST_IO_ERROR" ]; then
        echo -e "${RED}IO错误: $LAST_IO_ERROR${NC}"
    fi
    if [ -n "$LAST_SQL_ERROR" ]; then
        echo -e "${RED}SQL错误: $LAST_SQL_ERROR${NC}"
    fi
}
# Send an alert: mail the message ($1) to ALERT_EMAIL and write a syslog
# entry tagged "mysql_replication".
send_alert() {
    local message="MySQL主从复制告警: $1"
    # printf instead of echo (portable), and quote the recipient — the
    # original's unquoted $ALERT_EMAIL would word-split.
    printf '%s\n' "$message" | mail -s "MySQL Replication Alert" "$ALERT_EMAIL"
    logger -t mysql_replication "$message"
}
# Report the master's current binlog coordinates and warn when too many
# binlog files have accumulated (suggests purging is misconfigured).
check_master_status() {
    local master_status file position binlog_count
    master_status=$(mysql -u"$MYSQL_USER" -p"$MYSQL_PASS" -h"$MASTER_HOST" -e "SHOW MASTER STATUS\G")
    file=$(echo "$master_status" | grep "File:" | awk '{print $2}')
    position=$(echo "$master_status" | grep "Position:" | awk '{print $2}')
    echo "主库状态:"
    echo " 二进制日志文件: $file"
    echo " 当前位置: $position"
    # -N (--skip-column-names) drops the header row so wc -l counts only the
    # log files — the original over-counted by one (header included).
    binlog_count=$(mysql -u"$MYSQL_USER" -p"$MYSQL_PASS" -h"$MASTER_HOST" -N -e "SHOW BINARY LOGS;" | wc -l)
    # Quote and default the count so an empty value cannot break the test.
    if [ "${binlog_count:-0}" -gt 20 ]; then
        echo -e "${YELLOW}⚠ 警告: 二进制日志文件过多 ($binlog_count)${NC}"
    fi
}
# Entry point: slave report first, a blank separator line, then the master report.
main() {
    check_slave_status
    printf '\n'
    check_master_status
}
main
4. 复制故障处理
sql
-- 1. 常见错误:Duplicate entry(重复键)
-- 跳过错误
STOP SLAVE;
SET GLOBAL SQL_SLAVE_SKIP_COUNTER = 1;
START SLAVE;
-- 2. 常见错误:数据不一致
-- 方法1:从主库重新同步该表
STOP SLAVE;
-- 从主库导出问题表
-- 导入到从库
START SLAVE;
-- 3. 常见错误:找不到二进制日志
-- 重新指定日志位置
STOP SLAVE;
CHANGE MASTER TO
MASTER_LOG_FILE='mysql-bin.000002',
MASTER_LOG_POS=4;
START SLAVE;
-- 4. 完全重建从库
STOP SLAVE;
RESET SLAVE ALL;
-- 重新导入主库备份
-- 重新配置主从
CHANGE MASTER TO ...;
START SLAVE;
六、读写分离
1. 什么是读写分离
读写分离是将数据库的读操作和写操作分离到不同的数据库服务器上,写操作在主库执行,读操作在从库执行。
text
读写分离架构:
客户端
│
↓
读写分离中间件
│
├── 写操作 ──→ 主库(Master)
│ │
│ ↓
│ 同步数据
│ │
└── 读操作 ──→ 从库(Slave1) ←── 从库(Slave2)
2. 应用层实现
2.1 PHP实现读写分离
php
<?php
// db_router.php - PHP数据库读写分离
/**
 * Routes database traffic: writes always go to the master, reads are
 * load-balanced across the slave pool with simple round-robin.
 */
class DatabaseRouter {
    private $master;           // master (write) endpoint config
    private $slaves;           // list of slave (read) endpoint configs
    private $current_slave = 0; // round-robin cursor over $slaves

    public function __construct() {
        // Master endpoint
        $this->master = [
            'host' => '192.168.1.10',
            'port' => 3306,
            'user' => 'appuser',
            'pass' => 'AppPass123',
            'dbname' => 'appdb'
        ];
        // Slave endpoints — add entries here to scale reads
        $this->slaves = [
            [
                'host' => '192.168.1.11',
                'port' => 3306,
                'user' => 'appuser',
                'pass' => 'AppPass123',
                'dbname' => 'appdb'
            ],
            [
                'host' => '192.168.1.12',
                'port' => 3306,
                'user' => 'appuser',
                'pass' => 'AppPass123',
                'dbname' => 'appdb'
            ]
        ];
    }

    /** Connection for INSERT/UPDATE/DELETE and transactions (master). */
    public function getWriteConnection() {
        return $this->getConnection($this->master);
    }

    /** Connection for SELECTs; slaves are chosen round-robin. */
    public function getReadConnection() {
        $index = $this->current_slave % count($this->slaves);
        $this->current_slave++;
        return $this->getConnection($this->slaves[$index]);
    }

    /** Open a PDO connection from a config array; terminates on failure. */
    private function getConnection($config) {
        $dsn = "mysql:host={$config['host']};port={$config['port']};dbname={$config['dbname']};charset=utf8mb4";
        try {
            $pdo = new PDO($dsn, $config['user'], $config['pass']);
            // Surface driver errors as exceptions instead of silent false returns
            $pdo->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);
            return $pdo;
        } catch (PDOException $e) {
            die("数据库连接失败: " . $e->getMessage());
        }
    }
}
// 使用示例
$db = new DatabaseRouter();
// 写操作(使用主库)
$writeConn = $db->getWriteConnection();
$writeConn->exec("INSERT INTO users (name, email) VALUES ('张三', 'zhang@example.com')");
// 读操作(使用从库)
$readConn = $db->getReadConnection();
$stmt = $readConn->query("SELECT * FROM users");
$users = $stmt->fetchAll(PDO::FETCH_ASSOC);
// 事务操作必须使用主库
$writeConn->beginTransaction();
$writeConn->exec("UPDATE users SET score = score + 10 WHERE id = 1");
$writeConn->exec("INSERT INTO user_logs (user_id, action) VALUES (1, 'add_score')");
$writeConn->commit();
?>
2.2 Python实现读写分离
python
# db_router.py - Python数据库读写分离
import mysql.connector
from mysql.connector import pooling
import random
class DatabaseRouter:
    """Read/write splitter: writes go to the master pool, reads are
    load-balanced (randomly) across the slave pools.
    """

    def __init__(self):
        # Master (write) endpoint.
        self.master_config = {
            'host': '192.168.1.10',
            'port': 3306,
            'user': 'appuser',
            'password': 'AppPass123',
            'database': 'appdb',
            'pool_name': 'master_pool',
            'pool_size': 5
        }
        # Slave (read) endpoints; reads are spread randomly across these.
        self.slave_configs = [
            {
                'host': '192.168.1.11',
                'port': 3306,
                'user': 'appuser',
                'password': 'AppPass123',
                'database': 'appdb',
                'pool_name': 'slave1_pool',
                'pool_size': 10
            },
            {
                'host': '192.168.1.12',
                'port': 3306,
                'user': 'appuser',
                'password': 'AppPass123',
                'database': 'appdb',
                'pool_name': 'slave2_pool',
                'pool_size': 10
            }
        ]
        # One connection pool per endpoint, created up front.
        self.master_pool = self._create_pool(self.master_config)
        self.slave_pools = [self._create_pool(cfg) for cfg in self.slave_configs]

    def _create_pool(self, config):
        """Create a MySQL connection pool from a config dict."""
        return mysql.connector.pooling.MySQLConnectionPool(**config)

    def get_write_connection(self):
        """Return a pooled connection to the master (for writes)."""
        return self.master_pool.get_connection()

    def get_read_connection(self):
        """Return a pooled connection to a randomly chosen slave (for reads)."""
        pool = random.choice(self.slave_pools)
        return pool.get_connection()

    def execute_write(self, sql, params=None):
        """Execute a single write statement on the master and commit.

        Returns the cursor (already closed, as in the original contract);
        rolls back and re-raises on any error.
        """
        conn = self.get_write_connection()
        # Guard: cursor may never be created if conn.cursor() itself raises;
        # the original then hit a NameError in `finally`, masking the real error.
        cursor = None
        try:
            cursor = conn.cursor()
            cursor.execute(sql, params or ())
            conn.commit()
            return cursor
        except Exception:
            conn.rollback()
            raise  # bare raise preserves the original traceback (`raise e` did not)
        finally:
            if cursor is not None:
                cursor.close()
            conn.close()

    def execute_read(self, sql, params=None):
        """Execute a read-only query on a slave; return all rows as dicts."""
        conn = self.get_read_connection()
        cursor = None
        try:
            cursor = conn.cursor(dictionary=True)
            cursor.execute(sql, params or ())
            return cursor.fetchall()
        finally:
            if cursor is not None:
                cursor.close()
            conn.close()

    def execute_transaction(self, operations):
        """Execute a list of (sql, params) pairs atomically on the master.

        All statements commit together; any failure rolls the whole batch
        back and re-raises. Returns True on success.
        """
        conn = self.get_write_connection()
        cursor = None
        try:
            conn.start_transaction()
            cursor = conn.cursor()
            for sql, params in operations:
                cursor.execute(sql, params)
            conn.commit()
            return True
        except Exception:
            conn.rollback()
            raise
        finally:
            if cursor is not None:
                cursor.close()
            conn.close()
# 使用示例
db = DatabaseRouter()
# 写操作
db.execute_write(
"INSERT INTO users (name, email) VALUES (%s, %s)",
('张三', 'zhang@example.com')
)
# 读操作
users = db.execute_read("SELECT * FROM users")
print(users)
# 事务操作
operations = [
("UPDATE users SET score = score + 10 WHERE id = %s", (1,)),
("INSERT INTO user_logs (user_id, action) VALUES (%s, %s)", (1, 'add_score'))
]
db.execute_transaction(operations)
3. 中间件实现
3.1 ProxySQL配置
bash
# 1. 安装ProxySQL
# CentOS
yum install -y https://github.com/sysown/proxysql/releases/download/v2.5.2/proxysql-2.5.2-1-centos7.x86_64.rpm
# Ubuntu
wget https://github.com/sysown/proxysql/releases/download/v2.5.2/proxysql_2.5.2-ubuntu20_amd64.deb
dpkg -i proxysql_2.5.2-ubuntu20_amd64.deb
# 2. 启动ProxySQL
systemctl start proxysql
systemctl enable proxysql
# 3. 登录管理控制台
mysql -u admin -padmin -h 127.0.0.1 -P 6032
# 4. 配置主从服务器
INSERT INTO mysql_servers (hostgroup_id, hostname, port, weight) VALUES
(1, '192.168.1.10', 3306, 100); # 主库(写组)
INSERT INTO mysql_servers (hostgroup_id, hostname, port, weight) VALUES
(2, '192.168.1.11', 3306, 50), # 从库1(读组)
(2, '192.168.1.12', 3306, 50); # 从库2(读组)
# 5. 配置用户
INSERT INTO mysql_users (username, password, default_hostgroup, transaction_persistent) VALUES
('appuser', 'AppPass123', 1, 1);
# 6. 配置查询路由规则
# 写操作路由到主机组1
INSERT INTO mysql_query_rules (rule_id, active, match_pattern, destination_hostgroup, apply) VALUES
(1, 1, '^INSERT', 1, 1),
(2, 1, '^UPDATE', 1, 1),
(3, 1, '^DELETE', 1, 1),
(4, 1, '^REPLACE', 1, 1),
(5, 1, '^CREATE', 1, 1),
(6, 1, '^ALTER', 1, 1),
(7, 1, '^DROP', 1, 1),
(8, 1, '^TRUNCATE', 1, 1);
# 读操作路由到主机组2
INSERT INTO mysql_query_rules (rule_id, active, match_pattern, destination_hostgroup, apply) VALUES
(100, 1, '^SELECT', 2, 1);
# 7. 加载配置
LOAD MYSQL SERVERS TO RUNTIME;
LOAD MYSQL USERS TO RUNTIME;
LOAD MYSQL QUERY RULES TO RUNTIME;
# 8. 保存配置
SAVE MYSQL SERVERS TO DISK;
SAVE MYSQL USERS TO DISK;
SAVE MYSQL QUERY RULES TO DISK;
# 9. 查看配置
SELECT * FROM mysql_servers;
SELECT * FROM mysql_users;
SELECT * FROM mysql_query_rules;
# 10. 客户端连接ProxySQL
mysql -u appuser -pAppPass123 -h 127.0.0.1 -P 6033
3.2 MaxScale配置
bash
# 1. 安装MaxScale
# CentOS
yum install -y https://downloads.mariadb.com/MaxScale/latest/centos/7/x86_64/maxscale-latest.centos.7.x86_64.rpm
# 2. 配置MaxScale
vim /etc/maxscale.cnf
ini
[maxscale]
threads=auto
log_info=1
# 服务器定义
[master]
type=server
address=192.168.1.10
port=3306
protocol=MariaDBBackend
[slave1]
type=server
address=192.168.1.11
port=3306
protocol=MariaDBBackend
[slave2]
type=server
address=192.168.1.12
port=3306
protocol=MariaDBBackend
# 监控模块
[MySQL Monitor]
type=monitor
module=mysqlmon
servers=master,slave1,slave2
user=maxscale
passwd=MaxScale@123
monitor_interval=1000
# 读写分离模块
[Read-Write Service]
type=service
router=readwritesplit
servers=master,slave1,slave2
user=maxscale
passwd=MaxScale@123
# 监听器
[Read-Write Listener]
type=listener
service=Read-Write Service
protocol=MySQLClient
port=3306
# 管理接口
[MaxScale Admin]
type=listener
service=CLI
protocol=maxscaled
socket=default
port=6603
bash
# 3. 创建监控用户
mysql -u root -p -h 192.168.1.10 -e "CREATE USER 'maxscale'@'%' IDENTIFIED BY 'MaxScale@123';"
mysql -u root -p -h 192.168.1.10 -e "GRANT SELECT ON mysql.* TO 'maxscale'@'%';"
mysql -u root -p -h 192.168.1.10 -e "GRANT SHOW DATABASES ON *.* TO 'maxscale'@'%';"
# 4. 启动MaxScale
systemctl start maxscale
systemctl enable maxscale
# 5. 连接MaxScale
mysql -u appuser -pAppPass123 -h 127.0.0.1 -P 3306
# 6. 管理接口
maxctrl -u admin -p admin list servers
maxctrl -u admin -p admin list services
4. 读写分离监控
bash
#!/bin/bash
# rw_split_monitor.sh - 读写分离监控脚本
# Print ProxySQL runtime statistics via its admin interface (port 6032):
# backend connection-pool state plus the top-10 command counters.
monitor_proxysql() {
    echo "=== ProxySQL状态 ==="
    mysql -u admin -padmin -h 127.0.0.1 -P 6032 \
        -e "SELECT * FROM stats_mysql_connection_pool;"
    echo ""
    mysql -u admin -padmin -h 127.0.0.1 -P 6032 \
        -e "SELECT * FROM stats_mysql_commands_counters ORDER BY Total_cnt DESC LIMIT 10;"
}
# Compute the write/read percentage split from the application's query log:
# counts lines tagged [WRITE] and [READ]; silently skips when the log is absent.
monitor_app_read_write_ratio() {
    local log_file="/var/log/app/mysql.log"
    local write_count read_count total write_ratio read_ratio
    echo "=== 应用读写比例 ==="
    if [ -f "$log_file" ]; then
        # grep -c prints 0 but exits 1 when nothing matches; '|| true' keeps
        # that from looking like a failure under `set -e` callers.
        write_count=$(grep -c "\[WRITE\]" "$log_file") || true
        read_count=$(grep -c "\[READ\]" "$log_file") || true
        total=$((write_count + read_count))
        # Quoted test — the original unquoted [ $total -gt 0 ] errors if empty.
        if [ "$total" -gt 0 ]; then
            write_ratio=$(echo "scale=2; $write_count * 100 / $total" | bc)
            read_ratio=$(echo "scale=2; $read_count * 100 / $total" | bc)
            echo "写操作比例: ${write_ratio}%"
            echo "读操作比例: ${read_ratio}%"
        fi
    fi
}
# Snapshot per-slave load: current connection count (Threads_connected)
# and the cumulative statement count (Questions) for each read replica.
monitor_slave_load() {
    local slave connections queries
    echo "=== 从库负载 ==="
    for slave in 192.168.1.11 192.168.1.12; do
        # awk '/pat/{print $2}' replaces the original grep|awk pipeline.
        connections=$(mysql -u appuser -pAppPass123 -h "$slave" \
            -e "SHOW STATUS LIKE 'Threads_connected';" \
            | awk '/Threads_connected/ {print $2}')
        queries=$(mysql -u appuser -pAppPass123 -h "$slave" \
            -e "SHOW STATUS LIKE 'Questions';" \
            | awk '/Questions/ {print $2}')
        echo "从库 $slave - 连接数: $connections, 总查询: $queries"
    done
}
# Entry point: header, then the three monitor sections separated by blank lines.
main() {
    printf '读写分离监控报告 - %s\n' "$(date)"
    echo "============================"
    monitor_proxysql
    printf '\n'
    monitor_app_read_write_ratio
    printf '\n'
    monitor_slave_load
}
main
七、主从复制架构优化
1. 半同步复制
sql
-- 半同步复制:确保至少一个从库收到二进制日志后才返回成功
-- 1. 安装半同步插件
INSTALL PLUGIN rpl_semi_sync_master SONAME 'semisync_master.so';
INSTALL PLUGIN rpl_semi_sync_slave SONAME 'semisync_slave.so';
-- 2. 启用半同步(主库)
SET GLOBAL rpl_semi_sync_master_enabled = 1;
SET GLOBAL rpl_semi_sync_master_timeout = 10000; -- 10秒超时
-- 3. 启用半同步(从库)
SET GLOBAL rpl_semi_sync_slave_enabled = 1;
-- 4. 查看半同步状态
SHOW STATUS LIKE 'Rpl_semi_sync%';
-- 5. 永久配置
-- 在my.cnf中添加
rpl_semi_sync_master_enabled = 1
rpl_semi_sync_slave_enabled = 1
rpl_semi_sync_master_timeout = 10000
2. 并行复制
sql
-- MySQL 5.7+ 支持并行复制
-- 1. 查看并行复制配置
SHOW VARIABLES LIKE 'slave_parallel%';
-- 2. 设置并行复制参数
SET GLOBAL slave_parallel_workers = 4; -- 并行线程数
SET GLOBAL slave_parallel_type = 'LOGICAL_CLOCK'; -- 并行类型
-- 3. 永久配置
-- 在my.cnf中添加
slave_parallel_workers = 4
slave_parallel_type = LOGICAL_CLOCK
3. 复制压缩
sql
-- 启用二进制日志压缩(MySQL 8.0)
SET GLOBAL binlog_transaction_compression = ON;
-- 永久配置
[mysqld]
binlog_transaction_compression = ON
4. 监控复制延迟
sql
-- 实时监控复制延迟
-- 在从库执行,Seconds_Behind_Master 即为延迟秒数(基于binlog事件时间戳,仅供参考)
SHOW SLAVE STATUS\G
-- MySQL 8.0+:基于事务原始提交时间计算延迟
SELECT
    TIMESTAMPDIFF(SECOND,
        MAX(APPLYING_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP),
        NOW()) AS delay_seconds
FROM performance_schema.replication_applier_status_by_worker;
-- 监控复制心跳
SHOW STATUS LIKE 'Slave_heartbeat_period';
SHOW STATUS LIKE 'Slave_received_heartbeats';
八、故障切换与高可用
1. MHA架构
bash
# MHA (Master High Availability) 配置
# 1. 安装MHA
yum install -y perl-DBD-MySQL perl-Config-Tiny perl-Log-Dispatch perl-Parallel-ForkManager
wget https://github.com/yoshinorim/mha4mysql-manager/releases/download/v0.58/mha4mysql-manager-0.58.tar.gz
wget https://github.com/yoshinorim/mha4mysql-node/releases/download/v0.58/mha4mysql-node-0.58.tar.gz
# 2. 配置MHA
vim /etc/masterha/app1.cnf
[server default]
user=mha
password=Mha@123
manager_workdir=/var/log/masterha/app1
manager_log=/var/log/masterha/app1/manager.log
remote_workdir=/var/log/masterha/app1
ssh_user=root
[server1]
hostname=192.168.1.10
port=3306
master_binlog_dir=/var/lib/mysql
candidate_master=1
[server2]
hostname=192.168.1.11
port=3306
master_binlog_dir=/var/lib/mysql
candidate_master=1
[server3]
hostname=192.168.1.12
port=3306
master_binlog_dir=/var/lib/mysql
no_master=1
# 3. 检查SSH配置
masterha_check_ssh --conf=/etc/masterha/app1.cnf
# 4. 检查复制状态
masterha_check_repl --conf=/etc/masterha/app1.cnf
# 5. 启动MHA管理器
nohup masterha_manager --conf=/etc/masterha/app1.cnf > /var/log/masterha/manager.log 2>&1 &
2. Orchestrator
bash
# Orchestrator - MySQL高可用管理工具
# 1. 安装Orchestrator
wget https://github.com/openark/orchestrator/releases/download/v3.2.6/orchestrator-3.2.6-1.x86_64.rpm
yum install -y orchestrator-3.2.6-1.x86_64.rpm
# 2. 配置Orchestrator
vim /etc/orchestrator.conf.json
{
"MySQLTopologyUser": "orchestrator",
"MySQLTopologyPassword": "orc@123",
"MySQLOrchestratorHost": "127.0.0.1",
"MySQLOrchestratorPort": 3306,
"DiscoveryPollSeconds": 60,
"RecoveryPollSeconds": 10,
"ActiveNodeExpireSeconds": 5,
"HostnameResolveMethod": "none",
"SkipBinlogServerUnresolveCheck": true,
"ReasonableReplicationLagSeconds": 10,
"FailureDetectionPeriodSeconds": 5,
"RecoveryPeriodBlockSeconds": 3600,
"DetectClusterAliasQuery": "SELECT IFNULL(@@hostname, '')",
"DetectClusterDomainQuery": "",
"DataCenterPattern": "[.]([^.]+)[.][^.]+[.]mydomain[.]com",
"PhysicalEnvironmentPattern": "[.]([^.]+[.][^.]+)[.]mydomain[.]com",
"RaftEnabled": false,
"RaftDataDir": "/var/lib/orchestrator",
"RaftBind": "127.0.0.1",
"RaftAdvertise": "127.0.0.1",
"RaftNodes": ["127.0.0.1"],
"HTTPPort": 3000,
"AuthUser": "admin",
"AuthPassword": "admin",
"BackendDB": "sqlite",
"SQLite3DataFile": "/var/lib/orchestrator/orchestrator.db"
}
# 3. 启动服务
systemctl start orchestrator
systemctl enable orchestrator
# 4. 访问Web界面
http://192.168.1.10:3000