一、ShardingSphere读写分离核心原理
1.1 读写分离架构设计
ShardingSphere的读写分离采用透明化路由机制,自动将写操作(INSERT、UPDATE、DELETE)路由到主库,读操作(SELECT)路由到从库,实现读写负载分离。
1.2 核心特性
- 自动路由:基于SQL类型自动选择数据源
- 负载均衡:支持轮询、随机等从库负载策略
- 事务一致性:事务内的读操作默认路由到主库
- 强制路由:支持Hint强制指定数据源
- 主从延迟感知:可配置延迟容忍策略
1.3 适用场景
| 场景 | 读写比例 | 推荐配置 |
|---|---|---|
| 读多写少 | 8:2 | 一主多从(1主+2-3从) |
| 读写均衡 | 5:5 | 一主一从 |
| 写密集型 | 2:8 | 主库性能优化为主 |
二、SpringBoot集成配置实战
2.1 环境准备与依赖
Maven依赖:
xml
<!-- Spring Boot 3.x + ShardingSphere(注意:官方自 5.3.0 起已移除 spring-boot-starter,该坐标最高版本为 5.2.1;5.3+ 建议改用 shardingsphere-jdbc 坐标 + JDBC Driver 配置模式,请按所用版本核对) -->
<dependency>
<groupId>org.apache.shardingsphere</groupId>
<artifactId>shardingsphere-jdbc-core-spring-boot-starter</artifactId>
<version>5.5.2</version>
</dependency>
<!-- MySQL驱动 -->
<dependency>
<groupId>com.mysql</groupId>
<artifactId>mysql-connector-j</artifactId>
<version>8.0.33</version>
</dependency>
<!-- MyBatis Plus(可选) -->
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>3.5.6</version>
</dependency>
数据库准备:
sql
-- 主库(写库)
CREATE DATABASE medical_master;
USE medical_master;
CREATE TABLE patient_info (
id BIGINT PRIMARY KEY AUTO_INCREMENT,
patient_id VARCHAR(32) UNIQUE NOT NULL,
name VARCHAR(100) NOT NULL,
gender TINYINT COMMENT '1:男, 2:女',
age INT,
phone VARCHAR(20),
create_time DATETIME DEFAULT CURRENT_TIMESTAMP,
update_time DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
INDEX idx_patient_id (patient_id)
);
-- 从库1(读库)
CREATE DATABASE medical_slave1;
USE medical_slave1;
-- 创建相同结构的表(通过主从复制同步数据)
-- 从库2(读库)
CREATE DATABASE medical_slave2;
USE medical_slave2;
-- 创建相同结构的表
2.2 核心YAML配置
application.yml:
yaml
spring:
# 主配置
main:
allow-bean-definition-overriding: true
# ShardingSphere配置
shardingsphere:
# 数据源定义
datasource:
names: master, slave1, slave2
# 主库(写库)
master:
type: com.zaxxer.hikari.HikariDataSource
driver-class-name: com.mysql.cj.jdbc.Driver
jdbc-url: jdbc:mysql://192.168.1.100:3306/medical_master?useSSL=false&serverTimezone=Asia/Shanghai&characterEncoding=utf8
username: root
password: Master@123
connection-timeout: 30000
idle-timeout: 600000
max-lifetime: 1800000
maximum-pool-size: 20
minimum-idle: 5
pool-name: master-pool
# 从库1(读库)
slave1:
type: com.zaxxer.hikari.HikariDataSource
driver-class-name: com.mysql.cj.jdbc.Driver
jdbc-url: jdbc:mysql://192.168.1.101:3306/medical_slave1?useSSL=false&serverTimezone=Asia/Shanghai&characterEncoding=utf8
username: root
password: Slave1@123
connection-timeout: 30000
idle-timeout: 600000
max-lifetime: 1800000
maximum-pool-size: 15
minimum-idle: 3
pool-name: slave1-pool
# 从库2(读库)
slave2:
type: com.zaxxer.hikari.HikariDataSource
driver-class-name: com.mysql.cj.jdbc.Driver
jdbc-url: jdbc:mysql://192.168.1.102:3306/medical_slave2?useSSL=false&serverTimezone=Asia/Shanghai&characterEncoding=utf8
username: root
password: Slave2@123
connection-timeout: 30000
idle-timeout: 600000
max-lifetime: 1800000
maximum-pool-size: 15
minimum-idle: 3
pool-name: slave2-pool
# 读写分离规则
rules:
readwrite-splitting:
# 数据源组配置
data-sources:
# 逻辑数据源名称(业务代码中使用)
rw_ds:
# 静态配置(主从固定)
static-strategy:
write-data-source-name: master
read-data-source-names:
- slave1
- slave2
# 负载均衡策略
load-balancer-name: round_robin
# 负载均衡器定义
load-balancers:
# 轮询策略(默认)
round_robin:
type: ROUND_ROBIN
# 随机策略
random:
type: RANDOM
# 权重策略
weight:
type: WEIGHT
props:
slave1: 2
slave2: 1
# 属性配置
props:
sql-show: true # 显示SQL路由信息(开发环境)
sql-simple: true
check-table-metadata-enabled: false
# 读写分离高级配置(注意:以下键并非 ShardingSphere 5.x props 的标准配置项;事务内读策略在 5.4+ 应通过规则里的 transactionalReadQueryStrategy 配置,请按所用版本核对官方文档)
readwrite-splitting:
# 事务内读操作是否强制走主库(默认true)
transaction-read-query-strategy: PRIMARY
# 从库延迟容忍时间(毫秒)
replica-delay-milliseconds-threshold: 1000
# 是否启用从库健康检查
health-check-enabled: true
health-check-interval-milliseconds: 5000
2.3 动态读写分离配置(高级)
application-dynamic.yml:
yaml
spring:
shardingsphere:
rules:
readwrite-splitting:
data-sources:
dynamic_ds:
# 动态配置(支持主从切换)
dynamic-strategy:
auto-aware-data-source-name: readwrite_ds
load-balancer-name: round_robin
# 数据库发现规则(配合注册中心)
discovery:
data-sources:
readwrite_ds:
data-source-names: master,slave1,slave2
discovery-type: MySQL.MGR # 或 ZooKeeper、Nacos等
props:
group-name: medical-cluster
member-host: 192.168.1.100:3306,192.168.1.101:3306,192.168.1.102:3306
三、业务代码实战
3.1 实体类定义
java
/**
 * Patient master-data entity mapped to table {@code patient_info}.
 * Getters/setters/equals/hashCode are generated by Lombok {@code @Data}.
 */
@Data
@TableName("patient_info")
public class PatientInfo {
    // Database auto-increment primary key (matches AUTO_INCREMENT in the DDL).
    @TableId(type = IdType.AUTO)
    private Long id;
    // Business identifier; the DDL declares it UNIQUE NOT NULL.
    private String patientId;
    private String name;
    // 1 = male, 2 = female (per the table's column comment).
    private Integer gender;
    private Integer age;
    private String phone;
    // Filled on insert via MyBatis-Plus field-fill — assumes a MetaObjectHandler
    // is registered elsewhere in the project; TODO confirm.
    @TableField(fill = FieldFill.INSERT)
    private Date createTime;
    // Filled on insert and update via the same handler.
    @TableField(fill = FieldFill.INSERT_UPDATE)
    private Date updateTime;
}
/**
 * Medical record entity mapped to table {@code medical_record}.
 * Getters/setters are generated by Lombok {@code @Data}.
 */
@Data
@TableName("medical_record")
public class MedicalRecord {
    // Primary key assigned by MyBatis-Plus (IdType.ASSIGN_ID, snowflake-style),
    // so the id is available immediately after insert without a DB round trip.
    @TableId(type = IdType.ASSIGN_ID)
    private Long id;
    // Business record number, set by the service layer before insert.
    private String recordNo;
    private String patientId;
    private String diagnosis;
    private String treatment;
    private String doctorId;
    // Filled on insert via field-fill — assumes a MetaObjectHandler is
    // registered; note the fill does NOT apply to hand-written @Insert SQL.
    @TableField(fill = FieldFill.INSERT)
    private Date createTime;
}
3.2 Mapper接口
java
/**
 * MyBatis mapper for {@code patient_info}. All SELECTs below are routed to a
 * replica by ShardingSphere's readwrite-splitting rule; writes via the
 * inherited BaseMapper methods go to the primary.
 */
@Mapper
public interface PatientMapper extends BaseMapper<PatientInfo> {
    /**
     * Looks up one patient by business id (read — routed to a replica).
     */
    @Select("SELECT * FROM patient_info WHERE patient_id = #{patientId}")
    PatientInfo selectByPatientId(@Param("patientId") String patientId);
    /**
     * Paged listing, newest first (read — routed to a replica).
     * Pagination relies on the MyBatis-Plus PaginationInnerInterceptor being
     * configured — TODO confirm the interceptor is registered.
     */
    @Select("SELECT * FROM patient_info ORDER BY create_time DESC")
    List<PatientInfo> selectPageList(Page<PatientInfo> page);
    /**
     * Total patient count (read — routed to a replica).
     */
    @Select("SELECT COUNT(*) FROM patient_info")
    Long countPatients();
}
/**
 * MyBatis mapper for {@code medical_record}. Reads route to replicas,
 * writes to the primary, per the readwrite-splitting rule.
 */
@Mapper
public interface MedicalRecordMapper extends BaseMapper<MedicalRecord> {
    /**
     * All records for a patient, newest first (read — routed to a replica).
     */
    @Select("SELECT * FROM medical_record WHERE patient_id = #{patientId} ORDER BY create_time DESC")
    List<MedicalRecord> selectByPatientId(@Param("patientId") String patientId);
    /**
     * Batch insert (write — routed to the primary).
     * NOTE(review): FieldFill.INSERT auto-fill does not run for hand-written
     * @Insert SQL, so callers must set id/createTime themselves here —
     * the entity's ASSIGN_ID is also bypassed by this custom statement.
     */
    @Insert("<script>" +
    "INSERT INTO medical_record (id, record_no, patient_id, diagnosis, treatment, doctor_id, create_time) " +
    "VALUES " +
    "<foreach collection='records' item='record' separator=','>" +
    "(#{record.id}, #{record.recordNo}, #{record.patientId}, #{record.diagnosis}, #{record.treatment}, #{record.doctorId}, #{record.createTime})" +
    "</foreach>" +
    "</script>")
    int batchInsert(@Param("records") List<MedicalRecord> records);
}
3.3 Service层实现
java
/**
 * Patient/record service demonstrating automatic read/write routing.
 * Writes (insert/update) go to the primary; plain reads go to replicas;
 * reads inside a transaction go to the primary per the configured strategy.
 */
@Service
@Slf4j
public class PatientService {

    @Autowired
    private PatientMapper patientMapper;

    @Autowired
    private MedicalRecordMapper recordMapper;

    /**
     * Builds a business id as prefix + epoch-millis + 4 random digits.
     * Fix: the original used the bare timestamp, which collides when two
     * requests arrive in the same millisecond — and patient_id is UNIQUE
     * in the DDL, so a collision fails the insert.
     */
    private static String newBusinessId(String prefix) {
        int suffix = java.util.concurrent.ThreadLocalRandom.current().nextInt(10000);
        return prefix + System.currentTimeMillis() + String.format("%04d", suffix);
    }

    /**
     * Creates a patient (write — routed to the primary).
     *
     * @param patient entity to persist; its patientId is overwritten here
     * @return the generated database id
     */
    @Transactional
    public Long createPatient(PatientInfo patient) {
        // Generate a collision-resistant business id.
        patient.setPatientId(newBusinessId("PAT"));
        // Insert is routed to the primary data source.
        patientMapper.insert(patient);
        log.info("患者创建成功,ID: {},路由到主库", patient.getId());
        return patient.getId();
    }

    /**
     * Fetches one patient by business id (read — routed to a replica
     * via the configured load balancer).
     */
    public PatientInfo getPatientById(String patientId) {
        PatientInfo patient = patientMapper.selectByPatientId(patientId);
        if (patient != null) {
            log.info("查询患者 {},路由到从库", patientId);
        }
        return patient;
    }

    /**
     * Paged patient listing (read — routed to a replica).
     * The mapper call mutates {@code page} in place (records + total),
     * which is why its return value is not used directly.
     */
    public PageResult<PatientInfo> queryPatientPage(int pageNum, int pageSize) {
        Page<PatientInfo> page = new Page<>(pageNum, pageSize);
        patientMapper.selectPageList(page);
        log.info("分页查询患者,第{}页,每页{}条,路由到从库", pageNum, pageSize);
        return new PageResult<>(
            page.getRecords(),
            page.getTotal(),
            page.getSize(),
            page.getCurrent()
        );
    }

    /**
     * Updates a patient (write — routed to the primary).
     *
     * @return true when at least one row was affected
     */
    @Transactional
    public boolean updatePatient(PatientInfo patient) {
        int result = patientMapper.updateById(patient);
        log.info("更新患者 {},影响{}行,路由到主库", patient.getPatientId(), result);
        return result > 0;
    }

    /**
     * Creates a medical record (write — routed to the primary).
     *
     * @return the assigned record id (ASSIGN_ID, available after insert)
     */
    @Transactional
    public Long createMedicalRecord(MedicalRecord record) {
        // Same collision-resistant generation as patient ids.
        record.setRecordNo(newBusinessId("REC"));
        recordMapper.insert(record);
        log.info("病历创建成功,ID: {},路由到主库", record.getId());
        return record.getId();
    }

    /**
     * Lists a patient's records (read — routed to a replica).
     */
    public List<MedicalRecord> getPatientRecords(String patientId) {
        List<MedicalRecord> records = recordMapper.selectByPatientId(patientId);
        log.info("查询患者 {} 的病历,共{}条,路由到从库", patientId, records.size());
        return records;
    }

    /**
     * Demonstrates write-then-read inside one transaction: the follow-up
     * SELECT goes to the primary (transaction read strategy = PRIMARY),
     * so it sees the rows inserted above despite replica lag.
     */
    @Transactional
    public void batchOperationTest() {
        List<MedicalRecord> records = new ArrayList<>();
        for (int i = 0; i < 100; i++) {
            MedicalRecord record = new MedicalRecord();
            record.setPatientId("PAT" + (i % 10));
            record.setDiagnosis("诊断" + i);
            record.setCreateTime(new Date());
            records.add(record);
        }
        recordMapper.batchInsert(records);
        // In-transaction read — routed to the primary, not a lagging replica.
        List<MedicalRecord> queryResult = recordMapper.selectByPatientId("PAT1");
        log.info("事务内查询结果数量: {}", queryResult.size());
    }
}
3.4 强制路由控制(高级特性)
java
/**
 * Demonstrates Hint-based forced routing and mixed read/write transactions.
 * NOTE(review): HintManager is thread-local; the try-with-resources blocks
 * below correctly clear the hint when done.
 */
@Service
@Slf4j
public class MedicalRecordService {
    @Autowired
    private MedicalRecordMapper recordMapper;
    /**
     * Forces the read onto the primary — for read-your-own-write scenarios
     * where replica lag would otherwise return stale/absent data.
     */
    public MedicalRecord getRecordForceMaster(Long recordId) {
        try (HintManager hintManager = HintManager.getInstance()) {
            // Route every statement in this thread to the write data source.
            hintManager.setWriteRouteOnly();
            MedicalRecord record = recordMapper.selectById(recordId);
            log.info("强制读主库查询记录: {}", recordId);
            return record;
        }
    }
    /**
     * Forces the read onto a named replica.
     * NOTE(review): setReadwriteSplittingHint is not a stable 5.x API name —
     * verify against the ShardingSphere version in use (5.x exposes
     * hint data-source selection under a different method); TODO confirm.
     */
    public MedicalRecord getRecordForceSlave(Long recordId, String slaveName) {
        try (HintManager hintManager = HintManager.getInstance()) {
            hintManager.setReadwriteSplittingHint(slaveName);
            MedicalRecord record = recordMapper.selectById(recordId);
            log.info("强制路由到从库 {} 查询记录: {}", slaveName, recordId);
            return record;
        }
    }
    /**
     * Mixed read/write transaction: every statement here runs against the
     * primary (writes by rule; the in-transaction read by the PRIMARY
     * transaction-read strategy), so the SELECT sees the fresh insert.
     */
    @Transactional
    public MedicalRecord complexTransactionOperation(MedicalRecord record) {
        // 1. Insert — primary.
        recordMapper.insert(record);
        // 2. In-transaction read — primary, guaranteeing consistency.
        MedicalRecord freshRecord = recordMapper.selectById(record.getId());
        log.info("事务内查询,确保数据一致性");
        // 3. Update — primary.
        freshRecord.setDiagnosis("更新后的诊断");
        recordMapper.updateById(freshRecord);
        return freshRecord;
    }
}
3.5 配置类(可选)
java
/**
 * Optional Spring configuration.
 * NOTE(review): ShardingSphere 5.x discovers load-balance algorithms via
 * SPI (META-INF/services), not the Spring context — registering this bean
 * alone will NOT make type "CUSTOM_ROUND_ROBIN" resolvable from YAML, and
 * an anonymous class cannot be SPI-instantiated; verify before relying on it.
 */
@Configuration
public class ShardingSphereConfig {
    /**
     * Custom round-robin over the read data sources (see SPI caveat above).
     */
    @Bean
    public ReadQueryLoadBalanceAlgorithm customLoadBalancer() {
        return new ReadQueryLoadBalanceAlgorithm() {
            // Monotonic counter driving the rotation; shared across threads.
            private final AtomicInteger counter = new AtomicInteger(0);
            @Override
            public String getDataSource(String name, String writeDataSourceName,
                                        List<String> readDataSourceNames) {
                // Plain modulo rotation over the replica list.
                // NOTE(review): getAndIncrement can go negative on int
                // overflow after ~2^31 calls, making the index negative.
                int index = counter.getAndIncrement() % readDataSourceNames.size();
                return readDataSourceNames.get(index);
            }
            @Override
            public String getType() {
                return "CUSTOM_ROUND_ROBIN";
            }
            @Override
            public Properties getProps() {
                return new Properties();
            }
            @Override
            public void init(Properties props) {
                // No initialization needed for this algorithm.
            }
        };
    }
    /**
     * Transaction manager over the ShardingSphere logical DataSource, so
     * @Transactional spans route through the readwrite-splitting rule.
     */
    @Bean
    public PlatformTransactionManager transactionManager(DataSource dataSource) {
        return new DataSourceTransactionManager(dataSource);
    }
}
四、测试验证
4.1 单元测试类
java
/**
 * Integration tests validating read/write routing, transaction consistency,
 * and Hint-forced primary reads.
 */
@SpringBootTest
@Slf4j
class ReadWriteSplittingTest {

    @Autowired
    private PatientService patientService;

    @Autowired
    private MedicalRecordService recordService;

    // Fix: the original testForceMasterRead referenced recordMapper
    // without declaring it anywhere in this class.
    @Autowired
    private MedicalRecordMapper recordMapper;

    /** Write path: insert must succeed and be routed to the primary. */
    @Test
    void testWriteOperation() {
        PatientInfo patient = new PatientInfo();
        patient.setName("张三");
        patient.setGender(1);
        patient.setAge(30);
        Long patientId = patientService.createPatient(patient);
        assertNotNull(patientId);
        log.info("写操作测试通过,患者ID: {}", patientId);
    }

    /** Read path: paged query must be routed to a replica and succeed. */
    @Test
    void testReadOperation() {
        PageResult<PatientInfo> pageResult = patientService.queryPatientPage(1, 10);
        assertNotNull(pageResult);
        assertTrue(pageResult.getTotal() >= 0);
        log.info("读操作测试通过,查询到 {} 条记录", pageResult.getTotal());
    }

    /** In-transaction read must observe the same transaction's writes. */
    @Test
    void testTransactionConsistency() {
        MedicalRecord record = new MedicalRecord();
        record.setPatientId("PAT_TEST");
        record.setDiagnosis("测试诊断");
        MedicalRecord result = recordService.complexTransactionOperation(record);
        assertNotNull(result);
        assertEquals("更新后的诊断", result.getDiagnosis());
        log.info("事务一致性测试通过");
    }

    /** Hint-forced primary read must see a row inserted moments before. */
    @Test
    void testForceMasterRead() {
        MedicalRecord record = new MedicalRecord();
        record.setPatientId("PAT_FORCE");
        record.setDiagnosis("强制读主测试");
        recordMapper.insert(record);
        MedicalRecord freshRecord = recordService.getRecordForceMaster(record.getId());
        assertNotNull(freshRecord);
        log.info("强制读主库测试通过");
    }

    /** Rough throughput smoke test: 100 writes then 100 paged reads. */
    @Test
    void testPerformance() {
        long startTime = System.currentTimeMillis();
        for (int i = 0; i < 100; i++) {
            PatientInfo patient = new PatientInfo();
            patient.setName("性能测试用户" + i);
            patientService.createPatient(patient);
        }
        for (int i = 0; i < 100; i++) {
            patientService.queryPatientPage(1, 10);
        }
        long endTime = System.currentTimeMillis();
        log.info("性能测试完成,耗时: {} ms", endTime - startTime);
    }
}
4.2 监控SQL路由
启动应用后,控制台会显示SQL路由信息:
sql
Logic SQL: SELECT * FROM patient_info WHERE patient_id = ?
Actual SQL: slave1 ::: SELECT * FROM patient_info WHERE patient_id = ?
Actual SQL: slave2 ::: SELECT * FROM patient_info WHERE patient_id = ?
Logic SQL: INSERT INTO patient_info (name, gender, age) VALUES (?, ?, ?)
Actual SQL: master ::: INSERT INTO patient_info (name, gender, age) VALUES (?, ?, ?)
五、与分库分表结合实战
5.1 读写分离 + 分库分表配置
yaml
spring:
shardingsphere:
rules:
# 分片规则
sharding:
tables:
medical_image:
actual-data-nodes: rw_ds.medical_image_$->{2024..2025}$->{(1..12).collect{t->t.toString().padLeft(2,'0')}}
table-strategy:
standard:
sharding-column: create_time
sharding-algorithm-name: table-interval
# 读写分离规则
readwrite-splitting:
data-sources:
rw_ds:
static-strategy:
write-data-source-name: master
read-data-source-names:
- slave1
- slave2
load-balancer-name: round_robin
5.2 复合场景业务代码
java
/**
 * Combined sharding + readwrite-splitting service for medical images.
 */
@Service
@Slf4j // Fix: the original used `log` without declaring @Slf4j.
public class MedicalImageService {

    // Fix: the original body referenced these mappers without declaring them.
    @Autowired
    private MedicalImageMapper imageMapper;

    @Autowired
    private MedicalAttachmentMapper attachmentMapper;

    /**
     * Writes an image plus its attachment inside one transaction.
     * Inserts route to the primary and are placed into the time-based
     * table shard; the in-transaction read routes to the primary.
     */
    @Transactional
    public void saveImageWithSharding(MedicalImage image) {
        // 1. Insert the image (primary; sharded table chosen by create_time).
        imageMapper.insert(image);
        // 2. Insert the attachment (binding table — same shard as the image).
        MedicalAttachment attachment = new MedicalAttachment();
        attachment.setImageId(image.getId());
        attachmentMapper.insert(attachment);
        // 3. Immediate in-transaction read — primary, so the insert is visible.
        MedicalImage freshImage = imageMapper.selectById(image.getId());
        // Sanity check on read-your-own-write before committing.
        if (freshImage == null) {
            log.warn("事务内未读到刚写入的影像,ID: {}", image.getId());
        }
        log.info("分片+读写分离操作完成,影像ID: {}", image.getId());
    }

    /**
     * Range query that can span several table shards; each routed read is
     * load-balanced across the replicas.
     */
    public List<MedicalImage> queryCrossShard(Date startTime, Date endTime) {
        return imageMapper.selectByTimeRange(startTime, endTime);
    }
}
六、生产环境最佳实践
6.1 配置优化建议
yaml
spring:
shardingsphere:
props:
# 生产环境关闭SQL显示
sql-show: false
# 连接池优化
max-connections-size-per-query: 5
kernel-executor-size: ${CPU_CORES:16}
# 读写分离高级配置
readwrite-splitting:
transaction-read-query-strategy: PRIMARY
replica-delay-milliseconds-threshold: 2000
health-check-enabled: true
health-check-interval-milliseconds: 10000
6.2 监控与告警
markdown
# Prometheus监控指标
- shardingsphere_proxy_request_total
- shardingsphere_proxy_request_duration
- shardingsphere_proxy_execute_latency_milliseconds
- shardingsphere_proxy_connections
# 关键告警规则
- 从库延迟超过阈值
- 主从连接异常
- SQL执行时间过长
6.3 故障处理策略
- 主库故障:启用动态读写分离,自动切换到从库
- 从库延迟:关键查询强制走主库
- 连接池满:调整连接池参数,增加从库节点
- 数据不一致:启用分布式事务(Seata集成)
七、总结
通过SpringBoot + ShardingSphere实现读写分离,可以获得以下核心优势:
7.1 核心价值
- 性能提升:读操作负载分散到多个从库
- 高可用:主从架构提供故障转移能力
- 透明化:业务代码无需修改,配置即生效
- 灵活性:支持多种负载均衡策略
7.2 关键成功因素
- 合理分片键:读写分离与分库分表结合时,分片键选择至关重要
- 监控体系:建立完善的性能监控和告警机制
- 渐进实施:先单库读写分离,验证后再与分库分表结合
- 容错设计:考虑主从延迟、网络分区等异常场景
7.3 适用场景评估
| 场景 | 推荐方案 | 注意事项 |
|---|---|---|
| 纯读多写少 | 读写分离 | 主从延迟容忍度 |
| 大数据量+高并发 | 分库分表+读写分离 | 分片键设计、跨分片查询 |
| 强一致性要求 | 读写分离+强制读主 | 性能与一致性权衡 |
| 多数据中心 | 读写分离+异地多活 | 网络延迟、数据同步 |