[Sharding-JDBC] Integrating Sharding-JDBC with Spring and Spring Boot: Table Sharding Strategies with API and YAML Configuration in Practice


Requirement: partition by month, shard tables by year, fall back to a backup table when no table exists for a given year, and dynamically fetch the database username and password from the Linux host when creating the connection.

Backup table: table

Tables for future years have already been created in advance with DDL: table_2026, table_2027 (for example, rows whose operate_time falls in 2026 are routed to table_2026).

Environment Setup

Maven
This dependency pulls in quite a few packages with known vulnerabilities; if your builds go through vulnerability scanning, replace the affected transitive dependencies with patched versions or add them to the whitelist.

java
<dependency>
    <groupId>org.apache.shardingsphere</groupId>
    <artifactId>shardingsphere-jdbc-core</artifactId>
    <version>${sharding-jdbc.version}</version>
</dependency>

Spring Framework

Sharding-JDBC 4.x API-based implementation

Note: with the 4.x API, if you want non-sharded tables to keep using the same data source, you must assign a default data source with setDefaultDataSourceName (a minimal sketch follows below). That setter was removed in 5.x, so this approach only works on the lower versions. Without it, Sharding-JDBC cannot route the other tables and reports that they do not exist.
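
For reference, a minimal sketch of that 4.x-only call (the configuration object and data source name follow the example below; the setter belongs to the 4.x ShardingRuleConfiguration and no longer exists in 5.x):

java
// 4.x only: tables without a sharding rule fall through to the "ds" data source.
// ShardingRuleConfiguration here is the 4.x class; this setter was removed in 5.x.
shardingRuleConfig.setDefaultDataSourceName("ds");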

datasource

java

    @Bean(name = "dataSource")
    public DataSource shardingDataSource() throws SQLException, HKEException {
        // 1. Configure the underlying data source(s); dataSource() (defined elsewhere) builds the actual connection pool
        Map<String, DataSource> dataSourceMap = new HashMap<>();
        dataSourceMap.put("ds", dataSource());

        // 2. Configure the sharding rule
        ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();

//        shardingRuleConfig.setDefaultDatabaseShardingStrategy(new NoneShardingStrategyConfiguration());

        // Sharding rule for the logic table "table"
        ShardingTableRuleConfiguration signInfoRule = new ShardingTableRuleConfiguration(
                "table",
                "ds.table,ds.table_$->{2026..2028}"  // the backup table plus the pre-created tables for future years
        );
        signInfoRule.setTableShardingStrategy(
                new StandardShardingStrategyConfiguration(
                        "operate_time",  // sharding column
                        "year-sharding-algorithm"  // custom sharding algorithm
                )
        );
//        signInfoRule.setKeyGenerateStrategy(
//                new KeyGenerateStrategyConfiguration("row_id", "snowflake")
//        );

        // Add the rules for the other sharded tables here (tablexxx)

        // Register the table rule in the sharding configuration
        shardingRuleConfig.getTables().add(signInfoRule);

        // 3. Configure the sharding algorithm
        shardingRuleConfig.getShardingAlgorithms().put(
                "year-sharding-algorithm",
                new AlgorithmConfiguration("CLASS_BASED", new Properties() {
                    {
                        setProperty("strategy", "STANDARD");
                        setProperty("algorithmClassName", "com.package.YearShardingAlgorithm");
                    }
                })
        );

        // 4. Configure the key generator
        shardingRuleConfig.getKeyGenerators().put(
                "snowflake",
                new AlgorithmConfiguration("SNOWFLAKE", new Properties())
        );

        // 5. Create the ShardingSphere data source
        Properties props = new Properties();
        props.setProperty("sql-show", "true");  // print SQL in the logs

        return ShardingSphereDataSourceFactory.createDataSource(
                dataSourceMap,
                Collections.singleton(shardingRuleConfig),
                props
        );
    }
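
Once registered, this bean is used like any other DataSource. A minimal usage sketch (assuming JdbcTemplate; MyBatis or JPA would be wired the same way, and the class below is illustrative, not part of the original project):

java
import javax.sql.DataSource;

import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jdbc.core.JdbcTemplate;

@Configuration
public class ShardingJdbcTemplateConfig {

    // SQL written against the logic table "table" is parsed, rewritten and routed
    // to table / table_2026 / ... by the ShardingSphere data source transparently.
    @Bean
    public JdbcTemplate jdbcTemplate(@Qualifier("dataSource") DataSource dataSource) {
        return new JdbcTemplate(dataSource);
    }
}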

Table sharding algorithm implementation

Note: which interface you implement depends on the algorithm type you need; there are Standard, Complex, and other variants.

java
package cfca.hke.privatization.sharding;


import cfca.hke.privatization.common.logging.LoggerManager;
import org.apache.shardingsphere.sharding.api.sharding.standard.PreciseShardingValue;
import org.apache.shardingsphere.sharding.api.sharding.standard.RangeShardingValue;
import org.apache.shardingsphere.sharding.api.sharding.standard.StandardShardingAlgorithm;

import java.time.LocalDateTime;
import java.util.Collection;
import java.util.Collections;

public class YearShardingAlgorithm implements StandardShardingAlgorithm<LocalDateTime> {


    @Override
    public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<LocalDateTime> shardingValue) {
        // Logic table name; it doubles as the backup table
        String logicTableName = shardingValue.getLogicTableName();

        Object value = shardingValue.getValue();
        String year = value.toString();
        // The date value may arrive in several formats, so simply take the first four characters as the year
        if (year.length() > 4) {
            year = year.substring(0, 4);
        }
        // Data of 2025 stays in the backup table (the logic table name itself)
        if (!"2025".equals(year)) {
            logicTableName = logicTableName + "_" + year;
        }

        // Check whether the target table exists
        if (availableTargetNames.contains(logicTableName)) {
            return logicTableName;
        } else {
            LoggerManager.exceptionLogger.error("The table does not exist, please check HKEPrivateConfig/sharding.yaml, tableName: {}", logicTableName);
            return logicTableName;
        }
    }

    @Override
    public Collection<String> doSharding(Collection<String> availableTargetNames, RangeShardingValue<LocalDateTime> rangeShardingValue) {
        // Range sharding is not implemented here; returning an empty collection means a range query on operate_time is routed to no table.
        return Collections.emptyList();
    }
}
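
If range queries on operate_time are ever issued, a common fallback is to return every candidate table so the query fans out across all shards. A minimal sketch of that variant (a behavioral choice, not something the original article prescribes):

java
    @Override
    public Collection<String> doSharding(Collection<String> availableTargetNames,
                                         RangeShardingValue<LocalDateTime> rangeShardingValue) {
        // Fall back to scanning every candidate table when the range cannot be narrowed down.
        return availableTargetNames;
    }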

Sharding-JDBC 5.4.x YAML-based implementation

This version has a defect: when you configure tables that are not sharded, the configured table names are treated as case-sensitive. The issue was fixed in 5.4.1, so prefer the newest release.

Note: ds.* does not affect tables that already have a sharding rule; anything with a sharding rule is treated as a sharded table first. If you list single tables explicitly here instead of configuring ds.*, you must upgrade to 5.4.1 (or whatever newer release appears later); in general, use the higher versions.

yaml
mode:
  # Standalone mode: configuration is stored locally; suitable for development, testing, or small deployments, with no extra coordination service (such as ZooKeeper) required
  type: Standalone
  repository:
    # Use an embedded JDBC database to store ShardingSphere metadata (logical databases, table structures, etc.)
    type: JDBC
# Logical database name; any identifier works
databaseName: zhongxin
# Data source and connection pool configuration
dataSources:
  ds:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    driverClassName: com.mysql.cj.jdbc.Driver
    url: jdbc:mysql://192.168.xx.xx:3306/zhongxin_new?rewriteBatchedStatements=true&allowMultiQueries=true&useLocalSessionState=true&useUnicode=true&characterEncoding=utf-8&socketTimeout=3000000&connectTimeout=60000
    username: root
    password:
    connectionTimeout: 50000
    minimumIdle: 5
    maximumPoolSize: 10
    idleTimeout: 600000
    maxLifetime: 1800000
    connectionTestQuery: SELECT 1
# 5.x style rule configuration
rules:
# Sharding rule section
- !SHARDING
  # Logical tables that need sharding, and their rules
  tables:
    table:
      # Actual data nodes for the logic table: the backup table plus the yearly tables
      actualDataNodes: ds.table,ds.table_$->{2026..2028}
      tableStrategy:
        standard:
          # Sharding column
          shardingColumn: operate_time
          # Sharding algorithm
          shardingAlgorithmName: infoAlgorithm
    table1:
      actualDataNodes: ds.table1,ds.table1_$->{2026..2028}
      tableStrategy:
        standard:
          shardingColumn: operate_time
          shardingAlgorithmName: infoAlgorithm
    table2:
      actualDataNodes: ds.table2,ds.table2_$->{2026..2028}
      tableStrategy:
        standard:
          shardingColumn: operate_time
          shardingAlgorithmName: infoAlgorithm
  shardingAlgorithms:
    infoAlgorithm:
      # CLASS_BASED: the algorithm is backed by a custom class
      type: CLASS_BASED
      props:
        # Strategy type implemented by the custom algorithm: STANDARD
        strategy: STANDARD
        # Fully qualified class name of the custom sharding algorithm
        algorithmClassName: com.package.YearShardingAlgorithm
  keyGenerators:
    snowflake:
      type: SNOWFLAKE
# Tables that are not sharded (single tables)
- !SINGLE
  tables:
    - ds.*

props:
  # Whether to print SQL in the logs
  sql-show: true
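
If you do not need to rewrite the credentials at startup, the YAML above can be turned into a data source directly with ShardingSphere's factory. A minimal sketch (the file path is illustrative):

java
import java.io.File;

import javax.sql.DataSource;

import org.apache.shardingsphere.driver.api.yaml.YamlShardingSphereDataSourceFactory;

public class ShardingDataSourceFromYaml {

    // Parses mode/dataSources/rules/props from the YAML file and builds the sharding data source.
    public static DataSource create() throws Exception {
        return YamlShardingSphereDataSourceFactory.createDataSource(new File("./HKEPrivateConfig/sharding.yaml"));
    }
}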

Dynamically changing the username and password
Note: parse the YAML with the parsing utilities that ShardingSphere provides. On lower versions you can read the file with your own stream handling, but on higher versions a plain YAML reader will fail on ShardingSphere-specific configuration (such as the !SHARDING tags). After changing the parsed configuration, convert it back to a String or byte[] with ShardingSphere's utilities as well.

java
    @Bean(name = "dataSource")
    public DataSource shardingDataSource() throws Exception {
        // Obtain the username and password fetched dynamically from the Linux host (see the sketch below)
        String username = credentials[0];
        String password = credentials[1];

        File yamlFile = new File("./xxx/sharding.yaml");
        // Parse the YAML with ShardingSphere's YamlEngine so that its special tags (e.g. !SHARDING) are handled correctly
        YamlJDBCConfiguration rootConfig = YamlEngine.unmarshal(yamlFile, YamlJDBCConfiguration.class);
        Map<String, Object> dsMap = rootConfig.getDataSources().get("ds");
        dsMap.put("username", username);
        dsMap.put("password", password);
        // Marshal the configuration back with YamlEngine and build the data source from the resulting bytes
        String marshal = YamlEngine.marshal(rootConfig);
        DataSource dataSource = YamlShardingSphereDataSourceFactory.createDataSource(marshal.getBytes());
        printDBInfo(username, password);
        return dataSource;
    }
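
The article does not show how credentials is populated. One possible sketch, assuming the Linux host exposes a local script that prints the username and password on a single line (script path and output format are assumptions, not part of the original setup):

java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

public final class LinuxCredentialReader {

    // Runs a local script that prints "username password" and returns the two values.
    public static String[] readCredentials() throws Exception {
        Process process = new ProcessBuilder("/bin/sh", "-c", "/opt/scripts/get_db_credentials.sh").start();
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) {
            String line = reader.readLine();
            process.waitFor();
            if (line == null) {
                throw new IllegalStateException("credential script produced no output");
            }
            return line.trim().split("\\s+");
        }
    }
}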

Table sharding algorithm implementation

The algorithm class is the same YearShardingAlgorithm shown in the 4.x API section above, and which interface you implement still depends on the algorithm type you need (Standard, Complex, etc.).

Spring Boot Framework

Sharding-JDBC 5.4.x YAML-based implementation

In Spring Boot, add the sharding configuration directly to the application.yml file; Spring Boot parses that YAML automatically, so there is no need to read it manually.

yaml
mode:
  type: Standalone
  repository:
    type: JDBC
# Logical database name; any identifier works
databaseName: zhongxin
# Data source and connection pool configuration
dataSources:
  ds:
    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
    driverClassName: com.mysql.cj.jdbc.Driver
    url: jdbc:mysql://192.168.xx.xx:3306/zhongxin_new?rewriteBatchedStatements=true&allowMultiQueries=true&useLocalSessionState=true&useUnicode=true&characterEncoding=utf-8&socketTimeout=3000000&connectTimeout=60000
    username: root
    password:
    connectionTimeout: 50000
    minimumIdle: 5
    maximumPoolSize: 10
    idleTimeout: 600000
    maxLifetime: 1800000
    connectionTestQuery: SELECT 1
# 5.x style rule configuration
rules:
- !SHARDING
  tables:
    table:
      actualDataNodes: ds.table,ds.table_$->{2026..2028}
      tableStrategy:
        standard:
          shardingColumn: operate_time
          shardingAlgorithmName: infoAlgorithm
    table1:
      actualDataNodes: ds.table1,ds.table1_$->{2026..2028}
      tableStrategy:
        standard:
          shardingColumn: operate_time
          shardingAlgorithmName: infoAlgorithm
    table2:
      actualDataNodes: ds.table2,ds.table2_$->{2026..2028}
      tableStrategy:
        standard:
          shardingColumn: operate_time
          shardingAlgorithmName: infoAlgorithm
  shardingAlgorithms:
    infoAlgorithm:
      type: CLASS_BASED
      props:
        strategy: STANDARD
        algorithmClassName: com.package.YearShardingAlgorithm
  keyGenerators:
    snowflake:
      type: SNOWFLAKE
# Tables that are not sharded (single tables)
- !SINGLE
  tables:
    - ds.*

props:
  sql-show: true

YAML configuration example covering database sharding, encryption, and read/write splitting

yaml
mode:
  type: Standalone
  repository:
    type: JDBC
databaseName: demo_db
dataSources:
  ds_basic:
    dataSourceClassName: com.alibaba.druid.pool.DruidDataSource
    driverClassName: com.mysql.jdbc.Driver
    url: jdbc:mysql://127.0.0.1:3306/demo_basic?characterEncoding=utf-8&allowPublicKeyRetrieval=true&useSSL=false
    username: root
    password: Ph9ep971fm14nYaZsLl9LY+MCqX9uJSozYRNgP2VVSj/hbmokn5OC6kpiAA1I0okA9GiDHEo7qHUvRQYYUNZvQ==
    initialSize: 1
    minIdle: 1
    maxActive: 64
    maxWait: 20000
    validationQuery: SELECT 1 FROM DUAL
    validationQueryTimeout: 30000
    minEvictableIdleTimeMillis: 300000
    maxEvictableIdleTimeMillis: 600000
    timeBetweenEvictionRunsMillis: 300000
    testOnBorrow: true
    testWhileIdle: true
    filters: config, stat, wall
    connectProperties:
      connectTimeout: 5000
      socketTimeout: '20000'
      config.decrypt: 'true'
      config.decrypt.key: MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALZRYgsnvVKPqZTfMOWmmj6OuupFRSk7+Vtqv70cG3y6T3bm+DcQU3zOC993ozbHpmqeODtuLzURhIuXDMyTKW8CAwEAAQ==


  ds0000:
    dataSourceClassName: com.alibaba.druid.pool.DruidDataSource
    driverClassName: com.mysql.jdbc.Driver
    url: jdbc:mysql://127.0.0.1:3306/demo_0000?characterEncoding=utf-8&allowPublicKeyRetrieval=true&useSSL=false
    username: root
    password: Ph9ep971fm14nYaZsLl9LY+MCqX9uJSozYRNgP2VVSj/hbmokn5OC6kpiAA1I0okA9GiDHEo7qHUvRQYYUNZvQ==
    initialSize: 1
    minIdle: 1
    maxActive: 64
    maxWait: 20000
    validationQuery: SELECT 1 FROM DUAL
    validationQueryTimeout: 30000
    minEvictableIdleTimeMillis: 300000
    maxEvictableIdleTimeMillis: 600000
    timeBetweenEvictionRunsMillis: 300000
    testOnBorrow: true
    testWhileIdle: true
    filters: config, stat, wall
    connectProperties:
      connectTimeout: 5000
      socketTimeout: '20000'
      config.decrypt: 'true'
      config.decrypt.key: MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALZRYgsnvVKPqZTfMOWmmj6OuupFRSk7+Vtqv70cG3y6T3bm+DcQU3zOC993ozbHpmqeODtuLzURhIuXDMyTKW8CAwEAAQ==


  ds0001:
    dataSourceClassName: com.alibaba.druid.pool.DruidDataSource
    driverClassName: com.mysql.jdbc.Driver
    url: jdbc:mysql://127.0.0.1:3306/demo_0001?characterEncoding=utf-8&allowPublicKeyRetrieval=true&useSSL=false
    username: root
    password: Ph9ep971fm14nYaZsLl9LY+MCqX9uJSozYRNgP2VVSj/hbmokn5OC6kpiAA1I0okA9GiDHEo7qHUvRQYYUNZvQ==
    initialSize: 1
    minIdle: 1
    maxActive: 64
    maxWait: 20000
    validationQuery: SELECT 1 FROM DUAL
    validationQueryTimeout: 30000
    minEvictableIdleTimeMillis: 300000
    maxEvictableIdleTimeMillis: 600000
    timeBetweenEvictionRunsMillis: 300000
    testOnBorrow: true
    testWhileIdle: true
    filters: config, stat, wall
    connectProperties:
      connectTimeout: 5000
      socketTimeout: '20000'
      config.decrypt: 'true'
      config.decrypt.key: MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALZRYgsnvVKPqZTfMOWmmj6OuupFRSk7+Vtqv70cG3y6T3bm+DcQU3zOC993ozbHpmqeODtuLzURhIuXDMyTKW8CAwEAAQ==

  ds0000_slave:
    dataSourceClassName: com.alibaba.druid.pool.DruidDataSource
    driverClassName: com.mysql.jdbc.Driver
    url: jdbc:mysql://192.168.1.88:3306/demo_0000?characterEncoding=utf-8&allowPublicKeyRetrieval=true&useSSL=false
    username: root
    password: Ph9ep971fm14nYaZsLl9LY+MCqX9uJSozYRNgP2VVSj/hbmokn5OC6kpiAA1I0okA9GiDHEo7qHUvRQYYUNZvQ==
    initialSize: 1
    minIdle: 1
    maxActive: 64
    maxWait: 20000
    validationQuery: SELECT 1 FROM DUAL
    validationQueryTimeout: 30000
    minEvictableIdleTimeMillis: 300000
    maxEvictableIdleTimeMillis: 600000
    timeBetweenEvictionRunsMillis: 300000
    testOnBorrow: true
    testWhileIdle: true
    filters: config, stat, wall
    connectProperties:
      connectTimeout: 5000
      socketTimeout: '20000'
      config.decrypt: 'true'
      config.decrypt.key: MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALZRYgsnvVKPqZTfMOWmmj6OuupFRSk7+Vtqv70cG3y6T3bm+DcQU3zOC993ozbHpmqeODtuLzURhIuXDMyTKW8CAwEAAQ==
  
  ds0001_slave:
    dataSourceClassName: com.alibaba.druid.pool.DruidDataSource
    driverClassName: com.mysql.jdbc.Driver
    url: jdbc:mysql://192.168.1.88:3306/demo_0001?characterEncoding=utf-8&allowPublicKeyRetrieval=true&useSSL=false
    username: root
    password: Ph9ep971fm14nYaZsLl9LY+MCqX9uJSozYRNgP2VVSj/hbmokn5OC6kpiAA1I0okA9GiDHEo7qHUvRQYYUNZvQ==
    initialSize: 1
    minIdle: 1
    maxActive: 64
    maxWait: 20000
    validationQuery: SELECT 1 FROM DUAL
    validationQueryTimeout: 30000
    minEvictableIdleTimeMillis: 300000
    maxEvictableIdleTimeMillis: 600000
    timeBetweenEvictionRunsMillis: 300000
    testOnBorrow: true
    testWhileIdle: true
    filters: config, stat, wall
    connectProperties:
      connectTimeout: 5000
      socketTimeout: '20000'
      config.decrypt: 'true'
      config.decrypt.key: MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALZRYgsnvVKPqZTfMOWmmj6OuupFRSk7+Vtqv70cG3y6T3bm+DcQU3zOC993ozbHpmqeODtuLzURhIuXDMyTKW8CAwEAAQ==


rules:
# Data sharding
- !SHARDING
  tables:
    t_claim_case_mdtrt:
      actualDataNodes: ds$->{['0000','0001']}.t_claim_case_mdtrt_000$->{0..9}
      tableStrategy:
        standard:
          shardingColumn: transaction_no
          shardingAlgorithmName: t_claim_case_mdtrt_inline
      keyGenerateStrategy:
        column: id
        keyGeneratorName: snowflake
    t_claim_case_info:
      actualDataNodes: ds$->{['0000','0001']}.t_claim_case_info_000$->{0..9}
      tableStrategy:
        standard:
          shardingColumn: transaction_no
          shardingAlgorithmName: t_claim_case_info_inline
      keyGenerateStrategy:
        column: id
        keyGeneratorName: snowflake
   
  defaultShardingColumn: transaction_no
  bindingTables:
    - t_claim_case_mdtrt, t_claim_case_info
  defaultDatabaseStrategy:
    standard:
      shardingColumn: transaction_no
      shardingAlgorithmName: database_inline
  defaultTableStrategy:
    none:
  shardingAlgorithms:
    database_inline:
      type: INLINE
      props:
        algorithm-expression: ds$->{transaction_no[-8..-5]}
    t_claim_case_mdtrt_inline:
      type: INLINE
      props:
        algorithm-expression: t_claim_case_mdtrt_$->{transaction_no[-4..-1]}
    t_claim_case_info_inline:
      type: INLINE
      props:
        algorithm-expression: t_claim_case_info_$->{transaction_no[-4..-1]}
  keyGenerators:
    snowflake:
      type: SNOWFLAKE

# Data encryption
- !ENCRYPT
  tables:
    t_claim_case_info:
      columns:
        appl_mobile:
          cipher:
            name: appl_mobile
            encryptorName: sm4_encryptor
        opsnId_no:
          cipher:
            name: opsnId_no
            encryptorName: sm4_encryptor
        rpter_id_no:
          cipher:
            name: rpter_id_no
            encryptorName: sm4_encryptor
        rpter_mobile:
          cipher:
            name: rpter_mobile
            encryptorName: sm4_encryptor
  encryptors:
    sm4_encryptor:
      type: SM4
      props:
        sm4-key: 86C63180C2806ED1F43A859DE501215C
        sm4-mode: ECB
        sm4-padding: PKCS5Padding
# Single tables (not sharded)
- !SINGLE
  tables:
    - ds_basic.*

# Read/write splitting
- !READWRITE_SPLITTING
  dataSources:
    ds0000:
      writeDataSourceName: ds0000
      readDataSourceNames:
        - ds0000_slave
      transactionalReadQueryStrategy: PRIMARY
      loadBalancerName: random
    ds0001:
      writeDataSourceName: ds0001
      readDataSourceNames:
        - ds0001_slave
      transactionalReadQueryStrategy: PRIMARY
      loadBalancerName: random
  loadBalancers:
    random:
      type: RANDOM


props:
  sql-show: true
  max-connections-size-per-query: 5
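
To make the INLINE expressions above concrete, here is a hypothetical walkthrough (the transaction number value is made up) of how transaction_no is mapped to a database and a table:

java
public class InlineExpressionWalkthrough {

    public static void main(String[] args) {
        // Hypothetical transaction number; the trailing digits carry the routing information.
        String transactionNo = "20250115000100010007";

        // ds$->{transaction_no[-8..-5]}: characters -8..-5 from the end -> "0001" -> database ds0001
        String dbSuffix = transactionNo.substring(transactionNo.length() - 8, transactionNo.length() - 4);

        // t_claim_case_info_$->{transaction_no[-4..-1]}: last four characters -> "0007" -> table t_claim_case_info_0007
        String tableSuffix = transactionNo.substring(transactionNo.length() - 4);

        System.out.println("ds" + dbSuffix);                     // ds0001
        System.out.println("t_claim_case_info_" + tableSuffix);  // t_claim_case_info_0007
    }
}

Because t_claim_case_mdtrt and t_claim_case_info are declared as bindingTables and share the transaction_no sharding column, joins between them are routed to the same database and table suffix instead of producing a cartesian set of routes.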