学习目标
掌握大规模分布式系统设计原则,学习数据库分库分表技术,深入理解分布式缓存与消息队列的高级应用,掌握性能监控与容量规划方法。
1. 大规模系统设计原理
1.1 CAP理论深入应用
CAP理论实现示例:
java
// CAP theorem demo: Consistency, Availability, Partition tolerance — a distributed
// system can fully provide at most two of the three at any moment.
// NOTE(review): the nested @Service classes are non-static inner classes; Spring
// component scanning cannot instantiate non-static member classes as beans, so
// confirm how APSystem/CPSystem/CASystem are actually wired before injecting them.
@Component
@Slf4j
public class CAPTheoryImplementation {

    // AP system: favors availability + partition tolerance; accepts eventual consistency.
    @Service
    public class APSystem {

        @Autowired
        private RedisTemplate<String, Object> redisTemplate;

        @Autowired
        private DatabaseReplicaService replicaService;

        /**
         * Writes with an eventual-consistency strategy: the primary write is
         * synchronous, replica propagation is asynchronous and retried on failure.
         */
        public void writeWithEventualConsistency(String key, Object value) {
            // Synchronous write to the primary database.
            replicaService.writeToPrimary(key, value);
            // Replicate asynchronously — replicas may lag, but the caller is not blocked.
            // NOTE(review): runAsync uses the common ForkJoinPool; consider a dedicated
            // executor if replication can block.
            CompletableFuture.runAsync(() -> {
                try {
                    replicaService.replicateToSecondary(key, value);
                } catch (Exception e) {
                    log.error("副本同步失败,将稍后重试", e);
                    // Queue for retry so the replica eventually converges.
                    replicaService.addToRetryQueue(key, value);
                }
            });
            // Refresh the cache with a one-hour TTL.
            redisTemplate.opsForValue().set(key, value, Duration.ofHours(1));
        }

        /**
         * Reads with availability as the priority: cache first, then the nearest
         * replica, finally the primary as a last resort.
         */
        public Object readWithHighAvailability(String key) {
            // Cache lookup first.
            Object cached = redisTemplate.opsForValue().get(key);
            if (cached != null) {
                return cached;
            }
            // Cache miss: read from the nearest replica (availability over freshness).
            try {
                return replicaService.readFromNearestReplica(key);
            } catch (Exception e) {
                log.warn("从副本读取失败,尝试从主数据库读取", e);
                return replicaService.readFromPrimary(key);
            }
        }
    }

    // CP system: favors consistency + partition tolerance; may sacrifice availability.
    @Service
    public class CPSystem {

        @Autowired
        private DistributedLockService lockService;

        /**
         * Strongly consistent write using a distributed lock plus a (simplified)
         * two-phase commit. Rolls back and rethrows as RuntimeException on failure.
         */
        public void writeWithStrongConsistency(String key, Object value) {
            String lockKey = "write_lock_" + key;
            DistributedLock lock = null;
            try {
                // Acquire the per-key distributed lock (30 — presumably seconds;
                // confirm the unit against DistributedLockService).
                lock = lockService.acquireLock(lockKey, 30);
                // Two-phase commit: prepare phase.
                boolean allPrepared = prepareTransaction(key, value);
                if (!allPrepared) {
                    throw new ConsistencyException("事务准备失败");
                }
                // Commit phase.
                commitTransaction(key, value);
                log.info("强一致性写入成功: key={}", key);
            } catch (Exception e) {
                // Roll back on any failure so all nodes stay consistent.
                rollbackTransaction(key, value);
                log.error("强一致性写入失败", e);
                throw new RuntimeException("写入失败,已回滚", e);
            } finally {
                if (lock != null) {
                    lockService.releaseLock(lock);
                }
            }
        }

        // Prepare phase: should return true only if every node acknowledges readiness.
        private boolean prepareTransaction(String key, Object value) {
            return true; // simplified stub
        }

        // Commit phase: broadcast the commit request to all nodes (stub).
        private void commitTransaction(String key, Object value) {
            log.info("提交事务: key={}", key);
        }

        // Rollback: broadcast the rollback request to all nodes (stub).
        private void rollbackTransaction(String key, Object value) {
            log.info("回滚事务: key={}", key);
        }
    }

    // CA system: consistency + availability — only attainable with no network
    // partitions, i.e. a single-node deployment.
    @Service
    public class CASystem {

        // Single node: writes are immediately consistent by construction (stub).
        public void writeInSingleNode(String key, Object value) {
            log.info("单机写入: key={}, value={}", key, value);
        }

        // Single node: reads always observe the latest write (stub).
        public Object readFromSingleNode(String key) {
            return "value_from_single_node";
        }
    }
}
// Demonstrates four consistency models: strong, weak, eventual, causal.
// All node-level operations are stubs — see the private helpers at the bottom.
@Component
@Slf4j
public class ConsistencyModel {

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    // Strong consistency: a read after this write must see the new value, so
    // every replica has to acknowledge before the call returns.
    public void strongConsistencyWrite(String key, Object value) {
        boolean allConfirmed = writeToAllReplicas(key, value);
        if (!allConfirmed) {
            throw new ConsistencyException("强一致性写入失败");
        }
    }

    // Weak consistency: write the primary only; readers may see stale data with
    // no guarantee about when (or whether) replicas converge.
    public void weakConsistencyWrite(String key, Object value) {
        writeToPrimary(key, value);
        // Deliberately no wait for replica acknowledgement.
    }

    // Eventual consistency: primary write returns immediately; replicas are
    // synced asynchronously and converge later (conflict resolution would live
    // in syncToReplicas).
    public void eventualConsistencyWrite(String key, Object value) {
        writeToPrimary(key, value);
        CompletableFuture.runAsync(() -> {
            syncToReplicas(key, value);
        });
    }

    // Causal consistency: causally related writes are ordered via a vector clock.
    // NOTE(review): the timestamp parameter is currently unused — either feed it
    // into the ordering or drop it from the signature.
    public void causalConsistencyWrite(String key, Object value, long timestamp) {
        VectorClock clock = new VectorClock();
        clock.increment(getNodeId());
        writeWithVectorClock(key, value, clock);
    }

    // Stub: write to every replica and report whether all confirmed.
    private boolean writeToAllReplicas(String key, Object value) {
        return true;
    }

    // Stub: write to the primary node.
    private void writeToPrimary(String key, Object value) {
    }

    // Stub: asynchronously propagate the write to replicas.
    private void syncToReplicas(String key, Object value) {
    }

    // Stub: write tagged with a vector clock for causal ordering.
    private void writeWithVectorClock(String key, Object value, VectorClock clock) {
    }

    // Stub: this node's identity within the cluster.
    private String getNodeId() {
        return "node-1";
    }
}
// 向量时钟实现
class VectorClock {
private Map<String, Long> clock = new HashMap<>();
public void increment(String nodeId) {
clock.put(nodeId, clock.getOrDefault(nodeId, 0L) + 1);
}
public boolean happensBefore(VectorClock other) {
// 实现happens-before关系判断
return false;
}
}
1.2 负载均衡策略
负载均衡实现:
java
// Load-balancer abstraction: picks one server from a candidate list.
public interface LoadBalancer {

    /**
     * Selects a target server from the given candidates.
     *
     * @param servers candidate server addresses; implementations in this file
     *                reject null/empty lists with IllegalArgumentException
     * @return the chosen server address
     */
    String selectServer(List<String> servers);
}
// Round-robin load balancer: cycles through the servers in order.
@Component
@Slf4j
public class RoundRobinLoadBalancer implements LoadBalancer {

    // Monotonically increasing ticket; may wrap past Integer.MAX_VALUE.
    private final AtomicInteger counter = new AtomicInteger(0);

    /**
     * Returns servers in strict rotation. Math.floorMod keeps the index in
     * [0, size) even after the counter overflows to negative values and
     * preserves rotation order across the wrap — the previous
     * Math.abs(counter % size) stayed in range but reversed the rotation
     * order once the counter went negative.
     *
     * @throws IllegalArgumentException if the server list is null or empty
     */
    @Override
    public String selectServer(List<String> servers) {
        if (servers == null || servers.isEmpty()) {
            throw new IllegalArgumentException("服务器列表不能为空");
        }
        int index = Math.floorMod(counter.getAndIncrement(), servers.size());
        String selected = servers.get(index);
        log.info("轮询选择服务器: {}", selected);
        return selected;
    }
}
// Random load balancer: picks a uniformly random server per request.
@Component
@Slf4j
public class RandomLoadBalancer implements LoadBalancer {

    /**
     * Picks a server uniformly at random. Uses ThreadLocalRandom instead of a
     * single shared java.util.Random instance — the shared instance serializes
     * all threads on one atomic seed under concurrent load.
     *
     * @throws IllegalArgumentException if the server list is null or empty
     */
    @Override
    public String selectServer(List<String> servers) {
        if (servers == null || servers.isEmpty()) {
            throw new IllegalArgumentException("服务器列表不能为空");
        }
        int index = ThreadLocalRandom.current().nextInt(servers.size());
        String selected = servers.get(index);
        log.info("随机选择服务器: {}", selected);
        return selected;
    }
}
// Weighted round-robin load balancer.
// NOTE(review): weights are not implemented yet — this currently degenerates
// to plain round-robin; wire real per-server weights before production use.
@Component
@Slf4j
public class WeightedRoundRobinLoadBalancer implements LoadBalancer {

    // Index of the server returned by the previous call (-1 = none yet).
    private final AtomicInteger currentIndex = new AtomicInteger(-1);

    /**
     * Rotates through the servers. Adds the null/empty guard the sibling
     * balancers already have — previously an empty list caused an
     * ArithmeticException (modulo by zero). The unused currentWeight field
     * was removed.
     *
     * @throws IllegalArgumentException if the server list is null or empty
     */
    @Override
    public String selectServer(List<String> servers) {
        if (servers == null || servers.isEmpty()) {
            throw new IllegalArgumentException("服务器列表不能为空");
        }
        int index = currentIndex.updateAndGet(i -> (i + 1) % servers.size());
        return servers.get(index);
    }
}
// Least-connections load balancer: routes to the server with the fewest
// in-flight requests tracked by this instance.
@Component
@Slf4j
public class LeastConnectionLoadBalancer implements LoadBalancer {

    // Per-server in-flight request counters.
    private final Map<String, AtomicInteger> connectionCounts = new ConcurrentHashMap<>();

    /**
     * Picks the server with the smallest tracked connection count and
     * increments its counter. The lookup no longer allocates a throwaway
     * AtomicInteger per comparison. Note: selection and increment are not one
     * atomic step, so two concurrent calls may briefly pick the same server —
     * acceptable for load balancing.
     *
     * @throws IllegalArgumentException if the server list is null or empty
     */
    @Override
    public String selectServer(List<String> servers) {
        if (servers == null || servers.isEmpty()) {
            throw new IllegalArgumentException("服务器列表不能为空");
        }
        String selected = servers.stream()
                .min(Comparator.comparingInt(this::currentConnections))
                .orElseThrow();
        // Count the new in-flight request against the chosen server.
        connectionCounts.computeIfAbsent(selected, k -> new AtomicInteger(0)).incrementAndGet();
        log.info("最少连接选择服务器: {}, 当前连接数: {}",
                selected, connectionCounts.get(selected).get());
        return selected;
    }

    /**
     * Releases one connection for the server. The counter is clamped at zero
     * so an unmatched release can no longer drive it negative and skew future
     * selections.
     */
    public void releaseConnection(String server) {
        connectionCounts.computeIfPresent(server, (k, v) -> {
            v.updateAndGet(c -> Math.max(0, c - 1));
            return v;
        });
    }

    // Current tracked connection count for a server (0 when unknown).
    private int currentConnections(String server) {
        AtomicInteger count = connectionCounts.get(server);
        return count == null ? 0 : count.get();
    }
}
// Consistent-hash load balancer: servers are mapped onto a hash ring via
// virtual nodes, so adding or removing a server only remaps a small key range.
@Component
@Slf4j
public class ConsistentHashLoadBalancer implements LoadBalancer {

    // hash -> physical server; TreeMap provides the ceiling lookup on the ring.
    // NOTE(review): TreeMap is not thread-safe — add/remove concurrent with
    // selection needs external synchronization.
    private final TreeMap<Long, String> hashRing = new TreeMap<>();

    // Virtual nodes per physical server, to even out the distribution.
    private static final int VIRTUAL_NODES = 150;

    public ConsistentHashLoadBalancer(List<String> servers) {
        for (String server : servers) {
            addServer(server);
        }
    }

    /** Adds a server by placing VIRTUAL_NODES points on the ring. */
    public void addServer(String server) {
        for (int i = 0; i < VIRTUAL_NODES; i++) {
            String virtualNode = server + "#" + i;
            long hash = hash(virtualNode);
            hashRing.put(hash, server);
        }
        log.info("添加服务器: {}, 虚拟节点数: {}", server, VIRTUAL_NODES);
    }

    /** Removes all of a server's virtual nodes from the ring. */
    public void removeServer(String server) {
        for (int i = 0; i < VIRTUAL_NODES; i++) {
            String virtualNode = server + "#" + i;
            long hash = hash(virtualNode);
            hashRing.remove(hash);
        }
        log.info("移除服务器: {}", server);
    }

    /**
     * Walks clockwise from the key's hash to the first virtual node.
     *
     * @throws IllegalStateException if the ring is empty (previously this was
     *                               a NullPointerException on firstEntry())
     */
    @Override
    public String selectServer(List<String> servers) {
        if (hashRing.isEmpty()) {
            throw new IllegalStateException("哈希环为空");
        }
        // In real use the key should come from the request (user id, IP, ...).
        String key = generateKey();
        long hash = hash(key);
        Map.Entry<Long, String> entry = hashRing.ceilingEntry(hash);
        if (entry == null) {
            entry = hashRing.firstEntry(); // wrap around the ring
        }
        String selected = entry.getValue();
        log.info("一致性哈希选择服务器: key={}, server={}", key, selected);
        return selected;
    }

    /**
     * 32-bit hash built from the first four MD5 bytes. The key is now encoded
     * as UTF-8 explicitly — String.getBytes() with no charset uses the
     * platform default, which could place the same key differently on
     * different machines.
     */
    private long hash(String key) {
        try {
            MessageDigest md = MessageDigest.getInstance("MD5");
            byte[] digest = md.digest(key.getBytes(StandardCharsets.UTF_8));
            return ((long) (digest[0] & 0xFF) << 24) |
                   ((long) (digest[1] & 0xFF) << 16) |
                   ((long) (digest[2] & 0xFF) << 8) |
                   ((long) (digest[3] & 0xFF));
        } catch (NoSuchAlgorithmException e) {
            throw new RuntimeException(e);
        }
    }

    // Placeholder key source; should be derived from the request context.
    private String generateKey() {
        return UUID.randomUUID().toString();
    }
}
2. 数据库分库分表技术
2.1 ShardingSphere集成
ShardingSphere配置:
java
// ShardingSphere configuration: 3 physical databases (ds0..ds2), each holding
// 8 physical order tables (t_order_0..t_order_7).
@Configuration
@Slf4j
public class ShardingSphereConfig {

    /**
     * Builds the sharded DataSource: database chosen by user_id % 3, table by
     * order_id % 8, both via INLINE expression algorithms.
     * NOTE(review): JDBC credentials are hard-coded here; externalize them to
     * configuration / secret management before real deployment.
     */
    @Bean
    public DataSource shardingDataSource() {
        // Physical data sources keyed by logical name.
        Map<String, DataSource> dataSourceMap = new HashMap<>();
        // Primary data source (ds0).
        HikariDataSource masterDataSource = new HikariDataSource();
        masterDataSource.setJdbcUrl("jdbc:mysql://localhost:3306/db0");
        masterDataSource.setUsername("root");
        masterDataSource.setPassword("password");
        masterDataSource.setDriverClassName("com.mysql.cj.jdbc.Driver");
        dataSourceMap.put("ds0", masterDataSource);
        // Shard data sources ds1 and ds2.
        HikariDataSource shardDataSource1 = new HikariDataSource();
        shardDataSource1.setJdbcUrl("jdbc:mysql://localhost:3306/db1");
        shardDataSource1.setUsername("root");
        shardDataSource1.setPassword("password");
        shardDataSource1.setDriverClassName("com.mysql.cj.jdbc.Driver");
        dataSourceMap.put("ds1", shardDataSource1);
        HikariDataSource shardDataSource2 = new HikariDataSource();
        shardDataSource2.setJdbcUrl("jdbc:mysql://localhost:3306/db2");
        shardDataSource2.setUsername("root");
        shardDataSource2.setPassword("password");
        shardDataSource2.setDriverClassName("com.mysql.cj.jdbc.Driver");
        dataSourceMap.put("ds2", shardDataSource2);
        // Sharding rule configuration.
        ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();
        // Table rule: logic table t_order spans ds${0..2}.t_order_${0..7}.
        TableRuleConfiguration orderTableRuleConfig = new TableRuleConfiguration("t_order", "ds${0..2}.t_order_${0..7}");
        orderTableRuleConfig.setTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("order_id", "orderShardingAlgorithm"));
        orderTableRuleConfig.setDatabaseShardingStrategyConfig(new StandardShardingStrategyConfiguration("user_id", "databaseShardingAlgorithm"));
        shardingRuleConfig.getTableRuleConfigs().add(orderTableRuleConfig);
        // Sharding algorithms (inline expressions).
        Properties orderShardingProps = new Properties();
        orderShardingProps.setProperty("algorithm-expression", "t_order_${order_id % 8}");
        shardingRuleConfig.getShardingAlgorithms().put("orderShardingAlgorithm",
                new AlgorithmConfiguration("INLINE", orderShardingProps));
        Properties databaseShardingProps = new Properties();
        databaseShardingProps.setProperty("algorithm-expression", "ds${user_id % 3}");
        shardingRuleConfig.getShardingAlgorithms().put("databaseShardingAlgorithm",
                new AlgorithmConfiguration("INLINE", databaseShardingProps));
        // Binding tables: t_order and t_order_item share sharding keys, so joins
        // between them never cross databases.
        BindingTableRuleConfiguration bindingTableRuleConfig = new BindingTableRuleConfiguration();
        bindingTableRuleConfig.setTableNames("t_order,t_order_item");
        shardingRuleConfig.getBindingTableGroups().add(bindingTableRuleConfig.getName());
        // Broadcast table: t_config is replicated identically into every database.
        BroadcastTableRuleConfiguration broadcastTableRuleConfig = new BroadcastTableRuleConfiguration("t_config");
        shardingRuleConfig.getBroadcastTables().add(broadcastTableRuleConfig.getName());
        // Assemble the ShardingSphere data source from sources + rules.
        try {
            return ShardingSphereDataSourceFactory.createDataSource(dataSourceMap,
                    Collections.singleton(shardingRuleConfig), new Properties());
        } catch (SQLException e) {
            log.error("创建ShardingSphere数据源失败", e);
            throw new RuntimeException(e);
        }
    }
}
// Custom precise/range sharding algorithm for ShardingSphere.
@Slf4j
public class CustomShardingAlgorithm implements StandardShardingAlgorithm<Long> {

    /**
     * Precise sharding (= / IN): routes by the value modulo the number of
     * candidate targets.
     * NOTE(review): the target is derived as logicTableName + "_" + index
     * rather than picked from availableTargetNames — this assumes the
     * candidates follow exactly that naming pattern; verify against the
     * table rule configuration.
     */
    @Override
    public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<Long> shardingValue) {
        String tableName = shardingValue.getLogicTableName();
        Long value = shardingValue.getValue();
        int tableIndex = (int) (value % availableTargetNames.size());
        String selectedTable = tableName + "_" + tableIndex;
        log.info("分片选择: table={}, value={}", selectedTable, value);
        return selectedTable;
    }

    /**
     * Range sharding (BETWEEN): simplified to fan out to every candidate
     * shard. A production version should use shardingValue.getValueRange() to
     * prune tables that cannot contain the range. (The previous version read
     * the range into an unused local; it has been removed.)
     */
    @Override
    public Collection<String> doSharding(Collection<String> availableTargetNames,
                                         RangeShardingValue<Long> shardingValue) {
        Collection<String> result = new ArrayList<>(availableTargetNames);
        log.info("范围分片查询: tables={}", result);
        return result;
    }

    @Override
    public void init() {
        log.info("初始化自定义分片算法");
    }

    @Override
    public String getType() {
        return "CUSTOM";
    }
}
// Order repository on top of the ShardingSphere data source; SQL against the
// logical table t_order is rewritten and routed to the physical shards.
@Repository
@Slf4j
public class OrderRepository {

    @Autowired
    private JdbcTemplate jdbcTemplate;

    // Shared row mapping, previously duplicated in both query methods.
    private static final RowMapper<Order> ORDER_ROW_MAPPER = (rs, rowNum) -> {
        Order order = new Order();
        order.setOrderId(rs.getLong("order_id"));
        order.setUserId(rs.getLong("user_id"));
        order.setAmount(rs.getBigDecimal("amount"));
        order.setCreateTime(rs.getTimestamp("create_time"));
        return order;
    };

    /** Inserts an order; ShardingSphere routes it to the proper shard. */
    public void insertOrder(Order order) {
        String sql = "INSERT INTO t_order (order_id, user_id, amount, create_time) VALUES (?, ?, ?, ?)";
        jdbcTemplate.update(sql, order.getOrderId(), order.getUserId(),
                order.getAmount(), order.getCreateTime());
        log.info("订单插入成功: orderId={}, userId={}", order.getOrderId(), order.getUserId());
    }

    /**
     * Looks up one order by id (single-shard route). Uses the varargs
     * queryForObject overload — the Object[]-based variants are deprecated
     * since Spring 5.3.
     */
    public Order selectByOrderId(Long orderId) {
        String sql = "SELECT order_id, user_id, amount, create_time FROM t_order WHERE order_id = ?";
        return jdbcTemplate.queryForObject(sql, ORDER_ROW_MAPPER, orderId);
    }

    /** Lists a user's orders; may fan out across multiple shards. */
    public List<Order> selectByUserId(Long userId) {
        String sql = "SELECT order_id, user_id, amount, create_time FROM t_order WHERE user_id = ?";
        return jdbcTemplate.query(sql, ORDER_ROW_MAPPER, userId);
    }
}
// Order entity mapped to the sharded t_order tables.
@Data
public class Order {
    // Table sharding key (routed by t_order_${order_id % 8}).
    private Long orderId;
    // Database sharding key (routed by ds${user_id % 3}).
    private Long userId;
    private BigDecimal amount;
    private Timestamp createTime;
}
2.2 分片策略选择
分片策略实现:
java
// Central place for table / database sharding strategies.
@Component
@Slf4j
public class ShardingStrategyManager {

    /**
     * Horizontal table split by user id. Math.floorMod keeps the index in
     * [0, shardCount) even for negative ids — the plain % operator produced a
     * negative remainder there, yielding invalid names like "t_order_-3".
     */
    public String getTableNameByUserId(String tablePrefix, Long userId, int shardCount) {
        int shardIndex = (int) Math.floorMod(userId, shardCount);
        return tablePrefix + "_" + shardIndex;
    }

    /** Horizontal table split by month, e.g. t_log_202401. */
    public String getTableNameByDate(String tablePrefix, Date date) {
        // SimpleDateFormat is not thread-safe in general, but is safe here
        // because it is a method-local instance.
        SimpleDateFormat sdf = new SimpleDateFormat("yyyyMM");
        String month = sdf.format(date);
        return tablePrefix + "_" + month;
    }

    /** Range-based split by order id; boundaries are fixed business thresholds. */
    public String getTableNameByOrderIdRange(String tablePrefix, Long orderId) {
        if (orderId < 1000000) {
            return tablePrefix + "_0";
        } else if (orderId < 5000000) {
            return tablePrefix + "_1";
        } else if (orderId < 10000000) {
            return tablePrefix + "_2";
        } else {
            return tablePrefix + "_3";
        }
    }

    /**
     * Hash-based split for even distribution. abs(hash % n) is safe here: the
     * remainder's magnitude is always < shardCount, so Math.abs can never
     * receive Integer.MIN_VALUE. (Kept as-is to preserve existing placements.)
     */
    public String getTableNameByHash(String tablePrefix, String key, int shardCount) {
        int hash = key.hashCode();
        int shardIndex = Math.abs(hash % shardCount);
        return tablePrefix + "_" + shardIndex;
    }
}
// Routing service: resolves the target data source and physical table for a
// given user / order.
@Service
@Slf4j
public class ShardingRouterService {

    // Must stay in sync with the ShardingSphere rules: 3 databases, 8 tables.
    private static final int DATABASE_COUNT = 3;
    private static final int TABLE_SHARD_COUNT = 8;

    @Autowired
    private ShardingStrategyManager shardingStrategyManager;

    /**
     * Resolves the shard route: database and table are both keyed on user id.
     * floorMod keeps the database index non-negative for negative ids, and the
     * former magic numbers (3, 8) are now named constants.
     * NOTE(review): orderId is only logged, never used for routing — confirm
     * that is intended.
     */
    public ShardingRoute route(Long userId, Long orderId) {
        ShardingRoute route = new ShardingRoute();
        int dbIndex = (int) Math.floorMod(userId, DATABASE_COUNT);
        route.setDataSource("ds" + dbIndex);
        String tableName = shardingStrategyManager.getTableNameByUserId("t_order", userId, TABLE_SHARD_COUNT);
        route.setTableName(tableName);
        log.info("分片路由: userId={}, orderId={}, route={}", userId, orderId, route);
        return route;
    }
}
// Resolved shard route: which data source and which physical table to hit.
@Data
public class ShardingRoute {
    // Logical data source name, e.g. "ds0".
    private String dataSource;
    // Physical table name, e.g. "t_order_3".
    private String tableName;
}
3. 分布式缓存深度应用
3.1 多级缓存架构
多级缓存实现:
java
// Three-tier read-through cache:
//   L1 local Caffeine cache -> L2 Redis -> L3 database (via the caller's loader).
@Component
@Slf4j
public class MultiLevelCacheManager {

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    @Autowired
    private CaffeineCache localCache;

    /**
     * Reads a value, falling through L1 -> L2 -> database and back-filling the
     * faster tiers on the way out.
     *
     * @param key            cache key
     * @param type           expected value type, used for type-safe casting
     * @param databaseLoader invoked only on a full cache miss; may return null
     * @return the value, or null when the loader found nothing (null results
     *         are not cached by this method)
     */
    public <T> T get(String key, Class<T> type, Supplier<T> databaseLoader) {
        // L1 lookup.
        T value = localCache.get(key, type);
        if (value != null) {
            log.debug("L1缓存命中: key={}", key);
            return value;
        }
        // L2 lookup.
        value = getFromRedis(key, type);
        if (value != null) {
            log.debug("L2缓存命中: key={}", key);
            // Back-fill L1 so the next read is local.
            localCache.put(key, value);
            return value;
        }
        // Full miss: load from the database.
        log.debug("缓存未命中,查询数据库: key={}", key);
        value = databaseLoader.get();
        if (value != null) {
            // Populate L2 with a 30-minute TTL, then L1.
            putToRedis(key, value, Duration.ofMinutes(30));
            localCache.put(key, value);
        }
        return value;
    }

    /** Writes through both tiers; expireTime applies to the Redis entry. */
    public <T> void put(String key, T value, Duration expireTime) {
        localCache.put(key, value);
        putToRedis(key, value, expireTime);
    }

    /** Evicts the key from both tiers. */
    public void evict(String key) {
        localCache.invalidate(key);
        redisTemplate.delete(key);
    }

    // L2 read; returns null on a miss or when the stored value has an
    // unexpected type (instead of throwing ClassCastException).
    private <T> T getFromRedis(String key, Class<T> type) {
        Object value = redisTemplate.opsForValue().get(key);
        if (value != null && type.isInstance(value)) {
            return type.cast(value);
        }
        return null;
    }

    // L2 write with TTL.
    private <T> void putToRedis(String key, T value, Duration expireTime) {
        redisTemplate.opsForValue().set(key, value, expireTime);
    }
}
// Caffeine local (L1) cache configuration.
@Configuration
public class CaffeineCacheConfig {

    @Bean
    public CaffeineCache localCache() {
        Cache<String, Object> cache = Caffeine.newBuilder()
                // Cap the entry count to bound memory use.
                .maximumSize(10000)
                // Both expiries are set: per the Caffeine docs an entry is
                // evicted when either deadline passes — i.e. 3 min after the
                // last access, or at latest 5 min after the write.
                .expireAfterWrite(Duration.ofMinutes(5))
                .expireAfterAccess(Duration.ofMinutes(3))
                // Collect hit/miss statistics (readable via cache.stats()).
                .recordStats()
                .build();
        return new CaffeineCache("localCache", cache);
    }
}
// Cache-penetration guard: caches a short-lived "null marker" for keys that do
// not exist in the database, so repeated misses cannot hammer the DB.
@Component
@Slf4j
public class CachePenetrationProtection {

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    // Marker stored for "key has no value"; deliberately short-lived.
    private static final String NULL_VALUE_PREFIX = "NULL:";
    private static final Duration NULL_VALUE_TTL = Duration.ofMinutes(5);

    /**
     * Reads through the cache with null-marker protection.
     * The marker test now requires the cached value to actually be a String —
     * previously any object whose toString() happened to start with "NULL:"
     * was misread as a null marker.
     * NOTE(review): a legitimate String value starting with "NULL:" still
     * collides with the marker; use a dedicated sentinel object if such values
     * can occur.
     *
     * @return the cached or loaded value, or null when the key has no value
     */
    public <T> T getWithProtection(String key, Class<T> type, Supplier<T> loader) {
        // 1. Cache lookup.
        Object cached = redisTemplate.opsForValue().get(key);
        if (cached != null) {
            if (cached instanceof String && ((String) cached).startsWith(NULL_VALUE_PREFIX)) {
                // Null-marker hit: the key is known to be absent.
                log.debug("缓存空值标记: key={}", key);
                return null;
            }
            return type.cast(cached);
        }
        // 2. Load from the database.
        T value = loader.get();
        // 3. Cache the result — absent keys get a short-lived null marker so
        //    the next miss is answered from the cache.
        if (value != null) {
            redisTemplate.opsForValue().set(key, value, Duration.ofMinutes(30));
        } else {
            redisTemplate.opsForValue().set(key, NULL_VALUE_PREFIX + System.currentTimeMillis(), NULL_VALUE_TTL);
        }
        return value;
    }
}
// Cache-breakdown guard: when a hot key expires, only one caller rebuilds it
// (mutex via a distributed lock); everyone else finds the refreshed entry.
@Component
@Slf4j
public class CacheBreakdownProtection {

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    @Autowired
    private DistributedLockService lockService;

    /**
     * Read-through with a per-key mutex.
     *
     * @param key        cache key
     * @param type       expected value type for the cast
     * @param loader     database loader; called by at most one thread per expiry
     * @param expireTime TTL applied to the rebuilt cache entry
     */
    public <T> T getWithMutexLock(String key, Class<T> type, Supplier<T> loader, Duration expireTime) {
        // 1. Plain cache lookup.
        Object cached = redisTemplate.opsForValue().get(key);
        if (cached != null) {
            log.debug("缓存命中: key={}", key);
            return type.cast(cached);
        }
        // 2. Acquire the per-key distributed lock (10 — presumably seconds;
        //    confirm the unit with DistributedLockService).
        String lockKey = "mutex:" + key;
        DistributedLock lock = null;
        try {
            lock = lockService.acquireLock(lockKey, 10);
            // 3. Double-check: another thread may have rebuilt the entry while
            //    this one waited for the lock.
            cached = redisTemplate.opsForValue().get(key);
            if (cached != null) {
                log.debug("双重检查缓存命中: key={}", key);
                return type.cast(cached);
            }
            // 4. We hold the lock and the key is still missing: hit the DB.
            log.debug("查询数据库: key={}", key);
            T value = loader.get();
            // 5. Repopulate the cache (null results are not cached here —
            //    penetration protection is a separate concern).
            if (value != null) {
                redisTemplate.opsForValue().set(key, value, expireTime);
            }
            return value;
        } finally {
            if (lock != null) {
                lockService.releaseLock(lock);
            }
        }
    }
}
// Cache-avalanche guard: spreads expiry times so keys written together do not
// all expire in the same instant.
@Component
@Slf4j
public class CacheAvalancheProtection {

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    /**
     * Stores a value whose TTL is the base expiry jittered by up to roughly
     * ±20%, clamped to at least one second.
     */
    public <T> void putWithRandomExpire(String key, T value, Duration baseExpireTime) {
        long base = baseExpireTime.getSeconds();
        // (Math.random() - 0.5) * 2 lies in [-1, 1), so the jitter spans about
        // ±20% of the base TTL.
        long jitter = (long) (base * 0.2 * (Math.random() - 0.5) * 2);
        Duration ttl = Duration.ofSeconds(Math.max(1, base + jitter));
        redisTemplate.opsForValue().set(key, value, ttl);
        log.debug("设置缓存(随机过期时间): key={}, expire={}s", key, ttl.getSeconds());
    }
}
3.2 缓存预热与更新策略
缓存预热与更新:
java
// Warms hot data into Redis at application startup so the first requests do
// not all stampede the database.
@Service
@Slf4j
public class CacheWarmupService {

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    @Autowired
    private UserService userService;

    /** Kicks off the warmup asynchronously right after bean construction. */
    @PostConstruct
    public void warmup() {
        log.info("开始缓存预热...");
        CompletableFuture.runAsync(() -> {
            try {
                // Warm hot user data, then static configuration data.
                warmupHotUsers();
                warmupConfigData();
                log.info("缓存预热完成");
            } catch (Exception e) {
                log.error("缓存预热失败", e);
            }
        });
    }

    /**
     * Caches each hot user for one hour. Skips users that resolve to null
     * (storing null in Redis would throw) and isolates per-user failures so
     * one bad record no longer aborts the rest of the warmup loop.
     */
    private void warmupHotUsers() {
        List<Long> hotUserIds = getHotUserIds();
        int warmed = 0;
        for (Long userId : hotUserIds) {
            try {
                Object userData = userService.getUserById(userId);
                if (userData != null) {
                    redisTemplate.opsForValue().set("user:" + userId, userData, Duration.ofHours(1));
                    warmed++;
                }
            } catch (Exception e) {
                log.warn("预热用户数据失败: userId={}", userId, e);
            }
        }
        log.info("预热热点用户数据: count={}", warmed);
    }

    // Stub: warm static configuration data.
    private void warmupConfigData() {
    }

    // Stub: hot user ids would normally come from analytics / access metrics.
    private List<Long> getHotUserIds() {
        return Arrays.asList(1L, 2L, 3L, 4L, 5L);
    }
}
// Three cache/database write strategies with different consistency and
// latency trade-offs.
@Service
@Slf4j
public class CacheUpdateStrategy {

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    /**
     * Cache-Aside: update the database first, then invalidate the cache entry;
     * the next read repopulates it.
     * NOTE(review): the {@code value} parameter is unused — the new value comes
     * from {@code dbUpdater}; consider removing it from the signature.
     */
    public <T> void updateWithCacheAside(String key, T value, Supplier<T> dbUpdater) {
        // 1. Update the database.
        T updatedValue = dbUpdater.get();
        // 2. Drop the now-stale cache entry.
        redisTemplate.delete(key);
        log.info("Cache Aside更新: key={}", key);
    }

    /**
     * Write-Through: update the cache synchronously, then the database.
     * NOTE(review): if the DB update fails after the cache write, the cache
     * serves a value the database never received until the TTL expires.
     */
    public <T> void updateWithWriteThrough(String key, T value, Duration expireTime,
                                           Supplier<T> dbUpdater) {
        // 1. Update the cache.
        redisTemplate.opsForValue().set(key, value, expireTime);
        // 2. Update the database.
        dbUpdater.get();
        log.info("Write Through更新: key={}", key);
    }

    /**
     * Write-Behind: update the cache now, persist to the database
     * asynchronously — lowest latency, weakest durability (a failed async
     * update is only logged).
     */
    public <T> void updateWithWriteBehind(String key, T value, Duration expireTime,
                                          Supplier<T> dbUpdater) {
        // 1. Update the cache immediately.
        redisTemplate.opsForValue().set(key, value, expireTime);
        // 2. Persist asynchronously (common ForkJoinPool).
        CompletableFuture.runAsync(() -> {
            try {
                dbUpdater.get();
                log.info("Write Behind异步更新完成: key={}", key);
            } catch (Exception e) {
                log.error("Write Behind异步更新失败: key={}", key, e);
            }
        });
    }
}
4. 消息队列深度应用
4.1 事务消息
事务消息实现:
java
// RocketMQ transactional-message service: a half message is sent first, the
// local transaction runs, and the broker commits or rolls back based on the
// listener's verdict — with a check-back path for lost verdicts.
@Service
@Slf4j
public class TransactionalMessageService {

    @Autowired
    private RocketMQTemplate rocketMQTemplate;

    @Autowired
    private TransactionLogMapper transactionLogMapper;

    /**
     * Sends a transactional message. The local transaction (passed as arg) is
     * executed by the listener below after the broker accepts the half message.
     */
    public void sendTransactionalMessage(String topic, String tag, Object payload,
                                         Runnable localTransaction) {
        TransactionalMessage message = new TransactionalMessage();
        message.setTopic(topic);
        message.setTag(tag);
        message.setPayload(payload);
        message.setTransactionId(UUID.randomUUID().toString());
        // 1. Send the half (prepared) message; the broker withholds delivery
        //    until the local transaction outcome is known.
        TransactionSendResult sendResult = rocketMQTemplate.sendMessageInTransaction(
                topic,
                MessageBuilder.withPayload(payload)
                        .setHeader(RocketMQHeaders.TRANSACTION_ID, message.getTransactionId())
                        .build(),
                localTransaction
        );
        log.info("发送事务消息: transactionId={}, sendResult={}",
                message.getTransactionId(), sendResult.getSendStatus());
    }

    // Executes the local transaction and answers broker check-backs.
    @RocketMQTransactionListener
    public class LocalTransactionListener implements RocketMQLocalTransactionListener {

        @Override
        @Transactional(rollbackFor = Exception.class)
        public RocketMQLocalTransactionState executeLocalTransaction(Message msg, Object arg) {
            try {
                // Run the caller-supplied local transaction.
                Runnable localTransaction = (Runnable) arg;
                localTransaction.run();
                // Persist the outcome so checkLocalTransaction can answer later.
                String transactionId = (String) msg.getHeaders().get(RocketMQHeaders.TRANSACTION_ID);
                saveTransactionLog(transactionId, "COMMIT");
                log.info("本地事务提交: transactionId={}", transactionId);
                return RocketMQLocalTransactionState.COMMIT;
            } catch (Exception e) {
                log.error("本地事务执行失败", e);
                return RocketMQLocalTransactionState.ROLLBACK;
            }
        }

        /**
         * Broker check-back: decides from the persisted transaction log.
         * The local variable was renamed from {@code log} to {@code txLog} —
         * it shadowed the class's SLF4J logger, which would silently break any
         * logging added to this method.
         */
        @Override
        public RocketMQLocalTransactionState checkLocalTransaction(Message msg) {
            String transactionId = (String) msg.getHeaders().get(RocketMQHeaders.TRANSACTION_ID);
            TransactionLog txLog = transactionLogMapper.selectByTransactionId(transactionId);
            if (txLog != null && "COMMIT".equals(txLog.getStatus())) {
                return RocketMQLocalTransactionState.COMMIT;
            } else {
                return RocketMQLocalTransactionState.ROLLBACK;
            }
        }
    }

    // Records the transaction outcome for later check-backs.
    private void saveTransactionLog(String transactionId, String status) {
        TransactionLog transactionLog = new TransactionLog();
        transactionLog.setTransactionId(transactionId);
        transactionLog.setStatus(status);
        transactionLog.setCreateTime(new Date());
        transactionLogMapper.insert(transactionLog);
    }
}
// Persisted outcome of a local transaction, used to answer RocketMQ
// transaction check-backs.
@Data
public class TransactionLog {
    private Long id;
    // Correlates with the TRANSACTION_ID message header.
    private String transactionId;
    // "COMMIT" when the local transaction succeeded.
    private String status;
    private Date createTime;
}
4.2 顺序消息与延时消息
顺序消息和延时消息:
java
// Ordered messaging: messages sharing a hash key (the order id) land in the
// same queue and are consumed one at a time in FIFO order.
@Service
@Slf4j
public class SequentialMessageService {

    @Autowired
    private RocketMQTemplate rocketMQTemplate;

    /**
     * Sends an ordered message. The orderId is the queue-selection hash key,
     * so all messages of one order go to the same queue and keep their
     * relative order. The last argument is the send timeout in milliseconds
     * (3 seconds here).
     */
    public void sendSequentialMessage(String topic, String orderId, Object payload) {
        rocketMQTemplate.syncSendOrderly(
                topic,
                payload,
                orderId, // queue-selection hash key
                TimeUnit.SECONDS.toMillis(3)
        );
        log.info("发送顺序消息: topic={}, orderId={}", topic, orderId);
    }

    // Consumer bound to ConsumeMode.ORDERLY so messages within one queue are
    // processed sequentially, preserving per-order ordering.
    @RocketMQMessageListener(
            topic = "order-topic",
            consumerGroup = "order-sequential-consumer",
            consumeMode = ConsumeMode.ORDERLY // sequential consumption mode
    )
    public class SequentialMessageConsumer implements RocketMQListener<OrderMessage> {

        @Override
        public void onMessage(OrderMessage message) {
            log.info("顺序消费消息: orderId={}, messageId={}",
                    message.getOrderId(), message.getMessageId());
            // Process order messages in arrival order.
            processOrderMessage(message);
        }

        // Stub: per-order business processing.
        private void processOrderMessage(OrderMessage message) {
        }
    }
}
// Delayed / scheduled messaging built on RocketMQ's fixed delay levels.
@Service
@Slf4j
public class DelayedMessageService {

    @Autowired
    private RocketMQTemplate rocketMQTemplate;

    /**
     * Sends a message the broker delivers after the given delay level.
     * RocketMQ's fixed levels: 1s 5s 10s 30s 1m 2m 3m 4m 5m 6m 7m 8m 9m 10m
     * 20m 30m 1h 2h (level 1 = 1s, level 2 = 5s, ...).
     */
    public void sendDelayedMessage(String topic, Object payload, int delayLevel) {
        Message<Object> message = MessageBuilder.withPayload(payload)
                .setHeader(RocketMQHeaders.DELAY_LEVEL, delayLevel)
                .build();
        rocketMQTemplate.syncSend(topic, message);
        log.info("发送延时消息: topic={}, delayLevel={}", topic, delayLevel);
    }

    /**
     * "Scheduled" send: converts a cron expression into a delay in seconds,
     * then maps that onto the closest delay level.
     * NOTE(review): parseCronToDelay is a stub returning a fixed 60s — real
     * cron parsing is required before this is usable.
     */
    public void sendScheduledMessage(String topic, Object payload, String cronExpression) {
        int delaySeconds = parseCronToDelay(cronExpression);
        int delayLevel = calculateDelayLevel(delaySeconds);
        sendDelayedMessage(topic, payload, delayLevel);
    }

    // Stub: should compute the seconds until the cron expression's next fire time.
    private int parseCronToDelay(String cronExpression) {
        return 60; // fixed: 60 seconds from now
    }

    // Maps a delay in seconds to the nearest delay level. Levels above one
    // minute are not mapped yet — everything longer caps at level 5 (1m).
    private int calculateDelayLevel(int delaySeconds) {
        if (delaySeconds <= 1) return 1;
        if (delaySeconds <= 5) return 2;
        if (delaySeconds <= 10) return 3;
        if (delaySeconds <= 30) return 4;
        if (delaySeconds <= 60) return 5;
        // ... further levels omitted
        return 5;
    }
}
5. 性能监控与分析
5.1 APM应用性能监控
APM监控实现:
java
// APM aspect: records Micrometer timers and tracing spans around annotated
// methods and repository calls.
@Aspect
@Component
@Slf4j
public class APMMonitoringAspect {

    @Autowired
    private MeterRegistry meterRegistry;

    @Autowired
    private Tracer tracer;

    /**
     * Times any @Monitored method and wraps it in a tracing span.
     * Now catches Throwable rather than Exception: proceed() is declared to
     * throw Throwable, so Errors previously escaped without the timer being
     * stopped or the error status recorded.
     */
    @Around("@annotation(Monitored)")
    public Object monitorMethod(ProceedingJoinPoint joinPoint) throws Throwable {
        String methodName = joinPoint.getSignature().toShortString();
        Timer.Sample sample = Timer.start(meterRegistry);
        Span span = tracer.nextSpan().name(methodName).start();
        try (Tracer.SpanInScope ws = tracer.withSpanInScope(span)) {
            Object result = joinPoint.proceed();
            sample.stop(Timer.builder("method.execution.time")
                    .tag("method", methodName)
                    .tag("status", "success")
                    .register(meterRegistry));
            span.tag("status", "success");
            return result;
        } catch (Throwable e) {
            sample.stop(Timer.builder("method.execution.time")
                    .tag("method", methodName)
                    .tag("status", "error")
                    .tag("error.type", e.getClass().getSimpleName())
                    .register(meterRegistry));
            span.tag("status", "error");
            span.tag("error", e.getMessage());
            throw e;
        } finally {
            // End the span on both success and failure paths.
            span.end();
        }
    }

    /**
     * Times every repository method and counts queries.
     * Also widened from Exception to Throwable for the same reason as above.
     */
    @Around("execution(* *..*Repository.*(..))")
    public Object monitorDatabase(ProceedingJoinPoint joinPoint) throws Throwable {
        String methodName = joinPoint.getSignature().toShortString();
        Timer.Sample sample = Timer.start(meterRegistry);
        // Building per call is fine: the registry returns the existing meter
        // for an identical name + tags rather than creating a duplicate.
        Counter queriesCounter = Counter.builder("database.queries")
                .tag("method", methodName)
                .register(meterRegistry);
        try {
            Object result = joinPoint.proceed();
            queriesCounter.increment();
            sample.stop(Timer.builder("database.query.time")
                    .tag("method", methodName)
                    .tag("status", "success")
                    .register(meterRegistry));
            return result;
        } catch (Throwable e) {
            sample.stop(Timer.builder("database.query.time")
                    .tag("method", methodName)
                    .tag("status", "error")
                    .register(meterRegistry));
            throw e;
        }
    }
}
// Business-level Micrometer metrics: order counter, active-user gauge, and
// request-duration timer.
@Component
@Slf4j
public class CustomMetricsCollector {

    // Constructor-injected. The previous redundant @Autowired field injection
    // (which re-assigned the same bean after construction) was removed and the
    // field made final.
    private final MeterRegistry meterRegistry;

    private final Counter orderCounter;
    private final Gauge activeUsersGauge;
    private final Timer requestTimer;

    public CustomMetricsCollector(MeterRegistry meterRegistry) {
        this.meterRegistry = meterRegistry;
        // Total number of orders processed.
        this.orderCounter = Counter.builder("orders.total")
                .description("订单总数")
                .register(meterRegistry);
        // Gauge polls getActiveUserCount() on scrape; it keeps a strong
        // reference to this bean.
        this.activeUsersGauge = Gauge.builder("users.active",
                this, obj -> obj.getActiveUserCount())
                .description("活跃用户数")
                .register(meterRegistry);
        // Request latency distribution.
        this.requestTimer = Timer.builder("requests.duration")
                .description("请求耗时")
                .register(meterRegistry);
    }

    /** Increments the processed-order counter. */
    public void incrementOrderCount() {
        orderCounter.increment();
    }

    // Stub: should read the real active-user count from cache or DB.
    private double getActiveUserCount() {
        return 100.0;
    }

    /** Records one request's duration into the timer. */
    public void recordRequestDuration(Duration duration) {
        requestTimer.record(duration);
    }
}
5.2 链路追踪
分布式链路追踪:
java
// Thin wrapper over the Tracer API: span lifecycle helpers plus sync/async
// tracing decorators.
@Component
@Slf4j
public class TracingUtil {

    @Autowired
    private Tracer tracer;

    /** Starts a new span with the given operation name. */
    public Span startSpan(String operationName) {
        return tracer.nextSpan().name(operationName).start();
    }

    /** Adds a key/value tag to the span. */
    public void tag(Span span, String key, String value) {
        span.tag(key, value);
    }

    /** Records a timestamped event on the span. */
    public void logEvent(Span span, String event) {
        span.event(event);
    }

    /**
     * Runs the supplier inside a span; failures are tagged and the span always
     * ends.
     */
    public <T> T trace(String operationName, Supplier<T> supplier) {
        Span span = startSpan(operationName);
        try (Tracer.SpanInScope ws = tracer.withSpanInScope(span)) {
            return supplier.get();
        } catch (Exception e) {
            span.tag("error", e.getMessage());
            throw e;
        } finally {
            span.end();
        }
    }

    /**
     * Async variant: the span stays open until the returned future completes.
     * Bug fix: if {@code supplier.get()} throws synchronously, the span is now
     * tagged and ended before the exception propagates — previously it leaked
     * (no span.end() was ever reached on that path).
     */
    public <T> CompletableFuture<T> traceAsync(String operationName,
                                               Supplier<CompletableFuture<T>> supplier) {
        Span span = startSpan(operationName);
        try (Tracer.SpanInScope ws = tracer.withSpanInScope(span)) {
            return supplier.get()
                    .whenComplete((result, throwable) -> {
                        if (throwable != null) {
                            span.tag("error", throwable.getMessage());
                        }
                        span.end();
                    });
        } catch (Exception e) {
            // Synchronous failure: close the span ourselves, then rethrow.
            span.tag("error", e.getMessage());
            span.end();
            throw e;
        }
    }
}
// Example of span-per-call tracing around an outbound HTTP request.
@Service
@Slf4j
public class TracedService {

    @Autowired
    private TracingUtil tracingUtil;

    @Autowired
    private RestTemplate restTemplate;

    /**
     * Calls a remote HTTP endpoint with tracing: an outer span wraps the whole
     * operation (via tracingUtil.trace) while an inner, manually managed span
     * carries the HTTP-specific tags.
     * NOTE(review): http.status is hard-coded to "200"/"500" instead of taken
     * from the actual response; use exchange()/ResponseEntity if the real
     * status code matters.
     */
    public String callRemoteService(String url) {
        return tracingUtil.trace("remote-service-call", () -> {
            Span span = tracingUtil.startSpan("http-call");
            try {
                tracingUtil.tag(span, "http.url", url);
                tracingUtil.tag(span, "http.method", "GET");
                String result = restTemplate.getForObject(url, String.class);
                tracingUtil.tag(span, "http.status", "200");
                tracingUtil.logEvent(span, "http-call-success");
                return result;
            } catch (Exception e) {
                tracingUtil.tag(span, "http.status", "500");
                tracingUtil.tag(span, "error", e.getMessage());
                throw e;
            } finally {
                // Close the inner span regardless of outcome.
                span.end();
            }
        });
    }
}
6. 系统容量规划
6.1 压力测试
压力测试工具:
java
// Simple in-process load-test harness around UserService.
@Service
@Slf4j
public class LoadTestService {

    @Autowired
    private UserService userService;

    @Autowired
    private MeterRegistry meterRegistry;

    /**
     * Fires totalRequests requests from a pool of concurrentUsers threads and
     * aggregates latency/throughput statistics.
     *
     * Fixes over the previous version:
     * - statistics are computed on a snapshot copy, so stragglers still
     *   recording after the await timeout cannot mutate the list while it is
     *   being sorted;
     * - average/percentile handle the all-requests-failed (empty) case instead
     *   of throwing IndexOutOfBoundsException;
     * - the pool is shut down with shutdownNow so straggler tasks are
     *   interrupted once the run is over;
     * - throughput no longer divides by zero on a sub-millisecond run.
     */
    public LoadTestResult performLoadTest(int concurrentUsers, int totalRequests,
                                          Duration testDuration) {
        log.info("开始压力测试: concurrentUsers={}, totalRequests={}, duration={}",
                concurrentUsers, totalRequests, testDuration);
        ExecutorService executor = Executors.newFixedThreadPool(concurrentUsers);
        CountDownLatch latch = new CountDownLatch(totalRequests);
        Timer.Sample sample = Timer.start(meterRegistry);
        AtomicInteger successCount = new AtomicInteger(0);
        AtomicInteger errorCount = new AtomicInteger(0);
        List<Long> responseTimes = Collections.synchronizedList(new ArrayList<>());
        long startTime = System.currentTimeMillis();
        for (int i = 0; i < totalRequests; i++) {
            executor.submit(() -> {
                try {
                    long requestStart = System.currentTimeMillis();
                    // The workload under test: a random-user lookup.
                    userService.getUserById((long) (Math.random() * 1000));
                    responseTimes.add(System.currentTimeMillis() - requestStart);
                    successCount.incrementAndGet();
                } catch (Exception e) {
                    errorCount.incrementAndGet();
                    log.error("压力测试请求失败", e);
                } finally {
                    latch.countDown();
                }
            });
        }
        try {
            boolean completed = latch.await(testDuration.toMillis(), TimeUnit.MILLISECONDS);
            long totalTime = System.currentTimeMillis() - startTime;
            sample.stop(Timer.builder("load.test.total.time").register(meterRegistry));
            // Snapshot under the list's lock: late-arriving samples are
            // excluded from the statistics instead of racing the sort below.
            List<Long> snapshot;
            synchronized (responseTimes) {
                snapshot = new ArrayList<>(responseTimes);
            }
            LoadTestResult result = new LoadTestResult();
            result.setTotalRequests(totalRequests);
            result.setSuccessRequests(successCount.get());
            result.setErrorRequests(errorCount.get());
            result.setTotalTime(Duration.ofMillis(totalTime));
            result.setAverageResponseTime(calculateAverage(snapshot));
            result.setP95ResponseTime(calculatePercentile(snapshot, 95));
            result.setP99ResponseTime(calculatePercentile(snapshot, 99));
            result.setThroughput(totalTime > 0
                    ? successCount.get() / (totalTime / 1000.0)
                    : successCount.get());
            result.setCompleted(completed);
            log.info("压力测试完成: {}", result);
            return result;
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException("压力测试被中断", e);
        } finally {
            // Interrupt any stragglers; no further results are needed.
            executor.shutdownNow();
        }
    }

    // Mean latency in ms; 0.0 when there are no samples.
    private double calculateAverage(List<Long> values) {
        return values.stream().mapToLong(Long::longValue).average().orElse(0.0);
    }

    // Nearest-rank percentile (sorts in place); 0 when there are no samples.
    private long calculatePercentile(List<Long> values, int percentile) {
        if (values.isEmpty()) {
            return 0L;
        }
        Collections.sort(values);
        int index = (int) Math.ceil((percentile / 100.0) * values.size()) - 1;
        return values.get(Math.max(0, index));
    }
}
// Aggregated outcome of one load-test run, populated by LoadTestService.
@Data
public class LoadTestResult {
private int totalRequests; // requests submitted
private int successRequests; // requests that returned without exception
private int errorRequests; // requests that threw
private Duration totalTime; // wall-clock time of the whole run
private double averageResponseTime; // mean latency in milliseconds
private long p95ResponseTime; // 95th-percentile latency in milliseconds
private long p99ResponseTime; // 99th-percentile latency in milliseconds
private double throughput; // successful requests per second
private boolean completed; // false if the wait timed out before all requests finished
}
6.2 容量评估与弹性伸缩
容量评估与弹性伸缩:
java
// Capacity-assessment service: samples CPU, memory, DB connections, QPS and
// response time, and rolls them up into an overall capacity verdict.
@Service
@Slf4j
public class CapacityAssessmentService {
    @Autowired
    private MeterRegistry meterRegistry;
    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    /**
     * Assesses current system capacity across all tracked dimensions.
     *
     * @return a populated {@link CapacityAssessment}
     */
    public CapacityAssessment assessCapacity() {
        CapacityAssessment assessment = new CapacityAssessment();
        // 1. CPU utilization
        double cpuUsage = getCpuUsage();
        assessment.setCpuUsage(cpuUsage);
        assessment.setCpuCapacity(getCpuCapacity(cpuUsage));
        // 2. Heap memory utilization
        double memoryUsage = getMemoryUsage();
        assessment.setMemoryUsage(memoryUsage);
        assessment.setMemoryCapacity(getMemoryCapacity(memoryUsage));
        // 3. Database connection-pool utilization
        int dbConnectionUsage = getDatabaseConnectionUsage();
        assessment.setDatabaseConnectionUsage(dbConnectionUsage);
        assessment.setDatabaseConnectionCapacity(getDatabaseConnectionCapacity(dbConnectionUsage));
        // 4. Queries per second
        double qps = getCurrentQPS();
        assessment.setCurrentQPS(qps);
        assessment.setMaxQPS(getMaxQPS());
        // 5. Response time
        double avgResponseTime = getAverageResponseTime();
        assessment.setAverageResponseTime(avgResponseTime);
        assessment.setResponseTimeCapacity(getResponseTimeCapacity(avgResponseTime));
        // 6. Overall verdict
        assessment.setOverallCapacity(calculateOverallCapacity(assessment));
        log.info("容量评估完成: {}", assessment);
        return assessment;
    }

    // Process CPU load as a percentage (0-100). getProcessCpuLoad() is
    // declared on com.sun.management.OperatingSystemMXBean, NOT on the
    // java.lang.management interface — the original unqualified call did not
    // compile. The method may also return -1 before a reading is available.
    private double getCpuUsage() {
        java.lang.management.OperatingSystemMXBean os =
                ManagementFactory.getOperatingSystemMXBean();
        if (os instanceof com.sun.management.OperatingSystemMXBean) {
            double load = ((com.sun.management.OperatingSystemMXBean) os).getProcessCpuLoad();
            return load < 0 ? 0.0 : load * 100; // -1 means "not yet available"
        }
        return 0.0; // JVM without the com.sun.management extension
    }

    private String getCpuCapacity(double usage) {
        if (usage < 50) return "充足";
        if (usage < 80) return "正常";
        return "不足";
    }

    // Heap utilization as a percentage; getMax() can be -1 (undefined),
    // in which case we report 0 rather than a negative percentage.
    private double getMemoryUsage() {
        MemoryMXBean memoryBean = ManagementFactory.getMemoryMXBean();
        long used = memoryBean.getHeapMemoryUsage().getUsed();
        long max = memoryBean.getHeapMemoryUsage().getMax();
        if (max <= 0) {
            return 0.0;
        }
        return (double) used / max * 100;
    }

    private String getMemoryCapacity(double usage) {
        if (usage < 70) return "充足";
        if (usage < 90) return "正常";
        return "不足";
    }

    private int getDatabaseConnectionUsage() {
        // Placeholder: real implementation would read HikariCP pool metrics.
        return 10;
    }

    private String getDatabaseConnectionCapacity(int usage) {
        int maxConnections = 20; // pool maximum
        double usagePercent = (double) usage / maxConnections * 100;
        if (usagePercent < 50) return "充足";
        if (usagePercent < 80) return "正常";
        return "不足";
    }

    private double getCurrentQPS() {
        // Placeholder: real implementation would read monitoring metrics.
        return 100.0;
    }

    private double getMaxQPS() {
        // Maximum QPS the system is rated for.
        return 1000.0;
    }

    private double getAverageResponseTime() {
        // Placeholder: real implementation would read monitoring metrics (ms).
        return 50.0;
    }

    private String getResponseTimeCapacity(double responseTime) {
        if (responseTime < 100) return "充足";
        if (responseTime < 500) return "正常";
        return "不足";
    }

    // Scale-out is required if any dimension is near its limit.
    private String calculateOverallCapacity(CapacityAssessment assessment) {
        if (assessment.getCpuUsage() > 80 ||
            assessment.getMemoryUsage() > 90 ||
            assessment.getCurrentQPS() / assessment.getMaxQPS() > 0.9) {
            return "需要扩容";
        }
        return "容量充足";
    }
}
// Elastic-scaling service: periodically re-evaluates capacity and scales
// the deployment out or in accordingly.
@Service
@Slf4j
public class AutoScalingService {
    @Autowired
    private CapacityAssessmentService capacityAssessmentService;

    /** Runs once a minute: scale out when capacity is exhausted, in when idle. */
    @Scheduled(fixedRate = 60000)
    public void checkAndScale() {
        CapacityAssessment assessment = capacityAssessmentService.assessCapacity();
        if ("需要扩容".equals(assessment.getOverallCapacity())) {
            scaleOut();
            return;
        }
        if (canScaleIn(assessment)) {
            scaleIn();
        }
    }

    // Adds replicas via the orchestrator, e.g.
    // `kubectl scale deployment user-service --replicas=5`.
    private void scaleOut() {
        log.info("执行扩容操作");
        // 调用Kubernetes API或云服务API进行扩容
        // kubectl scale deployment user-service --replicas=5
    }

    // Removes replicas via the orchestrator / cloud API.
    private void scaleIn() {
        log.info("执行缩容操作");
        // 调用Kubernetes API或云服务API进行缩容
    }

    // Eligible for scale-in only when CPU, memory and QPS are all well below
    // their thresholds.
    private boolean canScaleIn(CapacityAssessment assessment) {
        double qpsRatio = assessment.getCurrentQPS() / assessment.getMaxQPS();
        return assessment.getCpuUsage() < 30
                && assessment.getMemoryUsage() < 50
                && qpsRatio < 0.3;
    }
}
// Snapshot of a capacity assessment, produced by CapacityAssessmentService.
@Data
public class CapacityAssessment {
private double cpuUsage; // process CPU load, percent
private String cpuCapacity; // verdict string: 充足 / 正常 / 不足
private double memoryUsage; // heap utilization, percent
private String memoryCapacity; // verdict string: 充足 / 正常 / 不足
private int databaseConnectionUsage; // connections currently in use
private String databaseConnectionCapacity; // verdict string: 充足 / 正常 / 不足
private double currentQPS; // observed queries per second
private double maxQPS; // rated maximum queries per second
private double averageResponseTime; // mean latency, milliseconds
private String responseTimeCapacity; // verdict string: 充足 / 正常 / 不足
private String overallCapacity; // rollup: 需要扩容 or 容量充足
}