SaaS点餐平台难点详细解决方案
一、多租户架构核心解决方案
1.1 动态数据源管理 - 三级缓存连接池方案
问题痛点:
- 传统方案每个租户独立连接池,内存消耗大
- 连接数随租户增长而线性增加
- 租户冷启动延迟高
优化方案:
java
@Component
public class DynamicDataSourceManager {

    // Tier 1: hot-tenant pools — bounded LRU, expired after 30 min without access.
    private final LoadingCache<String, DataSource> hotTenantCache = CacheBuilder.newBuilder()
            .maximumSize(100)                        // cache at most 100 active tenants
            .expireAfterAccess(30, TimeUnit.MINUTES)
            // Close evicted Hikari pools: the original silently dropped them,
            // leaking their connections and housekeeping threads.
            .removalListener((RemovalListener<String, DataSource>) notification -> {
                DataSource evicted = notification.getValue();
                if (evicted instanceof HikariDataSource) {
                    ((HikariDataSource) evicted).close();
                }
            })
            .build(new CacheLoader<String, DataSource>() {
                @Override
                public DataSource load(String tenantId) {
                    return createDataSource(tenantId);
                }
            });

    // Tier 2: weakly-referenced pools for low-traffic tenants; entries become
    // collectable once no caller holds the DataSource.
    private final Map<String, WeakReference<DataSource>> sharedPool =
            new ConcurrentHashMap<>();

    /**
     * Resolves the tenant's DataSource via the three-tier strategy:
     * hot LRU cache -> weak shared pool -> create on demand (with delayed cleanup).
     */
    public DataSource getDataSource(String tenantId) {
        // 1. Active tenants (accessed in the last 30 min) come from the LRU cache.
        //    getUnchecked avoids the checked ExecutionException of LoadingCache.get.
        if (tenantActivityService.isActive(tenantId)) {
            return hotTenantCache.getUnchecked(tenantId);
        }
        // 2. Shared pool. Dereference the WeakReference exactly once: the original
        //    called ref.get() twice, and GC could clear it between the null check
        //    and the return, yielding a null DataSource to the caller.
        WeakReference<DataSource> ref = sharedPool.get(tenantId);
        if (ref != null) {
            DataSource cached = ref.get();
            if (cached != null) {
                return cached;
            }
        }
        // 3. Create on demand. NOTE(review): two threads may race here and each
        //    build a pool; the loser's pool is only reclaimed by the cleanup task —
        //    confirm scheduleConnectionCleanup closes it.
        DataSource ds = createDataSource(tenantId);
        sharedPool.put(tenantId, new WeakReference<>(ds));
        // 4. Auto-close after 30 minutes without access.
        scheduleConnectionCleanup(tenantId, ds);
        return ds;
    }

    /** Builds a small dedicated Hikari pool for the tenant's database. */
    private DataSource createDataSource(String tenantId) {
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl("jdbc:mysql://localhost:3306/tenant_" + tenantId);
        config.setMaximumPoolSize(5);        // keep per-tenant footprint small
        config.setIdleTimeout(300000);       // 5 min idle timeout
        config.setConnectionTimeout(30000);  // 30 s borrow timeout
        return new HikariDataSource(config);
    }
}
1.2 跨租户统计查询 - 异步聚合方案
问题: Schema隔离导致跨租户统计查询困难
解决方案:
java
@Service
public class CrossTenantStatisticsService {

    @Autowired
    private TenantRegistry tenantRegistry;

    @Autowired
    private ElasticsearchRestTemplate elasticsearchTemplate;

    /**
     * Option 1: live fan-out — query every active tenant schema in parallel and
     * merge the per-tenant results. Suitable only for a small tenant count.
     * NOTE(review): CompletableFuture::join rethrows if any single tenant query
     * fails, failing the whole aggregation — confirm whether partial results
     * should be tolerated instead.
     */
    public PlatformStats realtimeStats() {
        List<TenantInfo> allTenants = tenantRegistry.getAllActiveTenants();
        // Fan out one async query per tenant on the dedicated stats executor.
        List<CompletableFuture<TenantStats>> futures = allTenants.stream()
                .map(tenant -> CompletableFuture.supplyAsync(() ->
                        queryTenantStats(tenant.getId()), statsExecutor))
                .collect(Collectors.toList());
        // Block until every per-tenant result is in, then merge.
        List<TenantStats> results = futures.stream()
                .map(CompletableFuture::join)
                .collect(Collectors.toList());
        return aggregateStats(results);
    }

    /**
     * Option 2 (preferred): near-real-time aggregation over a unified
     * Elasticsearch index that all tenants' order data is synced into.
     * NOTE(review): NativeSearchQueryBuilder/AggregatedPage belong to older
     * spring-data-elasticsearch APIs (deprecated in 4.x) — verify against the
     * project's dependency version.
     */
    public PlatformStats esStats() {
        SearchQuery searchQuery = new NativeSearchQueryBuilder()
                .withQuery(QueryBuilders.matchAllQuery())
                // Platform-wide sum of order amounts.
                .addAggregation(AggregationBuilders
                        .sum("total_order_amount").field("order_amount"))
                // Distinct-user count; ES cardinality is an approximation.
                .addAggregation(AggregationBuilders
                        .cardinality("distinct_users").field("user_id"))
                .build();
        AggregatedPage<OrderDocument> result = elasticsearchTemplate
                .queryForPage(searchQuery, OrderDocument.class);
        return convertToPlatformStats(result.getAggregations());
    }

    /** Sync path: index every newly created order document into ES. */
    @EventListener
    public void onOrderCreated(OrderCreatedEvent event) {
        OrderDocument doc = convertToDocument(event.getOrder());
        elasticsearchTemplate.save(doc);
    }
}
二、订单系统高并发解决方案
2.1 库存扣减 - 三级库存保护方案
核心问题: 防止超卖,保证性能
java
@Service
public class InventoryService {

    @Autowired
    private RedissonClient redissonClient;

    @Autowired
    private InventoryStockStream stockStream;

    /**
     * Three-tier inventory deduction:
     * 1) atomic Redis check-and-decrement (fail fast, prevents oversell),
     * 2) async DB reconciliation via optimistic locking,
     * 3) scheduled DB-to-Redis resync (see syncInventoryToRedis).
     *
     * @return true when stock was reserved, false when insufficient.
     */
    public boolean deductInventory(Long dishId, Integer quantity) {
        // Tier 1: the Lua script runs atomically on the Redis server, so the
        // read-check-decrement cannot interleave with other clients.
        String redisKey = "inventory:dish:" + dishId;
        String luaScript = """
                local current = redis.call('get', KEYS[1])
                if current and tonumber(current) >= tonumber(ARGV[1]) then
                    return redis.call('decrby', KEYS[1], ARGV[1])
                else
                    return -1
                end
                """;
        // Synchronous eval: the original evalAsync(...).get() blocked anyway,
        // leaked checked exceptions the method did not declare, and passed the
        // key where Redisson expects RScript.Mode as the first argument.
        Long result = redissonClient.getScript().eval(
                RScript.Mode.READ_WRITE,
                luaScript,
                RScript.ReturnType.INTEGER,
                Collections.singletonList(redisKey),
                quantity);
        if (result == null || result < 0) {
            return false; // insufficient stock
        }
        // Tier 2: publish a deduction message; DB is updated asynchronously.
        stockStream.sendDeductMessage(dishId, quantity);
        return true;
    }

    /**
     * Tier-2 consumer: applies the deduction to the database with an
     * optimistic-lock UPDATE; on conflict (0 rows updated) the Redis stock is
     * rolled back and the order is cancelled.
     */
    @StreamListener("stock-input")
    public void processStockDeduct(StockDeductMessage message) {
        int updated = dishMapper.updateStock(
                message.getDishId(),
                message.getQuantity());
        if (updated == 0) {
            // DB says insufficient: undo the Redis decrement from tier 1.
            rollbackRedisStock(message);
            // And cancel the order that relied on the reservation.
            orderService.cancelOrderDueToStock(message.getOrderId());
        }
    }

    /**
     * Tier 3: every minute, push authoritative DB stock for low-stock items
     * back into Redis to heal any drift between the two tiers.
     */
    @Scheduled(fixedDelay = 60000)
    public void syncInventoryToRedis() {
        List<DishStock> stocks = dishMapper.getLowStockItems();
        stocks.forEach(stock -> {
            String key = "inventory:dish:" + stock.getDishId();
            redisClientSet(key, stock);
        });
    }

    /** Writes one stock value into its Redis bucket. */
    private void redisClientSet(String key, DishStock stock) {
        redissonClient.getBucket(key).set(stock.getStock());
    }
}
2.2 订单号生成 - 分布式ID方案
java
@Component
public class OrderIdGenerator {

    /**
     * Leaf-style segmented order id:
     * tenant prefix (2) + timestamp yyMMddHHmmss (12) + daily sequence (6)
     * + random tail (2) = 22 characters.
     * (The original comment claimed a 6-digit timestamp; the pattern actually
     * produces 12 digits.)
     */
    public String generateOrderId(String tenantId) {
        String dateStr = LocalDateTime.now()
                .format(DateTimeFormatter.ofPattern("yyMMddHHmmss"));
        // Per-tenant, per-day sequence via Redis atomic increment.
        String sequenceKey = "order_seq:" + tenantId + ":" +
                LocalDate.now().format(DateTimeFormatter.BASIC_ISO_DATE);
        Long sequence = redisTemplate.opsForValue()
                .increment(sequenceKey, 1);
        // Expire after 48 h so stale daily counters are reclaimed.
        redisTemplate.expire(sequenceKey, 48, TimeUnit.HOURS);
        // Zero-pad to 6 digits; wraps at 1,000,000 per day (the random tail
        // reduces — but does not eliminate — collision risk after a wrap).
        String seqStr = String.format("%06d", sequence % 1000000);
        // Random tail makes sequential ids harder to guess.
        String randomSuffix = String.format("%02d",
                ThreadLocalRandom.current().nextInt(100));
        // Guard: the original substring(0, 2) threw StringIndexOutOfBounds for
        // tenant ids shorter than 2 characters; pad short ids with '0'.
        String prefix = tenantId.length() >= 2
                ? tenantId.substring(0, 2)
                : (tenantId + "00").substring(0, 2);
        return prefix + dateStr + seqStr + randomSuffix;
    }
}
2.3 分表查询优化 - 索引表方案
java
@Service
public class OrderQueryService {

    /**
     * Global order index row: a slim projection of every order across all
     * shards, used to answer cross-shard queries without fanning out first.
     * Declared static so persistence frameworks can instantiate it without an
     * enclosing OrderQueryService instance (the original non-static inner
     * class carried a hidden outer reference).
     */
    @Table(name = "order_index")
    public static class OrderIndex {
        @Id
        private String orderId;
        private String tenantId;
        private String userId;
        private LocalDateTime createTime;
        private BigDecimal amount;
        private String status;
        private String shardKey; // shard routing key: YYYYMM
    }

    /**
     * Complex queries hit the index table first, then hydrate full rows from
     * the sharded order tables.
     */
    public Page<OrderVO> complexQuery(OrderQueryDTO query) {
        // 1. Resolve matching order ids from the index table (paged).
        Page<OrderIndex> indexPage = orderIndexRepository.findByConditions(
                query,
                PageRequest.of(query.getPage(), query.getSize()));
        // 2. Batch-load details from the shard tables.
        List<String> orderIds = indexPage.getContent().stream()
                .map(OrderIndex::getOrderId)
                .collect(Collectors.toList());
        Map<String, OrderDetail> details = batchGetOrderDetails(orderIds);
        // 3. Assemble the result page.
        return assembleResult(indexPage, details);
    }

    /**
     * Loads order details shard by shard, one IN-query per shard.
     * Runs sequentially on the calling thread: the original used
     * parallelStream(), but dynamic datasource routing is typically bound to a
     * ThreadLocal and does not propagate to ForkJoinPool worker threads, so
     * parallel execution would query the wrong datasource.
     */
    private Map<String, OrderDetail> batchGetOrderDetails(List<String> orderIds) {
        // Group ids by their shard key (YYYYMM) so each shard is hit once.
        Map<String, List<String>> shardGroups = orderIds.stream()
                .collect(Collectors.groupingBy(this::extractShardKey));
        return shardGroups.entrySet().stream()
                .flatMap(entry -> {
                    String shardKey = entry.getKey();
                    List<String> ids = entry.getValue();
                    // shardKey is derived from our own order ids, not user
                    // input, so concatenating it into the table name is safe.
                    String sql = "SELECT * FROM orders_" + shardKey +
                            " WHERE order_id IN (:ids)";
                    return orderJdbcTemplate.queryForList(sql,
                            Collections.singletonMap("ids", ids), OrderDetail.class)
                            .stream();
                })
                // Merge function guards against duplicate ids in the input,
                // where the two-arg toMap would throw IllegalStateException.
                .collect(Collectors.toMap(OrderDetail::getOrderId,
                        Function.identity(),
                        (first, second) -> first));
    }
}
三、分布式事务解决方案
3.1 下单支付一体化 - Saga模式实现
java
@Service
public class OrderSagaService {

    @Autowired
    private SagaCoordinator sagaCoordinator;

    /**
     * Orchestrates order creation + payment as a Saga: each compensable step
     * declares the compensation that undoes it if a later step fails.
     */
    public void createOrderWithPayment(CreateOrderRequest request) {
        SagaDefinition saga = SagaBuilder.newSaga("createOrder")
                // Step 1: create the order (compensation: cancel it).
                .activity("createOrder", CreateOrderActivity.class)
                .compensationActivity("cancelOrder", CancelOrderActivity.class)
                // Step 2: pre-deduct inventory (compensation: restore it).
                .activity("preDeductInventory", PreDeductInventoryActivity.class)
                .compensationActivity("restoreInventory", RestoreInventoryActivity.class)
                // Step 3: initiate payment — the pivot step; no compensation
                // is declared, so failures before it roll everything back.
                .activity("createPayment", CreatePaymentActivity.class)
                // Step 4: confirm the pre-deducted inventory.
                .activity("confirmInventory", ConfirmInventoryActivity.class)
                .build();
        sagaCoordinator.execute(saga, request);
    }

    /** Example activity: creates the order inside a local transaction. */
    @Component
    public class CreateOrderActivity implements SagaActivity {

        /**
         * Forward action: persist the order and publish its id into the saga
         * context for later steps (and for compensation).
         */
        @Override
        public SagaResult execute(SagaContext context) {
            try {
                Order order = orderService.createOrder(context.getRequest());
                context.setData("orderId", order.getId());
                return SagaResult.success();
            } catch (Exception e) {
                // Failure aborts the saga and triggers compensation of all
                // previously completed steps.
                return SagaResult.failure(e);
            }
        }

        /** Compensation: cancel the order created by execute. */
        @Override
        public SagaResult compensate(SagaContext context) {
            orderService.cancelOrder(context.getData("orderId"));
            return SagaResult.success();
        }
    }
}
3.2 支付回调幂等 - 多维度幂等方案
java
@RestController
@RequestMapping("/payment")
public class PaymentCallbackController {

    /**
     * Payment-provider callback endpoint with three idempotency layers:
     * 1) distributed lock — serialises concurrent deliveries,
     * 2) unique-index insert on the callback log — rejects replays,
     * 3) order state machine — rejects out-of-order notifications.
     */
    @PostMapping("/callback")
    public String callback(@RequestBody PaymentCallbackDTO dto,
                           HttpServletRequest request) {
        String uniqueKey = buildUniqueKey(dto);
        // Layer 1: wait up to 3 s for the lock; lease auto-expires after 10 s.
        RLock lock = redissonClient.getLock("payment:callback:" + uniqueKey);
        boolean locked = false;
        try {
            locked = lock.tryLock(3, 10, TimeUnit.SECONDS);
            if (!locked) {
                return "processing"; // another node is handling this callback
            }
            // Layer 2: the unique index on the callback log rejects replays.
            try {
                paymentCallbackLogMapper.insert(CallbackLog.of(dto));
            } catch (DuplicateKeyException e) {
                log.info("重复回调,直接返回成功");
                return "success";
            }
            // Layer 3: only a WAITING_PAY order may transition to paid.
            Order order = orderService.getOrder(dto.getOrderNo());
            if (order.getStatus() != OrderStatus.WAITING_PAY) {
                log.warn("订单状态异常:{}", order.getStatus());
                return "success";
            }
            // All guards passed — run the business handling.
            return processPayment(dto, order);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve interrupt status
            return "retry";
        } finally {
            // Unlock only when this thread actually holds the lock. The
            // original unlocked unconditionally, so the tryLock-failure and
            // interrupted paths threw IllegalMonitorStateException.
            if (locked && lock.isHeldByCurrentThread()) {
                lock.unlock();
            }
        }
    }

    /**
     * Combines payment id, order number and amount so that two different
     * notifications can never share a lock/dedup key.
     */
    private String buildUniqueKey(PaymentCallbackDTO dto) {
        return dto.getPaymentId() + ":" +
                dto.getOrderNo() + ":" +
                dto.getAmount();
    }
}
四、实时推送优化方案
4.1 WebSocket集群方案
java
@Component
@ServerEndpoint("/ws/order/{tenantId}/{userId}")
public class OrderWebSocketEndpoint {

    // Connection registry shared across cluster nodes via Redis.
    // NOTE(review): this static field is never assigned in the visible code —
    // @ServerEndpoint instances are created by the WebSocket container, not
    // Spring, so field injection does not apply here; confirm how it is set.
    private static RedisTemplate<String, String> redisTemplate;

    /**
     * Registers the new connection in Redis and subscribes to the user's
     * order-update channel so publishes from any node reach this socket.
     */
    @OnOpen
    public void onOpen(Session session,
                       @PathParam("tenantId") String tenantId,
                       @PathParam("userId") String userId) {
        // Record the session id under a tenant+user key for cluster-wide lookup.
        String connectionKey = "ws:connection:" + tenantId + ":" + userId;
        redisTemplate.opsForValue().set(connectionKey, session.getId());
        // Expire the mapping so crashed clients do not leave zombie entries.
        redisTemplate.expire(connectionKey, 2, TimeUnit.HOURS);
        // Subscribe to the user's private channel.
        // NOTE(review): RedisConnection.subscribe blocks the calling thread for
        // the life of the subscription — consider RedisMessageListenerContainer.
        // Also no @OnClose/@OnError cleanup is visible; confirm the subscription
        // and the Redis key are released on disconnect.
        String channel = "order:update:" + tenantId + ":" + userId;
        redisTemplate.execute(new RedisCallback<Void>() {
            @Override
            public Void doInRedis(RedisConnection connection) {
                connection.subscribe((message, pattern) -> {
                    // Forward each published update to the client asynchronously.
                    session.getAsyncRemote().sendText(message.toString());
                }, channel.getBytes());
                return null;
            }
        });
    }

    /** Publisher side: pushes an order update to one tenant user. */
    @Service
    public class OrderPushService {
        public void pushOrderUpdate(String tenantId, String userId,
                                    OrderUpdate update) {
            String channel = "order:update:" + tenantId + ":" + userId;
            String message = JSON.toJSONString(update);
            // Publish via Redis pub/sub: whichever node holds the connection
            // receives it and forwards over the WebSocket.
            redisTemplate.convertAndSend(channel, message);
            // Local fallback queue in case of transient network problems.
            localPushQueue.add(new PushTask(tenantId, userId, message));
        }
    }
}
五、营销系统复杂计算方案
5.1 优惠计算引擎
java
@Component
public class PromotionCalculator {

    // Drools rule-engine container; rules are loaded/compiled elsewhere.
    private KieContainer kieContainer;

    /**
     * Evaluates all promotion rules against the order context and aggregates
     * the resulting discounts. A fresh KieSession is created per calculation
     * and always disposed to avoid leaking session memory.
     */
    public CalculateResult calculate(OrderContext context) {
        KieSession kieSession = kieContainer.newKieSession();
        try {
            // Insert the facts the rules match on.
            kieSession.insert(context.getOrder());
            kieSession.insert(context.getUser());
            context.getCoupons().forEach(kieSession::insert);
            context.getPromotions().forEach(kieSession::insert);
            // Fire matching rules; each one inserts DiscountResult facts.
            kieSession.fireAllRules();
            // Collect every DiscountResult the rules produced.
            Collection<DiscountResult> results =
                    kieSession.getObjects(new ClassObjectFilter(DiscountResult.class));
            return aggregateResults(results);
        } finally {
            kieSession.dispose();
        }
    }

    // Example Drools rule ("spend 100, save 20").
    // NOTE(review): DRL source cannot live inside a Java class body — this is
    // illustrative only and belongs in a .drl resource file.
    rule "满100减20"
    when
        $order : Order(totalAmount >= 100)
        $promo : Promotion(type == "FULL_REDUCTION", threshold <= 100)
    then
        DiscountResult result = new DiscountResult();
        result.setDiscountAmount(new BigDecimal(20));
        result.setRuleId($promo.getId());
        insert(result);
    end

    /**
     * Splits a total discount across order items in proportion to each item's
     * amount; the last item absorbs the rounding remainder so the allocations
     * sum exactly to totalDiscount.
     * NOTE(review): if earlier HALF_UP roundings overshoot, the last item's
     * share (totalDiscount - allocated) can go slightly negative — confirm
     * downstream tolerates that.
     */
    private Map<Long, BigDecimal> allocateDiscount(Order order,
            BigDecimal totalDiscount) {
        // The order total is the proration base.
        BigDecimal totalAmount = order.getItems().stream()
                .map(OrderItem::getAmount)
                .reduce(BigDecimal.ZERO, BigDecimal::add);
        Map<Long, BigDecimal> allocation = new HashMap<>();
        BigDecimal allocated = BigDecimal.ZERO;
        for (int i = 0; i < order.getItems().size(); i++) {
            OrderItem item = order.getItems().get(i);
            // 4-decimal-place share of this item in the order total.
            BigDecimal ratio = item.getAmount().divide(totalAmount, 4,
                    RoundingMode.HALF_UP);
            BigDecimal itemDiscount = totalDiscount.multiply(ratio);
            // Last item takes whatever is left, correcting rounding drift.
            if (i == order.getItems().size() - 1) {
                itemDiscount = totalDiscount.subtract(allocated);
            }
            allocation.put(item.getId(), itemDiscount);
            allocated = allocated.add(itemDiscount);
        }
        return allocation;
    }
}
六、监控与稳定性保障
6.1 全链路监控
yaml
# application-monitor.yml
management:
endpoints:
web:
exposure:
include: "health,metrics,prometheus"
metrics:
export:
prometheus:
enabled: true
distribution:
  sla:
    "[http.server.requests]": 100ms, 200ms, 500ms, 1s, 2s  # SLA buckets must be keyed by metric name to bind
# 自定义业务指标
@Component
public class OrderMetrics {
private final MeterRegistry registry;
private final Timer orderCreateTimer;
private final Counter paymentErrorCounter;
public OrderMetrics(MeterRegistry registry) {
this.registry = registry;
this.orderCreateTimer = Timer.builder("order.create.duration")
.description("订单创建耗时")
.publishPercentiles(0.5, 0.95, 0.99)
.register(registry);
this.paymentErrorCounter = Counter.builder("payment.error.count")
.description("支付失败次数")
.tag("type", "callback")
.register(registry);
}
@Around("@annotation(MonitorPerformance)")
public Object monitor(ProceedingJoinPoint pjp) throws Throwable {
return orderCreateTimer.record(() -> {
try {
return pjp.proceed();
} catch (Throwable t) {
paymentErrorCounter.increment();
throw t;
}
});
}
}
6.2 熔断降级策略
java
@Configuration
public class ResilienceConfig {

    /**
     * Circuit breaker for the order service: decides over a count-based
     * sliding window of the last 100 calls.
     */
    @Bean
    public CircuitBreakerConfig orderCircuitBreakerConfig() {
        return CircuitBreakerConfig.custom()
                .slidingWindowType(SlidingWindowType.COUNT_BASED)
                .slidingWindowSize(100)                            // window: last 100 calls
                .minimumNumberOfCalls(100)                         // no decision before 100 samples
                .failureRateThreshold(50)                          // open at >= 50% failures
                .slowCallDurationThreshold(Duration.ofSeconds(2))  // a call over 2 s counts as slow
                .slowCallRateThreshold(100)                        // slow-call trip at 100%
                .waitDurationInOpenState(Duration.ofSeconds(60))   // stay open for 60 s
                .permittedNumberOfCallsInHalfOpenState(10)         // probe with 10 calls half-open
                .build();
    }

    /** Bulkhead guarding inventory calls: caps concurrency at 100. */
    @Bean
    public BulkheadConfig inventoryBulkheadConfig() {
        return BulkheadConfig.custom()
                .maxConcurrentCalls(100)                  // at most 100 calls in flight
                .maxWaitDuration(Duration.ofMillis(500))  // wait up to 500 ms for a slot
                .build();
    }

    /** Retry policy for payment calls: up to 3 attempts, timeouts only. */
    @Bean
    public RetryConfig paymentRetryConfig() {
        return RetryConfig.custom()
                .maxAttempts(3)                                       // 1 call + 2 retries
                .waitDuration(Duration.ofSeconds(1))                  // 1 s between attempts
                .retryOnException(TimeoutException.class::isInstance) // retry timeouts only
                .failAfterMaxAttempts(true)
                .build();
    }
}
七、数据迁移与扩容方案
7.1 在线分表迁移
java
@Service
public class ShardingMigrationService {

    /**
     * Online re-sharding: copies sourceTable into targetTable while
     * double-writes keep both consistent, then validates and switches reads.
     * Steps: shadow table -> enable double-write -> batched backfill ->
     * validate -> switch reads -> disable double-write and archive.
     */
    public void migrateOrders(String sourceTable, String targetTable) {
        // 1. Create the shadow table with an identical schema. Table names are
        //    operator-supplied identifiers, not end-user input, so string
        //    concatenation is acceptable for this internal DDL.
        jdbcTemplate.execute("CREATE TABLE " + targetTable + " LIKE " + sourceTable);
        // 2. From here on, new mutations land in both tables.
        enableDoubleWrite(sourceTable, targetTable);
        // 3. Backfill historical rows in small id-range batches.
        long maxId = getMaxId(sourceTable);
        int batchSize = 1000;
        for (long startId = 0; startId <= maxId; startId += batchSize) {
            // NOTE(review): assumes migrateBatch treats the upper bound as
            // exclusive — confirm, or boundary rows are copied twice.
            migrateBatch(sourceTable, targetTable, startId, startId + batchSize);
            // Throttle between batches to limit production impact. The original
            // called Thread.sleep without handling the checked
            // InterruptedException and did not compile; on interrupt we restore
            // the flag and abort — double-write keeps the tables consistent.
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new IllegalStateException("order migration interrupted", e);
            }
        }
        // 4. Verify the copy (row counts/checksums) before switching.
        validateData(sourceTable, targetTable);
        // 5. Route read traffic to the new table.
        switchReadTraffic(targetTable);
        // 6. Stop double-write and archive the old table.
        disableDoubleWrite();
        archiveOldTable(sourceTable);
    }
}
总结: 这些解决方案形成了完整的SaaS点餐平台技术体系,重点解决了多租户隔离、高并发订单、分布式事务、实时推送等核心难题。实际实施时需要根据业务规模和技术团队能力分阶段推进,优先保证核心链路的稳定性和性能。