7. Monitoring and Maintenance
A well-designed hybrid-architecture system needs more than working business features: it also needs solid monitoring and maintenance mechanisms. This chapter shows how to build an end-to-end monitoring system that keeps a MySQL-plus-NoSQL hybrid architecture running reliably.
Key Metric Monitoring
Different types of databases call for different key metrics. The core monitoring metrics for each database system are shown below:
1. MySQL Monitoring Metrics
```java
@Slf4j
@Component
public class MySQLHealthIndicator {
@Autowired
private DataSource dataSource;
/**
* Collect key MySQL metrics
*/
public MySQLMetrics collectMetrics() {
MySQLMetrics metrics = new MySQLMetrics();
try (Connection conn = dataSource.getConnection();
Statement stmt = conn.createStatement()) {
// 1. Connection pool metrics
if (dataSource instanceof HikariDataSource) {
HikariDataSource hikariDS = (HikariDataSource) dataSource;
HikariPoolMXBean poolMXBean = hikariDS.getHikariPoolMXBean();
metrics.setActiveConnections(poolMXBean.getActiveConnections());
metrics.setIdleConnections(poolMXBean.getIdleConnections());
metrics.setTotalConnections(poolMXBean.getTotalConnections());
metrics.setThreadsAwaitingConnection(poolMXBean.getThreadsAwaitingConnection());
}
// 2. Performance metrics
ResultSet rs = stmt.executeQuery("SHOW GLOBAL STATUS");
Map<String, String> statusMap = new HashMap<>();
while (rs.next()) {
statusMap.put(rs.getString(1), rs.getString(2));
}
// Running threads and peak connection usage
metrics.setActiveThreads(parseInt(statusMap.get("Threads_running")));
metrics.setMaxConnections(parseInt(statusMap.get("Max_used_connections")));
// QPS and TPS (note: these status values are cumulative since server start; true per-second rates require sampling deltas, see the sketch after this listing)
metrics.setQps(parseInt(statusMap.get("Queries")));
metrics.setTps(parseInt(statusMap.get("Com_commit")) + parseInt(statusMap.get("Com_rollback")));
// InnoDB buffer pool hit ratio (guard against a zero read-request count)
long readRequests = parseLong(statusMap.get("Innodb_buffer_pool_read_requests"));
long reads = parseLong(statusMap.get("Innodb_buffer_pool_reads"));
double hitRatio = readRequests > 0 ? (1 - (double) reads / readRequests) * 100 : 100.0;
metrics.setBufferPoolHitRatio(hitRatio);
// Temporary tables and filesorts (Sort_scan + Sort_range covers both filesort paths)
metrics.setTempTablesCreated(parseInt(statusMap.get("Created_tmp_tables")));
metrics.setTempDiskTablesCreated(parseInt(statusMap.get("Created_tmp_disk_tables")));
metrics.setFileSorts(parseInt(statusMap.get("Sort_scan")) + parseInt(statusMap.get("Sort_range")));
// Slow queries
metrics.setSlowQueries(parseInt(statusMap.get("Slow_queries")));
// 3. Replication status (if replicas are configured)
try {
ResultSet replRs = stmt.executeQuery("SHOW SLAVE STATUS");
if (replRs.next()) {
metrics.setReplicationRunning(
"Yes".equalsIgnoreCase(replRs.getString("Slave_IO_Running")) &&
"Yes".equalsIgnoreCase(replRs.getString("Slave_SQL_Running"))
);
metrics.setReplicationLag(replRs.getLong("Seconds_Behind_Master"));
}
} catch (SQLException e) {
// Replication may not be configured; ignore
}
// 4. Tablespace usage
metrics.setTableSpaceUsage(collectTableSpaceUsage());
} catch (SQLException e) {
log.error("获取MySQL指标失败", e);
metrics.setError(e.getMessage());
}
metrics.setTimestamp(System.currentTimeMillis());
return metrics;
}
/**
* Collect tablespace usage
*/
private Map<String, TableSpaceInfo> collectTableSpaceUsage() throws SQLException {
Map<String, TableSpaceInfo> tableSpaceMap = new HashMap<>();
String sql = "SELECT table_schema, table_name, engine, data_length, index_length, " +
"table_rows, auto_increment, create_time " +
"FROM information_schema.tables " +
"WHERE table_schema NOT IN ('mysql', 'information_schema', 'performance_schema')";
try (Connection conn = dataSource.getConnection();
PreparedStatement pstmt = conn.prepareStatement(sql);
ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
String schema = rs.getString("table_schema");
String tableName = rs.getString("table_name");
String key = schema + "." + tableName;
TableSpaceInfo info = new TableSpaceInfo();
info.setSchema(schema);
info.setTableName(tableName);
info.setEngine(rs.getString("engine"));
info.setDataSize(rs.getLong("data_length"));
info.setIndexSize(rs.getLong("index_length"));
info.setRows(rs.getLong("table_rows"));
info.setAutoIncrement(rs.getLong("auto_increment"));
info.setCreateTime(rs.getTimestamp("create_time"));
tableSpaceMap.put(key, info);
}
}
return tableSpaceMap;
}
// Helper methods
private int parseInt(String value) {
try {
return value != null ? Integer.parseInt(value) : 0;
} catch (NumberFormatException e) {
return 0;
}
}
private long parseLong(String value) {
try {
return value != null ? Long.parseLong(value) : 0L;
} catch (NumberFormatException e) {
return 0L;
}
}
// Metrics holder
@Data
public static class MySQLMetrics {
private long timestamp;
// Connection pool metrics
private int activeConnections;
private int idleConnections;
private int totalConnections;
private int threadsAwaitingConnection;
// Performance metrics
private int activeThreads;
private int maxConnections;
private int qps;
private int tps;
private double bufferPoolHitRatio;
private int tempTablesCreated;
private int tempDiskTablesCreated;
private int fileSorts;
private int slowQueries;
// Replication status
private boolean replicationRunning;
private long replicationLag;
// Tablespace usage
private Map<String, TableSpaceInfo> tableSpaceUsage;
// Error message
private String error;
}
@Data
public static class TableSpaceInfo {
private String schema;
private String tableName;
private String engine;
private long dataSize;
private long indexSize;
private long rows;
private long autoIncrement;
private Date createTime;
}
}
```
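As noted in the code, Queries, Com_commit, and Com_rollback are cumulative counters, so the qps/tps fields above are not true per-second rates. A minimal sketch of deriving real rates from two consecutive samples (the helper class and its names are illustrative, not part of the collector above):

```java
// Hypothetical helper: turns a cumulative counter into a per-second rate
// by diffing two consecutive samples.
public class CounterRateSampler {
private long lastValue = -1;
private long lastSampleMillis;

/** Returns the per-second rate since the previous call, or 0 on the first call. */
public synchronized double sample(long currentValue) {
long now = System.currentTimeMillis();
double rate = 0.0;
if (lastValue >= 0 && now > lastSampleMillis) {
rate = (currentValue - lastValue) * 1000.0 / (now - lastSampleMillis);
}
lastValue = currentValue;
lastSampleMillis = now;
return rate;
}
}
```

Feeding the Queries counter through one sampler instance yields QPS; the same applies to the commit/rollback sum for TPS.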
2. Redis Monitoring Metrics
```java
@Slf4j
@Component
public class RedisHealthIndicator {
@Autowired
private StringRedisTemplate redisTemplate;
/**
* Collect key Redis metrics
*/
public RedisMetrics collectMetrics() {
RedisMetrics metrics = new RedisMetrics();
try {
// 1. Fetch Redis INFO sections (RedisCallback needs an explicit cast here)
Properties info = redisTemplate.execute((RedisCallback<Properties>) RedisConnection::info);
Properties commandStats = redisTemplate.execute(
(RedisCallback<Properties>) connection -> connection.info("commandstats"));
// 2. Parse basic metrics
metrics.setVersion(info.getProperty("redis_version"));
metrics.setMode(info.getProperty("redis_mode"));
metrics.setOs(info.getProperty("os"));
// Memory metrics
metrics.setUsedMemory(parseLong(info.getProperty("used_memory")));
metrics.setUsedMemoryRss(parseLong(info.getProperty("used_memory_rss")));
metrics.setMemFragmentationRatio(parseDouble(info.getProperty("mem_fragmentation_ratio")));
// Performance metrics
metrics.setConnectedClients(parseInt(info.getProperty("connected_clients")));
metrics.setBlocked(parseInt(info.getProperty("blocked_clients")));
metrics.setCommandsProcessed(parseLong(info.getProperty("total_commands_processed")));
metrics.setOpsPerSecond(parseDouble(info.getProperty("instantaneous_ops_per_sec")));
metrics.setHitRate(calculateHitRate(info));
// Keyspace metrics
metrics.setKeyspaceHits(parseLong(info.getProperty("keyspace_hits")));
metrics.setKeyspaceMisses(parseLong(info.getProperty("keyspace_misses")));
metrics.setExpiredKeys(parseLong(info.getProperty("expired_keys")));
metrics.setEvictedKeys(parseLong(info.getProperty("evicted_keys")));
// Network metrics
metrics.setConnectedSlaves(parseInt(info.getProperty("connected_slaves")));
metrics.setRejectedConnections(parseLong(info.getProperty("rejected_connections")));
metrics.setTotalNetInputBytes(parseLong(info.getProperty("total_net_input_bytes")));
metrics.setTotalNetOutputBytes(parseLong(info.getProperty("total_net_output_bytes")));
// Slow log length
metrics.setSlowlogLength(getSlowLogLength());
// 3. Analyze hot commands
if (commandStats != null) {
Map<String, Long> commandCalls = new HashMap<>();
for (String key : commandStats.stringPropertyNames()) {
if (key.startsWith("cmdstat_")) {
String command = key.substring(8).toLowerCase();
String value = commandStats.getProperty(key);
if (value.contains("calls=")) {
int start = value.indexOf("calls=") + 6;
int end = value.indexOf(',', start);
if (end > start) {
try {
long calls = Long.parseLong(value.substring(start, end));
commandCalls.put(command, calls);
} catch (NumberFormatException e) {
// Ignore parse errors
}
}
}
}
}
metrics.setCommandStats(commandCalls);
}
// 4. Per-database key statistics
Map<String, KeyspaceStats> keyspaceMap = new HashMap<>();
for (String key : info.stringPropertyNames()) {
if (key.startsWith("db")) {
String dbNumber = key;
String value = info.getProperty(key);
KeyspaceStats stats = new KeyspaceStats();
stats.setDatabase(dbNumber);
// Parse format: keys=1,expires=0,avg_ttl=0
String[] parts = value.split(",");
for (String part : parts) {
String[] kv = part.split("=");
if (kv.length == 2) {
switch (kv[0]) {
case "keys":
stats.setKeys(parseLong(kv[1]));
break;
case "expires":
stats.setExpires(parseLong(kv[1]));
break;
case "avg_ttl":
stats.setAvgTtl(parseLong(kv[1]));
break;
}
}
}
keyspaceMap.put(dbNumber, stats);
}
}
metrics.setKeyspaceStats(keyspaceMap);
} catch (Exception e) {
log.error("获取Redis指标失败", e);
metrics.setError(e.getMessage());
}
metrics.setTimestamp(System.currentTimeMillis());
return metrics;
}
/**
* Get the slow log length
*/
private int getSlowLogLength() {
try {
return redisTemplate.execute((RedisCallback<Integer>) connection -> {
List<Object> response = (List<Object>) connection.execute("SLOWLOG", "LEN".getBytes());
if (response != null && !response.isEmpty()) {
return Integer.parseInt(new String((byte[]) response.get(0)));
}
return 0;
});
} catch (Exception e) {
log.error("获取慢日志长度失败", e);
return 0;
}
}
/**
* Compute the cache hit rate
*/
private double calculateHitRate(Properties info) {
long hits = parseLong(info.getProperty("keyspace_hits"));
long misses = parseLong(info.getProperty("keyspace_misses"));
return hits + misses > 0 ? (double) hits / (hits + misses) * 100 : 0;
}
// Helper methods
private int parseInt(String value) {
try {
return value != null ? Integer.parseInt(value) : 0;
} catch (NumberFormatException e) {
return 0;
}
}
private long parseLong(String value) {
try {
return value != null ? Long.parseLong(value) : 0L;
} catch (NumberFormatException e) {
return 0L;
}
}
private double parseDouble(String value) {
try {
return value != null ? Double.parseDouble(value) : 0.0;
} catch (NumberFormatException e) {
return 0.0;
}
}
// Metrics holder
@Data
public static class RedisMetrics {
private long timestamp;
// Basic info
private String version;
private String mode;
private String os;
// Memory metrics
private long usedMemory;
private long usedMemoryRss;
private double memFragmentationRatio;
// Performance metrics
private int connectedClients;
private int blocked;
private long commandsProcessed;
private double opsPerSecond;
private double hitRate;
// Keyspace metrics
private long keyspaceHits;
private long keyspaceMisses;
private long expiredKeys;
private long evictedKeys;
// Network metrics
private int connectedSlaves;
private long rejectedConnections;
private long totalNetInputBytes;
private long totalNetOutputBytes;
// Slow log
private int slowlogLength;
// Command statistics
private Map<String, Long> commandStats;
// Keyspace statistics
private Map<String, KeyspaceStats> keyspaceStats;
// Error message
private String error;
}
@Data
public static class KeyspaceStats {
private String database;
private long keys;
private long expires;
private long avgTtl;
}
}
```
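Collected snapshots become far more useful once they are exported to a time-series backend such as Prometheus. A minimal sketch assuming Micrometer is on the classpath; the gauge names and the cached-snapshot approach are illustrative choices, not a fixed convention:

```java
import io.micrometer.core.instrument.Gauge;
import io.micrometer.core.instrument.MeterRegistry;
import java.util.concurrent.atomic.AtomicReference;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

@Component
public class RedisMetricsExporter {
private final RedisHealthIndicator indicator;
// Latest snapshot, refreshed on a schedule so gauge reads stay cheap
private final AtomicReference<RedisHealthIndicator.RedisMetrics> latest = new AtomicReference<>();

public RedisMetricsExporter(MeterRegistry registry, RedisHealthIndicator indicator) {
this.indicator = indicator;
latest.set(indicator.collectMetrics());
Gauge.builder("redis.memory.used.bytes", latest, r -> r.get().getUsedMemory()).register(registry);
Gauge.builder("redis.hit.rate.percent", latest, r -> r.get().getHitRate()).register(registry);
}

@Scheduled(fixedRate = 60000) // refresh once a minute, matching the monitoring cadence below
public void refresh() {
latest.set(indicator.collectMetrics());
}
}
```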
3. MongoDB Monitoring Metrics
```java
@Slf4j
@Component
public class MongoDBHealthIndicator {
@Autowired
private MongoClient mongoClient;
/**
* Collect key MongoDB metrics
*/
public MongoDBMetrics collectMetrics() {
MongoDBMetrics metrics = new MongoDBMetrics();
try {
// 1. Server status
Document serverStatus = mongoClient.getDatabase("admin")
.runCommand(new Document("serverStatus", 1));
// 2. Connection info
Document connections = (Document) serverStatus.get("connections");
metrics.setCurrentConnections(connections.getInteger("current", 0));
metrics.setAvailableConnections(connections.getInteger("available", 0));
metrics.setTotalCreated(connections.getInteger("totalCreated", 0));
// 3. Memory usage (MB)
Document mem = (Document) serverStatus.get("mem");
metrics.setResident(mem.getInteger("resident", 0));
metrics.setVirtual(mem.getInteger("virtual", 0));
metrics.setMappedWithJournal(mem.getInteger("mappedWithJournal", 0));
// 4. Operation counters (cumulative since server start)
Document opcounters = (Document) serverStatus.get("opcounters");
metrics.setInsertCount(opcounters.getInteger("insert", 0));
metrics.setQueryCount(opcounters.getInteger("query", 0));
metrics.setUpdateCount(opcounters.getInteger("update", 0));
metrics.setDeleteCount(opcounters.getInteger("delete", 0));
metrics.setGetmoreCount(opcounters.getInteger("getmore", 0));
metrics.setCommandCount(opcounters.getInteger("command", 0));
// 5. Network statistics (BSON numeric types vary by server version, so go through Number)
Document network = (Document) serverStatus.get("network");
metrics.setBytesIn(((Number) network.get("bytesIn")).longValue());
metrics.setBytesOut(((Number) network.get("bytesOut")).longValue());
metrics.setNumRequests(((Number) network.get("numRequests")).longValue());
// 6. Collection statistics
MongoDatabase db = mongoClient.getDatabase("yourDatabaseName");
MongoIterable<String> collectionNames = db.listCollectionNames();
Map<String, CollectionStats> collectionStatsMap = new HashMap<>();
for (String collectionName : collectionNames) {
Document stats = db.runCommand(new Document("collStats", collectionName));
CollectionStats collStats = new CollectionStats();
collStats.setCollection(collectionName);
// collStats numeric fields may be Int32, Int64, or Double depending on server version
collStats.setCount(stats.get("count", (Number) 0).longValue());
collStats.setSize(stats.get("size", (Number) 0).longValue());
collStats.setAvgObjSize(stats.get("avgObjSize", (Number) 0).doubleValue());
collStats.setStorageSize(stats.get("storageSize", (Number) 0).longValue());
collStats.setTotalIndexSize(stats.get("totalIndexSize", (Number) 0).longValue());
// Index size details: indexSizes is a sub-document mapping index name -> size in bytes
Document indexSizes = (Document) stats.get("indexSizes");
if (indexSizes != null) {
Map<String, Long> indexSizeMap = new HashMap<>();
for (String key : indexSizes.keySet()) {
indexSizeMap.put(key, ((Number) indexSizes.get(key)).longValue());
}
collStats.setIndexSizes(indexSizeMap);
}
collectionStatsMap.put(collectionName, collStats);
}
metrics.setCollectionStats(collectionStatsMap);
// 7. Replica set status
try {
Document replStatus = mongoClient.getDatabase("admin")
.runCommand(new Document("replSetGetStatus", 1));
if (replStatus != null) {
metrics.setReplicaSetName(replStatus.getString("set"));
metrics.setReplicaSetMembers(replStatus.getList("members", Document.class).size());
// Parse member states
List<Document> members = replStatus.getList("members", Document.class);
List<ReplicaMemberStats> memberStats = new ArrayList<>();
for (Document member : members) {
ReplicaMemberStats stats = new ReplicaMemberStats();
stats.setId(member.getInteger("_id"));
stats.setName(member.getString("name"));
stats.setState(member.getInteger("state"));
stats.setStateStr(member.getString("stateStr"));
stats.setUptime(member.getLong("uptime", 0L));
if (member.containsKey("optimeDate")) {
stats.setOptimeDate(member.getDate("optimeDate"));
}
memberStats.add(stats);
}
metrics.setReplicaMembers(memberStats);
}
} catch (Exception e) {
// Probably not a replica set deployment; ignore
log.debug("Failed to get replica set status: {}", e.getMessage());
}
} catch (Exception e) {
log.error("获取MongoDB指标失败", e);
metrics.setError(e.getMessage());
}
metrics.setTimestamp(System.currentTimeMillis());
return metrics;
}
// Metrics holder
@Data
public static class MongoDBMetrics {
private long timestamp;
// Connection info
private int currentConnections;
private int availableConnections;
private int totalCreated;
// Memory statistics (MB)
private int resident;
private int virtual;
private int mappedWithJournal;
// Operation counters
private int insertCount;
private int queryCount;
private int updateCount;
private int deleteCount;
private int getmoreCount;
private int commandCount;
// Network statistics
private long bytesIn;
private long bytesOut;
private long numRequests;
// Replica set info
private String replicaSetName;
private int replicaSetMembers;
private List<ReplicaMemberStats> replicaMembers;
// Collection statistics
private Map<String, CollectionStats> collectionStats;
// Error message
private String error;
}
@Data
public static class CollectionStats {
private String collection;
private long count;
private long size;
private double avgObjSize;
private long storageSize;
private long totalIndexSize;
private Map<String, Long> indexSizes;
}
@Data
public static class ReplicaMemberStats {
private int id;
private String name;
private int state;
private String stateStr;
private long uptime;
private Date optimeDate;
}
}
```
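The three collectors can also back a standard Spring Boot Actuator health endpoint, so orchestrators get a simple up/down view on top of the detailed metrics. A minimal sketch assuming spring-boot-starter-actuator is present; the detail keys are illustrative:

```java
import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.HealthIndicator;
import org.springframework.stereotype.Component;

@Component
public class HybridStoreHealthIndicator implements HealthIndicator {
private final MongoDBHealthIndicator mongo;

public HybridStoreHealthIndicator(MongoDBHealthIndicator mongo) {
this.mongo = mongo;
}

@Override
public Health health() {
MongoDBHealthIndicator.MongoDBMetrics m = mongo.collectMetrics();
if (m.getError() != null) {
// collectMetrics stores the failure message instead of throwing
return Health.down().withDetail("error", m.getError()).build();
}
return Health.up()
.withDetail("currentConnections", m.getCurrentConnections())
.withDetail("availableConnections", m.getAvailableConnections())
.build();
}
}
```

The MySQL and Redis collectors can be wired the same way, one HealthIndicator per store, and /actuator/health aggregates them.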
Anomaly Detection and Alerting
A sound anomaly detection and alerting system lets you spot and handle potential problems early:
```java
@Slf4j
@Service
public class DatabaseMonitoringService {
@Autowired
private MySQLHealthIndicator mysqlHealthIndicator;
@Autowired
private RedisHealthIndicator redisHealthIndicator;
@Autowired
private MongoDBHealthIndicator mongodbHealthIndicator;
@Autowired
private AlertService alertService;
@Autowired
private MetricsRepository metricsRepository;
// Dependencies used by the consistency check below (missing from the original listing)
@Autowired
private OrderRepository orderRepository;
@Autowired
private RedisTemplate<String, Object> redisTemplate;
/**
 * Scheduled MySQL health check
 */
@Scheduled(fixedRate = 60000) // every minute
public void monitorMySQLHealth() {
MySQLHealthIndicator.MySQLMetrics metrics = mysqlHealthIndicator.collectMetrics();
// Persist the metrics
metricsRepository.saveMySQLMetrics(metrics);
// Check alert thresholds
List<AlertEvent> alerts = new ArrayList<>();
// 1. Connection pool
if (metrics.getThreadsAwaitingConnection() > 5) {
alerts.add(new AlertEvent(
AlertLevel.WARNING,
"MySQL_CONNECTION_POOL",
"Too many threads waiting for a MySQL connection: " + metrics.getThreadsAwaitingConnection(),
"Check the application pool settings and MySQL max_connections"
));
}
// 2. Slow queries
if (metrics.getSlowQueries() > 10) {
alerts.add(new AlertEvent(
AlertLevel.WARNING,
"MySQL_SLOW_QUERIES",
"Too many MySQL slow queries: " + metrics.getSlowQueries(),
"Inspect the slow_query_log and optimize the offending SQL"
));
}
// 3. On-disk temporary tables
if (metrics.getTempDiskTablesCreated() > 100) {
alerts.add(new AlertEvent(
AlertLevel.WARNING,
"MySQL_TEMP_DISK_TABLES",
"Too many on-disk temporary tables created: " + metrics.getTempDiskTablesCreated(),
"Check tmp_table_size and max_heap_table_size, and optimize the related queries"
));
}
// 4. Buffer pool hit ratio
if (metrics.getBufferPoolHitRatio() < 90) {
alerts.add(new AlertEvent(
AlertLevel.WARNING,
"MySQL_BUFFER_POOL_HIT_RATIO",
"MySQL buffer pool hit ratio is low: " + String.format("%.2f%%", metrics.getBufferPoolHitRatio()),
"Consider increasing innodb_buffer_pool_size"
));
}
// 5. Replication lag
if (metrics.isReplicationRunning() && metrics.getReplicationLag() > 30) {
AlertLevel level = metrics.getReplicationLag() > 300 ? AlertLevel.CRITICAL : AlertLevel.WARNING;
alerts.add(new AlertEvent(
level,
"MySQL_REPLICATION_LAG",
"MySQL replication lag is high: " + metrics.getReplicationLag() + "s",
"Check replica load and network connectivity"
));
}
// Dispatch alerts
for (AlertEvent alert : alerts) {
alertService.sendAlert(alert);
}
}
/**
 * Scheduled Redis health check
 */
@Scheduled(fixedRate = 60000) // every minute
public void monitorRedisHealth() {
RedisHealthIndicator.RedisMetrics metrics = redisHealthIndicator.collectMetrics();
// Persist the metrics
metricsRepository.saveRedisMetrics(metrics);
// Check alert thresholds
List<AlertEvent> alerts = new ArrayList<>();
// 1. Memory fragmentation ratio
if (metrics.getMemFragmentationRatio() > 1.5) {
alerts.add(new AlertEvent(
AlertLevel.WARNING,
"REDIS_MEMORY_FRAGMENTATION",
"Redis memory fragmentation ratio is high: " + String.format("%.2f", metrics.getMemFragmentationRatio()),
"Consider restarting the Redis instance or running MEMORY PURGE"
));
}
// 2. Cache hit rate
if (metrics.getHitRate() < 80) {
alerts.add(new AlertEvent(
AlertLevel.WARNING,
"REDIS_HIT_RATE",
"Redis cache hit rate is low: " + String.format("%.2f%%", metrics.getHitRate()),
"Review the caching strategy and expiration settings"
));
}
// 3. Rejected connections
if (metrics.getRejectedConnections() > 0) {
alerts.add(new AlertEvent(
AlertLevel.WARNING,
"REDIS_REJECTED_CONNECTIONS",
"Redis rejected connection attempts: " + metrics.getRejectedConnections(),
"Check the maxclients setting and client connection management"
));
}
// 4. Evicted keys
if (metrics.getEvictedKeys() > 0) {
alerts.add(new AlertEvent(
AlertLevel.WARNING,
"REDIS_EVICTED_KEYS",
"Redis evicted keys due to memory pressure: " + metrics.getEvictedKeys(),
"Consider adding Redis memory or optimizing data storage"
));
}
// 5. Slow log
if (metrics.getSlowlogLength() > 10) {
alerts.add(new AlertEvent(
AlertLevel.WARNING,
"REDIS_SLOW_OPERATIONS",
"Redis has slow operations: " + metrics.getSlowlogLength() + " entries",
"Run SLOWLOG GET to inspect the slow operations and optimize the commands involved"
));
}
// Dispatch alerts
for (AlertEvent alert : alerts) {
alertService.sendAlert(alert);
}
}
/**
 * Scheduled MongoDB health check
 */
@Scheduled(fixedRate = 60000) // every minute
public void monitorMongoDBHealth() {
MongoDBHealthIndicator.MongoDBMetrics metrics = mongodbHealthIndicator.collectMetrics();
// Persist the metrics
metricsRepository.saveMongoDBMetrics(metrics);
// Check alert thresholds
List<AlertEvent> alerts = new ArrayList<>();
// 1. Connection usage (guard against a zero denominator)
int totalSlots = metrics.getCurrentConnections() + metrics.getAvailableConnections();
int connectionUsagePercent = totalSlots > 0 ?
metrics.getCurrentConnections() * 100 / totalSlots : 0;
if (connectionUsagePercent > 80) {
alerts.add(new AlertEvent(
AlertLevel.WARNING,
"MONGODB_CONNECTIONS",
"MongoDB connection usage is high: " + connectionUsagePercent + "%",
"Check the application pool settings and the MongoDB maxConnections configuration"
));
}
// 2. Memory usage
if (metrics.getResident() > 10000) { // above roughly 10GB resident
alerts.add(new AlertEvent(
AlertLevel.WARNING,
"MONGODB_MEMORY_USAGE",
"MongoDB memory usage is high: " + metrics.getResident() + "MB",
"Review query efficiency and index usage"
));
}
// 3. Replica set state
if (metrics.getReplicaMembers() != null) {
for (MongoDBHealthIndicator.ReplicaMemberStats member : metrics.getReplicaMembers()) {
// Flag members that are neither PRIMARY (1) nor SECONDARY (2)
if (member.getState() != 1 && member.getState() != 2) {
alerts.add(new AlertEvent(
AlertLevel.CRITICAL,
"MONGODB_REPLICA_STATE",
"MongoDB replica set member in abnormal state: " + member.getName() + " (" + member.getStateStr() + ")",
"Check that member's logs and connectivity"
));
}
}
}
// 4. Collection growth (skip if collection stats were unavailable)
if (metrics.getCollectionStats() != null) {
for (Map.Entry<String, MongoDBHealthIndicator.CollectionStats> entry :
metrics.getCollectionStats().entrySet()) {
MongoDBHealthIndicator.CollectionStats stats = entry.getValue();
// Large collections
if (stats.getSize() > 5 * 1024 * 1024 * 1024L) { // above 5GB
alerts.add(new AlertEvent(
AlertLevel.WARNING,
"MONGODB_COLLECTION_SIZE",
"MongoDB collection " + entry.getKey() + " is large: " +
String.format("%.2f GB", stats.getSize() / (1024.0 * 1024 * 1024)),
"Consider sharding the collection or archiving historical data"
));
}
// Index size vs data size
if (stats.getTotalIndexSize() > stats.getSize()) {
alerts.add(new AlertEvent(
AlertLevel.WARNING,
"MONGODB_INDEX_SIZE",
"Indexes on MongoDB collection " + entry.getKey() + " are larger than the data itself",
"Check for unnecessary indexes"
));
}
}
}
// Dispatch alerts
for (AlertEvent alert : alerts) {
alertService.sendAlert(alert);
}
}
/**
 * Data consistency monitoring
 */
@Scheduled(cron = "0 0/10 * * * ?") // every 10 minutes
public void monitorDataConsistency() {
// Using order data as an example, compare the MySQL and Redis copies
try {
List<InconsistentData> inconsistentOrders = checkOrderConsistency();
if (!inconsistentOrders.isEmpty()) {
// Record the inconsistencies
for (InconsistentData data : inconsistentOrders) {
metricsRepository.saveInconsistentData(data);
}
// Raise an alert
AlertEvent alert = new AlertEvent(
AlertLevel.WARNING,
"DATA_INCONSISTENCY",
"Detected " + inconsistentOrders.size() + " inconsistent order records",
"Check the data synchronization mechanism and cache update strategy"
);
alertService.sendAlert(alert);
// Small numbers of inconsistencies can be repaired automatically
if (inconsistentOrders.size() <= 10) {
for (InconsistentData data : inconsistentOrders) {
fixInconsistentData(data);
}
}
}
} catch (Exception e) {
log.error("Data consistency monitoring failed", e);
AlertEvent alert = new AlertEvent(
AlertLevel.CRITICAL,
"MONITORING_ERROR",
"Data consistency monitoring failed: " + e.getMessage(),
"Check the status of the monitoring service"
);
alertService.sendAlert(alert);
}
}
/**
 * Check order data consistency
 */
private List<InconsistentData> checkOrderConsistency() {
List<InconsistentData> result = new ArrayList<>();
// 1. Fetch orders from the last 10 minutes
List<Order> orders = orderRepository.findRecentOrders(10);
// 2. Compare each order against its Redis cache entry
for (Order order : orders) {
String cacheKey = "order:" + order.getId();
Order cachedOrder = (Order) redisTemplate.opsForValue().get(cacheKey);
if (cachedOrder != null) {
// Compare the key fields
if (!order.getStatus().equals(cachedOrder.getStatus()) ||
!order.getTotalAmount().equals(cachedOrder.getTotalAmount())) {
InconsistentData data = new InconsistentData();
data.setEntityType("Order");
data.setEntityId(order.getId().toString());
data.setMysqlData(JsonUtil.toJson(order));
data.setRedisData(JsonUtil.toJson(cachedOrder));
data.setInconsistentFields(getInconsistentFields(order, cachedOrder));
data.setDetectedAt(new Date());
result.add(data);
}
}
}
return result;
}
/**
* Collect the fields that differ
*/
private List<String> getInconsistentFields(Order mysqlOrder, Order redisOrder) {
List<String> fields = new ArrayList<>();
if (!Objects.equals(mysqlOrder.getStatus(), redisOrder.getStatus())) {
fields.add("status");
}
if (!Objects.equals(mysqlOrder.getTotalAmount(), redisOrder.getTotalAmount())) {
fields.add("totalAmount");
}
if (!Objects.equals(mysqlOrder.getAddressId(), redisOrder.getAddressId())) {
fields.add("addressId");
}
if (!Objects.equals(mysqlOrder.getUpdatedAt(), redisOrder.getUpdatedAt())) {
fields.add("updatedAt");
}
return fields;
}
/**
 * Repair inconsistent data
 */
private void fixInconsistentData(InconsistentData data) {
log.info("Attempting to repair inconsistent data: {}", data.getEntityId());
try {
if ("Order".equals(data.getEntityType())) {
String cacheKey = "order:" + data.getEntityId();
// 从MySQL获取最新数据
Order order = orderRepository.findById(Long.valueOf(data.getEntityId()))
.orElseThrow(() -> new IllegalStateException("订单不存在"));
// 更新Redis缓存
redisTemplate.opsForValue().set(cacheKey, order, 1, TimeUnit.HOURS);
// 记录修复日志
data.setFixed(true);
data.setFixedAt(new Date());
metricsRepository.updateInconsistentData(data);
log.info("成功修复不一致数据: {}", data.getEntityId());
}
} catch (Exception e) {
log.error("修复不一致数据失败: {}", data.getEntityId(), e);
}
}
// Alert level enum
public enum AlertLevel {
INFO, WARNING, CRITICAL
}
// Alert event
@Data
public static class AlertEvent {
private AlertLevel level;
private String type;
private String message;
private String suggestion;
private Date timestamp = new Date();
public AlertEvent(AlertLevel level, String type, String message, String suggestion) {
this.level = level;
this.type = type;
this.message = message;
this.suggestion = suggestion;
}
}
// Inconsistent data record
@Data
public static class InconsistentData {
private String entityType;
private String entityId;
private String mysqlData;
private String redisData;
private List<String> inconsistentFields;
private Date detectedAt;
private boolean fixed;
private Date fixedAt;
}
}
// Alert service
@Slf4j
@Service
public class AlertService {
@Autowired
private JavaMailSender mailSender;
@Autowired
private AlertRepository alertRepository;
@Value("${alert.mail.recipients}")
private String mailRecipients;
@Value("${alert.mail.enabled}")
private boolean mailEnabled;
@Value("${alert.webhook.url}")
private String webhookUrl;
@Value("${alert.webhook.enabled}")
private boolean webhookEnabled;
/**
* Dispatch an alert
*/
public void sendAlert(DatabaseMonitoringService.AlertEvent alert) {
// 1. Persist the alert record
alertRepository.saveAlert(alert);
// 2. Only CRITICAL alerts trigger immediate notification
boolean needImmediateNotify = alert.getLevel() == DatabaseMonitoringService.AlertLevel.CRITICAL;
// 3. Email notification
if (mailEnabled && needImmediateNotify) {
sendMailAlert(alert);
}
// 4. Webhook notification
if (webhookEnabled && needImmediateNotify) {
sendWebhookAlert(alert);
}
}
/**
* Send an email alert
*/
private void sendMailAlert(DatabaseMonitoringService.AlertEvent alert) {
try {
MimeMessage message = mailSender.createMimeMessage();
MimeMessageHelper helper = new MimeMessageHelper(message, true);
helper.setFrom("monitor@yourcompany.com");
helper.setTo(mailRecipients.split(","));
helper.setSubject("[" + alert.getLevel() + "] Database monitoring alert: " + alert.getType());
String content = "<h3>Alert details</h3>" +
"<p><strong>Type:</strong>" + alert.getType() + "</p>" +
"<p><strong>Level:</strong>" + alert.getLevel() + "</p>" +
"<p><strong>Time:</strong>" + formatDate(alert.getTimestamp()) + "</p>" +
"<p><strong>Message:</strong>" + alert.getMessage() + "</p>" +
"<p><strong>Suggestion:</strong>" + alert.getSuggestion() + "</p>";
helper.setText(content, true);
mailSender.send(message);
log.info("已发送邮件告警: {}", alert.getType());
} catch (Exception e) {
log.error("发送邮件告警失败", e);
}
}
/**
 * Send a webhook alert
 */
private void sendWebhookAlert(DatabaseMonitoringService.AlertEvent alert) {
try {
// Build the JSON payload
JSONObject json = new JSONObject();
json.put("level", alert.getLevel().toString());
json.put("type", alert.getType());
json.put("message", alert.getMessage());
json.put("suggestion", alert.getSuggestion());
json.put("timestamp", alert.getTimestamp().getTime());
// Send the POST request
URL url = new URL(webhookUrl);
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("POST");
conn.setRequestProperty("Content-Type", "application/json");
conn.setDoOutput(true);
try (OutputStream os = conn.getOutputStream()) {
byte[] input = json.toString().getBytes(StandardCharsets.UTF_8);
os.write(input, 0, input.length);
}
int responseCode = conn.getResponseCode();
if (responseCode >= 200 && responseCode < 300) {
log.info("已发送Webhook告警: {}, 响应码: {}", alert.getType(), responseCode);
} else {
log.warn("发送Webhook告警失败: {}, 响应码: {}", alert.getType(), responseCode);
}
} catch (Exception e) {
log.error("发送Webhook告警失败", e);
}
}
/**
* Format a date
*/
private String formatDate(Date date) {
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
return sdf.format(date);
}
}
```
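The thresholds above (5 waiting threads, 10 slow queries, a 90% hit ratio, and so on) are hardcoded. In practice they usually belong in configuration so they can be tuned per environment without a release. A minimal sketch using Spring's @ConfigurationProperties; the property prefix and field names are illustrative:

```java
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;

@Data
@Component
@ConfigurationProperties(prefix = "monitor.mysql")
public class MySQLAlertThresholds {
private int maxThreadsAwaitingConnection = 5;     // pool-wait threshold
private int maxSlowQueries = 10;                  // slow-query count threshold
private double minBufferPoolHitRatio = 90.0;      // percent
private long warnReplicationLagSeconds = 30;      // WARNING above this
private long criticalReplicationLagSeconds = 300; // CRITICAL above this
}
```

The monitoring service would then inject MySQLAlertThresholds and compare against its fields instead of literals, with the defaults mirroring the values in the code above.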
Backup and Recovery Strategy
Backup and recovery are key to a system's disaster resilience. In a hybrid architecture, each type of database needs its own backup strategy:
```java
@Slf4j
@Service
public class DatabaseBackupService {
@Autowired
private Environment env;
@Autowired
private BackupRepository backupRepository;
@Autowired
private AlertService alertService;
/**
 * MySQL full backup - runs daily at 02:00
 */
@Scheduled(cron = "0 0 2 * * ?")
public void mysqlFullBackup() {
String backupId = "mysql_full_" + new SimpleDateFormat("yyyyMMdd_HHmmss").format(new Date());
BackupTask task = new BackupTask(backupId, "MySQL", "FULL");
task.setStartTime(new Date());
try {
log.info("Starting MySQL full backup: {}", backupId);
// MySQL connection info
String mysqlHost = env.getProperty("spring.datasource.host");
String mysqlPort = env.getProperty("spring.datasource.port", "3306");
String mysqlUser = env.getProperty("spring.datasource.username");
String mysqlPassword = env.getProperty("spring.datasource.password");
String mysqlDatabase = env.getProperty("spring.datasource.database");
// Backup directory
String backupDir = env.getProperty("backup.directory", "/data/backup");
String dateDir = new SimpleDateFormat("yyyy-MM-dd").format(new Date());
String backupPath = backupDir + "/" + dateDir;
// Create the backup directory
new File(backupPath).mkdirs();
// Build the backup command
String backupFile = backupPath + "/" + backupId + ".sql.gz";
// Run the dump with mysqldump
ProcessBuilder pb = new ProcessBuilder(
"mysqldump",
"--host=" + mysqlHost,
"--port=" + mysqlPort,
"--user=" + mysqlUser,
"--password=" + mysqlPassword,
"--single-transaction", // consistent InnoDB snapshot
"--routines", // stored procedures and functions
"--triggers", // triggers
"--events", // events
mysqlDatabase
);
pb.redirectOutput(ProcessBuilder.Redirect.to(new File(backupFile + ".tmp")));
// Execute the backup command
Process process = pb.start();
int exitCode = process.waitFor();
if (exitCode != 0) {
throw new RuntimeException("mysqldump failed, exit code: " + exitCode);
}
// Compress the dump; gzip -c writes to stdout so the redirect produces the final .sql.gz
// (plain gzip would compress in place and leave nothing on stdout)
ProcessBuilder gzipPb = new ProcessBuilder("gzip", "-c", backupFile + ".tmp");
gzipPb.redirectOutput(ProcessBuilder.Redirect.to(new File(backupFile)));
Process gzipProcess = gzipPb.start();
int gzipExitCode = gzipProcess.waitFor();
if (gzipExitCode != 0) {
throw new RuntimeException("Compressing the backup file failed, exit code: " + gzipExitCode);
}
// Remove the uncompressed temp file
new File(backupFile + ".tmp").delete();
// File size and checksum
File backup = new File(backupFile);
long fileSize = backup.length();
String checksum = calculateMD5(backup);
// Update the backup task record
task.setEndTime(new Date());
task.setStatus("SUCCESS");
task.setBackupPath(backupFile);
task.setFileSize(fileSize);
task.setChecksum(checksum);
log.info("MySQL full backup finished: {}, size: {}", backupId, formatFileSize(fileSize));
} catch (Exception e) {
log.error("MySQL full backup failed: {}", backupId, e);
task.setEndTime(new Date());
task.setStatus("FAILED");
task.setErrorMessage(e.getMessage());
// Raise an alert
DatabaseMonitoringService.AlertEvent alert = new DatabaseMonitoringService.AlertEvent(
DatabaseMonitoringService.AlertLevel.CRITICAL,
"BACKUP_FAILED",
"MySQL full backup failed: " + e.getMessage(),
"Check the backup script and database connectivity"
);
alertService.sendAlert(alert);
} finally {
// Persist the backup task record
backupRepository.saveBackupTask(task);
}
}
/**
 * MySQL incremental backup (binlog) - runs every 6 hours
 */
@Scheduled(cron = "0 0 */6 * * ?")
public void mysqlIncrementalBackup() {
String backupId = "mysql_binlog_" + new SimpleDateFormat("yyyyMMdd_HHmmss").format(new Date());
BackupTask task = new BackupTask(backupId, "MySQL", "INCREMENTAL");
task.setStartTime(new Date());
try {
log.info("Starting MySQL incremental backup (binlog): {}", backupId);
// MySQL connection info
String mysqlHost = env.getProperty("spring.datasource.host");
String mysqlPort = env.getProperty("spring.datasource.port", "3306");
String mysqlUser = env.getProperty("spring.datasource.username");
String mysqlPassword = env.getProperty("spring.datasource.password");
// Backup directory
String backupDir = env.getProperty("backup.directory", "/data/backup");
String dateDir = new SimpleDateFormat("yyyy-MM-dd").format(new Date());
String backupPath = backupDir + "/" + dateDir + "/binlog";
// Create the backup directory
new File(backupPath).mkdirs();
// Query the current binlog file
Connection conn = DriverManager.getConnection(
"jdbc:mysql://" + mysqlHost + ":" + mysqlPort,
mysqlUser,
mysqlPassword
);
Statement stmt = conn.createStatement();
ResultSet rs = stmt.executeQuery("SHOW MASTER STATUS");
if (!rs.next()) {
throw new RuntimeException("Unable to read binlog status");
}
String currentBinlog = rs.getString("File");
String currentPos = rs.getString("Position");
rs.close();
stmt.close();
conn.close();
// Run the binlog backup
// Note: this uses the mysqlbinlog tool; a real environment may need more logic to determine the range of binlogs to back up
ProcessBuilder pb = new ProcessBuilder(
"mysqlbinlog",
"--host=" + mysqlHost,
"--port=" + mysqlPort,
"--user=" + mysqlUser,
"--password=" + mysqlPassword,
"--raw", // 以原始格式输出
"--result-file=" + backupPath + "/",
currentBinlog
);
Process process = pb.start();
int exitCode = process.waitFor();
if (exitCode != 0) {
throw new RuntimeException("binlog backup command failed, exit code: " + exitCode);
}
// Record the binlog position
String binlogInfoFile = backupPath + "/binlog_info.json";
JSONObject binlogInfo = new JSONObject();
binlogInfo.put("file", currentBinlog);
binlogInfo.put("position", currentPos);
binlogInfo.put("timestamp", System.currentTimeMillis());
FileWriter writer = new FileWriter(binlogInfoFile);
writer.write(binlogInfo.toString());
writer.close();
// Update the backup task record
task.setEndTime(new Date());
task.setStatus("SUCCESS");
task.setBackupPath(backupPath);
task.setMetadata(binlogInfo.toString());
log.info("MySQL incremental backup (binlog) finished: {}, current binlog: {}", backupId, currentBinlog);
} catch (Exception e) {
log.error("MySQL incremental backup failed: {}", backupId, e);
task.setEndTime(new Date());
task.setStatus("FAILED");
task.setErrorMessage(e.getMessage());
// Raise an alert
DatabaseMonitoringService.AlertEvent alert = new DatabaseMonitoringService.AlertEvent(
DatabaseMonitoringService.AlertLevel.WARNING,
"BACKUP_FAILED",
"MySQL incremental backup failed: " + e.getMessage(),
"Check the binlog settings and the backup script"
);
alertService.sendAlert(alert);
} finally {
// Persist the backup task record
backupRepository.saveBackupTask(task);
}
}
/**
 * Redis backup - runs daily at 03:00
 */
@Scheduled(cron = "0 0 3 * * ?")
public void redisBackup() {
String backupId = "redis_" + new SimpleDateFormat("yyyyMMdd_HHmmss").format(new Date());
BackupTask task = new BackupTask(backupId, "Redis", "FULL");
task.setStartTime(new Date());
try {
log.info("Starting Redis backup: {}", backupId);
// Redis connection info
String redisHost = env.getProperty("spring.redis.host");
String redisPort = env.getProperty("spring.redis.port", "6379");
String redisPassword = env.getProperty("spring.redis.password");
// Backup directory
String backupDir = env.getProperty("backup.directory", "/data/backup");
String dateDir = new SimpleDateFormat("yyyy-MM-dd").format(new Date());
String backupPath = backupDir + "/" + dateDir;
// Create the backup directory
new File(backupPath).mkdirs();
// Backup file
String backupFile = backupPath + "/" + backupId + ".rdb";
// Trigger a Redis BGSAVE
Jedis jedis = new Jedis(redisHost, Integer.parseInt(redisPort));
if (redisPassword != null && !redisPassword.isEmpty()) {
jedis.auth(redisPassword);
}
String response = jedis.bgsave();
if (!"Background saving started".equals(response)) {
throw new RuntimeException("Triggering Redis BGSAVE failed: " + response);
}
// Wait for BGSAVE to finish
String saveInProgress;
do {
Thread.sleep(1000);
saveInProgress = jedis.info("Persistence")
.lines()
.filter(line -> line.startsWith("rdb_bgsave_in_progress"))
.findFirst()
.orElse("rdb_bgsave_in_progress:0")
.split(":")[1]
.trim();
} while ("1".equals(saveInProgress));
// Resolve the RDB file path
String rdbFilePath = jedis.configGet("dir").get(1) + "/" + jedis.configGet("dbfilename").get(1);
jedis.close();
// Copy the RDB file into the backup directory
FileChannel sourceChannel = new FileInputStream(rdbFilePath).getChannel();
FileChannel destChannel = new FileOutputStream(backupFile).getChannel();
destChannel.transferFrom(sourceChannel, 0, sourceChannel.size());
sourceChannel.close();
destChannel.close();
// File size and checksum
File backup = new File(backupFile);
long fileSize = backup.length();
String checksum = calculateMD5(backup);
// Update the backup task record
task.setEndTime(new Date());
task.setStatus("SUCCESS");
task.setBackupPath(backupFile);
task.setFileSize(fileSize);
task.setChecksum(checksum);
log.info("Redis backup finished: {}, size: {}", backupId, formatFileSize(fileSize));
} catch (Exception e) {
log.error("Redis backup failed: {}", backupId, e);
task.setEndTime(new Date());
task.setStatus("FAILED");
task.setErrorMessage(e.getMessage());
// Raise an alert
DatabaseMonitoringService.AlertEvent alert = new DatabaseMonitoringService.AlertEvent(
DatabaseMonitoringService.AlertLevel.CRITICAL,
"BACKUP_FAILED",
"Redis backup failed: " + e.getMessage(),
"Check the Redis configuration and the backup script"
);
alertService.sendAlert(alert);
} finally {
// Persist the backup task record
backupRepository.saveBackupTask(task);
}
}
/**
 * MongoDB backup - runs daily at 04:00
 */
@Scheduled(cron = "0 0 4 * * ?")
public void mongoDBBackup() {
String backupId = "mongodb_" + new SimpleDateFormat("yyyyMMdd_HHmmss").format(new Date());
BackupTask task = new BackupTask(backupId, "MongoDB", "FULL");
task.setStartTime(new Date());
try {
log.info("Starting MongoDB backup: {}", backupId);
// MongoDB connection info
String mongoHost = env.getProperty("spring.data.mongodb.host");
String mongoPort = env.getProperty("spring.data.mongodb.port", "27017");
String mongoDatabase = env.getProperty("spring.data.mongodb.database");
String mongoUsername = env.getProperty("spring.data.mongodb.username");
String mongoPassword = env.getProperty("spring.data.mongodb.password");
// Backup directory
String backupDir = env.getProperty("backup.directory", "/data/backup");
String dateDir = new SimpleDateFormat("yyyy-MM-dd").format(new Date());
String backupPath = backupDir + "/" + dateDir;
// Create the backup directory
new File(backupPath).mkdirs();
// Backup folder
String backupFolder = backupPath + "/" + backupId;
new File(backupFolder).mkdirs();
// Build the mongodump command
List<String> command = new ArrayList<>();
command.add("mongodump");
command.add("--host");
command.add(mongoHost);
command.add("--port");
command.add(mongoPort);
if (mongoUsername != null && !mongoUsername.isEmpty()) {
command.add("--username");
command.add(mongoUsername);
command.add("--password");
command.add(mongoPassword);
command.add("--authenticationDatabase");
command.add("admin");
}
command.add("--db");
command.add(mongoDatabase);
command.add("--out");
command.add(backupFolder);
// Execute the backup command
ProcessBuilder pb = new ProcessBuilder(command);
Process process = pb.start();
int exitCode = process.waitFor();
if (exitCode != 0) {
throw new RuntimeException("mongodump failed, exit code: " + exitCode);
}
// Compress the backup folder
String zipFile = backupPath + "/" + backupId + ".zip";
ProcessBuilder zipPb = new ProcessBuilder(
"zip", "-r", zipFile, backupId
);
zipPb.directory(new File(backupPath));
Process zipProcess = zipPb.start();
int zipExitCode = zipProcess.waitFor();
if (zipExitCode != 0) {
throw new RuntimeException("Compressing the MongoDB backup failed, exit code: " + zipExitCode);
}
// Remove the uncompressed backup folder
deleteDirectory(new File(backupFolder));
// File size and checksum
File backup = new File(zipFile);
long fileSize = backup.length();
String checksum = calculateMD5(backup);
// Update the backup task record
task.setEndTime(new Date());
task.setStatus("SUCCESS");
task.setBackupPath(zipFile);
task.setFileSize(fileSize);
task.setChecksum(checksum);
log.info("MongoDB backup finished: {}, size: {}", backupId, formatFileSize(fileSize));
} catch (Exception e) {
log.error("MongoDB backup failed: {}", backupId, e);
task.setEndTime(new Date());
task.setStatus("FAILED");
task.setErrorMessage(e.getMessage());
// Raise an alert
DatabaseMonitoringService.AlertEvent alert = new DatabaseMonitoringService.AlertEvent(
DatabaseMonitoringService.AlertLevel.CRITICAL,
"BACKUP_FAILED",
"MongoDB backup failed: " + e.getMessage(),
"Check the MongoDB configuration and the backup script"
);
alertService.sendAlert(alert);
} finally {
// Persist the backup task record
backupRepository.saveBackupTask(task);
}
}
/**
 * Restore MySQL from a backup
 */
public RestoreResult restoreMySQLFromBackup(String backupPath) {
String restoreId = "mysql_restore_" + new SimpleDateFormat("yyyyMMdd_HHmmss").format(new Date());
RestoreTask task = new RestoreTask(restoreId, "MySQL", backupPath);
task.setStartTime(new Date());
try {
log.info("Starting MySQL restore from backup: {}", restoreId);
// MySQL connection info
String mysqlHost = env.getProperty("spring.datasource.host");
String mysqlPort = env.getProperty("spring.datasource.port", "3306");
String mysqlUser = env.getProperty("spring.datasource.username");
String mysqlPassword = env.getProperty("spring.datasource.password");
String mysqlDatabase = env.getProperty("spring.datasource.database");
// Check the backup file
File backupFile = new File(backupPath);
if (!backupFile.exists()) {
throw new FileNotFoundException("Backup file does not exist: " + backupPath);
}
// Handle compressed backups
boolean isCompressed = backupPath.endsWith(".gz");
String sqlFile = backupPath;
if (isCompressed) {
// Decompress the file
sqlFile = backupPath.substring(0, backupPath.length() - 3);
ProcessBuilder gunzipPb = new ProcessBuilder("gunzip", "-c", backupPath);
gunzipPb.redirectOutput(ProcessBuilder.Redirect.to(new File(sqlFile)));
Process gunzipProcess = gunzipPb.start();
int gunzipExitCode = gunzipProcess.waitFor();
if (gunzipExitCode != 0) {
throw new RuntimeException("Decompressing the backup file failed, exit code: " + gunzipExitCode);
}
}
// Execute the restore command
ProcessBuilder pb = new ProcessBuilder(
"mysql",
"--host=" + mysqlHost,
"--port=" + mysqlPort,
"--user=" + mysqlUser,
"--password=" + mysqlPassword,
mysqlDatabase
);
pb.redirectInput(new File(sqlFile));
Process process = pb.start();
int exitCode = process.waitFor();
if (exitCode != 0) {
throw new RuntimeException("MySQL restore command failed, exit code: " + exitCode);
}
// If a temp file was decompressed, remove it
if (isCompressed) {
new File(sqlFile).delete();
}
// Update the restore task record
task.setEndTime(new Date());
task.setStatus("SUCCESS");
log.info("MySQL restore finished: {}", restoreId);
RestoreResult result = new RestoreResult();
result.setSuccess(true);
result.setMessage("MySQL restore succeeded");
return result;
} catch (Exception e) {
log.error("MySQL restore failed: {}", restoreId, e);
task.setEndTime(new Date());
task.setStatus("FAILED");
task.setErrorMessage(e.getMessage());
// Raise an alert
DatabaseMonitoringService.AlertEvent alert = new DatabaseMonitoringService.AlertEvent(
DatabaseMonitoringService.AlertLevel.CRITICAL,
"RESTORE_FAILED",
"MySQL restore failed: " + e.getMessage(),
"Check the backup file and the MySQL configuration"
);
alertService.sendAlert(alert);
RestoreResult result = new RestoreResult();
result.setSuccess(false);
result.setMessage("MySQL restore failed: " + e.getMessage());
return result;
} finally {
// Persist the restore task record
backupRepository.saveRestoreTask(task);
}
}
/**
 * Restore Redis from a backup
 */
public RestoreResult restoreRedisFromBackup(String backupPath) {
String restoreId = "redis_restore_" + new SimpleDateFormat("yyyyMMdd_HHmmss").format(new Date());
RestoreTask task = new RestoreTask(restoreId, "Redis", backupPath);
task.setStartTime(new Date());
try {
log.info("Starting Redis restore from backup: {}", restoreId);
// Redis connection info
String redisHost = env.getProperty("spring.redis.host");
String redisPort = env.getProperty("spring.redis.port", "6379");
String redisPassword = env.getProperty("spring.redis.password");
// Check the backup file
File backupFile = new File(backupPath);
if (!backupFile.exists()) {
throw new FileNotFoundException("Backup file does not exist: " + backupPath);
}
// Connect to Redis
Jedis jedis = new Jedis(redisHost, Integer.parseInt(redisPort));
if (redisPassword != null && !redisPassword.isEmpty()) {
jedis.auth(redisPassword);
}
// Read the Redis configuration
String redisDir = jedis.configGet("dir").get(1);
String rdbFilename = jedis.configGet("dbfilename").get(1);
// Shut Redis down (requires admin privileges)
jedis.shutdown();
jedis.close();
// Keep a copy of the current RDB file
File currentRdb = new File(redisDir + "/" + rdbFilename);
if (currentRdb.exists()) {
File backupRdb = new File(redisDir + "/" + rdbFilename + ".bak");
Files.copy(currentRdb.toPath(), backupRdb.toPath(), StandardCopyOption.REPLACE_EXISTING);
}
// Copy the backup file into the Redis data directory
Files.copy(backupFile.toPath(), currentRdb.toPath(), StandardCopyOption.REPLACE_EXISTING);
// Start the Redis service (via the system service manager)
ProcessBuilder pb = new ProcessBuilder("service", "redis-server", "start");
Process process = pb.start();
int exitCode = process.waitFor();
if (exitCode != 0) {
throw new RuntimeException("Starting the Redis service failed, exit code: " + exitCode);
}
// Wait for Redis to come up
Thread.sleep(5000);
// Verify that Redis responds
jedis = new Jedis(redisHost, Integer.parseInt(redisPort));
if (redisPassword != null && !redisPassword.isEmpty()) {
jedis.auth(redisPassword);
}
String pingResponse = jedis.ping();
jedis.close();
if (!"PONG".equals(pingResponse)) {
throw new RuntimeException("Redis服务未正常响应,响应: " + pingResponse);
}
// 更新恢复任务信息
task.setEndTime(new Date());
task.setStatus("SUCCESS");
log.info("Redis恢复完成: {}", restoreId);
RestoreResult result = new RestoreResult();
result.setSuccess(true);
result.setMessage("Redis恢复成功");
return result;
} catch (Exception e) {
log.error("Redis restore failed: {}", restoreId, e);
task.setEndTime(new Date());
task.setStatus("FAILED");
task.setErrorMessage(e.getMessage());
// Raise an alert
DatabaseMonitoringService.AlertEvent alert = new DatabaseMonitoringService.AlertEvent(
DatabaseMonitoringService.AlertLevel.CRITICAL,
"RESTORE_FAILED",
"Redis restore failed: " + e.getMessage(),
"Check the backup file and the Redis configuration"
);
alertService.sendAlert(alert);
RestoreResult result = new RestoreResult();
result.setSuccess(false);
result.setMessage("Redis restore failed: " + e.getMessage());
return result;
} finally {
// Persist the restore task record
backupRepository.saveRestoreTask(task);
}
}
/**
 * Restore MongoDB from a backup
 */
public RestoreResult restoreMongoDBFromBackup(String backupPath) {
String restoreId = "mongodb_restore_" + new SimpleDateFormat("yyyyMMdd_HHmmss").format(new Date());
RestoreTask task = new RestoreTask(restoreId, "MongoDB", backupPath);
task.setStartTime(new Date());
try {
log.info("Starting MongoDB restore from backup: {}", restoreId);
// MongoDB connection info
String mongoHost = env.getProperty("spring.data.mongodb.host");
String mongoPort = env.getProperty("spring.data.mongodb.port", "27017");
String mongoDatabase = env.getProperty("spring.data.mongodb.database");
String mongoUsername = env.getProperty("spring.data.mongodb.username");
String mongoPassword = env.getProperty("spring.data.mongodb.password");
// Check the backup file
File backupFile = new File(backupPath);
if (!backupFile.exists()) {
throw new FileNotFoundException("Backup file does not exist: " + backupPath);
}
// Handle compressed backups
boolean isCompressed = backupPath.endsWith(".zip");
String backupFolder = backupPath;
if (isCompressed) {
// Create a temporary extraction directory
String extractDir = backupFile.getParent() + "/extract_" + System.currentTimeMillis();
new File(extractDir).mkdirs();
// Unzip the archive
ProcessBuilder unzipPb = new ProcessBuilder(
"unzip", backupPath, "-d", extractDir
);
Process unzipProcess = unzipPb.start();
int unzipExitCode = unzipProcess.waitFor();
if (unzipExitCode != 0) {
throw new RuntimeException("Extracting the MongoDB backup failed, exit code: " + unzipExitCode);
}
// Locate the extracted directory
File[] extractedFiles = new File(extractDir).listFiles();
if (extractedFiles == null || extractedFiles.length == 0) {
throw new RuntimeException("Extraction directory is empty");
}
backupFolder = extractDir;
}
// Build the mongorestore command
List<String> command = new ArrayList<>();
command.add("mongorestore");
command.add("--host");
command.add(mongoHost);
command.add("--port");
command.add(mongoPort);
if (mongoUsername != null && !mongoUsername.isEmpty()) {
command.add("--username");
command.add(mongoUsername);
command.add("--password");
command.add(mongoPassword);
command.add("--authenticationDatabase");
command.add("admin");
}
command.add("--db");
command.add(mongoDatabase);
command.add("--drop"); // 删除现有集合
// 添加备份目录
if (isCompressed) {
command.add(backupFolder + "/" + mongoDatabase);
} else {
command.add(backupFolder);
}
// Execute the restore command
ProcessBuilder pb = new ProcessBuilder(command);
Process process = pb.start();
int exitCode = process.waitFor();
if (exitCode != 0) {
throw new RuntimeException("MongoDB restore command failed, exit code: " + exitCode);
}
// If a temp directory was extracted, remove it
if (isCompressed) {
deleteDirectory(new File(backupFolder));
}
// Update the restore task record
task.setEndTime(new Date());
task.setStatus("SUCCESS");
log.info("MongoDB restore finished: {}", restoreId);
RestoreResult result = new RestoreResult();
result.setSuccess(true);
result.setMessage("MongoDB restore succeeded");
return result;
} catch (Exception e) {
log.error("MongoDB restore failed: {}", restoreId, e);
task.setEndTime(new Date());
task.setStatus("FAILED");
task.setErrorMessage(e.getMessage());
// Raise an alert
DatabaseMonitoringService.AlertEvent alert = new DatabaseMonitoringService.AlertEvent(
DatabaseMonitoringService.AlertLevel.CRITICAL,
"RESTORE_FAILED",
"MongoDB restore failed: " + e.getMessage(),
"Check the backup file and the MongoDB configuration"
);
alertService.sendAlert(alert);
RestoreResult result = new RestoreResult();
result.setSuccess(false);
result.setMessage("MongoDB restore failed: " + e.getMessage());
return result;
} finally {
// Persist the restore task record
backupRepository.saveRestoreTask(task);
}
}
/**
 * Purge expired backups - runs Sundays at 01:00
 */
@Scheduled(cron = "0 0 1 ? * SUN")
public void cleanupExpiredBackups() {
log.info("开始清理过期备份");
try {
// 获取保留备份的天数
int retentionDays = Integer.parseInt(env.getProperty("backup.retention.days", "30"));
// 备份目录
String backupDir = env.getProperty("backup.directory", "/data/backup");
File backupRoot = new File(backupDir);
if (!backupRoot.exists() || !backupRoot.isDirectory()) {
log.warn("备份目录不存在: {}", backupDir);
return;
}
// 计算截止日期
Calendar cutoffDate = Calendar.getInstance();
cutoffDate.add(Calendar.DAY_OF_MONTH, -retentionDays);
// 获取所有日期目录
File[] dateDirs = backupRoot.listFiles();
if (dateDirs == null) {
return;
}
int deletedFiles = 0;
long freedSpace = 0;
for (File dateDir : dateDirs) {
if (!dateDir.isDirectory()) {
continue;
}
// 解析日期目录名称
try {
Date dirDate = new SimpleDateFormat("yyyy-MM-dd").parse(dateDir.getName());
// 检查是否过期
if (dirDate.before(cutoffDate.getTime())) {
// 计算删除前的目录大小
long dirSize = calculateDirectorySize(dateDir);
// 删除目录及其内容
boolean deleted = deleteDirectory(dateDir);
if (deleted) {
deletedFiles++;
freedSpace += dirSize;
log.info("已删除过期备份目录: {}, 大小: {}", dateDir.getName(), formatFileSize(dirSize));
} else {
log.warn("删除过期备份目录失败: {}", dateDir.getName());
}
}
} catch (ParseException e) {
// 不是日期格式的目录,忽略
log.debug("忽略非日期格式目录: {}", dateDir.getName());
}
}
log.info("清理过期备份完成,删除目录: {},释放空间: {}", deletedFiles, formatFileSize(freedSpace));
} catch (Exception e) {
log.error("清理过期备份失败", e);
}
}
/**
* Compute a directory's size
*/
private long calculateDirectorySize(File directory) {
long size = 0;
File[] files = directory.listFiles();
if (files != null) {
for (File file : files) {
if (file.isFile()) {
size += file.length();
} else {
size += calculateDirectorySize(file);
}
}
}
return size;
}
/**
* Delete a directory and its contents
*/
private boolean deleteDirectory(File directory) {
File[] files = directory.listFiles();
if (files != null) {
for (File file : files) {
if (file.isDirectory()) {
deleteDirectory(file);
} else {
file.delete();
}
}
}
return directory.delete();
}
/**
* Compute a file's MD5 checksum
*/
private String calculateMD5(File file) throws IOException, NoSuchAlgorithmException {
MessageDigest md = MessageDigest.getInstance("MD5");
try (InputStream is = new FileInputStream(file)) {
byte[] buffer = new byte[8192];
int read;
while ((read = is.read(buffer)) > 0) {
md.update(buffer, 0, read);
}
}
byte[] md5Bytes = md.digest();
StringBuilder sb = new StringBuilder();
for (byte b : md5Bytes) {
sb.append(String.format("%02x", b));
}
return sb.toString();
}
/**
* Format a file size for display
*/
private String formatFileSize(long size) {
if (size < 1024) {
return size + " B";
} else if (size < 1024 * 1024) {
return String.format("%.2f KB", size / 1024.0);
} else if (size < 1024 * 1024 * 1024) {
return String.format("%.2f MB", size / (1024.0 * 1024));
} else {
return String.format("%.2f GB", size / (1024.0 * 1024 * 1024));
}
}
// Backup task
@Data
public static class BackupTask {
private String id;
private String databaseType;
private String backupType;
private Date startTime;
private Date endTime;
private String status;
private String backupPath;
private long fileSize;
private String checksum;
private String metadata;
private String errorMessage;
public BackupTask(String id, String databaseType, String backupType) {
this.id = id;
this.databaseType = databaseType;
this.backupType = backupType;
}
}
// Restore task
@Data
public static class RestoreTask {
private String id;
private String databaseType;
private String backupPath;
private Date startTime;
private Date endTime;
private String status;
private String errorMessage;
public RestoreTask(String id, String databaseType, String backupPath) {
this.id = id;
this.databaseType = databaseType;
this.backupPath = backupPath;
}
}
// Restore result
@Data
public static class RestoreResult {
private boolean success;
private String message;
}
}
```
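Backups are only as trustworthy as their restores, and each BackupTask already records a size and an MD5 checksum, so a cheap pre-restore integrity check is possible. The sketch below is a hypothetical method that could be added to DatabaseBackupService, reusing its calculateMD5 helper:

```java
/**
 * Hypothetical pre-restore check: confirm the file still matches
 * the size and checksum recorded when the backup was taken.
 */
public boolean verifyBackupIntegrity(BackupTask task) throws IOException, NoSuchAlgorithmException {
File file = new File(task.getBackupPath());
if (!file.exists() || file.length() != task.getFileSize()) {
return false; // missing or truncated backup
}
// Same digest routine used when the backup was created
String actual = calculateMD5(file);
return actual.equalsIgnoreCase(task.getChecksum());
}
```

Calling this before restoreMySQLFromBackup (and its Redis/MongoDB counterparts) turns silent backup corruption into an explicit failure.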
Capacity Planning and Scaling
As the business grows, database capacity planning and scaling become critical. Below is an example of a capacity planning service; it leans on a calculateGrowthRate helper that is referenced but not shown, so a sketch of that helper is given first.
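A minimal least-squares sketch of what calculateGrowthRate might look like (it assumes StorageTrend exposes one total-size sample per day, oldest first, via a getTotalSize() accessor; both are assumptions, since that class is not shown; the calculateMemoryGrowthRate variant would be analogous):

```java
/**
 * Hypothetical implementation: ordinary least-squares slope of size over time,
 * returned as bytes of growth per day (assumes one sample per day, oldest first).
 */
private double calculateGrowthRate(List<StorageTrend> trends) {
int n = trends.size();
if (n < 2) {
return 0.0;
}
double meanX = 0, meanY = 0;
for (int i = 0; i < n; i++) {
meanX += i;                            // day index as x
meanY += trends.get(i).getTotalSize(); // assumed accessor
}
meanX /= n;
meanY /= n;
double num = 0, den = 0;
for (int i = 0; i < n; i++) {
num += (i - meanX) * (trends.get(i).getTotalSize() - meanY);
den += (i - meanX) * (i - meanX);
}
return den == 0 ? 0.0 : num / den;
}
```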
```java
@Slf4j
@Service
public class CapacityPlanningService {
@Autowired
private MySQLHealthIndicator mysqlHealthIndicator;
@Autowired
private RedisHealthIndicator redisHealthIndicator;
@Autowired
private MongoDBHealthIndicator mongodbHealthIndicator;
@Autowired
private MetricsRepository metricsRepository;
/**
* Generate a capacity forecast report
*/
public CapacityReport generateCapacityReport() {
CapacityReport report = new CapacityReport();
report.setGeneratedAt(new Date());
// 1. Collect current metrics
MySQLHealthIndicator.MySQLMetrics mysqlMetrics = mysqlHealthIndicator.collectMetrics();
RedisHealthIndicator.RedisMetrics redisMetrics = redisHealthIndicator.collectMetrics();
MongoDBHealthIndicator.MongoDBMetrics mongoMetrics = mongodbHealthIndicator.collectMetrics();
// 2. Analyze MySQL capacity
MySQLCapacity mysqlCapacity = analyzeMySQLCapacity(mysqlMetrics);
report.setMysqlCapacity(mysqlCapacity);
// 3. Analyze Redis capacity
RedisCapacity redisCapacity = analyzeRedisCapacity(redisMetrics);
report.setRedisCapacity(redisCapacity);
// 4. Analyze MongoDB capacity
MongoDBCapacity mongoCapacity = analyzeMongoDBCapacity(mongoMetrics);
report.setMongoCapacity(mongoCapacity);
// 5. Generate overall recommendations
List<String> recommendations = generateRecommendations(
mysqlCapacity, redisCapacity, mongoCapacity);
report.setRecommendations(recommendations);
return report;
}
/**
* Analyze MySQL capacity
*/
private MySQLCapacity analyzeMySQLCapacity(MySQLHealthIndicator.MySQLMetrics metrics) {
MySQLCapacity capacity = new MySQLCapacity();
try {
// Historical growth trend
List<StorageTrend> storageTrends = metricsRepository.getMySQLStorageTrend(90); // last 90 days
// Current tablespace totals
long totalDataSize = 0;
long totalIndexSize = 0;
for (Map.Entry<String, MySQLHealthIndicator.TableSpaceInfo> entry :
metrics.getTableSpaceUsage().entrySet()) {
MySQLHealthIndicator.TableSpaceInfo info = entry.getValue();
totalDataSize += info.getDataSize();
totalIndexSize += info.getIndexSize();
// Flag large tables
if (info.getDataSize() > 1024 * 1024 * 1024) { // above 1GB
TableInfo tableInfo = new TableInfo();
tableInfo.setTableName(entry.getKey());
tableInfo.setSize(info.getDataSize() + info.getIndexSize());
tableInfo.setRows(info.getRows());
// Average row size
if (info.getRows() > 0) {
tableInfo.setAvgRowSize(info.getDataSize() / info.getRows());
}
capacity.getLargeTables().add(tableInfo);
}
// Flag auto-increment IDs approaching exhaustion
// (checks against the signed INT range, the common column type; BIGINT columns need a different bound)
if (info.getAutoIncrement() > 0 &&
info.getAutoIncrement() > Integer.MAX_VALUE * 0.7) { // more than 70% consumed
capacity.getAutoIncrementRisks().add(entry.getKey());
}
}
capacity.setTotalDataSize(totalDataSize);
capacity.setTotalIndexSize(totalIndexSize);
// Project future growth
if (!storageTrends.isEmpty()) {
// Linear regression over the historical trend
double growthRatePerDay = calculateGrowthRate(storageTrends);
capacity.setDailyGrowthRate(growthRatePerDay);
// Projected size after 30/90/180 days
capacity.setProjectedSize30Days(totalDataSize + totalIndexSize + (long)(growthRatePerDay * 30));
capacity.setProjectedSize90Days(totalDataSize + totalIndexSize + (long)(growthRatePerDay * 90));
capacity.setProjectedSize180Days(totalDataSize + totalIndexSize + (long)(growthRatePerDay * 180));
// Days until the disk fills
// Assumes 1TB of total disk space; make this configurable in practice
long totalSpace = 1024L * 1024 * 1024 * 1024;
long availableSpace = totalSpace - totalDataSize - totalIndexSize;
if (growthRatePerDay > 0) {
int daysUntilFull = (int)(availableSpace / growthRatePerDay);
capacity.setDaysUntilFull(daysUntilFull);
}
}
// Recommendations derived from the analysis
List<String> tableRecommendations = new ArrayList<>();
// Large-table recommendations
for (TableInfo table : capacity.getLargeTables()) {
if (table.getRows() > 10000000) { // above 10 million rows
tableRecommendations.add("Consider splitting or archiving table " + table.getTableName());
}
}
capacity.setTableRecommendations(tableRecommendations);
} catch (Exception e) {
log.error("分析MySQL容量失败", e);
capacity.setError("分析失败: " + e.getMessage());
}
return capacity;
}
/**
* Analyze Redis capacity
*/
private RedisCapacity analyzeRedisCapacity(RedisHealthIndicator.RedisMetrics metrics) {
RedisCapacity capacity = new RedisCapacity();
try {
// Current memory usage
capacity.setUsedMemory(metrics.getUsedMemory());
capacity.setUsedMemoryRss(metrics.getUsedMemoryRss());
capacity.setMemFragmentationRatio(metrics.getMemFragmentationRatio());
// Historical memory trend
List<MemoryTrend> memoryTrends = metricsRepository.getRedisMemoryTrend(30); // 过去30天
// 分析键空间
int totalKeys = 0;
for (Map.Entry<String, RedisHealthIndicator.KeyspaceStats> entry :
metrics.getKeyspaceStats().entrySet()) {
RedisHealthIndicator.KeyspaceStats stats = entry.getValue();
totalKeys += stats.getKeys();
// Per-database statistics
DatabaseInfo dbInfo = new DatabaseInfo();
dbInfo.setDatabase(entry.getKey());
dbInfo.setKeys(stats.getKeys());
dbInfo.setExpires(stats.getExpires());
dbInfo.setAvgTtl(stats.getAvgTtl());
capacity.getDatabases().add(dbInfo);
}
capacity.setTotalKeys(totalKeys);
// Hot command analysis
List<CommandUsage> commands = new ArrayList<>();
if (metrics.getCommandStats() != null) {
metrics.getCommandStats().entrySet().stream()
.sorted(Map.Entry.<String, Long>comparingByValue().reversed())
.limit(10) // top 10 commands
.forEach(entry -> {
CommandUsage usage = new CommandUsage();
usage.setCommand(entry.getKey());
usage.setCalls(entry.getValue());
commands.add(usage);
});
}
capacity.setTopCommands(commands);
// Project future growth
if (!memoryTrends.isEmpty()) {
// Linear regression over the memory trend
double memoryGrowthPerDay = calculateMemoryGrowthRate(memoryTrends);
capacity.setDailyMemoryGrowthRate(memoryGrowthPerDay);
// Projected memory usage after 30/90 days
capacity.setProjectedMemory30Days(metrics.getUsedMemory() + (long)(memoryGrowthPerDay * 30));
capacity.setProjectedMemory90Days(metrics.getUsedMemory() + (long)(memoryGrowthPerDay * 90));
// Assumes a 10GB maxmemory limit; make this configurable in practice
long maxMemory = 10L * 1024 * 1024 * 1024;
long availableMemory = maxMemory - metrics.getUsedMemory();
if (memoryGrowthPerDay > 0) {
int daysUntilFull = (int)(availableMemory / memoryGrowthPerDay);
capacity.setDaysUntilMemoryFull(daysUntilFull);
}
}
// 生成Redis建议
List<String> redisRecommendations = new ArrayList<>();
// 内存碎片率建议
if (metrics.getMemFragmentationRatio() > 1.5) {
redisRecommendations.add("Redis内存碎片率较高 (" + String.format("%.2f", metrics.getMemFragmentationRatio()) +
"),考虑使用MEMORY PURGE命令或重启Redis服务");
}
// 键数量建议
if (totalKeys > 10000000) {
redisRecommendations.add("Redis键总数较多 (" + totalKeys + "),考虑使用键过期策略或定期清理不活跃的键");
}
// 热门命令建议
if (!commands.isEmpty() && commands.get(0).getCalls() > 1000000) {
redisRecommendations.add("命令 " + commands.get(0).getCommand() + " 使用频率很高,考虑优化相关代码或使用批处理");
}
capacity.setRecommendations(redisRecommendations);
} catch (Exception e) {
log.error("分析Redis容量失败", e);
capacity.setError("分析失败: " + e.getMessage());
}
return capacity;
}
/**
* 分析MongoDB容量
*/
private MongoDBCapacity analyzeMongoDBCapacity(MongoDBHealthIndicator.MongoDBMetrics metrics) {
MongoDBCapacity capacity = new MongoDBCapacity();
try {
// 设置当前资源使用情况
capacity.setResidentMemory(metrics.getResident());
capacity.setVirtualMemory(metrics.getVirtual());
// 分析集合数据
long totalCollectionSize = 0;
long totalIndexSize = 0;
for (Map.Entry<String, MongoDBHealthIndicator.CollectionStats> entry :
metrics.getCollectionStats().entrySet()) {
MongoDBHealthIndicator.CollectionStats stats = entry.getValue();
totalCollectionSize += stats.getSize();
totalIndexSize += stats.getTotalIndexSize();
// 记录大集合
if (stats.getSize() > 1024 * 1024 * 1024) { // 超过1GB
CollectionInfo colInfo = new CollectionInfo();
colInfo.setCollectionName(entry.getKey());
colInfo.setSize(stats.getSize());
colInfo.setIndexSize(stats.getTotalIndexSize());
colInfo.setDocumentCount(stats.getCount());
if (stats.getCount() > 0) {
colInfo.setAvgDocumentSize(stats.getSize() / stats.getCount());
}
// 分析索引使用效率
if (stats.getTotalIndexSize() > stats.getSize() * 0.5) {
colInfo.setIndexNotes("索引大小占数据的" +
String.format("%.1f%%", (double)stats.getTotalIndexSize() / stats.getSize() * 100) +
",考虑优化索引");
}
capacity.getLargeCollections().add(colInfo);
}
}
capacity.setTotalDataSize(totalCollectionSize);
capacity.setTotalIndexSize(totalIndexSize);
// 获取历史数据增长趋势
List<StorageTrend> storageTrends = metricsRepository.getMongoDBStorageTrend(60); // 过去60天
// 预测未来增长
if (!storageTrends.isEmpty()) {
// 使用线性回归预测增长趋势
double growthRatePerDay = calculateGrowthRate(storageTrends);
capacity.setDailyGrowthRate(growthRatePerDay);
// 预测30/90/180天后的大小
capacity.setProjectedSize30Days(totalCollectionSize + totalIndexSize + (long)(growthRatePerDay * 30));
capacity.setProjectedSize90Days(totalCollectionSize + totalIndexSize + (long)(growthRatePerDay * 90));
capacity.setProjectedSize180Days(totalCollectionSize + totalIndexSize + (long)(growthRatePerDay * 180));
// 假设磁盘总空间为2TB(生产环境应从系统监控或配置中读取实际容量)
long totalSpace = 2L * 1024 * 1024 * 1024 * 1024;
long availableSpace = totalSpace - totalCollectionSize - totalIndexSize;
if (growthRatePerDay > 0) {
int daysUntilFull = (int)(availableSpace / growthRatePerDay);
capacity.setDaysUntilFull(daysUntilFull);
}
}
// 操作统计分析(注意:MongoDB的opcounters是自服务启动以来的累计值,严格的"日"增量需与上一日采样做差)
capacity.setDailyInserts(metrics.getInsertCount());
capacity.setDailyQueries(metrics.getQueryCount());
capacity.setDailyUpdates(metrics.getUpdateCount());
capacity.setDailyDeletes(metrics.getDeleteCount());
// 生成MongoDB建议
List<String> mongoRecommendations = new ArrayList<>();
// 大集合建议
for (CollectionInfo coll : capacity.getLargeCollections()) {
if (coll.getDocumentCount() > 10000000) { // 超过1千万文档
mongoRecommendations.add("集合 " + coll.getCollectionName() + " 文档数量较多,考虑使用分片或时间分区策略");
}
if (coll.getIndexNotes() != null) {
mongoRecommendations.add("集合 " + coll.getCollectionName() + ": " + coll.getIndexNotes());
}
}
// 内存使用建议
if (metrics.getResident() > 24000) { // 超过24GB内存
mongoRecommendations.add("MongoDB占用内存较高 (" + metrics.getResident() + " MB),考虑优化查询或增加服务器内存");
}
capacity.setRecommendations(mongoRecommendations);
} catch (Exception e) {
log.error("分析MongoDB容量失败", e);
capacity.setError("分析失败: " + e.getMessage());
}
return capacity;
}
/**
* 计算数据增长率(每天,单位:字节)
*/
private double calculateGrowthRate(List<StorageTrend> trends) {
// 使用简单线性回归(假设趋势点按天等间隔采样)
int n = trends.size();
if (n < 2) {
return 0; // 样本不足两个时斜率无意义,且下方分母为0
}
// x坐标表示天数(从0开始),y坐标表示存储大小
double sumX = 0;
double sumY = 0;
double sumXY = 0;
double sumXX = 0;
for (int i = 0; i < n; i++) {
double x = i;
double y = trends.get(i).getSize();
sumX += x;
sumY += y;
sumXY += x * y;
sumXX += x * x;
}
// 计算斜率(每天增长量)
return (n * sumXY - sumX * sumY) / (n * sumXX - sumX * sumX);
}
/**
* 计算内存增长率(每天,单位:字节)
*/
private double calculateMemoryGrowthRate(List<MemoryTrend> trends) {
// 使用简单线性回归(假设趋势点按天等间隔采样)
int n = trends.size();
if (n < 2) {
return 0; // 样本不足两个时斜率无意义,且下方分母为0
}
// x坐标表示天数(从0开始),y坐标表示内存大小
double sumX = 0;
double sumY = 0;
double sumXY = 0;
double sumXX = 0;
for (int i = 0; i < n; i++) {
double x = i;
double y = trends.get(i).getMemory();
sumX += x;
sumY += y;
sumXY += x * y;
sumXX += x * x;
}
// 计算斜率(每天增长量)
return (n * sumXY - sumX * sumY) / (n * sumXX - sumX * sumX);
}
/**
* 生成综合建议
*/
private List<String> generateRecommendations(
MySQLCapacity mysqlCapacity,
RedisCapacity redisCapacity,
MongoDBCapacity mongoCapacity) {
List<String> recommendations = new ArrayList<>();
// 添加MySQL建议
if (mysqlCapacity.getTableRecommendations() != null) {
recommendations.addAll(mysqlCapacity.getTableRecommendations());
}
if (mysqlCapacity.getDaysUntilFull() != null && mysqlCapacity.getDaysUntilFull() < 60) {
recommendations.add("MySQL存储空间预计将在" + mysqlCapacity.getDaysUntilFull() +
"天内耗尽,建议扩容或清理数据");
}
// 添加Redis建议
if (redisCapacity.getRecommendations() != null) {
recommendations.addAll(redisCapacity.getRecommendations());
}
if (redisCapacity.getDaysUntilMemoryFull() != null && redisCapacity.getDaysUntilMemoryFull() < 30) {
recommendations.add("Redis内存预计将在" + redisCapacity.getDaysUntilMemoryFull() +
"天内耗尽,建议增加内存或清理数据");
}
// 添加MongoDB建议
if (mongoCapacity.getRecommendations() != null) {
recommendations.addAll(mongoCapacity.getRecommendations());
}
if (mongoCapacity.getDaysUntilFull() != null && mongoCapacity.getDaysUntilFull() < 90) {
recommendations.add("MongoDB存储空间预计将在" + mongoCapacity.getDaysUntilFull() +
"天内耗尽,建议扩容或清理数据");
}
// 综合建议(阈值使用long字面量,避免int乘法在此处溢出)
if (mysqlCapacity.getTotalDataSize() > 100L * 1024 * 1024 * 1024 &&
mongoCapacity.getTotalDataSize() < 10L * 1024 * 1024 * 1024) {
recommendations.add("考虑将MySQL中的历史数据或日志数据迁移到MongoDB,优化混合架构");
}
if (redisCapacity.getUsedMemory() > 4L * 1024 * 1024 * 1024) {
recommendations.add("Redis内存使用较大,考虑实施Redis集群或增加缓存过期策略");
}
return recommendations;
}
// 容量报告类
@Data
public static class CapacityReport {
private Date generatedAt;
private MySQLCapacity mysqlCapacity;
private RedisCapacity redisCapacity;
private MongoDBCapacity mongoCapacity;
private List<String> recommendations;
}
// MySQL容量分析结果
@Data
public static class MySQLCapacity {
private long totalDataSize;
private long totalIndexSize;
private double dailyGrowthRate;
private long projectedSize30Days;
private long projectedSize90Days;
private long projectedSize180Days;
private Integer daysUntilFull;
private List<TableInfo> largeTables = new ArrayList<>();
private List<String> autoIncrementRisks = new ArrayList<>();
private List<String> tableRecommendations = new ArrayList<>();
private String error;
}
// Redis容量分析结果
@Data
public static class RedisCapacity {
private long usedMemory;
private long usedMemoryRss;
private double memFragmentationRatio;
private long totalKeys;
private double dailyMemoryGrowthRate;
private long projectedMemory30Days;
private long projectedMemory90Days;
private Integer daysUntilMemoryFull;
private List<DatabaseInfo> databases = new ArrayList<>();
private List<CommandUsage> topCommands = new ArrayList<>();
private List<String> recommendations = new ArrayList<>();
private String error;
}
// MongoDB容量分析结果
@Data
public static class MongoDBCapacity {
private long residentMemory;
private long virtualMemory;
private long totalDataSize;
private long totalIndexSize;
private double dailyGrowthRate;
private long projectedSize30Days;
private long projectedSize90Days;
private long projectedSize180Days;
private Integer daysUntilFull;
private int dailyInserts;
private int dailyQueries;
private int dailyUpdates;
private int dailyDeletes;
private List<CollectionInfo> largeCollections = new ArrayList<>();
private List<String> recommendations = new ArrayList<>();
private String error;
}
// 表信息
@Data
public static class TableInfo {
private String tableName;
private long size;
private long rows;
private long avgRowSize;
}
// 数据库信息(Redis)
@Data
public static class DatabaseInfo {
private String database;
private long keys;
private long expires;
private long avgTtl;
}
// 命令使用情况(Redis)
@Data
public static class CommandUsage {
private String command;
private long calls;
}
// 集合信息(MongoDB)
@Data
public static class CollectionInfo {
private String collectionName;
private long size;
private long indexSize;
private long documentCount;
private long avgDocumentSize;
private String indexNotes;
}
// 存储趋势
@Data
public static class StorageTrend {
private Date date;
private long size;
}
// 内存趋势
@Data
public static class MemoryTrend {
private Date date;
private long memory;
}
}
通过这些监控与维护工具,我们可以确保混合架构系统的健康运行,及时发现并解决潜在问题,同时为未来的扩展提供有力支持。
八、未来展望
随着技术的飞速发展,数据库领域正经历着前所未有的变革。本章将探讨混合架构未来的发展方向,帮助你把握技术趋势,为系统演进做好准备。
新兴数据库技术趋势
1. 时序数据库的崛起
随着物联网、监控、金融分析等场景的爆发,专门处理时间序列数据的数据库正成为混合架构中的重要一环。
java
// 时序数据示例:使用InfluxDB存储监控指标
@Service
public class TimeSeriesMetricService {
@Autowired
private InfluxDBClient influxDBClient;
/**
* 记录系统指标
*/
public void recordSystemMetrics(SystemMetrics metrics) {
Point point = Point.measurement("system_metrics")
.addTag("host", metrics.getHostname())
.addTag("environment", metrics.getEnvironment())
.addField("cpu_usage", metrics.getCpuUsage())
.addField("memory_usage", metrics.getMemoryUsage())
.addField("disk_usage", metrics.getDiskUsage())
.addField("network_in", metrics.getNetworkIn())
.addField("network_out", metrics.getNetworkOut())
.time(Instant.now(), WritePrecision.MS);
influxDBClient.getWriteApiBlocking().writePoint(point);
}
/**
* 查询CPU使用率趋势
*/
public List<MetricPoint> getCpuUsageTrend(String hostname, String timeRange) {
String query = "from(bucket:\"monitoring\")\n" +
" |> range(start: -" + timeRange + ")\n" +
" |> filter(fn: (r) => r._measurement == \"system_metrics\" and r.host == \"" + hostname + "\")\n" +
" |> filter(fn: (r) => r._field == \"cpu_usage\")\n" +
" |> aggregateWindow(every: 5m, fn: mean, createEmpty: false)\n" +
" |> yield(name: \"mean\")";
List<MetricPoint> results = new ArrayList<>();
influxDBClient.getQueryApi().query(query, (cancellable, record) -> {
MetricPoint point = new MetricPoint();
point.setTimestamp(record.getTime());
point.setValue(record.getValue() instanceof Number ? ((Number) record.getValue()).doubleValue() : 0);
results.add(point);
});
return results;
}
/**
* 查询系统异常事件
*/
public List<SystemAlert> getSystemAlerts(String timeRange, double thresholdCpu, double thresholdMemory) {
String query = "from(bucket:\"monitoring\")\n" +
" |> range(start: -" + timeRange + ")\n" +
" |> filter(fn: (r) => r._measurement == \"system_metrics\")\n" +
" |> filter(fn: (r) => (r._field == \"cpu_usage\" and r._value > " + thresholdCpu + ") or " +
" (r._field == \"memory_usage\" and r._value > " + thresholdMemory + "))\n" +
" |> yield()";
List<SystemAlert> alerts = new ArrayList<>();
influxDBClient.getQueryApi().query(query, (cancellable, record) -> {
SystemAlert alert = new SystemAlert();
alert.setTimestamp(record.getTime());
alert.setHostname(record.getValueByKey("host").toString());
alert.setMetric(record.getField());
alert.setValue(((Number) record.getValue()).doubleValue());
alerts.add(alert);
});
return alerts;
}
@Data
public static class SystemMetrics {
private String hostname;
private String environment;
private double cpuUsage;
private double memoryUsage;
private double diskUsage;
private long networkIn;
private long networkOut;
}
@Data
public static class MetricPoint {
private Instant timestamp;
private double value;
}
@Data
public static class SystemAlert {
private Instant timestamp;
private String hostname;
private String metric;
private double value;
}
}
时序数据库的特点及优势:
- 高效写入:优化了针对时间戳索引的写入性能,可处理高频数据采集
- 自动数据老化:内置数据保留策略,自动管理冷热数据
- 聚合查询优化:针对时间区间的聚合操作(如平均值、最大值、采样等)性能极佳
- 压缩率高:专为时间序列数据设计的压缩算法,存储效率高
在混合架构中,时序数据库可以与传统数据库形成互补:MySQL存储核心业务数据,时序数据库存储监控指标、用户行为轨迹、IoT设备数据等。
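为了更直观地体现这种互补,下面给出一个极简的写入路由示意(orders表结构与字段均为假设,InfluxDBClient与JdbcTemplate沿用上文的用法):核心订单数据走MySQL事务,高频设备指标走时序库。
java
@Service
public class HybridWriteRouter {
    @Autowired
    private JdbcTemplate jdbcTemplate;
    @Autowired
    private InfluxDBClient influxDBClient;

    /**
     * 核心业务数据:写入MySQL,依赖事务保证一致性(orders表结构为示意)
     */
    @Transactional
    public void recordOrder(long orderId, long userId, double amount) {
        jdbcTemplate.update(
                "INSERT INTO orders (id, user_id, amount, created_at) VALUES (?, ?, ?, NOW())",
                orderId, userId, amount);
    }

    /**
     * 高频设备指标:写入InfluxDB,利用其高吞吐写入与自动数据老化能力
     */
    public void recordDeviceMetric(String deviceId, double temperature) {
        Point point = Point.measurement("device_metrics")
                .addTag("device_id", deviceId)
                .addField("temperature", temperature)
                .time(Instant.now(), WritePrecision.MS);
        influxDBClient.getWriteApiBlocking().writePoint(point);
    }
}
这样,事务型数据与海量指标数据各走各的存储,互不挤占资源。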
2. 向量数据库与AI集成
随着人工智能技术的普及,特别是向量嵌入技术的广泛应用,向量数据库正成为混合架构中处理相似性搜索的关键组件。
java
// 向量数据库示例:使用Milvus存储和搜索商品特征向量
@Service
public class ProductVectorService {
@Autowired
private MilvusClient milvusClient;
@Autowired
private ProductRepository productRepository;
@Autowired
private ModelService modelService;
private final String COLLECTION_NAME = "product_embeddings";
/**
* 生成并存储商品的特征向量
*/
public void generateAndStoreProductVector(Long productId) {
// 1. 从传统数据库获取商品信息
Product product = productRepository.findById(productId)
.orElseThrow(() -> new EntityNotFoundException("商品不存在"));
// 2. 使用模型服务生成特征向量
String productText = product.getName() + " " + product.getDescription();
float[] embedding = modelService.generateEmbedding(productText);
// 3. 存储向量到Milvus
InsertParam insertParam = new InsertParam.Builder(COLLECTION_NAME)
.withFields(List.of(
LongFieldData.newBuilder()
.withName("product_id")
.withData(List.of(productId))
.build(),
FloatVectorFieldData.newBuilder()
.withName("embedding")
.withData(List.of(embedding))
.build()
))
.build();
milvusClient.insert(insertParam);
// 4. 记录操作日志
log.info("已生成并存储商品向量: {}", productId);
}
/**
* 基于相似度搜索相关商品
*/
public List<SimilarProduct> findSimilarProducts(Long productId, int limit) {
// 1. 获取目标商品的向量
QueryParam queryParam = new QueryParam.Builder(COLLECTION_NAME)
.withOutFields(List.of("embedding"))
.withExpr("product_id == " + productId)
.build();
QueryResult queryResult = milvusClient.query(queryParam);
if (queryResult.getFieldsData().isEmpty()) {
throw new EntityNotFoundException("商品向量不存在");
}
FloatVectorFieldData vectorField = (FloatVectorFieldData) queryResult.getFieldsData().get(1);
float[] targetEmbedding = vectorField.getData().get(0);
// 2. 执行向量相似度搜索
SearchParam searchParam = new SearchParam.Builder(COLLECTION_NAME)
.withTopK(limit + 1) // 多查一个,因为可能包含目标商品自身
.withMetricType(MetricType.L2)
.withVectors(List.of(targetEmbedding))
.withVectorFieldName("embedding")
.withOutFields(List.of("product_id"))
.build();
SearchResult searchResult = milvusClient.search(searchParam);
// 3. 处理搜索结果
List<SimilarProduct> similarProducts = new ArrayList<>();
List<QueryResultsWrapper> resultsWrappers = searchResult.getResults();
for (int i = 0; i < resultsWrappers.size(); i++) {
QueryResultsWrapper wrapper = resultsWrappers.get(i);
Long id = ((LongFieldData)wrapper.getFieldData("product_id")).getData().get(0);
float distance = wrapper.getDistance();
// 排除目标商品自身
if (!id.equals(productId)) {
// 从MySQL获取完整商品信息
Product product = productRepository.findById(id).orElse(null);
if (product != null) {
SimilarProduct similarProduct = new SimilarProduct();
similarProduct.setProduct(product);
similarProduct.setSimilarity(1.0f - distance); // 粗略地将L2距离折算为相似度,仅在向量归一化后才近似成立
similarProducts.add(similarProduct);
}
}
}
return similarProducts;
}
/**
* 基于文本搜索相关商品
*/
public List<SimilarProduct> searchProductsByText(String searchText, int limit) {
// 1. 生成搜索文本的向量表示
float[] searchEmbedding = modelService.generateEmbedding(searchText);
// 2. 执行向量相似度搜索
SearchParam searchParam = new SearchParam.Builder(COLLECTION_NAME)
.withTopK(limit)
.withMetricType(MetricType.L2)
.withVectors(List.of(searchEmbedding))
.withVectorFieldName("embedding")
.withOutFields(List.of("product_id"))
.build();
SearchResult searchResult = milvusClient.search(searchParam);
// 3. 处理搜索结果
List<SimilarProduct> similarProducts = new ArrayList<>();
List<QueryResultsWrapper> resultsWrappers = searchResult.getResults();
for (int i = 0; i < resultsWrappers.size(); i++) {
QueryResultsWrapper wrapper = resultsWrappers.get(i);
Long id = ((LongFieldData)wrapper.getFieldData("product_id")).getData().get(0);
float distance = wrapper.getDistance();
// 从MySQL获取完整商品信息
Product product = productRepository.findById(id).orElse(null);
if (product != null) {
SimilarProduct similarProduct = new SimilarProduct();
similarProduct.setProduct(product);
similarProduct.setSimilarity(1.0f - distance); // 粗略地将L2距离折算为相似度,仅在向量归一化后才近似成立
similarProducts.add(similarProduct);
}
}
return similarProducts;
}
@Data
public static class SimilarProduct {
private Product product;
private float similarity;
}
}
向量数据库的特点及应用场景:
- 高维向量索引:支持高效的相似度搜索,如KNN(K最近邻)查询
- 语义搜索:结合大语言模型,实现基于语义而非关键词的搜索
- 推荐系统:基于内容相似度的商品推荐、文章推荐等
- 图像检索:相似图片查找、人脸识别等
- 异常检测:识别异常行为模式
在混合架构中,向量数据库通常与传统数据库协同工作:MySQL存储结构化业务数据,向量数据库存储高维特征向量并提供相似度搜索能力。
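一个容易踩坑的细节:上文示例用 1.0f - distance 把L2距离折算成相似度,这种折算只有在向量做过归一化后才近似成立。更稳妥的做法是入库前统一做L2归一化,并将检索度量改为内积(MetricType.IP),此时内积即余弦相似度。下面是一个与具体SDK无关的归一化工具示意:
java
public final class VectorUtils {
    private VectorUtils() {}

    /**
     * L2归一化:使向量模长为1。
     * 归一化后,两向量的内积即余弦相似度,取值范围[-1, 1]。
     */
    public static float[] l2Normalize(float[] vector) {
        double sumOfSquares = 0;
        for (float v : vector) {
            sumOfSquares += (double) v * v;
        }
        double norm = Math.sqrt(sumOfSquares);
        if (norm == 0) {
            return vector.clone(); // 零向量无法归一化,原样返回
        }
        float[] normalized = new float[vector.length];
        for (int i = 0; i < vector.length; i++) {
            normalized[i] = (float) (vector[i] / norm);
        }
        return normalized;
    }
}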
3. 图数据库的应用扩展
随着社交网络、知识图谱、复杂关系分析需求的增长,图数据库在混合架构中的应用也在不断扩展。
java
// 图数据库示例:使用Neo4j构建用户社交网络
@Service
public class SocialGraphService {
@Autowired
private Neo4jClient neo4jClient;
@Autowired
private UserRepository userRepository;
/**
* 创建或更新用户节点
*/
public void createOrUpdateUserNode(Long userId) {
// 从MySQL获取用户信息
User user = userRepository.findById(userId)
.orElseThrow(() -> new EntityNotFoundException("用户不存在"));
// 创建或更新Neo4j中的用户节点
neo4jClient.query(
"MERGE (u:User {id: $id}) " +
"ON CREATE SET u.username = $username, u.avatar = $avatar, u.createdAt = $createdAt " +
"ON MATCH SET u.username = $username, u.avatar = $avatar"
)
.bind(user.getId()).to("id")
.bind(user.getUsername()).to("username")
.bind(user.getAvatar()).to("avatar")
.bind(user.getCreatedAt().toInstant()).to("createdAt")
.run();
}
/**
* 添加关注关系
*/
public void addFollowRelationship(Long followerId, Long followeeId) {
// 在Neo4j中创建关注关系
neo4jClient.query(
"MATCH (follower:User {id: $followerId}), (followee:User {id: $followeeId}) " +
"MERGE (follower)-[r:FOLLOWS]->(followee) " +
"ON CREATE SET r.createdAt = $now"
)
.bind(followerId).to("followerId")
.bind(followeeId).to("followeeId")
.bind(Instant.now()).to("now")
.run();
}
/**
* 删除关注关系
*/
public void removeFollowRelationship(Long followerId, Long followeeId) {
// 在Neo4j中删除关注关系
neo4jClient.query(
"MATCH (follower:User {id: $followerId})-[r:FOLLOWS]->(followee:User {id: $followeeId}) " +
"DELETE r"
)
.bind(followerId).to("followerId")
.bind(followeeId).to("followeeId")
.run();
}
/**
* 查找共同关注的用户
*/
public List<CommonFollowee> findCommonFollowees(Long userId1, Long userId2) {
// 查询两个用户共同关注的人
return neo4jClient.query(
"MATCH (u1:User {id: $userId1})-[:FOLLOWS]->(common:User)<-[:FOLLOWS]-(u2:User {id: $userId2}) " +
"RETURN common.id AS id, common.username AS username, common.avatar AS avatar"
)
.bind(userId1).to("userId1")
.bind(userId2).to("userId2")
.fetchAs(CommonFollowee.class)
.mappedBy((TypeSystem t, Record r) -> {
CommonFollowee followee = new CommonFollowee();
followee.setId(r.get("id").asLong());
followee.setUsername(r.get("username").asString());
followee.setAvatar(r.get("avatar").asString());
return followee;
})
.all();
}
/**
* 查找推荐关注的用户(朋友的朋友)
*/
public List<RecommendedUser> findRecommendedUsers(Long userId, int limit) {
// 查询用户关注的人再关注的人(二度关系),但排除已经关注的人
return neo4jClient.query(
"MATCH (user:User {id: $userId})-[:FOLLOWS]->(friend:User)-[:FOLLOWS]->(fof:User) " +
"WHERE NOT (user)-[:FOLLOWS]->(fof) AND user <> fof " +
"WITH fof, count(friend) AS commonFriends " +
"ORDER BY commonFriends DESC " +
"LIMIT $limit " +
"RETURN fof.id AS id, fof.username AS username, fof.avatar AS avatar, commonFriends"
)
.bind(userId).to("userId")
.bind(limit).to("limit")
.fetchAs(RecommendedUser.class)
.mappedBy((TypeSystem t, Record r) -> {
RecommendedUser recommended = new RecommendedUser();
recommended.setId(r.get("id").asLong());
recommended.setUsername(r.get("username").asString());
recommended.setAvatar(r.get("avatar").asString());
recommended.setCommonFriends(r.get("commonFriends").asInt());
return recommended;
})
.all();
}
/**
* 查找最短路径(社交距离)
*/
public List<PathUser> findShortestPath(Long fromUserId, Long toUserId) {
return neo4jClient.query(
"MATCH path = shortestPath((from:User {id: $fromUserId})-[:FOLLOWS*]-(to:User {id: $toUserId})) " +
"UNWIND nodes(path) AS user " +
"RETURN user.id AS id, user.username AS username, user.avatar AS avatar " +
"ORDER BY id"
)
.bind(fromUserId).to("fromUserId")
.bind(toUserId).to("toUserId")
.fetchAs(PathUser.class)
.mappedBy((TypeSystem t, Record r) -> {
PathUser pathUser = new PathUser();
pathUser.setId(r.get("id").asLong());
pathUser.setUsername(r.get("username").asString());
pathUser.setAvatar(r.get("avatar").asString());
return pathUser;
})
.all();
}
/**
* 计算用户影响力(基于PageRank算法)
*/
public List<InfluentialUser> findMostInfluentialUsers(int limit) {
// 使用图算法计算用户影响力
// 注:需预先通过gds.graph.project(旧版为gds.graph.create)创建名为'user_graph'的内存图投影
neo4jClient.query(
"CALL gds.pageRank.write('user_graph', { " +
" writeProperty: 'pagerank', " +
" maxIterations: 20, " +
" dampingFactor: 0.85 " +
"})"
).run();
// 查询影响力最高的用户
return neo4jClient.query(
"MATCH (u:User) " +
"WHERE u.pagerank IS NOT NULL " +
"RETURN u.id AS id, u.username AS username, u.avatar AS avatar, u.pagerank AS influence " +
"ORDER BY u.pagerank DESC " +
"LIMIT $limit"
)
.bind(limit).to("limit")
.fetchAs(InfluentialUser.class)
.mappedBy((TypeSystem t, Record r) -> {
InfluentialUser user = new InfluentialUser();
user.setId(r.get("id").asLong());
user.setUsername(r.get("username").asString());
user.setAvatar(r.get("avatar").asString());
user.setInfluence(r.get("influence").asDouble());
return user;
})
.all();
}
@Data
public static class CommonFollowee {
private Long id;
private String username;
private String avatar;
}
@Data
public static class RecommendedUser {
private Long id;
private String username;
private String avatar;
private int commonFriends;
}
@Data
public static class PathUser {
private Long id;
private String username;
private String avatar;
}
@Data
public static class InfluentialUser {
private Long id;
private String username;
private String avatar;
private double influence;
}
}
图数据库的优势和应用场景:
- 关系优先:原生支持复杂的关联关系查询,无需多表连接
- 路径查询:高效找出实体间的最短路径、所有路径等
- 深度关系分析:支持朋友的朋友(N度关系)等复杂查询
- 图算法支持:内置社区发现、中心性分析、路径分析等算法
- 应用场景:社交网络分析、知识图谱、推荐系统、欺诈检测等
在混合架构中,图数据库通常与关系型数据库协同工作:MySQL存储用户、内容等基础实体数据,图数据库存储和查询实体间的复杂关系。
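一个补充的实践细节:上文所有MERGE/MATCH都通过u.id定位节点,若该属性没有索引,每次匹配都会触发全标签扫描。可以在应用启动时确保索引存在(以下初始化组件为示意,Cypher语法基于Neo4j 4.x):
java
@Component
public class SocialGraphSchemaInitializer {
    @Autowired
    private Neo4jClient neo4jClient;

    /**
     * 确保User.id上存在索引,加速按id的MERGE/MATCH定位
     */
    @PostConstruct
    public void ensureIndexes() {
        neo4jClient.query(
                "CREATE INDEX user_id_index IF NOT EXISTS FOR (u:User) ON (u.id)"
        ).run();
    }
}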
云原生数据库解决方案
云计算的普及正在深刻改变数据库的部署和管理方式。云原生数据库具有以下特点:
1. 全托管服务
云厂商提供的全托管数据库服务(如AWS RDS、Azure Cosmos DB、GCP Cloud Spanner等)正在降低数据库运维的复杂性。
java
// 使用AWS托管服务的配置示例
@Configuration
public class AWSDataConfig {
@Value("${aws.region}")
private String awsRegion;
@Value("${aws.accessKey}")
private String accessKey;
@Value("${aws.secretKey}")
private String secretKey;
@Bean
public AmazonDynamoDB dynamoDBClient() {
AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
return AmazonDynamoDBClientBuilder.standard()
.withCredentials(new AWSStaticCredentialsProvider(credentials))
.withRegion(awsRegion)
.build();
}
@Bean
public AmazonElastiCache elastiCacheClient() {
AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
return AmazonElastiCacheClientBuilder.standard()
.withCredentials(new AWSStaticCredentialsProvider(credentials))
.withRegion(awsRegion)
.build();
}
@Bean
public AmazonRDS rdsClient() {
AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
return AmazonRDSClientBuilder.standard()
.withCredentials(new AWSStaticCredentialsProvider(credentials))
.withRegion(awsRegion)
.build();
}
@Bean
public DynamoDBMapper dynamoDBMapper() {
return new DynamoDBMapper(dynamoDBClient());
}
}
// 使用DynamoDB存储会话数据示例
@DynamoDBTable(tableName = "UserSessions")
public class UserSession {
private String sessionId;
private Long userId;
private String username;
private Map<String, String> attributes;
private Date creationTime;
private Date expirationTime;
@DynamoDBHashKey
public String getSessionId() {
return sessionId;
}
public void setSessionId(String sessionId) {
this.sessionId = sessionId;
}
@DynamoDBAttribute
public Long getUserId() {
return userId;
}
public void setUserId(Long userId) {
this.userId = userId;
}
@DynamoDBAttribute
public String getUsername() {
return username;
}
public void setUsername(String username) {
this.username = username;
}
@DynamoDBAttribute
public Map<String, String> getAttributes() {
return attributes;
}
public void setAttributes(Map<String, String> attributes) {
this.attributes = attributes;
}
@DynamoDBAttribute
public Date getCreationTime() {
return creationTime;
}
public void setCreationTime(Date creationTime) {
this.creationTime = creationTime;
}
@DynamoDBAttribute
public Date getExpirationTime() {
return expirationTime;
}
public void setExpirationTime(Date expirationTime) {
this.expirationTime = expirationTime;
}
}
@Service
public class CloudSessionService {
@Autowired
private DynamoDBMapper dynamoDBMapper;
/**
* 创建用户会话
*/
public UserSession createSession(Long userId, String username) {
UserSession session = new UserSession();
session.setSessionId(UUID.randomUUID().toString());
session.setUserId(userId);
session.setUsername(username);
session.setAttributes(new HashMap<>());
session.setCreationTime(new Date());
// 设置24小时后过期(注:若要DynamoDB自动删除过期项,还需在表上启用TTL,且TTL属性须为epoch秒数的Number类型)
Calendar cal = Calendar.getInstance();
cal.add(Calendar.HOUR, 24);
session.setExpirationTime(cal.getTime());
dynamoDBMapper.save(session);
return session;
}
/**
* 获取会话
*/
public UserSession getSession(String sessionId) {
return dynamoDBMapper.load(UserSession.class, sessionId);
}
/**
* 更新会话属性
*/
public void updateSessionAttribute(String sessionId, String key, String value) {
UserSession session = getSession(sessionId);
if (session != null) {
if (session.getAttributes() == null) {
session.setAttributes(new HashMap<>());
}
session.getAttributes().put(key, value);
dynamoDBMapper.save(session);
}
}
/**
* 删除会话
*/
public void deleteSession(String sessionId) {
UserSession session = new UserSession();
session.setSessionId(sessionId);
dynamoDBMapper.delete(session);
}
}
全托管服务的优势:
- 自动化运维:自动备份、软件升级、安全补丁等
- 按需扩展:根据负载自动扩展或缩减资源
- 高可用性:内置多可用区复制、故障自动恢复
- 成本优化:按实际使用付费,避免资源浪费
- 监控集成:与云平台监控、告警、日志系统集成
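托管服务的运维能力同样可以在应用代码中按需触发。例如,基于上文配置的AmazonRDS客户端,可以在重大变更前主动创建一次手动快照(快照命名规则为示意):
java
@Service
public class RDSMaintenanceService {
    @Autowired
    private AmazonRDS rdsClient;

    /**
     * 在重大变更(如大规模数据迁移)前触发手动快照,作为自动备份之外的保险
     */
    public String createManualSnapshot(String dbInstanceId) {
        String snapshotId = dbInstanceId + "-manual-" + System.currentTimeMillis();
        CreateDBSnapshotRequest request = new CreateDBSnapshotRequest()
                .withDBInstanceIdentifier(dbInstanceId)
                .withDBSnapshotIdentifier(snapshotId);
        rdsClient.createDBSnapshot(request);
        return snapshotId;
    }
}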
2. Kubernetes上的数据库编排
随着Kubernetes成为容器编排的标准,越来越多的数据库开始支持在Kubernetes上部署和管理。
yaml
# MySQL在Kubernetes上的部署示例(StatefulSet)
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  serviceName: mysql
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
    spec:
      terminationGracePeriodSeconds: 10
      containers:
        - name: mysql
          image: mysql:8.0
          ports:
            - containerPort: 3306
              name: mysql
          env:
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mysql-secret
                  key: password
          volumeMounts:
            - name: data
              mountPath: /var/lib/mysql
          resources:
            requests:
              memory: 1Gi
              cpu: 500m
          livenessProbe:
            exec:
              # 注意:exec探针不经过shell,环境变量须用K8s的$(VAR)语法而非${VAR}
              command: ["mysqladmin", "ping", "-u", "root", "-p$(MYSQL_ROOT_PASSWORD)"]
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command: ["mysql", "-u", "root", "-p$(MYSQL_ROOT_PASSWORD)", "-e", "SELECT 1"]
            initialDelaySeconds: 5
            periodSeconds: 2
            timeoutSeconds: 1
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: standard
        resources:
          requests:
            storage: 10Gi
---
# Headless Service for StatefulSet
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  ports:
    - port: 3306
      name: mysql
  clusterIP: None
  selector:
    app: mysql
---
# Client Service for accessing the database
apiVersion: v1
kind: Service
metadata:
  name: mysql-read
  labels:
    app: mysql
spec:
  ports:
    - port: 3306
      name: mysql
  selector:
    app: mysql
Kubernetes上运行数据库的优势:
- 统一管理:与其他应用一起进行容器化管理
- 弹性扩展:利用Kubernetes的自动扩缩容能力
- 自动恢复:节点故障时自动重新调度
- 配置管理:通过ConfigMap和Secret管理配置
- 声明式部署:基于YAML的声明式配置,易于版本控制和CI/CD集成
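应用侧与Secret的衔接也很直接:凭据以环境变量注入容器,Java端从环境读取即可。下面是一个示意(库名appdb为假设,mysql-read对应上文的读服务):
java
@Configuration
public class K8sDataSourceConfig {

    /**
     * 基于K8s Secret注入的环境变量构建数据源,凭据不写入镜像和配置文件
     */
    @Bean
    public DataSource dataSource() {
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl("jdbc:mysql://mysql-read:3306/appdb"); // 指向上文的mysql-read Service,库名为假设
        config.setUsername("root");
        config.setPassword(System.getenv("MYSQL_ROOT_PASSWORD")); // 与StatefulSet中的Secret环境变量对应
        return new HikariDataSource(config);
    }
}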
3. 多云数据策略
为了避免云厂商锁定,越来越多的企业开始采用多云数据策略。
java
// 多云数据访问抽象示例
public interface CloudDataStore<T> {
T save(T entity);
Optional<T> findById(String id);
List<T> findAll();
void delete(T entity);
}
// AWS实现
@Service
@ConditionalOnProperty(name = "cloud.provider", havingValue = "aws")
public class AWSDataStore<T> implements CloudDataStore<T> {
@Autowired
private DynamoDBMapper dynamoDBMapper;
@Override
public T save(T entity) {
dynamoDBMapper.save(entity);
return entity;
}
@Override
public Optional<T> findById(String id) {
// 实现省略
return Optional.empty();
}
@Override
public List<T> findAll() {
// 实现省略
return Collections.emptyList();
}
@Override
public void delete(T entity) {
dynamoDBMapper.delete(entity);
}
}
// GCP实现
@Service
@ConditionalOnProperty(name = "cloud.provider", havingValue = "gcp")
public class GCPDataStore<T> implements CloudDataStore<T> {
@Autowired
private Datastore datastore;
@Override
public T save(T entity) {
// 实现省略
return entity;
}
@Override
public Optional<T> findById(String id) {
// 实现省略
return Optional.empty();
}
@Override
public List<T> findAll() {
// 实现省略
return Collections.emptyList();
}
@Override
public void delete(T entity) {
// 实现省略
}
}
// 多云数据服务
@Service
public class MultiCloudUserService {
@Autowired
private CloudDataStore<User> dataStore;
public User createUser(User user) {
return dataStore.save(user);
}
public Optional<User> getUserById(String id) {
return dataStore.findById(id);
}
public List<User> getAllUsers() {
return dataStore.findAll();
}
public void deleteUser(User user) {
dataStore.delete(user);
}
}
多云策略的优势:
- 避免供应商锁定:不依赖于单一云厂商的专有服务
- 灾难恢复:在一个云服务出现区域性故障时,可切换到其他云服务
- 成本优化:可以根据不同云厂商的定价策略选择最优方案
- 合规需求:满足数据存储地理位置的法规要求
- 最佳服务选择:可以为不同类型的工作负载选择最适合的云服务
混合架构的发展方向
随着技术的不断演进,混合架构也在向着更加智能、自适应的方向发展:
1. 自适应数据分层
未来的混合架构将能够根据数据访问模式自动决定数据存储位置:
java
@Service
public class AdaptiveDataTieringService {
@Autowired
private JdbcTemplate jdbcTemplate;
@Autowired
private RedisTemplate<String, Object> redisTemplate;
@Autowired
private MongoTemplate mongoTemplate;
@Autowired
private DataAccessStatisticsService statsService;
/**
* 自适应数据存储策略
*/
@Scheduled(fixedRate = 3600000) // 每小时执行一次
public void adaptDataTiering() {
log.info("开始执行自适应数据分层...");
// 1. 获取热点数据统计
List<HotDataInfo> hotDataList = statsService.getHotDataList();
// 2. 获取冷数据统计
List<ColdDataInfo> coldDataList = statsService.getColdDataList();
// 3. 迁移热点数据到缓存
for (HotDataInfo hotData : hotDataList) {
if (hotData.getAccessFrequency() > 100) { // 每小时访问超过100次
String key = hotData.getEntityType() + ":" + hotData.getEntityId();
// 检查是否已在缓存中
if (!Boolean.TRUE.equals(redisTemplate.hasKey(key))) {
// 从MySQL加载数据并缓存
Object entity = loadEntityFromDatabase(hotData.getEntityType(), hotData.getEntityId());
if (entity != null) {
// 设置缓存,根据访问频率动态调整过期时间
long expireTime = calculateExpireTime(hotData.getAccessFrequency());
redisTemplate.opsForValue().set(key, entity, expireTime, TimeUnit.SECONDS);
log.info("热点数据已缓存: {}, 访问频率: {}/小时, 过期时间: {}秒",
key, hotData.getAccessFrequency(), expireTime);
}
}
}
}
// 4. 迁移冷数据到MongoDB(历史存储)
for (ColdDataInfo coldData : coldDataList) {
// 只处理足够冷的数据(超过30天未访问且非核心业务数据)
if (coldData.getDaysSinceLastAccess() > 30 &&
!coldData.getEntityType().equals("Order") &&
!coldData.getEntityType().equals("Transaction")) {
// 从MySQL加载冷数据
Object entity = loadEntityFromDatabase(coldData.getEntityType(), coldData.getEntityId());
if (entity != null) {
// 转换为文档格式
Document document = convertToDocument(entity);
document.append("_entity_type", coldData.getEntityType());
document.append("_entity_id", coldData.getEntityId());
document.append("_archived_at", new Date());
// 存储到MongoDB
mongoTemplate.insert(document, "archived_data");
// 在MySQL中标记为已归档(不删除原始数据,只添加标记)
markAsArchived(coldData.getEntityType(), coldData.getEntityId());
log.info("冷数据已归档: {} {}, 上次访问: {}天前",
coldData.getEntityType(), coldData.getEntityId(),
coldData.getDaysSinceLastAccess());
}
}
}
log.info("自适应数据分层完成");
}
/**
* 数据访问方法:自动选择最佳数据源
*/
public <T> T getEntity(String entityType, Long id, Class<T> entityClass) {
String cacheKey = entityType + ":" + id;
// 1. 尝试从缓存获取
T cachedEntity = (T) redisTemplate.opsForValue().get(cacheKey);
if (cachedEntity != null) {
// 记录缓存命中并更新访问统计
statsService.recordAccess(entityType, id, "cache_hit");
return cachedEntity;
}
// 2. 查询MySQL
T entity = null;
try {
entity = loadEntityFromDatabase(entityType, id, entityClass);
} catch (Exception e) {
log.warn("从MySQL加载数据失败: {} {}, 错误: {}", entityType, id, e.getMessage());
}
// 3. 如果MySQL中不存在,且标记了归档,则查询MongoDB
if (entity == null && isArchived(entityType, id)) {
try {
Document document = mongoTemplate.findOne(
Query.query(Criteria.where("_entity_type").is(entityType)
.and("_entity_id").is(id)),
Document.class,
"archived_data"
);
if (document != null) {
// 转换为实体对象
entity = convertToEntity(document, entityClass);
// 记录归档数据访问
statsService.recordAccess(entityType, id, "archive_hit");
// 频繁访问的归档数据可以考虑恢复到MySQL
if (statsService.getRecentAccessCount(entityType, id) > 10) {
// 异步恢复数据到MySQL
CompletableFuture.runAsync(() -> restoreFromArchive(entityType, id));
}
}
} catch (Exception e) {
log.warn("从MongoDB加载归档数据失败: {} {}, 错误: {}", entityType, id, e.getMessage());
}
}
// 4. 如果找到数据,根据访问频率决定是否缓存
if (entity != null) {
// 记录数据库访问
statsService.recordAccess(entityType, id, "db_hit");
// 如果最近访问频率较高,则缓存
int recentAccessCount = statsService.getRecentAccessCount(entityType, id);
if (recentAccessCount >= 3) { // 短时间内多次访问
// 根据访问频率动态设置缓存时间
long expireTime = calculateExpireTime(recentAccessCount);
redisTemplate.opsForValue().set(cacheKey, entity, expireTime, TimeUnit.SECONDS);
log.debug("动态缓存数据: {}, 近期访问次数: {}, 过期时间: {}秒",
cacheKey, recentAccessCount, expireTime);
}
} else {
// 记录未命中
statsService.recordAccess(entityType, id, "miss");
}
return entity;
}
/**
* 根据访问频率计算缓存过期时间
*/
private long calculateExpireTime(int accessFrequency) {
// 基础过期时间10分钟
long baseExpireTime = 600;
// 根据访问频率增加过期时间,但最多24小时
if (accessFrequency < 10) {
return baseExpireTime;
} else if (accessFrequency < 50) {
return baseExpireTime * 6; // 1小时
} else if (accessFrequency < 100) {
return baseExpireTime * 24; // 4小时
} else if (accessFrequency < 500) {
return baseExpireTime * 72; // 12小时
} else {
return baseExpireTime * 144; // 24小时
}
}
// 其他辅助方法省略...
@Data
public static class HotDataInfo {
private String entityType;
private Long entityId;
private int accessFrequency;
}
@Data
public static class ColdDataInfo {
private String entityType;
private Long entityId;
private int daysSinceLastAccess;
}
}
自适应数据分层的优势:
- 智能数据放置:系统自动将热点数据移至高速存储,冷数据移至低成本存储
- 动态适应:随着访问模式变化自动调整数据分布
- 资源优化:优化存储成本与访问性能的平衡
- 透明访问:应用层无需关心数据实际存储位置
2. 数据库自动选择器
未来的系统能够根据数据模型和查询模式自动选择最合适的数据库:
java
@Service
public class DatabaseSelectorService {
@Autowired
private JdbcTemplate jdbcTemplate; // 下方executeInMySQL直接通过JdbcTemplate执行SQL
@Autowired
private RedisTemplate<String, Object> redisTemplate;
@Autowired
private MongoTemplate mongoTemplate;
@Autowired
private ElasticsearchTemplate esTemplate;
@Autowired
private Neo4jClient neo4jClient;
/**
* 根据查询特性自动选择最佳数据库
*/
public <T> QueryResult<T> executeQuery(DatabaseQuery query, Class<T> resultClass) {
// 根据查询特征分析最适合的数据库
DatabaseType bestDb = selectBestDatabase(query);
QueryResult<T> result = new QueryResult<>();
result.setSelectedDatabase(bestDb);
try {
switch (bestDb) {
case MYSQL:
result.setResults(executeInMySQL(query, resultClass));
break;
case REDIS:
result.setResults(executeInRedis(query, resultClass));
break;
case MONGODB:
result.setResults(executeInMongoDB(query, resultClass));
break;
case ELASTICSEARCH:
result.setResults(executeInElasticsearch(query, resultClass));
break;
case NEO4J:
result.setResults(executeInNeo4j(query, resultClass));
break;
}
// 记录查询性能指标
result.setQueryTimeMs(query.getExecutionTime());
result.setResultCount(result.getResults().size());
} catch (Exception e) {
result.setError("查询执行失败: " + e.getMessage());
log.error("自动查询执行失败", e);
}
return result;
}
/**
* 自动选择最佳数据库类型
*/
private DatabaseType selectBestDatabase(DatabaseQuery query) {
// 1. 分析查询特征
boolean isKeyValueLookup = query.getType() == QueryType.KEY_VALUE_LOOKUP;
boolean isFullTextSearch = query.getType() == QueryType.FULL_TEXT_SEARCH;
boolean isGraphQuery = query.getType() == QueryType.GRAPH_TRAVERSAL;
boolean isComplexJoin = query.getJoinCount() > 2;
boolean isRangeQuery = query.isRangeQuery();
boolean isAggregation = query.isAggregation();
boolean isHighFrequency = query.getFrequency() > 100; // 每秒执行超过100次
// 2. 优先级判断
// 对于简单的键值查找,优先使用Redis
if (isKeyValueLookup && isHighFrequency) {
return DatabaseType.REDIS;
}
// 对于全文搜索,优先使用Elasticsearch
if (isFullTextSearch) {
return DatabaseType.ELASTICSEARCH;
}
// 对于图遍历查询,优先使用Neo4j
if (isGraphQuery) {
return DatabaseType.NEO4J;
}
// 对于复杂聚合但不需要复杂连接,优先使用MongoDB
if (isAggregation && !isComplexJoin) {
return DatabaseType.MONGODB;
}
// 对于范围查询且高频访问,优先使用MongoDB
if (isRangeQuery && isHighFrequency) {
return DatabaseType.MONGODB;
}
// 复杂连接查询,优先使用MySQL
if (isComplexJoin) {
return DatabaseType.MYSQL;
}
// 默认使用MySQL
return DatabaseType.MYSQL;
}
private <T> List<T> executeInMySQL(DatabaseQuery query, Class<T> resultClass) {
// 构建SQL查询
String sql = buildSqlQuery(query);
Object[] params = query.getParameters().toArray();
// 记录开始时间
long startTime = System.currentTimeMillis();
// 执行查询
List<T> results = jdbcTemplate.query(sql, params,
BeanPropertyRowMapper.newInstance(resultClass));
// 记录执行时间
query.setExecutionTime(System.currentTimeMillis() - startTime);
return results;
}
private <T> List<T> executeInRedis(DatabaseQuery query, Class<T> resultClass) {
// 执行Redis查询
// 实现省略...
return Collections.emptyList();
}
private <T> List<T> executeInMongoDB(DatabaseQuery query, Class<T> resultClass) {
// 构建MongoDB查询
Query mongoQuery = buildMongoQuery(query);
// 记录开始时间
long startTime = System.currentTimeMillis();
// 执行查询
List<T> results = mongoTemplate.find(mongoQuery, resultClass);
// 记录执行时间
query.setExecutionTime(System.currentTimeMillis() - startTime);
return results;
}
private <T> List<T> executeInElasticsearch(DatabaseQuery query, Class<T> resultClass) {
// 构建Elasticsearch查询
// 实现省略...
return Collections.emptyList();
}
private <T> List<T> executeInNeo4j(DatabaseQuery query, Class<T> resultClass) {
// 构建Cypher查询
// 实现省略...
return Collections.emptyList();
}
// 辅助方法...
public enum DatabaseType {
MYSQL, REDIS, MONGODB, ELASTICSEARCH, NEO4J
}
public enum QueryType {
KEY_VALUE_LOOKUP, RANGE_QUERY, FULL_TEXT_SEARCH, GRAPH_TRAVERSAL, COMPLEX_JOIN
}
@Data
public static class DatabaseQuery {
private String entity;
private QueryType type;
private Map<String, Object> criteria = new HashMap<>();
private List<Object> parameters = new ArrayList<>();
private List<String> fields = new ArrayList<>();
private Integer limit;
private Integer offset;
private List<String> sorts = new ArrayList<>();
private int joinCount;
private boolean rangeQuery;
private boolean aggregation;
private int frequency; // 每秒执行次数
private long executionTime; // 毫秒
}
@Data
public static class QueryResult<T> {
private DatabaseType selectedDatabase;
private List<T> results;
private long queryTimeMs;
private int resultCount;
private String error;
}
}
数据库自动选择器的优势:
- 查询优化:根据查询特征自动选择最优数据库
- 性能提升:为每类查询使用最适合的存储引擎
- 简化开发:开发者只需关注业务逻辑,无需手动选择数据库
- 自适应进化:根据查询性能反馈不断优化选择策略
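其中"自适应进化"可以从一个很小的反馈环做起:记录各数据库的近期查询耗时,在多个候选都可行时优先选择更快的那个。以下是一个示意性草图(平滑系数等参数均为假设值,枚举沿用上文DatabaseSelectorService中的定义):
java
@Component
public class QueryLatencyTracker {
    // 各数据库类型的指数滑动平均耗时(毫秒)
    private final Map<DatabaseSelectorService.DatabaseType, Double> avgLatency =
            new ConcurrentHashMap<>();
    private static final double ALPHA = 0.2; // 平滑系数,假设值

    /**
     * 每次查询结束后上报耗时,更新滑动平均
     */
    public void record(DatabaseSelectorService.DatabaseType db, long latencyMs) {
        avgLatency.merge(db, (double) latencyMs,
                (old, cur) -> old * (1 - ALPHA) + cur * ALPHA);
    }

    /**
     * 在多个候选数据库都满足查询特征时,返回近期平均耗时最低的一个
     */
    public DatabaseSelectorService.DatabaseType pickFastest(
            List<DatabaseSelectorService.DatabaseType> candidates) {
        return candidates.stream()
                .min(Comparator.comparingDouble(
                        db -> avgLatency.getOrDefault(db, Double.MAX_VALUE)))
                .orElseThrow(() -> new IllegalArgumentException("候选列表为空"));
    }
}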
3. 分布式数据编排
未来的数据库架构将更加关注数据的编排和流动,而非静态存储:
java
@Service
public class DataOrchestrationService {
@Autowired
private KafkaTemplate<String, String> kafkaTemplate;
@Autowired
private JdbcTemplate jdbcTemplate;
@Autowired
private MongoTemplate mongoTemplate;
@Autowired
private ElasticsearchTemplate esTemplate;
/**
* 数据变更事件处理
*/
@KafkaListener(topics = "data_changes")
public void handleDataChangeEvent(String message) {
try {
DataChangeEvent event = JsonUtil.fromJson(message, DataChangeEvent.class);
// 根据数据类型和变更类型,决定数据流向
switch (event.getEntityType()) {
case "product":
orchestrateProductData(event);
break;
case "order":
orchestrateOrderData(event);
break;
case "user":
orchestrateUserData(event);
break;
default:
log.warn("未知的实体类型: {}", event.getEntityType());
break;
}
} catch (Exception e) {
log.error("处理数据变更事件失败", e);
}
}
/**
* 商品数据编排
*/
private void orchestrateProductData(DataChangeEvent event) {
// 从事件中提取商品数据
JsonNode data = event.getData();
Long productId = data.get("id").asLong();
// 根据变更类型执行不同操作
switch (event.getChangeType()) {
case "create":
case "update":
// 1. 更新搜索索引
updateProductSearchIndex(productId, data);
// 2. 生成商品特征向量
generateProductFeatureVector(productId, data);
// 3. 更新统计聚合
updateProductAggregations(productId, data);
// 4. 对于热门商品,预缓存
if (isPopularProduct(productId)) {
cacheProductData(productId, data);
}
break;
case "delete":
// 删除相关数据
deleteProductRelatedData(productId);
break;
}
}
/**
* 订单数据编排
*/
private void orchestrateOrderData(DataChangeEvent event) {
// 从事件中提取订单数据
JsonNode data = event.getData();
Long orderId = data.get("id").asLong();
String orderStatus = data.get("status").asText();
// 根据订单状态执行不同操作
if ("CREATED".equals(orderStatus)) {
// 1. 更新库存
updateInventory(data);
// 2. 更新用户购买历史
updateUserPurchaseHistory(data);
} else if ("PAID".equals(orderStatus)) {
// 1. 更新销售统计
updateSalesStatistics(data);
// 2. 触发推荐系统更新
updateRecommendationData(data);
} else if ("COMPLETED".equals(orderStatus)) {
// 1. 归档订单详情到MongoDB
archiveOrderToMongoDB(orderId, data);
// 2. 更新客户生命周期状态
updateCustomerLifecycle(data);
}
}
/**
* 用户数据编排
*/
private void orchestrateUserData(DataChangeEvent event) {
// 从事件中提取用户数据
JsonNode data = event.getData();
Long userId = data.get("id").asLong();
// 根据变更类型执行不同操作
switch (event.getChangeType()) {
case "create":
// 1. 创建用户画像
createUserProfile(userId, data);
// 2. 初始化推荐数据
initializeRecommendations(userId);
break;
case "update":
// 1. 更新用户画像
updateUserProfile(userId, data);
// 2. 检查用户分群变化
checkUserSegmentChanges(userId, data);
break;
case "delete":
// 删除相关数据
deleteUserRelatedData(userId);
break;
}
}
/**
* 更新商品搜索索引
*/
private void updateProductSearchIndex(Long productId, JsonNode data) {
try {
// 构建搜索文档
ProductDocument doc = new ProductDocument();
doc.setId(productId.toString());
doc.setName(data.get("name").asText());
doc.setCategory(data.get("categoryId").asText());
doc.setPrice(data.get("price").asDouble());
doc.setDescription(data.get("description").asText());
// 提取标签
ArrayNode tagsNode = (ArrayNode) data.get("tags");
if (tagsNode != null) {
List<String> tags = new ArrayList<>();
tagsNode.forEach(tag -> tags.add(tag.asText()));
doc.setTags(tags);
}
// 更新Elasticsearch
esTemplate.save(doc);
log.info("商品搜索索引已更新: {}", productId);
} catch (Exception e) {
log.error("更新商品搜索索引失败: {}", productId, e);
// 记录失败,后续重试
recordFailedOperation("update_search_index", "product", productId);
}
}
// 其他数据编排方法省略...
/**
* 记录失败操作,稍后重试
*/
private void recordFailedOperation(String operation, String entityType, Long entityId) {
FailedOperation failedOp = new FailedOperation();
failedOp.setOperation(operation);
failedOp.setEntityType(entityType);
failedOp.setEntityId(entityId);
failedOp.setFailureTime(new Date());
failedOp.setRetryCount(0);
mongoTemplate.save(failedOp, "failed_operations");
}
@Data
public static class DataChangeEvent {
private String entityType;
private String changeType;
private JsonNode data;
private long timestamp;
}
@Data
public static class FailedOperation {
private String operation;
private String entityType;
private Long entityId;
private Date failureTime;
private int retryCount;
}
}
分布式数据编排的优势:
- 数据流动管理:关注数据如何在不同存储间流动和转换
- 事件驱动架构:通过事件实现松耦合的数据同步和处理
- 数据一致性保障:通过协调器确保跨数据库的数据一致性
- 失败处理与恢复:内置重试机制和故障恢复能力
- 实时数据处理:支持数据变更的实时响应和处理
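与上文的recordFailedOperation配套,还需要一个定时重试器来消费failed_operations集合。下面是一个示意性草图(扫描周期、最大重试次数均为假设值,retryOperation的分发逻辑省略):
java
@Component
public class FailedOperationRetrier {
    @Autowired
    private MongoTemplate mongoTemplate;

    private static final int MAX_RETRIES = 5; // 假设的最大重试次数

    /**
     * 每5分钟扫描一次失败操作并重试:成功则删除记录,失败则累加重试计数
     */
    @Scheduled(fixedRate = 300000)
    public void retryFailedOperations() {
        List<DataOrchestrationService.FailedOperation> failures = mongoTemplate.find(
                Query.query(Criteria.where("retryCount").lt(MAX_RETRIES)),
                DataOrchestrationService.FailedOperation.class,
                "failed_operations");
        for (DataOrchestrationService.FailedOperation op : failures) {
            Query q = Query.query(Criteria.where("operation").is(op.getOperation())
                    .and("entityType").is(op.getEntityType())
                    .and("entityId").is(op.getEntityId()));
            if (retryOperation(op)) {
                mongoTemplate.remove(q, "failed_operations"); // 重试成功,移除失败记录
            } else {
                mongoTemplate.updateFirst(q, new Update().inc("retryCount", 1), "failed_operations");
            }
        }
    }

    private boolean retryOperation(DataOrchestrationService.FailedOperation op) {
        // 实现省略:根据op.getOperation()重新执行对应的数据编排步骤
        return false;
    }
}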
通过本章的探讨,我们可以看到,数据库技术正朝着更加专业化、云原生化和智能化的方向发展。混合架构也将从静态的多数据库组合,演变为动态的、自适应的数据流编排系统。对于架构师和开发者而言,了解这些趋势并提前布局,将有助于构建更具前瞻性和可扩展性的系统架构。
九、总结
在本文中,我们对NoSQL与MySQL混合架构进行了全面而深入的探讨。从理论基础到实战案例,从性能优化到踩坑经验,我们试图为你呈现一幅混合架构的完整图景。现在,让我们对这一旅程进行总结,并探讨混合架构的核心价值与未来方向。
混合架构设计的核心价值
1. 扬长避短,优势互补
混合架构的核心价值在于能够充分发挥各类数据库的优势,同时规避其局限性。正如我们在文中所展示的:
- MySQL:擅长处理结构化数据和复杂事务,保障数据一致性和ACID特性,适合核心业务数据和财务交易。
- Redis:提供极致的读写性能和丰富的数据结构,适合缓存、计数器、排行榜等高频访问场景。
- MongoDB:灵活的文档模型适合存储半结构化数据,如用户画