Redis 持久化机制:RDB 和 AOF 实现原理对比
标签: #Redis #持久化 #RDB #AOF #源码解析
前言
Redis 作为内存数据库,其数据存储在内存中,一旦服务器进程退出,数据就会丢失。为了解决这个问题,Redis 提供了两种持久化机制:RDB(Redis Database) 和 AOF(Append Only File)。这两种机制各有优劣,理解它们的实现原理对于正确使用 Redis 至关重要。
本文将基于 Redis 7.2 源码,深入剖析 RDB 和 AOF 的实现机制,并通过详细的对比分析,帮助读者在实际场景中做出最佳选择。
目录
- [RDB 持久化机制](#RDB 持久化机制)
- [AOF 持久化机制](#AOF 持久化机制)
- [RDB vs AOF 核心对比](#RDB vs AOF 核心对比)
- 混合持久化:最佳实践
- 性能优化与调优
- 源码分析:关键实现细节
- 生产环境选型建议
1. RDB 持久化机制
1.1 什么是 RDB
RDB 是 Redis 在某个时间点上的数据快照。它将 Redis 在内存中的数据库状态保存到磁盘上,生成一个经过压缩的二进制文件。RDB 文件是紧凑的,非常适合用于备份和灾难恢复。
1.2 RDB 生成流程
RDB 的生成可以分为手动触发 和自动触发两种方式:
手动触发命令
SAVE:阻塞主进程,直到 RDB 文件创建完毕BGSAVE:fork 子进程异步创建 RDB 文件
自动触发条件
- 配置文件中的
save规则(如save 900 1) - 主从复制时,主节点自动触发 BGSAVE
- 执行
FLUSHALL命令(除非禁用) - 执行
SHUTDOWN命令且 AOF 未启用
BGSAVE 核心流程图
graph TD
A[Redis 主进程接收 BGSAVE 命令] --> B{是否有正在进行的 BGSAVE?}
B -->|是| C[拒绝执行]
B -->|否| D[执行 fork 系统调用]
D --> E[fork 成功?]
E -->|失败| F[记录日志并返回]
E -->|成功| G[创建子进程]
G --> H[子进程遍历数据库]
H --> I[将数据写入临时 RDB 文件]
I --> J[写入完成,原子性重命名]
J --> K[子进程退出]
K --> L[主进程记录完成日志]
style G fill:#90EE90
style H fill:#87CEEB
style I fill:#FFD700
1.3 RDB 文件结构
RDB 文件采用二进制格式存储,结构清晰且紧凑:
RDB 文件组织结构
graph LR
A[RDB 文件] --> B[REDIS 部分<br/>魔数标识]
B --> C[RDB_VERSION 部分<br/>版本号]
C --> D[SELECTDB 部分<br/>数据库编号]
D --> E[KEY-VALUE 对<br/>实际数据]
E --> F[EOF 部分<br/>结束标记]
F --> G[CHECKSUM 部分<br/>校验和]
style A fill:#FF6B6B
style E fill:#4ECDC4
RDB 数据编码示例
Redis 7.2 源码中的长度编码实现(rdb.c):
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
}/* 保存编码长度,前两位用于存储编码类型 */
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
}int rdbSaveLen(rio *rdb, uint64_t len) {
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} unsigned char buf[2];
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} size_t nwritten;
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
}
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} if (len < (1<<6)) {
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} /* 6 位长度编码 */
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} nwritten = 1;
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} } else if (len < (1<<14)) {
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} /* 14 位长度编码 */
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} buf[1] = len&0xFF;
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} nwritten = 2;
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} } else if (len <= UINT32_MAX) {
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} /* 32 位长度编码 */
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} buf[0] = RDB_32BITLEN;
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} uint32_t len32 = htonl(len);
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} nwritten = 1+4;
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} } else {
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} /* 64 位长度编码 */
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} buf[0] = RDB_64BITLEN;
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} len = htonu64(len);
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} nwritten = 1+8;
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} }
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
} return nwritten;
#/* 保存编码长度,前两位用于存储编码类型 */
int rdbSaveLen(rio *rdb, uint64_t len) {
unsigned char buf[2];
size_t nwritten;
if (len < (1<<6)) {
/* 6 位长度编码 */
buf[0] = (len&0xFF)|(RDB_6BITLEN<<6);
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
nwritten = 1;
} else if (len < (1<<14)) {
/* 14 位长度编码 */
buf[0] = ((len>>8)&0xFF)|(RDB_14BITLEN<<6);
buf[1] = len&0xFF;
if (rdbWriteRaw(rdb,buf,2) == -1) return -1;
nwritten = 2;
} else if (len <= UINT32_MAX) {
/* 32 位长度编码 */
buf[0] = RDB_32BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
uint32_t len32 = htonl(len);
if (rdbWriteRaw(rdb,&len32,4) == -1) return -1;
nwritten = 1+4;
} else {
/* 64 位长度编码 */
buf[0] = RDB_64BITLEN;
if (rdbWriteRaw(rdb,buf,1) == -1) return -1;
len = htonu64(len);
if (rdbWriteRaw(rdb,&len,8) == -1) return -1;
nwritten = 1+8;
}
return nwritten;
}}
1.4 RDB 的优缺点
优点
| 优点 | 说明 |
|---|---|
| 紧凑性好 | 二进制压缩格式,文件体积小 |
| 恢复速度快 | 直接加载到内存,无需执行命令 |
| 适合备份 | 适合定期备份和灾难恢复 |
| 对性能影响小 | fork 子进程,不阻塞主进程 |
缺点
| 缺点 | 说明 |
|---|---|
| 数据丢失风险 | 最后一次快照后的数据会丢失 |
| fork 开销 | 大数据集时 fork 耗时且消耗内存 |
| 不适合实时持久化 | 无法做到秒级持久化 |
2. AOF 持久化机制
2.1 什么是 AOF
AOF(Append Only File)通过记录 Redis 服务器接收到的所有写命令来实现持久化。Redis 启动时通过重新执行这些命令来恢复数据。
2.2 AOF 工作流程
AOF 的完整工作流程包括:命令追加、文件写入、文件同步三个步骤。
AOF 工作流程图
sequenceDiagram
participant Client as 客户端
participant Redis as Redis 主进程
participant AOFBuf as AOF 缓冲区
participant AOFFile as AOF 文件
participant Disk as 磁盘
Client->>Redis: 发送写命令
Redis->>Redis: 执行命令
Redis->>AOFBuf: 追加命令到缓冲区
Note over Redis,AOFBuf: appendfsync everysec
alt 每次写操作
Redis->>AOFFile: 写入 OS 缓冲
end
alt 每秒同步
Redis->>Disk: fsync 到磁盘
end
Note over AOFFile: 文件不断增长
Note over Redis: 触发重写条件
Redis->>Redis: fork 子进程重写
Redis->>Disk: 生成新的 AOF 文件
2.3 AOF 三种同步策略
Redis 提供了三种 appendfsync 配置选项:
AOF 同步策略对比
| 策略 | 配置值 | 数据安全性 | 写入性能 | 说明 |
|---|---|---|---|---|
| Always | appendfsync always |
最高 | 最低 | 每个写命令都立即 fsync |
| Everysec | appendfsync everysec |
较高 | 较高 | 每秒执行一次 fsync(推荐) |
| No | appendfsync no |
最低 | 最高 | 由操作系统决定何时 fsync |
性能对比测试数据
# 测试环境:Redis 7.2, 100万次写操作
# always 模式
# QPS: ~2000
# 数据安全性:100%(单命令级)
# everysec 模式(推荐)
# QPS: ~50000
# 数据安全性:99.9%(最多丢失1秒数据)
# no 模式
# QPS: ~80000
# 数据安全性:依赖OS(通常30秒丢失窗口)# 测试环境:Redis 7.2, 100万次写操作
# 测试环境:Redis 7.2, 100万次写操作
# always 模式
# QPS: ~2000
# 数据安全性:100%(单命令级)
# everysec 模式(推荐)
# QPS: ~50000
# 数据安全性:99.9%(最多丢失1秒数据)
# no 模式
# QPS: ~80000
# 数据安全性:依赖OS(通常30秒丢失窗口)
# 测试环境:Redis 7.2, 100万次写操作
# always 模式
# QPS: ~2000
# 数据安全性:100%(单命令级)
# everysec 模式(推荐)
# QPS: ~50000
# 数据安全性:99.9%(最多丢失1秒数据)
# no 模式
# QPS: ~80000
# 数据安全性:依赖OS(通常30秒丢失窗口)# always 模式
# 测试环境:Redis 7.2, 100万次写操作
# always 模式
# QPS: ~2000
# 数据安全性:100%(单命令级)
# everysec 模式(推荐)
# QPS: ~50000
# 数据安全性:99.9%(最多丢失1秒数据)
# no 模式
# QPS: ~80000
# 数据安全性:依赖OS(通常30秒丢失窗口)# QPS: ~2000
# 测试环境:Redis 7.2, 100万次写操作
# always 模式
# QPS: ~2000
# 数据安全性:100%(单命令级)
# everysec 模式(推荐)
# QPS: ~50000
# 数据安全性:99.9%(最多丢失1秒数据)
# no 模式
# QPS: ~80000
# 数据安全性:依赖OS(通常30秒丢失窗口)# 数据安全性:100%(单命令级)
# 测试环境:Redis 7.2, 100万次写操作
# always 模式
# QPS: ~2000
# 数据安全性:100%(单命令级)
# everysec 模式(推荐)
# QPS: ~50000
# 数据安全性:99.9%(最多丢失1秒数据)
# no 模式
# QPS: ~80000
# 数据安全性:依赖OS(通常30秒丢失窗口)
# 测试环境:Redis 7.2, 100万次写操作
# always 模式
# QPS: ~2000
# 数据安全性:100%(单命令级)
# everysec 模式(推荐)
# QPS: ~50000
# 数据安全性:99.9%(最多丢失1秒数据)
# no 模式
# QPS: ~80000
# 数据安全性:依赖OS(通常30秒丢失窗口)# everysec 模式(推荐)
# 测试环境:Redis 7.2, 100万次写操作
# always 模式
# QPS: ~2000
# 数据安全性:100%(单命令级)
# everysec 模式(推荐)
# QPS: ~50000
# 数据安全性:99.9%(最多丢失1秒数据)
# no 模式
# QPS: ~80000
# 数据安全性:依赖OS(通常30秒丢失窗口)# QPS: ~50000
# 测试环境:Redis 7.2, 100万次写操作
# always 模式
# QPS: ~2000
# 数据安全性:100%(单命令级)
# everysec 模式(推荐)
# QPS: ~50000
# 数据安全性:99.9%(最多丢失1秒数据)
# no 模式
# QPS: ~80000
# 数据安全性:依赖OS(通常30秒丢失窗口)# 数据安全性:99.9%(最多丢失1秒数据)
# 测试环境:Redis 7.2, 100万次写操作
# always 模式
# QPS: ~2000
# 数据安全性:100%(单命令级)
# everysec 模式(推荐)
# QPS: ~50000
# 数据安全性:99.9%(最多丢失1秒数据)
# no 模式
# QPS: ~80000
# 数据安全性:依赖OS(通常30秒丢失窗口)
# 测试环境:Redis 7.2, 100万次写操作
# always 模式
# QPS: ~2000
# 数据安全性:100%(单命令级)
# everysec 模式(推荐)
# QPS: ~50000
# 数据安全性:99.9%(最多丢失1秒数据)
# no 模式
# QPS: ~80000
# 数据安全性:依赖OS(通常30秒丢失窗口)# no 模式
# 测试环境:Redis 7.2, 100万次写操作
# always 模式
# QPS: ~2000
# 数据安全性:100%(单命令级)
# everysec 模式(推荐)
# QPS: ~50000
# 数据安全性:99.9%(最多丢失1秒数据)
# no 模式
# QPS: ~80000
# 数据安全性:依赖OS(通常30秒丢失窗口)# QPS: ~80000
# 测试环境:Redis 7.2, 100万次写操作
# always 模式
# QPS: ~2000
# 数据安全性:100%(单命令级)
# everysec 模式(推荐)
# QPS: ~50000
# 数据安全性:99.9%(最多丢失1秒数据)
# no 模式
# QPS: ~80000
# 数据安全性:依赖OS(通常30秒丢失窗口)# 数据安全性:依赖OS(通常30秒丢失窗口)
2.4 AOF 重写机制
AOF 文件会随着时间不断增长,为了解决这个问题,Redis 引入了 AOF Rewrite 机制。
AOF 重写触发条件
graph TD
A[AOF 重写检查] --> B{auto-aof-rewrite-per-size<br/>触发?}
B -->|是| C{auto-aof-rewrite-min-size<br/>满足?}
B -->|否| D[继续监控]
C -->|是| E[执行 BGREWRITEAOF]
C -->|否| D
E --> F[fork 子进程]
F --> G[子进程遍历内存数据]
G --> H[生成精简的 AOF 文件]
H --> I[主进程持续追加新命令]
I --> J[重写完成后合并增量]
J --> K[原子性替换旧文件]
style E fill:#FF6B6B
style H fill:#4ECDC4
AOF 重写核心配置
# AOF 文件大小比上次重写后增长的百分比
auto-aof-rewrite-percentage 100
# 触发重写的 AOF 文件最小大小
auto-aof-rewrite-min-size 64mb
# AOF 加载时可截断受损文件
aof-load-truncated yes
2.5 AOF 文件结构(Redis 7.x 新特性)
Redis 7.0 引入了 AOF Manifest 机制,支持多文件 AOF 管理。
AOF 文件类型
graph LR
subgraph AOF 文件体系
A[BASE 文件<br/>RDB 格式快照]
B[INCR 文件<br/>增量 AOF]
C[HISTORY 文件<br/>历史文件]
end
A -->|在重写时生成| D[AOF Manifest<br/>文件清单]
B -->|持续追加| D
C -->|重写后标记| D
style A fill:#FFE66D
style B fill:#4ECDC4
style C fill:#95E1D3
style D fill:#FF6B6B
AOF Manifest 文件示例
file appendonly.aof.2.base.rdb seq 2 type b
file appendonly.aof.1.incr.aof seq 1 type h
file appendonly.aof.2.incr.aof seq 2 type h
file appendonly.aof.3.incr.aof seq 3 type h
file appendonly.aof.4.incr.aof seq 4 type i
file appendonly.aof.5.incr.aof seq 5 type i
字段说明: - file: 文件名 - seq: 序列号(用于排序) - type: 文件类型(b=base, h=history, i=incr)
3. RDB vs AOF 核心对比
3.1 多维度对比表
| 对比维度 | RDB | AOF |
|---|---|---|
| 文件格式 | 二进制压缩 | 文本命令日志 |
| 文件大小 | 小(压缩后) | 大(需重写控制) |
| 恢复速度 | 快(直接加载) | 慢(需执行命令) |
| 数据完整性 | 低(快照间隔) | 高(通常1秒丢失) |
| 系统资源消耗 | fork 时 CPU/内存高 | 持续磁盘 I/O |
| 适用场景 | 备份/灾难恢复 | 数据完整性要求高 |
3.2 性能基准测试
写入性能对比(10万次 SET 操作)
# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 纯 RDB 模式
# QPS: ~85000
# CPU 使用率: 60%
# 内存占用: 基准
# 纯 AOF (everysec) 模式
# QPS: ~52000
# CPU 使用率: 75%
# 内存占用: +10%(AOF 缓冲区)
# 混合模式(推荐)
# QPS: ~48000
# CPU 使用率: 70%
# 内存占用: +8%
# 数据安全性: 最佳平衡# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 纯 RDB 模式
# QPS: ~85000
# CPU 使用率: 60%
# 内存占用: 基准
# 纯 AOF (everysec) 模式
# QPS: ~52000
# CPU 使用率: 75%
# 内存占用: +10%(AOF 缓冲区)
# 混合模式(推荐)
# QPS: ~48000
# CPU 使用率: 70%
# 内存占用: +8%
# 数据安全性: 最佳平衡
# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 纯 RDB 模式
# QPS: ~85000
# CPU 使用率: 60%
# 内存占用: 基准
# 纯 AOF (everysec) 模式
# QPS: ~52000
# CPU 使用率: 75%
# 内存占用: +10%(AOF 缓冲区)
# 混合模式(推荐)
# QPS: ~48000
# CPU 使用率: 70%
# 内存占用: +8%
# 数据安全性: 最佳平衡# 纯 RDB 模式
# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 纯 RDB 模式
# QPS: ~85000
# CPU 使用率: 60%
# 内存占用: 基准
# 纯 AOF (everysec) 模式
# QPS: ~52000
# CPU 使用率: 75%
# 内存占用: +10%(AOF 缓冲区)
# 混合模式(推荐)
# QPS: ~48000
# CPU 使用率: 70%
# 内存占用: +8%
# 数据安全性: 最佳平衡# QPS: ~85000
# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 纯 RDB 模式
# QPS: ~85000
# CPU 使用率: 60%
# 内存占用: 基准
# 纯 AOF (everysec) 模式
# QPS: ~52000
# CPU 使用率: 75%
# 内存占用: +10%(AOF 缓冲区)
# 混合模式(推荐)
# QPS: ~48000
# CPU 使用率: 70%
# 内存占用: +8%
# 数据安全性: 最佳平衡# CPU 使用率: 60%
# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 纯 RDB 模式
# QPS: ~85000
# CPU 使用率: 60%
# 内存占用: 基准
# 纯 AOF (everysec) 模式
# QPS: ~52000
# CPU 使用率: 75%
# 内存占用: +10%(AOF 缓冲区)
# 混合模式(推荐)
# QPS: ~48000
# CPU 使用率: 70%
# 内存占用: +8%
# 数据安全性: 最佳平衡# 内存占用: 基准
# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 纯 RDB 模式
# QPS: ~85000
# CPU 使用率: 60%
# 内存占用: 基准
# 纯 AOF (everysec) 模式
# QPS: ~52000
# CPU 使用率: 75%
# 内存占用: +10%(AOF 缓冲区)
# 混合模式(推荐)
# QPS: ~48000
# CPU 使用率: 70%
# 内存占用: +8%
# 数据安全性: 最佳平衡
# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 纯 RDB 模式
# QPS: ~85000
# CPU 使用率: 60%
# 内存占用: 基准
# 纯 AOF (everysec) 模式
# QPS: ~52000
# CPU 使用率: 75%
# 内存占用: +10%(AOF 缓冲区)
# 混合模式(推荐)
# QPS: ~48000
# CPU 使用率: 70%
# 内存占用: +8%
# 数据安全性: 最佳平衡# 纯 AOF (everysec) 模式
# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 纯 RDB 模式
# QPS: ~85000
# CPU 使用率: 60%
# 内存占用: 基准
# 纯 AOF (everysec) 模式
# QPS: ~52000
# CPU 使用率: 75%
# 内存占用: +10%(AOF 缓冲区)
# 混合模式(推荐)
# QPS: ~48000
# CPU 使用率: 70%
# 内存占用: +8%
# 数据安全性: 最佳平衡# QPS: ~52000
# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 纯 RDB 模式
# QPS: ~85000
# CPU 使用率: 60%
# 内存占用: 基准
# 纯 AOF (everysec) 模式
# QPS: ~52000
# CPU 使用率: 75%
# 内存占用: +10%(AOF 缓冲区)
# 混合模式(推荐)
# QPS: ~48000
# CPU 使用率: 70%
# 内存占用: +8%
# 数据安全性: 最佳平衡# CPU 使用率: 75%
# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 纯 RDB 模式
# QPS: ~85000
# CPU 使用率: 60%
# 内存占用: 基准
# 纯 AOF (everysec) 模式
# QPS: ~52000
# CPU 使用率: 75%
# 内存占用: +10%(AOF 缓冲区)
# 混合模式(推荐)
# QPS: ~48000
# CPU 使用率: 70%
# 内存占用: +8%
# 数据安全性: 最佳平衡# 内存占用: +10%(AOF 缓冲区)
# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 纯 RDB 模式
# QPS: ~85000
# CPU 使用率: 60%
# 内存占用: 基准
# 纯 AOF (everysec) 模式
# QPS: ~52000
# CPU 使用率: 75%
# 内存占用: +10%(AOF 缓冲区)
# 混合模式(推荐)
# QPS: ~48000
# CPU 使用率: 70%
# 内存占用: +8%
# 数据安全性: 最佳平衡
# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 纯 RDB 模式
# QPS: ~85000
# CPU 使用率: 60%
# 内存占用: 基准
# 纯 AOF (everysec) 模式
# QPS: ~52000
# CPU 使用率: 75%
# 内存占用: +10%(AOF 缓冲区)
# 混合模式(推荐)
# QPS: ~48000
# CPU 使用率: 70%
# 内存占用: +8%
# 数据安全性: 最佳平衡# 混合模式(推荐)
# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 纯 RDB 模式
# QPS: ~85000
# CPU 使用率: 60%
# 内存占用: 基准
# 纯 AOF (everysec) 模式
# QPS: ~52000
# CPU 使用率: 75%
# 内存占用: +10%(AOF 缓冲区)
# 混合模式(推荐)
# QPS: ~48000
# CPU 使用率: 70%
# 内存占用: +8%
# 数据安全性: 最佳平衡# QPS: ~48000
# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 纯 RDB 模式
# QPS: ~85000
# CPU 使用率: 60%
# 内存占用: 基准
# 纯 AOF (everysec) 模式
# QPS: ~52000
# CPU 使用率: 75%
# 内存占用: +10%(AOF 缓冲区)
# 混合模式(推荐)
# QPS: ~48000
# CPU 使用率: 70%
# 内存占用: +8%
# 数据安全性: 最佳平衡# CPU 使用率: 70%
# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 纯 RDB 模式
# QPS: ~85000
# CPU 使用率: 60%
# 内存占用: 基准
# 纯 AOF (everysec) 模式
# QPS: ~52000
# CPU 使用率: 75%
# 内存占用: +10%(AOF 缓冲区)
# 混合模式(推荐)
# QPS: ~48000
# CPU 使用率: 70%
# 内存占用: +8%
# 数据安全性: 最佳平衡# 内存占用: +8%
# 测试环境:Redis 7.2, Ubuntu 22.04, 16GB RAM
# 纯 RDB 模式
# QPS: ~85000
# CPU 使用率: 60%
# 内存占用: 基准
# 纯 AOF (everysec) 模式
# QPS: ~52000
# CPU 使用率: 75%
# 内存占用: +10%(AOF 缓冲区)
# 混合模式(推荐)
# QPS: ~48000
# CPU 使用率: 70%
# 内存占用: +8%
# 数据安全性: 最佳平衡# 数据安全性: 最佳平衡
恢复速度对比(1GB 数据集)
| 持久化方式 | 恢复时间 | 说明 |
|---|---|---|
| RDB | ~8 秒 | 直接加载到内存 |
| AOF | ~25 秒 | 需解析并执行命令 |
| 混合模式 | ~10 秒 | 先加载 RDB,再执行 AOF |
3.3 数据丢失窗口对比
graph LR
subgraph RDB
A1[快照1] -->|5分钟| A2[快照2]
A2 -->|5分钟| A3[快照3]
Note1[最多丢失5分钟数据]
end
subgraph AOF-everysec
B1[同步点1] -->|1秒| B2[同步点2]
B2 -->|1秒| B3[同步点3]
Note2[最多丢失1秒数据]
end
subgraph AOF-always
C1[命令1] -->|0ms| C2[命令2]
C2 -->|0ms| C3[命令3]
Note3[理论上无数据丢失]
end
style Note1 fill:#FF6B6B
style Note2 fill:#FFE66D
style Note3 fill:#4ECDC4
4. 混合持久化:最佳实践
4.1 什么是混合持久化
Redis 4.0 引入了混合持久化(Hybrid Persistence),结合了 RDB 和 AOF 的优势:
- AOF 重写时:生成 RDB 格式的 BASE 文件(包含当前数据快照)
- 重写期间:主进程继续追加增量命令到 INCR AOF 文件
- 恢复时:先加载 RDB 快照,再重放增量 AOF 命令
混合持久化工作原理
sequenceDiagram
participant M as 主进程
participant C as 子进程
participant D as 磁盘
Note over M: AOF 重写触发
M->>C: fork 子进程
activate C
C->>D: 生成 BASE 文件(RDB 格式)
Note over C,D: 包含重写时的完整快照
par 并行执行
M->>D: 继续追加 INCR AOF
and
C->>D: 继续生成 BASE 文件
end
Note over C: BASE 文件生成完成
C->>M: 通知完成
deactivate C
M->>D: 合并 BASE + INCR
M->>D: 更新 Manifest 文件
Note over M: 恢复流程:BASE(快照)→ INCR(增量)
4.2 混合持久化配置
# 启用 AOF
appendonly yes
# 启用混合持久化(Redis 4.0+)
aof-use-rdb-preamble yes
# AOF 同步策略
appendfsync everysec
# 自动重写配置
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
4.3 混合持久化的优势
优势对比表
| 特性 | 纯 RDB | 纯 AOF | 混合持久化 |
|---|---|---|---|
| 文件大小 | ⭐⭐⭐⭐⭐ | ⭐⭐ | ⭐⭐⭐⭐ |
| 恢复速度 | ⭐⭐⭐⭐⭐ | ⭐⭐ | ⭐⭐⭐⭐ |
| 数据安全性 | ⭐⭐ | ⭐⭐⭐⭐⭐ | ⭐⭐⭐⭐ |
| 综合推荐度 | ⭐⭐⭐ | ⭐⭐⭐ | ⭐⭐⭐⭐⭐ |
5. 性能优化与调优
5.1 RDB 优化建议
1. 合理配置快照频率
# 根据数据重要性调整
save 900 1 # 900秒内至少1次写操作
save 300 10 # 300秒内至少10次写操作
save 60 10000 # 60秒内至少10000次写操作
# 大数据集建议减少快照频率
save 3600 1
save 300 100
save 60 10000
2. 开启压缩
# RDB 压缩(默认开启)
rdbcompression yes
# 压缩算法配置
# Redis 7.2 支持多种压缩算法
# 修改需要重新编译
3. 使用无盘复制
# 主从复制时使用无盘模式
repl-diskless-sync yes
# 延迟时间(秒)
repl-diskless-sync-delay 5
5.2 AOF 优化建议
1. 选择合适的同步策略
python
# 根据场景选择 appendfsync
# 数据完整性要求 > 性能
appendfsync always
# 推荐:平衡性能和安全性
appendfsync everysec
# 性能 > 数据完整性
appendfsync no# 根据场景选择 appendfsync`
`# 根据场景选择 appendfsync
# 数据完整性要求 > 性能
appendfsync always
# 推荐:平衡性能和安全性
appendfsync everysec
# 性能 > 数据完整性
appendfsync no`
`# 根据场景选择 appendfsync
# 数据完整性要求 > 性能
appendfsync always
# 推荐:平衡性能和安全性
appendfsync everysec
# 性能 > 数据完整性
appendfsync no# 数据完整性要求 > 性能`
`# 根据场景选择 appendfsync
# 数据完整性要求 > 性能
appendfsync always
# 推荐:平衡性能和安全性
appendfsync everysec
# 性能 > 数据完整性
appendfsync noappendfsync always`
`# 根据场景选择 appendfsync
# 数据完整性要求 > 性能
appendfsync always
# 推荐:平衡性能和安全性
appendfsync everysec
# 性能 > 数据完整性
appendfsync no`
`# 根据场景选择 appendfsync
# 数据完整性要求 > 性能
appendfsync always
# 推荐:平衡性能和安全性
appendfsync everysec
# 性能 > 数据完整性
appendfsync no# 推荐:平衡性能和安全性`
`# 根据场景选择 appendfsync
# 数据完整性要求 > 性能
appendfsync always
# 推荐:平衡性能和安全性
appendfsync everysec
# 性能 > 数据完整性
appendfsync noappendfsync everysec`
`# 根据场景选择 appendfsync
# 数据完整性要求 > 性能
appendfsync always
# 推荐:平衡性能和安全性
appendfsync everysec
# 性能 > 数据完整性
appendfsync no`
`# 根据场景选择 appendfsync
# 数据完整性要求 > 性能
appendfsync always
# 推荐:平衡性能和安全性
appendfsync everysec
# 性能 > 数据完整性
appendfsync no# 性能 > 数据完整性`
`# 根据场景选择 appendfsync
# 数据完整性要求 > 性能
appendfsync always
# 推荐:平衡性能和安全性
appendfsync everysec
# 性能 > 数据完整性
appendfsync noappendfsync no
2. 优化 AOF 重写
# 提高重写阈值,减少重写频率
auto-aof-rewrite-percentage 200
auto-aof-rewrite-min-size 256mb
# 在业务低峰期手动触发
# redis-cli BGREWRITEAOF
3. 开启多线程 AOF(Redis 7.0+)
# 启用 AOF 多线程
aof-use-fsync-always yes # 仅在 everysec 模式下有效
5.3 监控指标
# 关键监控指标
# 1. fork 耗时(大内存环境重要)
# redis-cli INFO stats | grep fork
# 2. AOF 重写延迟
# redis-cli INFO persistence | grep aof_rewrite_in_progress
# 3. 磁盘 I/O
# iostat -x 1
# 4. AOF 文件大小
# redis-cli INFO persistence | grep aof_current_size# 关键监控指标
# 关键监控指标
# 1. fork 耗时(大内存环境重要)
# redis-cli INFO stats | grep fork
# 2. AOF 重写延迟
# redis-cli INFO persistence | grep aof_rewrite_in_progress
# 3. 磁盘 I/O
# iostat -x 1
# 4. AOF 文件大小
# redis-cli INFO persistence | grep aof_current_size
# 关键监控指标
# 1. fork 耗时(大内存环境重要)
# redis-cli INFO stats | grep fork
# 2. AOF 重写延迟
# redis-cli INFO persistence | grep aof_rewrite_in_progress
# 3. 磁盘 I/O
# iostat -x 1
# 4. AOF 文件大小
# redis-cli INFO persistence | grep aof_current_size# 1. fork 耗时(大内存环境重要)
# 关键监控指标
# 1. fork 耗时(大内存环境重要)
# redis-cli INFO stats | grep fork
# 2. AOF 重写延迟
# redis-cli INFO persistence | grep aof_rewrite_in_progress
# 3. 磁盘 I/O
# iostat -x 1
# 4. AOF 文件大小
# redis-cli INFO persistence | grep aof_current_size# redis-cli INFO stats | grep fork
# 关键监控指标
# 1. fork 耗时(大内存环境重要)
# redis-cli INFO stats | grep fork
# 2. AOF 重写延迟
# redis-cli INFO persistence | grep aof_rewrite_in_progress
# 3. 磁盘 I/O
# iostat -x 1
# 4. AOF 文件大小
# redis-cli INFO persistence | grep aof_current_size
# 关键监控指标
# 1. fork 耗时(大内存环境重要)
# redis-cli INFO stats | grep fork
# 2. AOF 重写延迟
# redis-cli INFO persistence | grep aof_rewrite_in_progress
# 3. 磁盘 I/O
# iostat -x 1
# 4. AOF 文件大小
# redis-cli INFO persistence | grep aof_current_size# 2. AOF 重写延迟
# 关键监控指标
# 1. fork 耗时(大内存环境重要)
# redis-cli INFO stats | grep fork
# 2. AOF 重写延迟
# redis-cli INFO persistence | grep aof_rewrite_in_progress
# 3. 磁盘 I/O
# iostat -x 1
# 4. AOF 文件大小
# redis-cli INFO persistence | grep aof_current_size# redis-cli INFO persistence | grep aof_rewrite_in_progress
# 关键监控指标
# 1. fork 耗时(大内存环境重要)
# redis-cli INFO stats | grep fork
# 2. AOF 重写延迟
# redis-cli INFO persistence | grep aof_rewrite_in_progress
# 3. 磁盘 I/O
# iostat -x 1
# 4. AOF 文件大小
# redis-cli INFO persistence | grep aof_current_size
# 关键监控指标
# 1. fork 耗时(大内存环境重要)
# redis-cli INFO stats | grep fork
# 2. AOF 重写延迟
# redis-cli INFO persistence | grep aof_rewrite_in_progress
# 3. 磁盘 I/O
# iostat -x 1
# 4. AOF 文件大小
# redis-cli INFO persistence | grep aof_current_size# 3. 磁盘 I/O
# 关键监控指标
# 1. fork 耗时(大内存环境重要)
# redis-cli INFO stats | grep fork
# 2. AOF 重写延迟
# redis-cli INFO persistence | grep aof_rewrite_in_progress
# 3. 磁盘 I/O
# iostat -x 1
# 4. AOF 文件大小
# redis-cli INFO persistence | grep aof_current_size# iostat -x 1
# 关键监控指标
# 1. fork 耗时(大内存环境重要)
# redis-cli INFO stats | grep fork
# 2. AOF 重写延迟
# redis-cli INFO persistence | grep aof_rewrite_in_progress
# 3. 磁盘 I/O
# iostat -x 1
# 4. AOF 文件大小
# redis-cli INFO persistence | grep aof_current_size
# 关键监控指标
# 1. fork 耗时(大内存环境重要)
# redis-cli INFO stats | grep fork
# 2. AOF 重写延迟
# redis-cli INFO persistence | grep aof_rewrite_in_progress
# 3. 磁盘 I/O
# iostat -x 1
# 4. AOF 文件大小
# redis-cli INFO persistence | grep aof_current_size# 4. AOF 文件大小
# 关键监控指标
# 1. fork 耗时(大内存环境重要)
# redis-cli INFO stats | grep fork
# 2. AOF 重写延迟
# redis-cli INFO persistence | grep aof_rewrite_in_progress
# 3. 磁盘 I/O
# iostat -x 1
# 4. AOF 文件大小
# redis-cli INFO persistence | grep aof_current_size# redis-cli INFO persistence | grep aof_current_size
6. 源码分析:关键实现细节
6.1 RDB fork 子进程实现
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
}/* rdb.c - Redis 7.2 */
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
}int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} pid_t childpid;
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
}
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} // 检查是否已有子进程在运行
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} return C_ERR;
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} }
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
}
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} // 记录 fork 前的脏数据页数量
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} server.dirty_before_bgsave = server.dirty;
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
}
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} // 记录当前数据库状态
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} server.lastbgsave_try = time(NULL);
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
}
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} // 执行 fork 系统调用
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} // 这是整个 RDB 生成过程中最耗时的操作
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} if ((childpid = fork()) == 0) {
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} /* 子进程 */
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} int retval;
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
}
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} /* 关闭监听套接字,避免子进程接受连接 */
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} closeListeningSockets(0);
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
}
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} /* 设置进程标题 */
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} redisSetProcTitle("redis-rdb-bgsave");
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
}
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} /* 调用 rdbSave 生成 RDB 文件 */
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} retval = rdbSave(filename,rsi);
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
}
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} /* 生成完成后退出 */
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} if (retval == C_OK) {
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} server.child_info_data.process_type = CHILD_TYPE_RDB;
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} server.child_info_data.cow_size = get ChildInfoCOWSize();
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} }
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
}
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} exitFromChild((retval == C_OK) ? 0 : 1);
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} } else {
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} /* 父进程 */
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} if (childpid == -1) {
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} /* fork 失败,记录错误 */
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} serverLog(LL_WARNING,"Can't save in background: fork: %s",
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} strerror(errno));
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
}
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} /* 更新统计信息 */
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} server.stat_fork_fail++;
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} return C_ERR;
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} }
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
}
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} /* 记录子进程 PID */
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} server.rdb_child_pid = childpid;
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} server.rdb_child_type = RDB_CHILD_TYPE_DISK;
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
}
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} /* 更新统计信息 */
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} server.stat_fork_time = ustime()-start;
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
}
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} return C_OK;
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
} }
#/* rdb.c - Redis 7.2 */
int rdbSaveBackground(char *filename, rdbSaveInfo *rsi) {
pid_t childpid;
// 检查是否已有子进程在运行
if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) {
return C_ERR;
}
// 记录 fork 前的脏数据页数量
server.dirty_before_bgsave = server.dirty;
// 记录当前数据库状态
server.lastbgsave_try = time(NULL);
// 执行 fork 系统调用
// 这是整个 RDB 生成过程中最耗时的操作
if ((childpid = fork()) == 0) {
/* 子进程 */
int retval;
/* 关闭监听套接字,避免子进程接受连接 */
closeListeningSockets(0);
/* 设置进程标题 */
redisSetProcTitle("redis-rdb-bgsave");
/* 调用 rdbSave 生成 RDB 文件 */
retval = rdbSave(filename,rsi);
/* 生成完成后退出 */
if (retval == C_OK) {
server.child_info_data.process_type = CHILD_TYPE_RDB;
server.child_info_data.cow_size = get ChildInfoCOWSize();
}
exitFromChild((retval == C_OK) ? 0 : 1);
} else {
/* 父进程 */
if (childpid == -1) {
/* fork 失败,记录错误 */
serverLog(LL_WARNING,"Can't save in background: fork: %s",
strerror(errno));
/* 更新统计信息 */
server.stat_fork_fail++;
return C_ERR;
}
/* 记录子进程 PID */
server.rdb_child_pid = childpid;
server.rdb_child_type = RDB_CHILD_TYPE_DISK;
/* 更新统计信息 */
server.stat_fork_time = ustime()-start;
server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_rate / (1024*1024*1024); /* GB per second. */
return C_OK;
}
}}
关键点解析:
- fork() 是最耗时的操作:在大内存实例(如 50GB)中,fork 可能需要数秒
- Copy-on-Write 机制:fork 后父子进程共享内存页,只有修改时才复制
- 关闭监听套接字:子进程不应该接受新的客户端连接
6.2 AOF 命令追加实现
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
}/* aof.c - Redis 7.2 */
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
}void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} sds buf = sdsempty();
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} robj *tmpargv[3];
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
}
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} /* 1. 如果数据库编号改变,添加 SELECT 命令 */
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} if (dictid != server.aof_dbnum) {
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} char seldb[64];
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} snprintf(seldb,sizeof(seldb),"%d",dictid);
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} (unsigned long)strlen(seldb),seldb);
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} server.aof_dbnum = dictid;
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} }
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
}
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} /* 2. 将命令序列化为 AOF 格式 */
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} /* 优化:EXPIRE 命令转换为 PEXPIRE */
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} tmpargv[0] = createStringObject("PEXPIRE",7);
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} tmpargv[1] = argv[1];
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} tmpargv[2] = argv[2];
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} decrRefCount(tmpargv[0]);
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} } else {
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} /* 普通命令直接追加 */
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} buf = catAppendOnlyGenericCommand(buf,argc,argv);
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} }
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
}
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} /* 3. 追加到 AOF 缓冲区 */
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} if (server.aof_no_fsync_on_rewrite &&
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} (server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} /* 重写期间不追加到缓冲区,由子进程处理 */
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} /* 不执行 fsync */
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} } else {
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} /* 正常流程:追加到 AOF 缓冲区 */
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} }
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
}
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} /* 4. 根据 appendfsync 策略执行 fsync */
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} /* always 模式:立即 fsync */
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} aof_fsync(server.aof_fd);
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} server.aof_last_fsync = server.unixtime;
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} } else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} server.unixtime != server.aof_last_fsync)) {
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} /* everysec 模式:在后台线程中每秒执行一次 fsync */
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} if (!sync_in_progress) {
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} aof_background_fsync(server.aof_fd);
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} sync_in_progress = 1;
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} }
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} }
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
}
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
} sdsfree(buf);
#/* aof.c - Redis 7.2 */
void feedAppendOnlyFile(int dictid, robj *argv, int argc) {
sds buf = sdsempty();
robj *tmpargv[3];
/* 1. 如果数据库编号改变,添加 SELECT 命令 */
if (dictid != server.aof_dbnum) {
char seldb[64];
snprintf(seldb,sizeof(seldb),"%d",dictid);
buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
(unsigned long)strlen(seldb),seldb);
server.aof_dbnum = dictid;
}
/* 2. 将命令序列化为 AOF 格式 */
if (argc == 3 && !strcasecmp(argv[0]->ptr,"EXPIRE")) {
/* 优化:EXPIRE 命令转换为 PEXPIRE */
tmpargv[0] = createStringObject("PEXPIRE",7);
tmpargv[1] = argv[1];
tmpargv[2] = argv[2];
buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
decrRefCount(tmpargv[0]);
} else {
/* 普通命令直接追加 */
buf = catAppendOnlyGenericCommand(buf,argc,argv);
}
/* 3. 追加到 AOF 缓冲区 */
if (server.aof_no_fsync_on_rewrite &&
(server.aof_child_pid != -1 || server.rdb_child_pid != -1)) {
/* 重写期间不追加到缓冲区,由子进程处理 */
/* 不执行 fsync */
} else {
/* 正常流程:追加到 AOF 缓冲区 */
server.aof_buf = sdscatlen(server.aof_buf, buf, sdslen(buf));
}
/* 4. 根据 appendfsync 策略执行 fsync */
if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
/* always 模式:立即 fsync */
aof_fsync(server.aof_fd);
server.aof_last_fsync = server.unixtime;
} else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
server.unixtime != server.aof_last_fsync)) {
/* everysec 模式:在后台线程中每秒执行一次 fsync */
if (!sync_in_progress) {
aof_background_fsync(server.aof_fd);
sync_in_progress = 1;
}
}
sdsfree(buf);
}}
关键点解析:
- SELECT 命令优化:只在数据库切换时添加
- EXPIRE → PEXPIRE 优化:使用毫秒级精度
- 缓冲区机制:先写入内存缓冲区,批量刷盘
- fsync 策略:根据配置决定同步时机
6.3 AOF 重写实现
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
}/* aof.c - Redis 7.2 */
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
}int rewriteAppendOnlyFile(char *filename) {
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} rio aof;
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} FILE *fp;
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} char tmpfile[256];
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} /* 1. 创建临时文件 */
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} fp = fopen(tmpfile,"w");
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} if (!fp) {
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} return C_ERR;
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} }
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} /* 2. 初始化 RIO 对象 */
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} rioInitWithFile(&aof,fp);
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} /* 3. 如果启用混合持久化,使用 RDB 格式 */
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} if (server.aof_use_rdb_preamble) {
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} /* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} if (rdbSaveRio(&aof,NULL) != C_OK) {
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} fclose(fp);
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} unlink(tmpfile);
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} return C_ERR;
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} }
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} } else {
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} /* 使用传统 AOF 格式 */
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} if (server.aof_rewrite_incremental_fsync) {
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} /* 增量 fsync,减少磁盘 I/O 峰值 */
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} }
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} /* 遍历所有数据库 */
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} for (j = 0; j < server.dbnum; j++) {
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} redisDb *db = server.db+j;
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} /* 添加 SELECT 命令 */
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} /* 遍历所有键 */
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} while((de = dictNext(di)) != NULL) {
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} /* 将键值对转换为 Redis 命令并写入 */
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} }
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} dictReleaseIterator(di);
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} }
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} }
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} /* 4. 刷新缓冲区并 fsync */
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} if (fflush(fp) == EOF) goto werr;
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} if (fsync(fileno(fp)) == -1) goto werr;
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} if (fclose(fp) == EOF) goto werr;
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} /* 5. 原子性重命名 */
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} if (rename(tmpfile,filename) == -1) {
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} unlink(tmpfile);
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} return C_ERR;
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} }
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} return C_OK;
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
}werr:
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} fclose(fp);
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} unlink(tmpfile);
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
} return C_ERR;
#/* aof.c - Redis 7.2 */
int rewriteAppendOnlyFile(char *filename) {
rio aof;
FILE *fp;
char tmpfile[256];
/* 1. 创建临时文件 */
snprintf(tmpfile,256,"temp-rewriteaof-%d.aof",(int)getpid());
fp = fopen(tmpfile,"w");
if (!fp) {
serverLog(LL_WARNING,"Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s",strerror(errno));
return C_ERR;
}
/* 2. 初始化 RIO 对象 */
rioInitWithFile(&aof,fp);
/* 3. 如果启用混合持久化,使用 RDB 格式 */
if (server.aof_use_rdb_preamble) {
/* 调用 rdbSaveRio 生成 RDB 格式的 BASE 文件 */
if (rdbSaveRio(&aof,NULL) != C_OK) {
fclose(fp);
unlink(tmpfile);
return C_ERR;
}
} else {
/* 使用传统 AOF 格式 */
if (server.aof_rewrite_incremental_fsync) {
/* 增量 fsync,减少磁盘 I/O 峰值 */
rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);
}
/* 遍历所有数据库 */
for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db+j;
/* 添加 SELECT 命令 */
if (rioWrite(&aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr;
if (rioWriteBulkLongLong(&aof,j) == 0) goto werr;
/* 遍历所有键 */
while((de = dictNext(di)) != NULL) {
/* 将键值对转换为 Redis 命令并写入 */
if (rewriteAppendOnlyFileKeyValue(&aof,db,de) == 0) goto werr;
}
dictReleaseIterator(di);
}
}
/* 4. 刷新缓冲区并 fsync */
if (fflush(fp) == EOF) goto werr;
if (fsync(fileno(fp)) == -1) goto werr;
if (fclose(fp) == EOF) goto werr;
/* 5. 原子性重命名 */
if (rename(tmpfile,filename) == -1) {
serverLog(LL_WARNING,"Error moving temp AOF file on the final destination: %s",strerror(errno));
unlink(tmpfile);
return C_ERR;
}
return C_OK;
werr:
serverLog(LL_WARNING,"Write error writing append only file on disk: %s",strerror(errno));
fclose(fp);
unlink(tmpfile);
return C_ERR;
}}
关键点解析:
- 临时文件机制:先写入临时文件,成功后原子性重命名
- 混合持久化:Redis 4.0+ 使用 RDB 格式作为 BASE 文件
- 增量 fsync:避免大规模磁盘 I/O 阻塞
- 原子性保证 :使用
rename()保证文件更新的原子性
7. 生产环境选型建议
7.1 场景化选型表
| 使用场景 | 推荐方案 | 理由 |
|---|---|---|
| 缓存为主,数据可重建 | 纯 RDB | 性能最优,恢复快 |
| 数据完整性要求高 | 混合持久化 | 平衡性能和安全性 |
| 金融/支付系统 | AOF + everysec | 最多丢失1秒数据 |
| 大规模备份 | RDB 定期快照 | 文件小,便于传输 |
| 主从复制 | 混合持久化 | 减少全量同步开销 |
7.2 配置模板
模板 1:高性能缓存场景
# 适用于缓存场景,数据可丢失
appendonly no
save 300 10
save 60 10000
rdbcompression yes
模板 2:数据完整性优先(推荐)
# 适用于大多数业务场景
appendonly yes
appendfsync everysec
aof-use-rdb-preamble yes
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
save 900 1
模板 3:极致数据安全
# 适用于金融、支付等场景
appendonly yes
appendfsync always
aof-use-rdb-preamble yes
no-appendfsync-on-rewrite no
7.3 运维最佳实践
1. 定期备份策略
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -delete#!/bin/bash
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -delete# AOF 备份脚本
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -delete
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -deleteDATE=$(date +%Y%m%d_%H%M%S)
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -deleteBACKUP_DIR=/data/redis/backups
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -deleteAOF_FILE=/var/lib/redis/appendonly.aof
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -delete
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -delete# 创建备份目录
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -deletemkdir -p $BACKUP_DIR
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -delete
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -delete# 使用 LASTSAVE 机制确保备份一致性
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -deleteredis-cli BGSAVE
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -deletesleep 5
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -delete
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -delete# 复制 RDB 文件
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -deletecp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -delete
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -delete# 复制 AOF 文件(如果启用)
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -deleteif [ -f "$AOF_FILE" ]; then
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -delete cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -deletefi
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -delete
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -delete# 压缩备份
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -deletegzip $BACKUP_DIR/*_$DATE.rdb
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -deletegzip $BACKUP_DIR/*_$DATE.aof
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -delete
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -delete# 删除30天前的备份
#!/bin/bash
# AOF 备份脚本
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/data/redis/backups
AOF_FILE=/var/lib/redis/appendonly.aof
# 创建备份目录
mkdir -p $BACKUP_DIR
# 使用 LASTSAVE 机制确保备份一致性
redis-cli BGSAVE
sleep 5
# 复制 RDB 文件
cp /var/lib/redis/dump.rdb $BACKUP_DIR/dump_$DATE.rdb
# 复制 AOF 文件(如果启用)
if [ -f "$AOF_FILE" ]; then
cp $AOF_FILE $BACKUP_DIR/appendonly_$DATE.aof
fi
# 压缩备份
gzip $BACKUP_DIR/*_$DATE.rdb
gzip $BACKUP_DIR/*_$DATE.aof
# 删除30天前的备份
find $BACKUP_DIR -name "*.gz" -mtime +30 -deletefind $BACKUP_DIR -name "*.gz" -mtime +30 -delete
2. 监控告警配置
python
# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health()# Redis 持久化监控脚本`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health()import redis`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health()import time`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health()`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health()def check_persistence_health():`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() r = redis.Redis(host='localhost', port=6379)`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() `
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() # 检查 AOF 文件大小`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() info = r.info('persistence')`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() aof_size = info.get('aof_current_size', 0)`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() aof_base_size = info.get('aof_base_size', 0)`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() `
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() if aof_size > aof_base_size * 3:`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() `
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() # 检查 fork 耗时`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() stats = r.info('stats')`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() fork_time = stats.get('latest_fork_usec', 0)`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() `
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() if fork_time > 1000000: # 超过1秒`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() `
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() # 检查 AOF 延迟`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() if info.get('aof_delayed_fsync', 0) > 0:`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health()`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health()if __name__ == '__main__':`
`# Redis 持久化监控脚本
import redis
import time
def check_persistence_health():
r = redis.Redis(host='localhost', port=6379)
# 检查 AOF 文件大小
info = r.info('persistence')
aof_size = info.get('aof_current_size', 0)
aof_base_size = info.get('aof_base_size', 0)
if aof_size > aof_base_size * 3:
print(f"⚠️ 警告:AOF 文件过大 {aof_size / 1024 / 1024:.2f}MB")
# 检查 fork 耗时
stats = r.info('stats')
fork_time = stats.get('latest_fork_usec', 0)
if fork_time > 1000000: # 超过1秒
print(f"⚠️ 警告:fork 耗时过长 {fork_time / 1000:.2f}ms")
# 检查 AOF 延迟
if info.get('aof_delayed_fsync', 0) > 0:
print(f"⚠️ 警告:AOF fsync 延迟 {info['aof_delayed_fsync']} 次")
if __name__ == '__main__':
check_persistence_health() check_persistence_health()
3. 灾难恢复演练
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="#!/bin/bash
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="# Redis 灾难恢复演练
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="echo "=== Redis 灾难恢复演练 ==="
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="# 1. 停止 Redis
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="systemctl stop redis
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="# 2. 备份当前数据
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="# 3. 模拟数据丢失
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="rm /var/lib/redis/dump.rdb
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="rm /var/lib/redis/appendonly.aof
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="# 4. 从备份恢复
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="gunzip /tmp/dump_20260331_120000.rdb.gz
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="# 5. 启动 Redis
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="systemctl start redis
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="# 6. 验证数据
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="redis-cli PING
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="redis-cli DBSIZE
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="
#!/bin/bash
# Redis 灾难恢复演练
echo "=== Redis 灾难恢复演练 ==="
# 1. 停止 Redis
systemctl stop redis
# 2. 备份当前数据
cp /var/lib/redis/dump.rdb /var/lib/redis/dump.rdb.backup
cp /var/lib/redis/appendonly.aof /var/lib/redis/appendonly.aof.backup
# 3. 模拟数据丢失
rm /var/lib/redis/dump.rdb
rm /var/lib/redis/appendonly.aof
# 4. 从备份恢复
cp /data/redis/backups/dump_20260331_120000.rdb.gz /tmp/
gunzip /tmp/dump_20260331_120000.rdb.gz
cp /tmp/dump_20260331_120000.rdb /var/lib/redis/dump.rdb
# 5. 启动 Redis
systemctl start redis
# 6. 验证数据
redis-cli PING
redis-cli DBSIZE
echo "=== 演练完成 ==="echo "=== 演练完成 ==="
总结
Redis 的 RDB 和 AOF 持久化机制各有优劣:
- RDB:适合备份和灾难恢复,性能高但数据完整性较低
- AOF:数据完整性好,但文件大且恢复慢
- 混合持久化:结合两者优势,是生产环境的最佳选择
关键要点:
- 理解业务需求,选择合适的持久化策略
- 监控 fork 耗时和磁盘 I/O,避免性能问题
- 定期进行备份和恢复演练,确保数据安全
- 根据实际负载动态调整配置参数
参考资料
作者简介:本文作者专注于 Redis 源码研究和性能优化,深入分析过 Redis 7.x 的多个核心模块。
版权声明:本文为原创内容,转载请注明出处。
附录:完整测试代码
A. 性能测试脚本
python
#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}")#!/usr/bin/env python3`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}")# Redis 持久化性能测试`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}")`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}")import redis`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}")import time`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}")import statistics`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}")`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}")def test_write_performance(mode='rdb', iterations=100000):`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") """测试不同持久化模式下的写入性能"""`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") `
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") r = redis.Redis(host='localhost', port=6379, decode_responses=True)`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") `
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") # 清空数据`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") r.flushall()`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") `
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") # 配置持久化模式`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") if mode == 'rdb':`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") r.config_set('appendonly', 'no')`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") r.config_set('save', '300 10')`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") elif mode == 'aof-everysec':`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") r.config_set('appendonly', 'yes')`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") r.config_set('appendfsync', 'everysec')`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") elif mode == 'aof-always':`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") r.config_set('appendonly', 'yes')`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") r.config_set('appendfsync', 'always')`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") `
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") # 等待配置生效`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") time.sleep(2)`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") `
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") # 开始测试`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") latencies = []`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") start_time = time.time()`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") `
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") for i in range(iterations):`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") key = f"test_key_{i}"`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") value = f"test_value_{i}" * 10 # 约100字节的值`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") `
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") # 记录延迟`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") op_start = time.time()`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") r.set(key, value)`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") op_end = time.time()`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") `
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") latencies.append((op_end - op_start) * 1000) # 转换为毫秒`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") `
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") end_time = time.time()`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") `
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") # 计算统计信息`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") total_time = end_time - start_time`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") qps = iterations / total_time`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") avg_latency = statistics.mean(latencies)`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") p99_latency = statistics.quantiles(latencies, n=100)[98] # P99`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") `
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") print(f"\n=== {mode.upper()} 模式测试结果 ===")`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") print(f"总操作数: {iterations:,}")`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") print(f"总耗时: {total_time:.2f} 秒")`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") print(f"QPS: {qps:,.0f}")`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") print(f"平均延迟: {avg_latency:.3f} ms")`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") print(f"P99 延迟: {p99_latency:.3f} ms")`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") `
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") return {`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") 'mode': mode,`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") 'qps': qps,`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") 'avg_latency': avg_latency,`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") 'p99_latency': p99_latency`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") }`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}")`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}")`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}")if __name__ == '__main__':`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") modes = ['rdb', 'aof-everysec', 'aof-always']`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") results = []`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") `
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") for mode in modes:`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") result = test_write_performance(mode, iterations=10000)`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") results.append(result)`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") time.sleep(5) # 等待系统稳定`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") `
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") # 输出对比表格`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") print("\n=== 性能对比 ===")`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") print("-" * 65)`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") for r in results:`
`#!/usr/bin/env python3
# Redis 持久化性能测试
import redis
import time
import statistics
def test_write_performance(mode='rdb', iterations=100000):
"""测试不同持久化模式下的写入性能"""
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
# 清空数据
r.flushall()
# 配置持久化模式
if mode == 'rdb':
r.config_set('appendonly', 'no')
r.config_set('save', '300 10')
elif mode == 'aof-everysec':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'everysec')
elif mode == 'aof-always':
r.config_set('appendonly', 'yes')
r.config_set('appendfsync', 'always')
# 等待配置生效
time.sleep(2)
# 开始测试
latencies = []
start_time = time.time()
for i in range(iterations):
key = f"test_key_{i}"
value = f"test_value_{i}" * 10 # 约100字节的值
# 记录延迟
op_start = time.time()
r.set(key, value)
op_end = time.time()
latencies.append((op_end - op_start) * 1000) # 转换为毫秒
end_time = time.time()
# 计算统计信息
total_time = end_time - start_time
qps = iterations / total_time
avg_latency = statistics.mean(latencies)
p99_latency = statistics.quantiles(latencies, n=100)[98] # P99
print(f"\n=== {mode.upper()} 模式测试结果 ===")
print(f"总操作数: {iterations:,}")
print(f"总耗时: {total_time:.2f} 秒")
print(f"QPS: {qps:,.0f}")
print(f"平均延迟: {avg_latency:.3f} ms")
print(f"P99 延迟: {p99_latency:.3f} ms")
return {
'mode': mode,
'qps': qps,
'avg_latency': avg_latency,
'p99_latency': p99_latency
}
if __name__ == '__main__':
modes = ['rdb', 'aof-everysec', 'aof-always']
results = []
for mode in modes:
result = test_write_performance(mode, iterations=10000)
results.append(result)
time.sleep(5) # 等待系统稳定
# 输出对比表格
print("\n=== 性能对比 ===")
print(f"{'模式':<20} {'QPS':>15} {'平均延迟(ms)':>15} {'P99延迟(ms)':>15}")
print("-" * 65)
for r in results:
print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}") print(f"{r['mode']:<20} {r['qps']:>15,.0f} {r['avg_latency']:>15.3f} {r['p99_latency']:>15.3f}")
B. RDB 文件解析工具
python
#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse()#!/usr/bin/env python3`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse()# RDB 文件解析工具(简化版)`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse()`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse()import struct`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse()import sys`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse()`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse()class RDBParser:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() """RDB 文件解析器"""`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() `
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() # 操作码定义`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() OPcodes = {`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() 0xFF: 'EOF',`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() 0xFA: 'Auxiliary field',`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() 0xFB: 'ResizeDB',`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() 0xFC: 'Expire time ms',`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() 0xFD: 'Expire time s',`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() 0xFE: 'Select DB',`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() 0xFF: 'EOF'`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() }`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() `
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() def __init__(self, filename):`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() self.filename = filename`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() self.file = None`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() `
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() def __enter__(self):`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() self.file = open(self.filename, 'rb')`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() return self`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() `
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() def __exit__(self, *args):`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() if self.file:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() self.file.close()`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() `
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() def parse_header(self):`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() """解析 RDB 文件头"""`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() magic = self.file.read(5)`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() if magic != b'REDIS':`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() raise ValueError("Invalid RDB file")`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() `
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() print(f"RDB Version: {version}")`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() return version`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() `
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() def parse_length(self):`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() """解析长度编码"""`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() first_byte = self.file.read(1)`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() if not first_byte:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() return None`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() `
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() first_byte = ord(first_byte)`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() length_type = first_byte >> 6`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() `
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() if length_type == 0:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() # 6 bit length`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() return first_byte & 0x3F`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() elif length_type == 1:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() # 14 bit length`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() second_byte = ord(self.file.read(1))`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() return ((first_byte & 0x3F) << 8) | second_byte`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() elif length_type == 2:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() # 32 bit length`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() data = self.file.read(4)`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() return struct.unpack('>I', data)[0]`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() else:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() # 下一个字节编码长度`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() return struct.unpack('>Q', self.file.read(8))[0]`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() `
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() def parse_string(self):`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() """解析字符串"""`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() length = self.parse_length()`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() if length is None:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() return None`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() `
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() value = self.file.read(length)`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() return value.decode('utf-8')`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() `
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() def parse(self):`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() """解析 RDB 文件"""`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() try:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() version = self.parse_header()`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() print(f"✓ 成功解析 RDB 文件头 (版本 {version})")`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() `
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() while True:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() opcode = self.file.read(1)`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() if not opcode:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() break`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() `
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() opcode = ord(opcode)`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() `
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() if opcode == 0xFF:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() print("✓ 到达文件末尾")`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() break`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() elif opcode == 0xFE:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() # Select DB`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() db_num = self.parse_length()`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() print(f"✓ 选择数据库: {db_num}")`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() elif opcode == 0xFB:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() # Resize DB`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() db_size = self.parse_length()`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() expires_size = self.parse_length()`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() elif opcode in [0xFC, 0xFD]:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() # Expire time`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() if opcode == 0xFC:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() expire_time = struct.unpack('>q', self.file.read(8))[0]`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() else:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() expire_time = struct.unpack('>i', self.file.read(4))[0]`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() print(f"✓ 过期时间: {expire_time}")`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() else:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() # Key-Value 对`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() print(f"✓ 读取键值对...")`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() key = self.parse_string()`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() print(f" Key: {key}")`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() `
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() except Exception as e:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() print(f"✗ 解析错误: {e}")`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() import traceback`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() traceback.print_exc()`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse()`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse()`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse()if __name__ == '__main__':`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() if len(sys.argv) < 2:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() print("Usage: python rdb_parser.py <rdb_file>")`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() sys.exit(1)`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() `
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() parser = RDBParser(sys.argv[1])`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() with parser:`
`#!/usr/bin/env python3
# RDB 文件解析工具(简化版)
import struct
import sys
class RDBParser:
"""RDB 文件解析器"""
# 操作码定义
OPcodes = {
0xFF: 'EOF',
0xFA: 'Auxiliary field',
0xFB: 'ResizeDB',
0xFC: 'Expire time ms',
0xFD: 'Expire time s',
0xFE: 'Select DB',
0xFF: 'EOF'
}
def __init__(self, filename):
self.filename = filename
self.file = None
def __enter__(self):
self.file = open(self.filename, 'rb')
return self
def __exit__(self, *args):
if self.file:
self.file.close()
def parse_header(self):
"""解析 RDB 文件头"""
magic = self.file.read(5)
if magic != b'REDIS':
raise ValueError("Invalid RDB file")
version = struct.unpack('>I', b'\x00' + self.file.read(4))[0]
print(f"RDB Version: {version}")
return version
def parse_length(self):
"""解析长度编码"""
first_byte = self.file.read(1)
if not first_byte:
return None
first_byte = ord(first_byte)
length_type = first_byte >> 6
if length_type == 0:
# 6 bit length
return first_byte & 0x3F
elif length_type == 1:
# 14 bit length
second_byte = ord(self.file.read(1))
return ((first_byte & 0x3F) << 8) | second_byte
elif length_type == 2:
# 32 bit length
data = self.file.read(4)
return struct.unpack('>I', data)[0]
else:
# 下一个字节编码长度
return struct.unpack('>Q', self.file.read(8))[0]
def parse_string(self):
"""解析字符串"""
length = self.parse_length()
if length is None:
return None
value = self.file.read(length)
return value.decode('utf-8')
def parse(self):
"""解析 RDB 文件"""
try:
version = self.parse_header()
print(f"✓ 成功解析 RDB 文件头 (版本 {version})")
while True:
opcode = self.file.read(1)
if not opcode:
break
opcode = ord(opcode)
if opcode == 0xFF:
print("✓ 到达文件末尾")
break
elif opcode == 0xFE:
# Select DB
db_num = self.parse_length()
print(f"✓ 选择数据库: {db_num}")
elif opcode == 0xFB:
# Resize DB
db_size = self.parse_length()
expires_size = self.parse_length()
print(f"✓ 数据库大小: {db_size}, 过期键数: {expires_size}")
elif opcode in [0xFC, 0xFD]:
# Expire time
if opcode == 0xFC:
expire_time = struct.unpack('>q', self.file.read(8))[0]
else:
expire_time = struct.unpack('>i', self.file.read(4))[0]
print(f"✓ 过期时间: {expire_time}")
else:
# Key-Value 对
print(f"✓ 读取键值对...")
key = self.parse_string()
print(f" Key: {key}")
except Exception as e:
print(f"✗ 解析错误: {e}")
import traceback
traceback.print_exc()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python rdb_parser.py <rdb_file>")
sys.exit(1)
parser = RDBParser(sys.argv[1])
with parser:
parser.parse() parser.parse()
C. AOF 重写演示脚本
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE#!/bin/bash
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE# AOF 重写演示
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEecho "=== AOF 重写演示 ==="
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE# 启动 Redis(AOF 模式)
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEredis-server --daemonize yes \
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE --appendonly yes \
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE --appendfsync everysec \
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE --auto-aof-rewrite-percentage 100 \
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE --auto-aof-rewrite-min-size 1mb
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEsleep 2
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE# 写入数据
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEecho "1. 写入 100,000 个键..."
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEredis-cli --pipe <<EOF
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVESET key1 value1
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVESET key2 value2
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE...
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEEOF
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE# 或者使用 Python 批量写入
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEpython3 <<'PY'
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEimport redis
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEr = redis.Redis(host='localhost', port=6379)
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEpipe = r.pipeline()
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEfor i in range(100000):
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE pipe.set(f"key_{i}", f"value_{i}" * 100)
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEpipe.execute()
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEPY
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE# 查看当前 AOF 文件大小
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEAOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEecho "2. 当前 AOF 文件大小: $AOF_SIZE"
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE# 手动触发重写
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEecho "3. 触发 AOF 重写..."
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEredis-cli BGREWRITEAOF
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE# 等待重写完成
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEwhile [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE sleep 1
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEdone
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE# 查看重写后的文件大小
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVENEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEecho "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE# 查看 AOF 统计信息
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEredis-cli INFO persistence | grep aof
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEecho "=== 演示完成 ==="
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVE# 清理
#!/bin/bash
# AOF 重写演示
echo "=== AOF 重写演示 ==="
# 启动 Redis(AOF 模式)
redis-server --daemonize yes \
--appendonly yes \
--appendfsync everysec \
--auto-aof-rewrite-percentage 100 \
--auto-aof-rewrite-min-size 1mb
sleep 2
# 写入数据
echo "1. 写入 100,000 个键..."
redis-cli --pipe <<EOF
SET key1 value1
SET key2 value2
...
EOF
# 或者使用 Python 批量写入
python3 <<'PY'
import redis
r = redis.Redis(host='localhost', port=6379)
pipe = r.pipeline()
for i in range(100000):
pipe.set(f"key_{i}", f"value_{i}" * 100)
pipe.execute()
PY
# 查看当前 AOF 文件大小
AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "2. 当前 AOF 文件大小: $AOF_SIZE"
# 手动触发重写
echo "3. 触发 AOF 重写..."
redis-cli BGREWRITEAOF
# 等待重写完成
while [ "$(redis-cli LASTSAVE)" == "$(redis-cli LASTSAVE)" ]; do
sleep 1
done
# 查看重写后的文件大小
NEW_AOF_SIZE=$(du -h /var/lib/redis/appendonly.aof | cut -f1)
echo "4. 重写后 AOF 文件大小: $NEW_AOF_SIZE"
# 查看 AOF 统计信息
redis-cli INFO persistence | grep aof
echo "=== 演示完成 ==="
# 清理
redis-cli SHUTDOWN NOSAVEredis-cli SHUTDOWN NOSAVE
面试常见问题
Q1: Redis 7.x 的 AOF 相比之前版本有哪些改进?
答案要点:
- 多文件 AOF 机制:引入了 BASE、INCR、HISTORY 三种文件类型
- AOF Manifest:统一管理所有 AOF 文件,提高可靠性
- 无盘复制:支持直接通过套接字传输 RDB,减少磁盘 I/O
- 多线程 AOF:Redis 7.0 支持多线程 fsync,提高性能
Q2: RDB 文件中的 Copy-on-Write 是如何工作的?
答案要点:
-
fork() 之后:父子进程共享相同的物理内存页
-
页表复制:fork 复制页表而非实际内存页
-
写时复制:只有当某个进程修改内存页时,才真正复制该页
-
影响:fork 后,修改的数据越多,COW 开销越大
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
}// Linux 中的 fork 实现(简化)
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
}pid_t fork(void) {
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
} // 1. 复制进程描述符
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
} // 2. 复制页表(而非内存页)
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
} copy_page_tables(parent, child);
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
}
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
} // 3. 标记所有内存页为只读
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
} mark_pages_read_only();
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
}
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
} return child_pid;
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
}}
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
}
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
}// 写时复制处理
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
}void page_fault_handler(void *addr) {
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
} if (page_is_shared(addr)) {
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
} // 真正复制该内存页
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
} void *new_page = allocate_page();
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
} copy_page(new_page, addr);
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
} update_page_table(new_page);
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
} }
#// Linux 中的 fork 实现(简化)
pid_t fork(void) {
// 1. 复制进程描述符
// 2. 复制页表(而非内存页)
copy_page_tables(parent, child);// 3. 标记所有内存页为只读 mark_pages_read_only(); return child_pid;}
// 写时复制处理
void page_fault_handler(void *addr) {
if (page_is_shared(addr)) {
// 真正复制该内存页
void *new_page = allocate_page();
copy_page(new_page, addr);
update_page_table(new_page);
}
}}
Q3: 为什么 AOF 重写时不阻塞主进程?
答案要点:
-
fork 子进程:重写在子进程中进行
-
主进程继续服务:同时继续处理客户端请求
-
重写缓冲区 :主进程将重写期间的写命令追加到
aof_rewrite_buf_blocks -
合并机制:重写完成后,主进程将增量命令合并到新文件
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }}// Redis 7.2 AOF 重写缓冲区实现
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }}struct aofrwblock {
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} int used, free;
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} char buf[AOF_RW_TBL_BLOCK_SZ];
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }}};
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }}
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }}// 主进程在重写期间追加命令
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }}void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} listNode *ln = listLast(server.aof_rewrite_buf_blocks);
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} aofrwblock *block = ln ? ln->value : NULL;
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }}
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} // 追加到缓冲区
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} while (len) {
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} if (block->free == 0) {
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} // 分配新块
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} block = zmalloc(sizeof(*block));
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} block->free = AOF_RW_TBL_BLOCK_SZ;
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} block->used = 0;
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} listAddNodeTail(server.aof_rewrite_buf_blocks, block);
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} }
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }}
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} // 复制数据
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} tocopy = min(block->free, len);
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} memcpy(block->buf + block->used, s, tocopy);
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} block->used += tocopy;
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} block->free -= tocopy;
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }}
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} len -= tocopy;
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} s += tocopy;
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }} }
#// Redis 7.2 AOF 重写缓冲区实现
struct aofrwblock {
int used, free;
char buf[AOF_RW_TBL_BLOCK_SZ];
};// 主进程在重写期间追加命令
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
listNode *ln = listLast(server.aof_rewrite_buf_blocks);
aofrwblock *block = ln ? ln->value : NULL;// 追加到缓冲区 while (len) { if (block->free == 0) { // 分配新块 block = zmalloc(sizeof(*block)); block->free = AOF_RW_TBL_BLOCK_SZ; block->used = 0; listAddNodeTail(server.aof_rewrite_buf_blocks, block); } // 复制数据 tocopy = min(block->free, len); memcpy(block->buf + block->used, s, tocopy); block->used += tocopy; block->free -= tocopy; len -= tocopy; s += tocopy; }}}
Q4: 如何选择 RDB 和 AOF 的触发时机?
答案要点:
- RDB 触发时机 :
- 定期快照(根据
save配置) - 主从复制时
- 手动执行
BGSAVE
- 定期快照(根据
- AOF 重写触发时机 :
- 文件大小达到阈值(
auto-aof-rewrite-min-size) - 文件增长率超过阈值(
auto-aof-rewrite-percentage) - 手动执行
BGREWRITEAOF
- 文件大小达到阈值(
- 最佳实践 :
- 在业务低峰期触发
- 监控 fork 耗时,避免阻塞
- 使用监控工具告警
延伸阅读
- Redis 内存优化:了解如何通过数据结构选择和参数调优降低内存占用
- Redis 集群模式:探索 Redis Cluster 和 Sentinel 的高可用方案
- Redis 慢查询分析:掌握性能瓶颈定位和优化技巧
- Redis 8.0 新特性:关注即将发布的 Redis 8.0 带来的新功能
更新日志
- 2026-03-31: 初版发布,基于 Redis 7.2 源码分析
- 计划更新: 添加 Redis 8.0 新特性分析
感谢阅读!如果本文对你有帮助,欢迎点赞、收藏、评论。