Redis编程开发知识点详解
一、Lettuce客户端基础
1.1 RedisClient基础连接
java
// 导入必要的包
import io.lettuce.core.RedisClient;
import io.lettuce.core.RedisURI;
import io.lettuce.core.api.StatefulRedisConnection;
import io.lettuce.core.api.sync.RedisCommands;
/**
* RedisClient基础连接示例
* 演示如何使用Lettuce创建和管理Redis连接
*/
/**
 * Minimal Lettuce usage: build a RedisURI, open one connection,
 * run a few synchronous commands, and shut everything down.
 */
public class RedisClientDemo {
    public static void main(String[] args) {
        // Describe the connection target: host, port, auth, database, timeout.
        RedisURI uri = RedisURI.builder()
                .withHost("localhost")                         // Redis server host
                .withPort(6379)                                // Redis server port
                .withPassword("password".toCharArray())        // authentication
                .withDatabase(0)                               // logical database index
                .withTimeout(java.time.Duration.ofSeconds(10)) // connect timeout
                .build();
        // One client can produce many connections; create it once.
        RedisClient client = RedisClient.create(uri);
        try (StatefulRedisConnection<String, String> conn = client.connect()) {
            // Synchronous (blocking) command interface.
            RedisCommands<String, String> commands = conn.sync();
            commands.set("key", "Hello Redis");       // write one key
            String value = commands.get("key");       // read it back
            System.out.println("获取的值: " + value);
            // Multi-key write in a single round trip.
            commands.mset("key1", "value1", "key2", "value2");
            // Let "key" expire after 60 seconds.
            commands.expire("key", 60);
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            // Release the client's I/O threads and resources.
            client.shutdown();
        }
    }
}
1.2 Redis连接池配置
java
import io.lettuce.core.RedisClient;
import io.lettuce.core.RedisURI;
import io.lettuce.core.api.StatefulRedisConnection;
import io.lettuce.core.api.sync.RedisCommands;
import io.lettuce.core.support.ConnectionPoolSupport;
import org.apache.commons.pool2.impl.GenericObjectPool;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
/**
* Redis连接池配置示例
* 使用连接池管理Redis连接,提高性能
*/
/**
 * Pooled Lettuce connections backed by Apache commons-pool2.
 * One RedisClient supplies the connections; the pool bounds and validates them.
 */
public class RedisConnectionPoolDemo {
    private GenericObjectPool<StatefulRedisConnection<String, String>> pool;
    private RedisClient redisClient;

    /**
     * Creates the client and the connection pool. Call once before use.
     */
    public void init() {
        // Connection target.
        RedisURI redisUri = RedisURI.builder()
                .withHost("localhost")
                .withPort(6379)
                .build();
        redisClient = RedisClient.create(redisUri);
        // Pool sizing and validation policy.
        GenericObjectPoolConfig<StatefulRedisConnection<String, String>> poolConfig =
                new GenericObjectPoolConfig<>();
        poolConfig.setMaxTotal(20);        // max connections overall
        poolConfig.setMaxIdle(10);         // max idle connections retained
        poolConfig.setMinIdle(5);          // idle connections kept warm
        poolConfig.setMaxWaitMillis(3000); // max wait when the pool is exhausted
        poolConfig.setTestOnBorrow(true);  // validate the connection on borrow
        poolConfig.setTestOnReturn(true);  // validate the connection on return
        // The pool creates connections from the client on demand.
        pool = ConnectionPoolSupport.createGenericObjectPool(
                () -> redisClient.connect(), poolConfig);
    }

    /**
     * Borrows a connection, runs a few commands, and returns it.
     * Connections handed out by ConnectionPoolSupport are proxied so that
     * close() RETURNS the connection to the pool rather than terminating it —
     * try-with-resources therefore guarantees the return on every path
     * (clearer than the original borrow/finally/close dance).
     */
    public void executeWithPool() {
        try (StatefulRedisConnection<String, String> connection = pool.borrowObject()) {
            RedisCommands<String, String> sync = connection.sync();
            // Simple string round trip.
            sync.set("user:1001", "张三");
            String user = sync.get("user:1001");
            System.out.println("查询用户: " + user);
            // Hash writes, field by field.
            sync.hset("user:1001:info", "name", "张三");
            sync.hset("user:1001:info", "age", "25");
            sync.hset("user:1001:info", "city", "北京");
            // Read the whole hash back.
            var userInfo = sync.hgetall("user:1001:info");
            userInfo.forEach((field, value) ->
                    System.out.println(field + ": " + value));
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Shuts down the pool first (closing pooled connections), then the client.
     */
    public void destroy() {
        if (pool != null) {
            pool.close();
        }
        if (redisClient != null) {
            redisClient.shutdown();
        }
    }
}
1.3 RedisAsyncCommands异步操作
java
import io.lettuce.core.RedisClient;
import io.lettuce.core.RedisURI;
import io.lettuce.core.api.StatefulRedisConnection;
import io.lettuce.core.api.async.RedisAsyncCommands;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
/**
* Redis异步操作示例
* 演示非阻塞的异步Redis操作
*/
/**
 * Asynchronous Redis operations with Lettuce's RedisAsyncCommands.
 * Every command returns a RedisFuture (a CompletionStage) and never blocks the caller.
 */
public class RedisAsyncDemo {
    private RedisClient redisClient;
    private StatefulRedisConnection<String, String> connection;
    private RedisAsyncCommands<String, String> asyncCommands;

    /** Opens a single connection and caches the async command interface. */
    public void init() {
        RedisURI redisUri = RedisURI.builder()
                .withHost("localhost")
                .withPort(6379)
                .build();
        redisClient = RedisClient.create(redisUri);
        connection = redisClient.connect();
        asyncCommands = connection.async();
    }

    /**
     * Basic async patterns: callbacks, composition, CompletableFuture bridging,
     * and a MULTI/EXEC transaction.
     *
     * @throws Exception if waiting on the bridged future times out or fails
     */
    public void asyncOperations() throws Exception {
        // 1. Fire-and-forget SET; the callback runs when the reply arrives.
        asyncCommands.set("async:key", "异步值")
                .thenAccept(result -> {
                    System.out.println("设置结果: " + result);
                });
        // 2. Chained operations: SET, then GET, with a single error handler.
        asyncCommands.set("user:2001", "李四")
                .thenCompose(result -> {
                    System.out.println("设置用户成功: " + result);
                    return asyncCommands.get("user:2001");
                })
                .thenAccept(user -> {
                    System.out.println("获取用户: " + user);
                })
                .exceptionally(throwable -> {
                    System.err.println("操作失败: " + throwable.getMessage());
                    return null;
                });
        // 3. Bridge a RedisFuture into a plain CompletableFuture.
        CompletableFuture<String> future = new CompletableFuture<>();
        asyncCommands.get("user:2001").whenComplete((value, throwable) -> {
            if (throwable != null) {
                future.completeExceptionally(throwable);
            } else {
                future.complete(value);
            }
        });
        // 4. Block with a timeout to observe the result (demo only).
        String result = future.get(5, TimeUnit.SECONDS);
        System.out.println("异步结果: " + result);
        // 5. Transaction: MULTI queues the commands, EXEC commits them.
        asyncCommands.multi();
        asyncCommands.set("tx:1", "value1");
        asyncCommands.set("tx:2", "value2");
        asyncCommands.exec();
    }

    /**
     * Manual pipelining: buffer commands locally, then send them in one burst.
     * FIX: the original never re-enabled auto-flush, leaving the shared
     * connection in manual-flush mode — every later command on it would sit in
     * the local buffer and appear to hang. The finally block restores it.
     */
    public void asyncPipeline() {
        // Disable auto-flush so queued commands are buffered, not sent.
        asyncCommands.setAutoFlushCommands(false);
        try {
            // Queue 100 writes (nothing is sent yet).
            for (int i = 0; i < 100; i++) {
                asyncCommands.set("bulk:" + i, "data-" + i);
            }
            // Queue 100 reads (nothing is sent yet).
            for (int i = 0; i < 100; i++) {
                asyncCommands.get("bulk:" + i);
            }
            // Flush the pipeline: send all buffered commands at once.
            asyncCommands.flushCommands();
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            // Restore normal per-command flushing for subsequent users.
            asyncCommands.setAutoFlushCommands(true);
        }
    }

    /** Closes the connection, then releases client resources. */
    public void close() {
        if (connection != null) {
            connection.close();
        }
        if (redisClient != null) {
            redisClient.shutdown();
        }
    }
}
1.4 RedisReactiveCommands响应式操作
java
import io.lettuce.core.RedisClient;
import io.lettuce.core.RedisURI;
import io.lettuce.core.api.StatefulRedisConnection;
import io.lettuce.core.api.reactive.RedisReactiveCommands;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
/**
* Redis响应式操作示例
* 基于Reactive Streams的非阻塞操作
*/
/**
 * Reactive Redis operations using Project Reactor types (Mono/Flux).
 * Nothing executes until a subscriber attaches to the pipeline.
 */
public class RedisReactiveDemo {
    private RedisClient redisClient;
    private StatefulRedisConnection<String, String> connection;
    private RedisReactiveCommands<String, String> reactiveCommands;

    /** Opens one connection and caches the reactive command interface. */
    public void init() {
        RedisURI redisUri = RedisURI.builder()
                .withHost("localhost")
                .withPort(6379)
                .build();
        redisClient = RedisClient.create(redisUri);
        connection = redisClient.connect();
        reactiveCommands = connection.reactive();
    }

    /** Basic Mono (zero-or-one) and Flux (many) usage. */
    public void reactiveOperations() {
        // 1. Mono: single-value results.
        Mono<String> setMono = reactiveCommands.set("reactive:key", "响应式值");
        Mono<String> getMono = reactiveCommands.get("reactive:key");
        setMono.then(getMono)
                .subscribe(value -> {
                    System.out.println("获取响应式值: " + value);
                });
        // 2. Flux: LPUSH, then stream the whole list back.
        Flux<String> stringFlux = reactiveCommands.lpush("list", "a", "b", "c")
                .thenMany(reactiveCommands.lrange("list", 0, -1));
        stringFlux.subscribe(
                item -> System.out.println("列表项: " + item),
                error -> System.err.println("错误: " + error),
                () -> System.out.println("完成")
        );
        // 3. Sequence three hash writes, then stream the hash entries.
        reactiveCommands.hset("user:3001", "name", "王五")
                .then(reactiveCommands.hset("user:3001", "age", "30"))
                .then(reactiveCommands.hset("user:3001", "city", "上海"))
                .thenMany(reactiveCommands.hgetall("user:3001"))
                .subscribe(entry ->
                        System.out.println(entry.getKey() + ": " + entry.getValue()));
    }

    /** Stream-style transformation of Redis results. */
    public void reactiveStreamProcessing() {
        // 1. map/filter over set members.
        reactiveCommands.sadd("numbers", "1", "2", "3", "4", "5")
                .thenMany(reactiveCommands.smembers("numbers"))
                .map(Integer::parseInt)
                .filter(num -> num % 2 == 0)
                .map(num -> "偶数: " + num)
                .subscribe(System.out::println);
        // 2. Merge two fluxes, de-duplicating across both.
        Flux<String> flux1 = reactiveCommands.lrange("list1", 0, -1);
        Flux<String> flux2 = reactiveCommands.lrange("list2", 0, -1);
        Flux.merge(flux1, flux2)
                .distinct()
                .subscribe(item -> System.out.println("合并项: " + item));
        // 3. Fallback value.
        // FIX: GET on a missing key completes EMPTY (no value), it does not
        // error — so the original onErrorReturn never fired. defaultIfEmpty
        // supplies the fallback; onErrorReturn still guards real failures.
        reactiveCommands.get("non-existent-key")
                .defaultIfEmpty("默认值")
                .onErrorReturn("默认值")
                .subscribe(System.out::println);
    }

    /**
     * Reactive MULTI/EXEC.
     * FIX: replies to commands queued inside MULTI only arrive when EXEC runs,
     * so the original multi().then(set).then(set).then(exec) chain waited on
     * the first SET forever and EXEC was never issued (deadlock). The commands
     * are queued with fire-and-forget subscriptions once MULTI succeeds, and
     * only MULTI -> EXEC are sequenced.
     */
    public void reactiveTransaction() {
        reactiveCommands.multi()
                .doOnSuccess(status -> {
                    reactiveCommands.set("tx:key1", "value1").subscribe();
                    reactiveCommands.set("tx:key2", "value2").subscribe();
                })
                .then(reactiveCommands.exec())
                .subscribe(transactionResult -> {
                    System.out.println("事务执行成功");
                });
    }
}
二、Spring Data Redis集成
2.1 Spring Data Redis配置
java
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.connection.RedisStandaloneConfiguration;
import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.GenericJackson2JsonRedisSerializer;
import org.springframework.data.redis.serializer.StringRedisSerializer;
import org.springframework.data.redis.serializer.JdkSerializationRedisSerializer;
import org.springframework.data.redis.serializer.RedisSerializer;
/**
* Spring Data Redis配置类
*/
@Configuration
public class RedisConfig {
    /**
     * Standalone-mode Lettuce connection factory.
     */
    @Bean
    public RedisConnectionFactory redisConnectionFactory() {
        RedisStandaloneConfiguration config = new RedisStandaloneConfiguration();
        config.setHostName("localhost");
        config.setPort(6379);
        config.setDatabase(0);
        config.setPassword("redispassword"); // omit if the server has no auth
        return new LettuceConnectionFactory(config);
    }

    /**
     * General-purpose template: String keys, JSON values (with embedded type
     * info so arbitrary objects round-trip).
     */
    @Bean
    public RedisTemplate<String, Object> redisTemplate(
            RedisConnectionFactory connectionFactory) {
        RedisTemplate<String, Object> template = new RedisTemplate<>();
        template.setConnectionFactory(connectionFactory);
        // Keys and hash keys as plain strings (readable in redis-cli).
        StringRedisSerializer stringSerializer = new StringRedisSerializer();
        template.setKeySerializer(stringSerializer);
        template.setHashKeySerializer(stringSerializer);
        // Values and hash values as JSON.
        GenericJackson2JsonRedisSerializer jsonSerializer =
                new GenericJackson2JsonRedisSerializer();
        template.setValueSerializer(jsonSerializer);
        template.setHashValueSerializer(jsonSerializer);
        // Enables MULTI/EXEC participation in Spring-managed transactions.
        template.setEnableTransactionSupport(true);
        template.afterPropertiesSet();
        return template;
    }

    /**
     * Template specialized for User values.
     * FIX: the original set only key/value serializers, so hash keys and hash
     * values silently fell back to JDK serialization — inconsistent with the
     * generic template above. It also never called afterPropertiesSet().
     */
    @Bean
    public RedisTemplate<String, User> userRedisTemplate(
            RedisConnectionFactory connectionFactory) {
        RedisTemplate<String, User> template = new RedisTemplate<>();
        template.setConnectionFactory(connectionFactory);
        // Keys (and hash keys) as plain strings.
        StringRedisSerializer stringSerializer = new StringRedisSerializer();
        template.setKeySerializer(stringSerializer);
        template.setHashKeySerializer(stringSerializer);
        // Values (and hash values) as JSON.
        GenericJackson2JsonRedisSerializer jsonSerializer =
                new GenericJackson2JsonRedisSerializer();
        template.setValueSerializer(jsonSerializer);
        template.setHashValueSerializer(jsonSerializer);
        template.afterPropertiesSet();
        return template;
    }
}
2.2 RedisTemplate基础操作
java
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.*;
import org.springframework.stereotype.Service;
import java.util.*;
import java.util.concurrent.TimeUnit;
/**
* RedisTemplate操作示例
*/
@Service
public class RedisTemplateDemo {
@Autowired
private RedisTemplate<String, Object> redisTemplate;
/**
 * String (simple value) operations via ValueOperations.
 */
public void stringOperations() {
// Obtain the String-type operations view
ValueOperations<String, Object> ops = redisTemplate.opsForValue();
// 1. Set a value
ops.set("name", "张三");
// 2. Set a value together with an expiration time
ops.set("token", "xyz123", 30, TimeUnit.MINUTES);
// 3. Read a value
String name = (String) ops.get("name");
System.out.println("获取name: " + name);
// 4. Set only if the key does not already exist (SETNX semantics)
Boolean setIfAbsent = ops.setIfAbsent("counter", 1);
System.out.println("设置不存在的key: " + setIfAbsent);
// 5. Increment / decrement
// NOTE(review): INCR requires the stored bytes to be an integer string; if the
// template's value serializer is JSON this may fail at runtime — verify the
// configured serializer before relying on increment().
Long increment = ops.increment("counter");
System.out.println("递增后: " + increment);
// 6. Multi-key batch set
Map<String, String> map = new HashMap<>();
map.put("user1", "value1");
map.put("user2", "value2");
ops.multiSet(map);
// 7. Append to the string value; returns the new length
Integer append = ops.append("name", " - 李四");
System.out.println("追加后长度: " + append);
}
/**
 * Hash operations via HashOperations.
 */
public void hashOperations() {
// Obtain the Hash-type operations view
HashOperations<String, Object, Object> ops = redisTemplate.opsForHash();
// 1. Set hash fields one by one
ops.put("user:1001", "name", "张三");
ops.put("user:1001", "age", "25");
ops.put("user:1001", "city", "北京");
// 2. Batch set from a map
Map<String, Object> userMap = new HashMap<>();
userMap.put("name", "李四");
userMap.put("age", "30");
userMap.put("city", "上海");
ops.putAll("user:1002", userMap);
// 3. Read a single field
Object name = ops.get("user:1001", "name");
System.out.println("用户名: " + name);
// 4. Read several fields at once
List<Object> values = ops.multiGet("user:1001",
Arrays.asList("name", "age", "city"));
// 5. Read all field/value pairs
Map<Object, Object> entries = ops.entries("user:1001");
entries.forEach((field, value) ->
System.out.println(field + ": " + value));
// 6. All field names
Set<Object> keys = ops.keys("user:1001");
// 7. All values
List<Object> valueList = ops.values("user:1001");
// 8. Increment a numeric hash field
ops.increment("user:1001", "age", 1);
// 9. Field existence check
Boolean hasKey = ops.hasKey("user:1001", "phone");
}
/**
 * List operations via ListOperations.
 */
public void listOperations() {
// Obtain the List-type operations view
ListOperations<String, Object> ops = redisTemplate.opsForList();
// 1. Push from the left (head)
ops.leftPush("messages", "msg1");
ops.leftPushAll("messages", "msg2", "msg3", "msg4");
// 2. Push from the right (tail)
ops.rightPush("notifications", "notif1");
ops.rightPushAll("notifications", "notif2", "notif3");
// 3. Insert relative to an existing element
// NOTE(review): this overload is leftPush(key, pivot, value) — it inserts
// "newMsg" BEFORE the first occurrence of "msg2", not at the head.
ops.leftPush("messages", "msg2", "newMsg");
// 4. Pop from the left (removes and returns the head)
Object leftPop = ops.leftPop("messages");
// 5. Pop from the right
Object rightPop = ops.rightPop("notifications");
// 6. Blocking pop with a timeout
Object popWithTimeout = ops.rightPop("queue", 5, TimeUnit.SECONDS);
// 7. Range read (0, -1 = the whole list)
List<Object> range = ops.range("messages", 0, -1);
// 8. Read by index
Object indexValue = ops.index("messages", 1);
// 9. List length
Long size = ops.size("messages");
// 10. Trim the list down to the given range
ops.trim("messages", 0, 10);
// 11. Remove occurrences of an element (count=1: first match from the head)
Long removed = ops.remove("messages", 1, "msg1");
}
/**
 * Set operations via SetOperations.
 */
public void setOperations() {
// Obtain the Set-type operations view
SetOperations<String, Object> ops = redisTemplate.opsForSet();
// 1. Add members
ops.add("user:tags", "java", "python", "javascript");
// 2. Remove members
ops.remove("user:tags", "javascript");
// 3. All members
Set<Object> members = ops.members("user:tags");
// 4. Membership test
Boolean isMember = ops.isMember("user:tags", "java");
// 5. Set algebra
ops.add("set1", "a", "b", "c", "d");
ops.add("set2", "c", "d", "e", "f");
// intersection
Set<Object> intersect = ops.intersect("set1", "set2");
// union
Set<Object> union = ops.union("set1", "set2");
// difference (set1 minus set2)
Set<Object> difference = ops.difference("set1", "set2");
// 6. Random member (not removed)
Object randomMember = ops.randomMember("user:tags");
// 7. Random pop (member is removed)
Object pop = ops.pop("user:tags");
// 8. Cardinality
Long size = ops.size("user:tags");
}
/**
 * Sorted-set (ZSet) operations via ZSetOperations.
 */
public void zSetOperations() {
// Obtain the ZSet-type operations view
ZSetOperations<String, Object> ops = redisTemplate.opsForZSet();
// 1. Add members with scores
ops.add("ranking", "user1", 100);
ops.add("ranking", "user2", 90);
ops.add("ranking", "user3", 80);
// 2. Batch add via TypedTuple
Set<ZSetOperations.TypedTuple<Object>> tuples = new HashSet<>();
tuples.add(new DefaultTypedTuple<>("user4", 95.0));
tuples.add(new DefaultTypedTuple<>("user5", 85.0));
ops.add("ranking", tuples);
// 3. Range by rank, ascending
Set<Object> range = ops.range("ranking", 0, 2);
// 4. Range by rank with scores, descending
Set<ZSetOperations.TypedTuple<Object>> reverseRangeWithScores =
ops.reverseRangeWithScores("ranking", 0, 2);
// 5. Range by score
Set<Object> rangeByScore = ops.rangeByScore("ranking", 80, 95);
// 6. Rank of a member (ascending / descending order)
Long rank = ops.rank("ranking", "user1");
Long reverseRank = ops.reverseRank("ranking", "user1");
// 7. Score of a member
Double score = ops.score("ranking", "user1");
// 8. Increment a member's score
ops.incrementScore("ranking", "user1", 10);
// 9. Count members within a score range
Long count = ops.count("ranking", 80, 100);
// 10. Cardinality
Long size = ops.zCard("ranking");
}
}
2.3 对象序列化处理
java
import lombok.Data;
import lombok.Builder;
import lombok.NoArgsConstructor;
import lombok.AllArgsConstructor;
import org.springframework.data.annotation.Id;
import org.springframework.data.redis.core.RedisHash;
import org.springframework.data.redis.core.index.Indexed;
import java.io.Serializable;
import java.time.LocalDateTime;
import java.util.List;
/**
* 可序列化的实体类
* 方式1:实现Serializable接口
*/
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class User implements Serializable {
// Pinned so JDK serialization stays compatible across class changes
private static final long serialVersionUID = 1L;
private Long id;
private String username;
// NOTE(review): serialized/cached as-is — avoid storing raw passwords in Redis
private String password;
private Integer age;
private String email;
private LocalDateTime createTime;
private List<String> roles;
private Address address; // nested object; must itself be Serializable
}
/**
* 嵌套对象
*/
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
class Address implements Serializable {
// Pinned so JDK serialization stays compatible across class changes
private static final long serialVersionUID = 1L;
private String province;
private String city;
private String street;
private String zipCode;
}
/**
* 使用@RedisHash注解的方式
* 方式2:Spring Data Redis的Repository方式
*/
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
@RedisHash("users") // key prefix in Redis: entries are stored under "users:<id>"
public class UserEntity {
@Id
private String id; // becomes part of the Redis key
@Indexed // secondary index, enabling findByUsername-style repository queries
private String username;
private String password;
@Indexed
private Integer age;
private String email;
private LocalDateTime createTime;
}
/**
* 序列化配置示例
*/
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import com.fasterxml.jackson.databind.SerializationFeature;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.serializer.Jackson2JsonRedisSerializer;
import org.springframework.data.redis.serializer.JdkSerializationRedisSerializer;
import org.springframework.data.redis.serializer.OxmSerializer;
import org.springframework.data.redis.serializer.StringRedisSerializer;
@Configuration
public class SerializerConfig {
/**
 * Jackson serializer bound to the User type.
 * Unlike GenericJackson2JsonRedisSerializer it writes no embedded type info,
 * so it can only deserialize into User.
 */
@Bean
public Jackson2JsonRedisSerializer<User> jacksonSerializer() {
// Type-bound serializer: compact JSON, no @class attribute in the payload
Jackson2JsonRedisSerializer<User> serializer =
new Jackson2JsonRedisSerializer<>(User.class);
// Customize the ObjectMapper
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.registerModule(new JavaTimeModule()); // java.time (JSR-310) support
objectMapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS);
// NOTE(review): setObjectMapper is deprecated in Spring Data Redis 3.x in
// favor of the (ObjectMapper, Class) constructor — confirm the project's version.
serializer.setObjectMapper(objectMapper);
return serializer;
}
/**
 * Plain JDK serialization (values must implement Serializable).
 */
@Bean
public JdkSerializationRedisSerializer jdkSerializer() {
return new JdkSerializationRedisSerializer();
}
/**
 * RedisTemplate wired with the type-bound Jackson serializer above.
 */
@Bean
public RedisTemplate<String, User> userRedisTemplate(
RedisConnectionFactory connectionFactory,
Jackson2JsonRedisSerializer<User> jacksonSerializer) {
RedisTemplate<String, User> template = new RedisTemplate<>();
template.setConnectionFactory(connectionFactory);
// Keys as plain strings
template.setKeySerializer(new StringRedisSerializer());
// Values (and hash values) via the custom Jackson serializer
template.setValueSerializer(jacksonSerializer);
template.setHashValueSerializer(jacksonSerializer);
return template;
}
}
/**
* 序列化操作示例
*/
@Service
public class SerializationDemo {

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    @Autowired
    private RedisTemplate<String, User> userRedisTemplate;

    /**
     * Stores a fully-populated User both as a JSON value and, flattened, as a
     * Redis hash.
     */
    public void saveUser() {
        User user = User.builder()
                .id(1L)
                .username("张三")
                .password("123456")
                .age(25)
                .email("zhangsan@example.com")
                .createTime(LocalDateTime.now())
                .roles(Arrays.asList("ADMIN", "USER"))
                .address(Address.builder()
                        .province("北京市")
                        .city("北京市")
                        .street("朝阳区xx路")
                        .zipCode("100000")
                        .build())
                .build();
        // Serialized to JSON by the template's value serializer.
        redisTemplate.opsForValue().set("user:1", user);
        // Flattened field-by-field into a hash.
        redisTemplate.opsForHash().putAll("user:hash:1",
                convertUserToMap(user));
    }

    /**
     * Loads a user stored under "user:<id>".
     *
     * @param userId id appended to the key
     * @return the deserialized User, or null when the key is absent
     */
    public User getUser(Long userId) {
        return (User) redisTemplate.opsForValue()
                .get("user:" + userId);
    }

    /**
     * Same round trip, but through the User-typed template.
     */
    public void saveUserWithTemplate() {
        User user = User.builder()
                .id(2L)
                .username("李四")
                .build();
        userRedisTemplate.opsForValue().set("user:2", user);
        // Read back to demonstrate typed deserialization.
        User retrieved = userRedisTemplate.opsForValue()
                .get("user:2");
    }

    /**
     * Flattens a User into a String-keyed map for hash storage.
     * FIX: the original called toString() on id/age unconditionally and threw
     * NullPointerException for partially-populated users (e.g. the one built
     * in saveUserWithTemplate). Null fields are now skipped, since Redis hash
     * fields cannot hold null values anyway.
     */
    private Map<String, Object> convertUserToMap(User user) {
        Map<String, Object> map = new HashMap<>();
        if (user.getId() != null) {
            map.put("id", user.getId().toString());
        }
        if (user.getUsername() != null) {
            map.put("username", user.getUsername());
        }
        if (user.getAge() != null) {
            map.put("age", user.getAge().toString());
        }
        if (user.getEmail() != null) {
            map.put("email", user.getEmail());
        }
        return map;
    }
}
三、Pipeline命令流水线
3.1 Pipeline基础使用
java
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DataAccessException;
import org.springframework.data.redis.core.RedisCallback;
import org.springframework.data.redis.core.RedisOperations;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.SessionCallback;
import org.springframework.stereotype.Service;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;
import java.util.List;
/**
* Pipeline流水线操作示例
* 用于批量执行命令,减少网络往返
*/
@Service
public class PipelineDemo {
@Autowired
private RedisTemplate<String, Object> redisTemplate;
/**
* 使用SessionCallback实现Pipeline
* 方式1:Spring Data Redis推荐方式
*/
public void pipelineWithSessionCallback() {
List<Object> results = redisTemplate.executePipelined(
new SessionCallback<Object>() {
@Override
public Object execute(RedisOperations operations)
throws DataAccessException {
// 在同一个session中执行多个操作
operations.opsForValue().set("pipeline:key1", "value1");
operations.opsForValue().set("pipeline:key2", "value2");
operations.opsForValue().get("pipeline:key1");
operations.opsForHash().put("pipeline:hash", "field1", "value1");
operations.opsForList().leftPush("pipeline:list", "item1");
operations.opsForSet().add("pipeline:set", "member1");
// 返回null表示使用pipeline
return null;
}
});
// 处理返回结果
for (int i = 0; i < results.size(); i++) {
System.out.println("结果 " + i + ": " + results.get(i));
}
}
/**
* 批量写入大量数据
*/
public void batchWrite(int count) {
long startTime = System.currentTimeMillis();
List<Object> results = redisTemplate.executePipelined(
new SessionCallback<Object>() {
@Override
public Object execute(RedisOperations operations)
throws DataAccessException {
for (int i = 0; i < count; i++) {
operations.opsForValue()
.set("batch:key:" + i, "value:" + i);
}
return null;
}
});
long endTime = System.currentTimeMillis();
System.out.println("批量写入 " + count + " 条数据耗时: "
+ (endTime - startTime) + "ms");
System.out.println("返回结果数量: " + results.size());
}
/**
* 使用RedisCallback实现Pipeline
* 方式2:底层API方式
*/
public void pipelineWithRedisCallback() {
List<Object> results = redisTemplate.executePipelined(
new RedisCallback<Object>() {
@Override
public Object doInRedis(RedisConnection connection)
throws DataAccessException {
// 使用原生连接命令
connection.set("callback:key1".getBytes(),
"value1".getBytes());
connection.set("callback:key2".getBytes(),
"value2".getBytes());
connection.get("callback:key1".getBytes());
// 返回null
return null;
}
});
// 处理结果
results.forEach(result -> {
if (result instanceof byte[]) {
System.out.println("结果: " + new String((byte[]) result));
} else {
System.out.println("结果: " + result);
}
});
}
/**
* 复杂Pipeline操作
*/
public void complexPipeline() {
List<Object> results = redisTemplate.executePipelined(
new SessionCallback<Object>() {
@Override
public Object execute(RedisOperations operations)
throws DataAccessException {
String userKey = "user:1001";
String orderKey = "orders:1001";
// 1. 检查用户是否存在
operations.hasKey(userKey);
// 2. 获取用户信息
operations.opsForHash().entries(userKey);
// 3. 获取用户订单列表
operations.opsForList().range(orderKey, 0, -1);
// 4. 更新用户最后访问时间
operations.opsForHash().put(userKey,
"lastAccess", String.valueOf(System.currentTimeMillis()));
// 5. 记录访问日志
operations.opsForList().leftPush("access:log",
"user:1001 accessed at " + System.currentTimeMillis());
// 6. 限制列表长度
operations.opsForList().trim("access:log", 0, 999);
return null;
}
});
// 解析结果
for (Object result : results) {
if (result instanceof Boolean) {
System.out.println("布尔结果: " + result);
} else if (result instanceof Map) {
System.out.println("Map结果: " + result);
} else if (result instanceof List) {
System.out.println("List结果: " + result);
} else {
System.out.println("其他结果: " + result);
}
}
}
/**
* Pipeline与事务结合
*/
public void pipelineWithTransaction() {
List<Object> results = redisTemplate.executePipelined(
new SessionCallback<Object>() {
@Override
public Object execute(RedisOperations operations)
throws DataAccessException {
// 开启事务
operations.multi();
try {
// 执行事务操作
operations.opsForValue()
.set("tx:key1", "txvalue1");
operations.opsForValue()
.set("tx:key2", "txvalue2");
operations.opsForValue()
.increment("tx:counter");
// 故意制造错误(例如除以0)
// int i = 1/0;
// 提交事务
operations.exec();
} catch (Exception e) {
// 回滚事务
operations.discard();
throw e;
}
return null;
}
});
System.out.println("事务执行结果: " + results);
}
}
四、Spring Cache整合Redis
4.1 缓存配置
java
import org.springframework.cache.CacheManager;
import org.springframework.cache.annotation.CachingConfigurerSupport;
import org.springframework.cache.annotation.EnableCaching;
import org.springframework.cache.interceptor.KeyGenerator;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.cache.RedisCacheConfiguration;
import org.springframework.data.redis.cache.RedisCacheManager;
import org.springframework.data.redis.cache.RedisCacheWriter;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.serializer.GenericJackson2JsonRedisSerializer;
import org.springframework.data.redis.serializer.RedisSerializationContext;
import org.springframework.data.redis.serializer.StringRedisSerializer;
import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
/**
* Spring Cache Redis配置
*/
@Configuration
@EnableCaching // enables processing of @Cacheable / @CachePut / @CacheEvict
public class CacheConfig extends CachingConfigurerSupport {
/**
 * Redis-backed CacheManager with per-cache TTL overrides.
 * NOTE(review): CachingConfigurerSupport is deprecated and removed in Spring
 * Framework 6 — implementing CachingConfigurer directly may be required;
 * confirm the project's Spring version.
 */
@Bean
public CacheManager cacheManager(RedisConnectionFactory connectionFactory) {
// Defaults applied to any cache without an explicit entry below
RedisCacheConfiguration defaultCacheConfig = RedisCacheConfiguration
.defaultCacheConfig()
// keys as plain strings
.serializeKeysWith(
RedisSerializationContext.SerializationPair
.fromSerializer(new StringRedisSerializer()))
// values as JSON with embedded type info
.serializeValuesWith(
RedisSerializationContext.SerializationPair
.fromSerializer(new GenericJackson2JsonRedisSerializer()))
// default TTL: 30 minutes
.entryTtl(Duration.ofMinutes(30))
// refuse to cache nulls (methods returning null will throw)
.disableCachingNullValues()
// key prefix prepended to every cache name
.prefixCacheNameWith("cache:");
// Per-cache overrides. RedisCacheConfiguration is immutable: each call
// below derives a NEW configuration, the default above is not mutated.
Map<String, RedisCacheConfiguration> cacheConfigurations =
new HashMap<>();
// "users" cache: 1 hour TTL
cacheConfigurations.put("users", defaultCacheConfig
.entryTtl(Duration.ofHours(1))
.prefixCacheNameWith("cache:users:"));
// "products" cache: 10 minutes TTL
cacheConfigurations.put("products", defaultCacheConfig
.entryTtl(Duration.ofMinutes(10))
.prefixCacheNameWith("cache:products:"));
// "sessions" cache: 30 minutes TTL
cacheConfigurations.put("sessions", defaultCacheConfig
.entryTtl(Duration.ofMinutes(30))
.prefixCacheNameWith("cache:sessions:"));
// Assemble the manager
return RedisCacheManager.builder(connectionFactory)
.cacheDefaults(defaultCacheConfig)
.withInitialCacheConfigurations(cacheConfigurations)
.transactionAware() // defer cache writes until the surrounding tx commits
.build();
}
/**
 * Default key generator: "ClassName.methodName.arg1.arg2...".
 * Used when a caching annotation specifies neither key nor keyGenerator.
 */
@Override
@Bean
public KeyGenerator keyGenerator() {
return (target, method, params) -> {
StringBuilder sb = new StringBuilder();
sb.append(target.getClass().getSimpleName()); // class name
sb.append(".").append(method.getName()); // method name
// append each argument's toString()
for (Object param : params) {
sb.append(".").append(param.toString());
}
return sb.toString();
};
}
/**
 * Alternative generator producing keys like "user:123"; reference it with
 * keyGenerator = "userKeyGenerator" on the annotation.
 */
@Bean
public KeyGenerator userKeyGenerator() {
return (target, method, params) -> {
// "user:<id>" when the first argument is a Long id, else a catch-all key
if (params.length > 0 && params[0] instanceof Long) {
return "user:" + params[0];
}
return "user:all";
};
}
}
4.2 缓存注解使用
java
import org.springframework.cache.annotation.*;
import org.springframework.stereotype.Service;
import java.util.concurrent.TimeUnit;
/**
* 缓存注解使用示例
*/
@Service
@CacheConfig(cacheNames = "users") // class-level default cache name
public class UserCacheService {
/**
 * @Cacheable — returns the cached value when present; otherwise runs the
 * method and caches the result.
 */
@Cacheable(
value = "users", // cache name
key = "#userId", // SpEL: cache key derived from the userId argument
unless = "#result == null", // skip caching when the result is null
condition = "#userId > 0" // apply caching only for positive ids
)
public User getUserById(Long userId) {
System.out.println("从数据库查询用户: " + userId);
// simulated database lookup (defined elsewhere)
return simulateDatabaseQuery(userId);
}
/**
 * @Cacheable with a computed composite key.
 * sync = true serializes concurrent misses on the same key, protecting the
 * backend from a cache stampede.
 */
@Cacheable(
value = "users",
key = "T(java.lang.String).format('user_%d_%s', #id, #type)",
sync = true // synchronized loading to prevent cache breakdown
)
public User getUserWithType(Long id, String type) {
System.out.println("查询用户类型: " + type);
// NOTE(review): relies on a 3-arg User constructor — confirm it exists.
return new User(id, "用户" + id, type);
}
/**
 * @CachePut — always executes the method, then stores the returned value.
 */
@CachePut(
value = "users",
key = "#user.id",
condition = "#user.id != null"
)
public User updateUser(User user) {
System.out.println("更新用户: " + user.getId());
// persist first; the returned value replaces the cache entry
updateDatabase(user);
return user;
}
/**
 * @CacheEvict — removes one entry.
 */
@CacheEvict(
value = "users",
key = "#userId",
beforeInvocation = false // evict only after the method completes normally
)
public void deleteUser(Long userId) {
System.out.println("删除用户: " + userId);
// remove from the database
deleteFromDatabase(userId);
}
/**
 * @CacheEvict with allEntries — clears the whole "users" cache.
 */
@CacheEvict(
value = "users",
allEntries = true, // evict every entry in the cache
beforeInvocation = true // evict before the method body runs
)
public void clearAllUsers() {
System.out.println("清除所有用户缓存");
}
/**
 * @Caching — combines several cache operations on one method.
 */
@Caching(
// store the result under both the id and the username
put = {
@CachePut(value = "users", key = "#user.id"),
@CachePut(value = "users", key = "#user.username")
},
// and invalidate derived list caches
evict = {
@CacheEvict(value = "userLists", allEntries = true)
}
)
public User saveUser(User user) {
System.out.println("保存用户: " + user);
// persist to the database
saveToDatabase(user);
return user;
}
/**
 * Multiple cache names with an explicit cache manager.
 * NOTE(review): the call to getUserById() below is SELF-INVOCATION — it does
 * not go through the Spring proxy, so getUserById's own @Cacheable is
 * bypassed on this path (only this method's annotation applies).
 */
@Cacheable(
cacheNames = {"users", "profiles"},
key = "#userId",
cacheManager = "cacheManager" // which CacheManager bean to use
)
public User getUserProfile(Long userId) {
return getUserById(userId);
}
}
/**
* 缓存管理服务
*/
@Service
public class CacheManagementService {
    @Autowired
    private CacheManager cacheManager;

    /**
     * Programmatic cache access without annotations: put, get, evict, clear.
     */
    public void manualCacheOperation(Long userId) {
        Cache usersCache = cacheManager.getCache("users");
        if (usersCache == null) {
            return; // cache not configured — nothing to do
        }
        String entryKey = "user:" + userId;
        // Store an entry directly.
        usersCache.put(entryKey, new User(userId, "临时用户"));
        // Read it back; the wrapper distinguishes "absent" from "cached null".
        Cache.ValueWrapper wrapper = usersCache.get(entryKey);
        if (wrapper != null) {
            User cached = (User) wrapper.get();
            System.out.println("从缓存获取: " + cached);
        }
        // Remove the single entry, then wipe the whole cache.
        usersCache.evict(entryKey);
        usersCache.clear();
    }

    /**
     * Names of all caches known to the manager.
     */
    public Collection<String> getCacheNames() {
        return cacheManager.getCacheNames();
    }
}
4.3 缓存高级特性
java
import org.springframework.cache.annotation.Cacheable;
import org.springframework.stereotype.Service;
import com.github.benmanes.caffeine.cache.Caffeine;
import org.springframework.cache.CacheManager;
import org.springframework.cache.caffeine.CaffeineCacheManager;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Primary;
import org.springframework.data.redis.cache.RedisCacheManager;
import java.util.concurrent.TimeUnit;
/**
* 多级缓存示例(Caffeine + Redis)
*/
@Configuration
public class MultiLevelCacheConfig {
/**
 * L1 cache: Caffeine, in-process.
 */
@Bean
public CacheManager caffeineCacheManager() {
CaffeineCacheManager cacheManager = new CaffeineCacheManager();
cacheManager.setCaffeine(Caffeine.newBuilder()
.initialCapacity(100) // initial capacity
.maximumSize(1000) // size-based eviction bound
.expireAfterWrite(10, TimeUnit.MINUTES) // TTL counted from write
.recordStats()); // expose hit/miss statistics
return cacheManager;
}
/**
 * L2 cache: Redis, shared across application instances.
 * @Primary makes this the manager used by cache annotations by default;
 * the Caffeine manager must be selected explicitly (e.g. via cacheManager=...).
 */
@Bean
@Primary // default cache manager
public CacheManager redisCacheManager(RedisConnectionFactory connectionFactory) {
RedisCacheConfiguration config = RedisCacheConfiguration.defaultCacheConfig()
.entryTtl(Duration.ofHours(1))
.serializeValuesWith(
RedisSerializationContext.SerializationPair
.fromSerializer(new GenericJackson2JsonRedisSerializer()));
return RedisCacheManager.builder(connectionFactory)
.cacheDefaults(config)
.build();
}
}
/**
* 缓存穿透、击穿、雪崩解决方案
*/
@Service
public class CacheProtectionService {

    // FIX: every method below uses redisTemplate, but the original class never
    // declared the field — it did not compile.
    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    /**
     * Cache penetration protection: cache an "empty" marker object instead of
     * null, so repeated lookups of a non-existent id do not hit the database.
     * (unless="#result == null" is kept for safety, but this method never
     * returns null — it returns the empty marker.)
     */
    @Cacheable(value = "products", key = "#productId", unless = "#result == null")
    public Product getProductWithNullCache(Long productId) {
        // Primary lookup.
        Product product = queryFromDatabase(productId);
        // Miss: cache a marker object rather than nothing.
        if (product == null) {
            product = new Product();
            product.setId(productId);
            product.setEmpty(true); // callers must check this flag
        }
        return product;
    }

    /**
     * Cache breakdown protection: a simple distributed lock ensures only one
     * caller rebuilds a hot key while the rest wait and retry.
     * FIX: the original retried via recursion (unbounded stack growth under
     * contention); this version loops instead, with identical behavior —
     * including returning null if the waiting thread is interrupted.
     * NOTE(review): deleting the lock key unconditionally can release a lock
     * acquired by another client after our TTL expired; production code should
     * store a unique token and release via a Lua compare-and-delete.
     */
    public Product getProductWithLock(Long productId) {
        String cacheKey = "product:" + productId;
        String lockKey = "lock:product:" + productId;
        while (true) {
            // 1. Fast path: cache hit.
            Product product = (Product) redisTemplate.opsForValue().get(cacheKey);
            if (product != null) {
                return product;
            }
            // 2. Try to take the rebuild lock (NX + 10s TTL).
            Boolean locked = redisTemplate.opsForValue()
                    .setIfAbsent(lockKey, "locked", 10, TimeUnit.SECONDS);
            if (Boolean.TRUE.equals(locked)) {
                try {
                    // 3. Double-check: another thread may have filled the cache
                    // between our miss and the lock acquisition.
                    product = (Product) redisTemplate.opsForValue().get(cacheKey);
                    if (product == null) {
                        // 4. Rebuild from the database.
                        product = queryFromDatabase(productId);
                        if (product != null) {
                            // 5. Populate the cache.
                            redisTemplate.opsForValue()
                                    .set(cacheKey, product, 1, TimeUnit.HOURS);
                        }
                    }
                    return product;
                } finally {
                    // 6. Release the lock.
                    redisTemplate.delete(lockKey);
                }
            }
            // 7. Lost the race: back off briefly, then retry.
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return null; // interrupted while waiting — give up, as before
            }
        }
    }

    /**
     * Cache avalanche protection: jitter the TTL so many keys written together
     * do not all expire at the same instant.
     */
    public void setWithRandomExpire(String key, Object value) {
        // Base TTL: one hour.
        long baseExpire = 3600;
        // Random jitter: 0–300 seconds (fully qualified — this listing's
        // import block does not pull in ThreadLocalRandom).
        long randomExpire =
                java.util.concurrent.ThreadLocalRandom.current().nextLong(0, 300);
        long totalExpire = baseExpire + randomExpire;
        redisTemplate.opsForValue()
                .set(key, value, totalExpire, TimeUnit.SECONDS);
    }
}
五、分布式锁实现
5.1 基于Redis的分布式锁
java
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.script.DefaultRedisScript;
import org.springframework.data.redis.core.script.RedisScript;
import org.springframework.stereotype.Component;
/**
 * Redis-based distributed lock using SETNX + a per-acquisition token, with
 * a Lua script guaranteeing that only the holder can release.
 *
 * Fix over the original: lock tokens are now tracked PER KEY. The original
 * kept a single ThreadLocal&lt;String&gt;, so (a) acquiring a second lock
 * clobbered the token of the first, and (b) a FAILED tryLock overwrote the
 * token of a lock the thread still held. The token is also recorded only
 * after acquisition succeeds.
 */
@Component
public class RedisDistributedLock {

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    private static final String LOCK_PREFIX = "lock:";
    private static final long DEFAULT_EXPIRE = 30; // default lock TTL, seconds

    /** Per-thread map: lock key -> token proving ownership. */
    private final ThreadLocal<Map<String, String>> lockTokens =
            ThreadLocal.withInitial(HashMap::new);

    /**
     * Tries to acquire the lock with the default 30s TTL.
     *
     * @param key logical lock name (prefixed internally)
     * @return true if the lock was acquired
     */
    public boolean tryLock(String key) {
        return tryLock(key, DEFAULT_EXPIRE, TimeUnit.SECONDS);
    }

    /**
     * Tries to acquire the lock once (non-blocking).
     *
     * @param key    logical lock name
     * @param expire lock TTL (auto-release safety net)
     * @param unit   TTL unit
     * @return true if the lock was acquired
     */
    public boolean tryLock(String key, long expire, TimeUnit unit) {
        String lockKey = LOCK_PREFIX + key;
        // Unique token identifying this acquisition.
        String token = UUID.randomUUID().toString();
        // SET key value NX EX — atomic acquire.
        Boolean success = redisTemplate.opsForValue()
                .setIfAbsent(lockKey, token, expire, unit);
        if (Boolean.TRUE.equals(success)) {
            // Record ownership only after a successful acquire.
            lockTokens.get().put(lockKey, token);
            return true;
        }
        return false;
    }

    /**
     * Releases the lock if, and only if, this thread still owns it.
     *
     * @param key logical lock name
     * @return true if the lock was released by this call
     */
    public boolean unlock(String key) {
        String lockKey = LOCK_PREFIX + key;
        String token = lockTokens.get().get(lockKey);
        if (token == null) {
            return false; // this thread never acquired (or already released) it
        }
        // Lua script: compare-and-delete so an expired/reacquired lock held
        // by someone else is never deleted by us.
        String luaScript =
                "if redis.call('get', KEYS[1]) == ARGV[1] then " +
                " return redis.call('del', KEYS[1]) " +
                "else " +
                " return 0 " +
                "end";
        RedisScript<Long> script = new DefaultRedisScript<>(luaScript, Long.class);
        Long result = redisTemplate.execute(script,
                Collections.singletonList(lockKey), token);
        if (result != null && result > 0) {
            lockTokens.get().remove(lockKey);
            return true;
        }
        return false;
    }

    /**
     * Tries to acquire the lock, polling until the wait budget is spent.
     *
     * @param key      logical lock name
     * @param waitTime maximum time to keep retrying
     * @param expire   lock TTL once acquired
     * @param unit     unit for both times
     * @return true if the lock was acquired within the wait window
     */
    public boolean tryLockWithWait(String key, long waitTime,
            long expire, TimeUnit unit) throws InterruptedException {
        long start = System.currentTimeMillis();
        long waitMillis = unit.toMillis(waitTime);
        while (System.currentTimeMillis() - start < waitMillis) {
            if (tryLock(key, expire, unit)) {
                return true;
            }
            // Back off briefly before the next attempt.
            Thread.sleep(100);
        }
        return false;
    }
}
/**
 * Reentrant Redis lock: the owning thread may re-acquire the same key; a
 * per-key hold count decides when the underlying Redis key is deleted.
 *
 * Fix over the original: ownership and the reentrancy count are tracked
 * PER KEY. The original's reentry check was `lockFlag.get() != null`, i.e.
 * "does this thread hold ANY lock" — holding lock A made lock(B) succeed
 * without ever touching Redis, and the single shared count mixed keys up.
 */
@Component
public class ReentrantRedisLock {

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    private static final String LOCK_PREFIX = "lock:";
    private static final String COUNT_SUFFIX = ":count";

    /** Per-thread map: lock key -> ownership token. */
    private final ThreadLocal<Map<String, String>> lockTokens =
            ThreadLocal.withInitial(HashMap::new);
    /** Per-thread map: lock key -> reentrancy count. */
    private final ThreadLocal<Map<String, Integer>> holdCounts =
            ThreadLocal.withInitial(HashMap::new);

    /**
     * Acquires (or re-enters) the lock for {@code key}.
     *
     * @param key    logical lock name
     * @param expire lock TTL
     * @param unit   TTL unit
     * @return true if the lock is held by this thread on return
     */
    public boolean lock(String key, long expire, TimeUnit unit) {
        String lockKey = LOCK_PREFIX + key;
        String countKey = lockKey + COUNT_SUFFIX;
        // Reentrant path: only when THIS key is already held by this thread.
        if (lockTokens.get().containsKey(lockKey)) {
            holdCounts.get().merge(lockKey, 1, Integer::sum);
            // Mirror the count in Redis for observability.
            redisTemplate.opsForValue().increment(countKey);
            return true;
        }
        // First acquisition: unique token + atomic SETNX.
        String token = UUID.randomUUID().toString();
        Boolean success = redisTemplate.opsForValue()
                .setIfAbsent(lockKey, token, expire, unit);
        if (Boolean.TRUE.equals(success)) {
            lockTokens.get().put(lockKey, token);
            holdCounts.get().put(lockKey, 1);
            redisTemplate.opsForValue().set(countKey, "1", expire, unit);
            return true;
        }
        return false;
    }

    /**
     * Releases one hold on the lock; the Redis key is deleted only when the
     * outermost hold is released, and only if this thread still owns it.
     *
     * @param key logical lock name
     * @return true if a hold was released
     */
    public boolean unlock(String key) {
        String lockKey = LOCK_PREFIX + key;
        String countKey = lockKey + COUNT_SUFFIX;
        String token = lockTokens.get().get(lockKey);
        if (token == null) {
            return false; // not held by this thread
        }
        Integer count = holdCounts.get().get(lockKey);
        if (count != null && count > 1) {
            // Inner release: just decrement the counters.
            holdCounts.get().put(lockKey, count - 1);
            redisTemplate.opsForValue().decrement(countKey);
            return true;
        }
        // Outermost release: atomically delete both keys iff we still own
        // the lock (the token must match).
        String luaScript =
                "if redis.call('get', KEYS[1]) == ARGV[1] then " +
                " redis.call('del', KEYS[2]) " +
                " return redis.call('del', KEYS[1]) " +
                "else " +
                " return 0 " +
                "end";
        RedisScript<Long> script = new DefaultRedisScript<>(luaScript, Long.class);
        Long result = redisTemplate.execute(script,
                Arrays.asList(lockKey, countKey), token);
        if (result != null && result > 0) {
            lockTokens.get().remove(lockKey);
            holdCounts.get().remove(lockKey);
            return true;
        }
        return false;
    }
}
/**
 * Usage examples for the custom Redis lock above and for Redisson's RLock.
 */
@Service
public class LockUsageDemo {
@Autowired
private RedisDistributedLock distributedLock;
@Autowired
private RedissonClient redissonClient; // Redisson client (separate redisson dependency)
/**
 * Processes an order under the hand-rolled distributed lock so that at
 * most one node works on a given order at a time.
 */
public void processWithLock(String orderId) {
String lockKey = "order:" + orderId;
try {
// Non-blocking acquire attempt; lock auto-expires after 30s.
if (distributedLock.tryLock(lockKey, 30, TimeUnit.SECONDS)) {
try {
// Business logic goes here.
System.out.println("处理订单: " + orderId);
Thread.sleep(5000); // simulate slow processing
} finally {
// Always release in finally.
distributedLock.unlock(lockKey);
}
} else {
System.out.println("获取锁失败,订单正在被处理: " + orderId);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
/**
 * Same scenario implemented with Redisson, whose RLock offers a more
 * complete implementation (reentrancy, wait queues, watchdog renewal).
 */
public void processWithRedisson(String orderId) {
RLock lock = redissonClient.getLock("lock:order:" + orderId);
try {
// Wait up to 10s to acquire; auto-release after 30s if not unlocked.
if (lock.tryLock(10, 30, TimeUnit.SECONDS)) {
try {
// Business logic goes here.
System.out.println("使用Redisson处理订单: " + orderId);
// Reentrancy state can be inspected on the lock.
int holdCount = lock.getHoldCount();
System.out.println("锁持有次数: " + holdCount);
} finally {
lock.unlock();
}
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
/**
 * Demonstrates Redisson's watchdog: calling lock() WITHOUT a lease time
 * makes Redisson keep extending the lock (default 30s lease, renewed
 * periodically) until unlock(), so a long job cannot outlive its lock.
 */
public void processWithWatchdog(String orderId) {
RLock lock = redissonClient.getLock("lock:order:" + orderId);
try {
// No leaseTime argument -> watchdog auto-renewal is active.
lock.lock();
try {
// Long-running work protected by the auto-renewed lock.
System.out.println("开始长时间任务: " + orderId);
Thread.sleep(60000); // simulate 60s of processing
System.out.println("任务完成: " + orderId);
} finally {
lock.unlock();
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
}
六、接口幂等性实现
6.1 基于Redis的幂等性方案
java
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Component;
import org.springframework.web.bind.annotation.*;
import javax.servlet.http.HttpServletRequest;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
/**
 * Generates and validates one-time idempotency tokens backed by Redis.
 */
@Component
public class IdempotentTokenGenerator {

    private static final String TOKEN_PREFIX = "idempotent:token:";
    private static final long TOKEN_EXPIRE = 30; // token lifetime in minutes

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    /**
     * Creates a fresh random token and stores it in Redis with a TTL.
     *
     * @return the token to hand back to the client
     */
    public String generateToken() {
        final String token = UUID.randomUUID().toString();
        redisTemplate.opsForValue()
                .set(TOKEN_PREFIX + token, "1", TOKEN_EXPIRE, TimeUnit.MINUTES);
        return token;
    }

    /**
     * Atomically verifies that the token exists and deletes it, using a Lua
     * script so the token can be consumed exactly once even under
     * concurrent submissions.
     *
     * @return true if the token existed and was consumed by this call
     */
    public boolean checkAndDeleteToken(String token) {
        if (token == null) {
            return false;
        }
        final String luaScript =
                "if redis.call('get', KEYS[1]) then " +
                " return redis.call('del', KEYS[1]) " +
                "else " +
                " return 0 " +
                "end";
        RedisScript<Long> script = new DefaultRedisScript<>(luaScript, Long.class);
        Long deleted = redisTemplate.execute(script,
                Collections.singletonList(TOKEN_PREFIX + token));
        return deleted != null && deleted > 0;
    }

    /**
     * Read-only existence check; the token is left in place.
     */
    public boolean checkToken(String token) {
        return token != null
                && Boolean.TRUE.equals(redisTemplate.hasKey(TOKEN_PREFIX + token));
    }
}
/**
 * Marks a handler method as idempotent; enforced by IdempotentInterceptor,
 * which rejects a second request carrying the same token within the
 * timeout window.
 */
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface Idempotent {
/**
 * How long the deduplication key is kept, in units of {@link #timeUnit()}.
 */
int timeout() default 30;
/**
 * Unit for {@link #timeout()}.
 */
TimeUnit timeUnit() default TimeUnit.SECONDS;
/**
 * Redis key prefix for the deduplication entry.
 */
String keyPrefix() default "idempotent:";
/**
 * Message returned to the client on a duplicate submission.
 */
String message() default "请勿重复提交";
}
/**
 * Spring MVC interceptor enforcing the @Idempotent annotation: the first
 * request with a given token claims a Redis key via setIfAbsent; any
 * duplicate within the timeout gets HTTP 429.
 */
@Component
public class IdempotentInterceptor implements HandlerInterceptor {
@Autowired
private RedisTemplate<String, Object> redisTemplate;
// Header the client uses to send its idempotency token.
private static final String IDEMPOTENT_TOKEN_HEADER = "Idempotent-Token";
@Override
public boolean preHandle(HttpServletRequest request,
HttpServletResponse response, Object handler) throws Exception {
// Only intercept controller methods.
if (!(handler instanceof HandlerMethod)) {
return true;
}
HandlerMethod handlerMethod = (HandlerMethod) handler;
Idempotent idempotent = handlerMethod.getMethodAnnotation(Idempotent.class);
if (idempotent == null) {
return true; // method not annotated — pass through
}
// Token from header first, query/form parameter as fallback.
String token = request.getHeader(IDEMPOTENT_TOKEN_HEADER);
if (StringUtils.isEmpty(token)) {
token = request.getParameter("idempotentToken");
}
if (StringUtils.isEmpty(token)) {
throw new RuntimeException("缺少幂等性令牌");
}
// Build the deduplication key.
String key = idempotent.keyPrefix() + token;
// Atomic claim: only the first request with this token succeeds.
Boolean success = redisTemplate.opsForValue()
.setIfAbsent(key, "1", idempotent.timeout(), idempotent.timeUnit());
if (!Boolean.TRUE.equals(success)) {
// Duplicate submission within the window.
response.setStatus(429); // Too Many Requests
response.setContentType("application/json;charset=UTF-8");
response.getWriter().write("{\"code\":429,\"msg\":\""
+ idempotent.message() + "\"}");
return false;
}
return true;
}
}
/**
 * REST endpoints demonstrating the idempotency-token workflow.
 */
@RestController
@RequestMapping("/api/order")
public class OrderController {

    @Autowired
    private IdempotentTokenGenerator tokenGenerator;

    @Autowired
    private OrderService orderService;

    /**
     * Issues a one-time idempotency token that the client must echo back
     * with the mutating request.
     */
    @GetMapping("/token")
    public Result<String> getIdempotentToken() {
        return Result.success(tokenGenerator.generateToken());
    }

    /**
     * Creates an order. Protected twice: the @Idempotent interceptor
     * rejects rapid duplicate submits, and the one-time token is consumed
     * atomically so a replayed request is refused.
     */
    @PostMapping("/create")
    @Idempotent(timeout = 60, message = "订单正在处理中,请勿重复提交")
    public Result<Order> createOrder(@RequestBody OrderCreateDTO dto) {
        String token = dto.getIdempotentToken();
        // Scheme 1: one-shot token — check-and-delete is atomic (Lua).
        boolean valid = tokenGenerator.checkAndDeleteToken(token);
        if (!valid) {
            return Result.error("无效的令牌或请求已处理");
        }
        Order order = orderService.createOrder(dto);
        return Result.success(order);
    }

    /**
     * Pays an order. The order number acts as the natural business-level
     * idempotency key inside OrderService. (The original computed a local
     * {@code businessKey} that was never used; it has been removed.)
     */
    @PostMapping("/pay")
    @Idempotent(keyPrefix = "idempotent:pay:", timeout = 300)
    public Result<Boolean> payOrder(@RequestBody PayDTO dto) {
        boolean result = orderService.payOrder(dto.getOrderNo());
        return Result.success(result);
    }
}
/**
 * Business-key based idempotency: the first caller for a key runs the
 * supplier under a short Redis lock and caches the result; later callers
 * (and retries) get the cached result instead of re-executing.
 */
@Service
public class IdempotentBusinessService {

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    private static final String BUSINESS_IDEMPOTENT_PREFIX = "business:idempotent:";

    /** Upper bound on lock-wait retries (~10s at 100ms per attempt). */
    private static final int MAX_WAIT_RETRIES = 100;

    /**
     * Executes {@code supplier} at most once per {@code businessKey} within
     * the cache window and returns the (possibly cached) result.
     * The original retried via unbounded recursion (StackOverflowError
     * risk) and would have thrown when caching a null supplier result;
     * both are fixed here.
     *
     * @param businessKey natural business identifier (e.g. order number)
     * @param supplier    the business operation to run exactly once
     * @param expire      how long the cached result is kept
     * @param unit        unit for {@code expire}
     * @return the fresh or cached business result
     */
    @SuppressWarnings("unchecked") // cached value is whatever T the first caller stored
    public <T> T executeWithIdempotent(String businessKey,
            Supplier<T> supplier, long expire, TimeUnit unit) {
        String key = BUSINESS_IDEMPOTENT_PREFIX + businessKey;
        String lockKey = key + ":lock";
        for (int attempt = 0; attempt < MAX_WAIT_RETRIES; attempt++) {
            // 1. Already processed? Return the cached result.
            Object cached = redisTemplate.opsForValue().get(key);
            if (cached != null) {
                return (T) cached;
            }
            // 2. Short lock to serialize concurrent first-callers.
            Boolean locked = redisTemplate.opsForValue()
                    .setIfAbsent(lockKey, "1", 10, TimeUnit.SECONDS);
            if (Boolean.TRUE.equals(locked)) {
                try {
                    // 3. Double-check after winning the lock.
                    cached = redisTemplate.opsForValue().get(key);
                    if (cached != null) {
                        return (T) cached;
                    }
                    // 4. Run the business operation once.
                    T businessResult = supplier.get();
                    // 5. Cache it (Redis rejects null values, so guard).
                    if (businessResult != null) {
                        redisTemplate.opsForValue()
                                .set(key, businessResult, expire, unit);
                    }
                    return businessResult;
                } finally {
                    redisTemplate.delete(lockKey);
                }
            }
            // 6. Another caller is processing — back off and re-poll.
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException("幂等性处理中断");
            }
        }
        throw new RuntimeException("幂等性处理等待超时: " + businessKey);
    }
}
七、Web集群与分布式Session管理
7.1 Spring Session Redis配置
java
import org.springframework.context.annotation.Configuration;
import org.springframework.session.data.redis.config.annotation.web.http.EnableRedisHttpSession;
import org.springframework.session.web.context.AbstractHttpSessionApplicationInitializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.connection.RedisStandaloneConfiguration;
import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory;
import org.springframework.session.data.redis.RedisFlushMode;
import org.springframework.session.data.redis.config.annotation.web.http.RedisHttpSessionConfiguration;
/**
 * Spring Session Redis configuration: HTTP sessions are stored in Redis so
 * any node in the cluster can serve any request.
 *
 * NOTE(review): this class configures session handling TWICE — via the
 * @EnableRedisHttpSession attributes AND via an explicit
 * RedisHttpSessionConfiguration bean below, with conflicting values
 * (namespace "spring:session" vs "myapp:session", ON_SAVE vs IMMEDIATE).
 * Confirm which one is intended and remove the other.
 */
@Configuration
@EnableRedisHttpSession(
maxInactiveIntervalInSeconds = 1800, // session timeout (seconds)
redisNamespace = "spring:session", // Redis key namespace
flushMode = RedisFlushMode.ON_SAVE // write-through mode: flush on save
)
public class SpringSessionConfig extends AbstractHttpSessionApplicationInitializer {
/**
 * Redis connection factory used by the session repository.
 */
@Bean
public RedisConnectionFactory redisConnectionFactory() {
RedisStandaloneConfiguration config = new RedisStandaloneConfiguration();
config.setHostName("localhost");
config.setPort(6379);
config.setDatabase(0);
return new LettuceConnectionFactory(config);
}
/**
 * Explicit session configuration bean.
 * NOTE(review): @EnableRedisHttpSession already registers this
 * configuration; declaring it again here likely duplicates/overrides the
 * annotation settings — verify before keeping both.
 */
@Bean
public RedisHttpSessionConfiguration redisHttpSessionConfiguration() {
RedisHttpSessionConfiguration config = new RedisHttpSessionConfiguration();
// Session timeout (seconds).
config.setMaxInactiveIntervalInSeconds(1800);
// Redis key namespace.
config.setRedisNamespace("myapp:session");
// Flush every change immediately instead of on save.
config.setRedisFlushMode(RedisFlushMode.IMMEDIATE);
return config;
}
}
/**
 * Additional session customization: cookie serialization and the Redis
 * session repository.
 */
@Configuration
public class SessionCustomConfig {
// Session timeout, injected from configuration (default 30 minutes).
@Value("${server.servlet.session.timeout:30m}")
private Duration sessionTimeout;
/**
 * Customizes the session cookie.
 */
@Bean
public DefaultCookieSerializer cookieSerializer() {
DefaultCookieSerializer serializer = new DefaultCookieSerializer();
// Cookie name.
serializer.setCookieName("SESSIONID");
// Cookie path.
serializer.setCookiePath("/");
// Domain — lets subdomains of example.com share the session cookie.
serializer.setDomainName("example.com");
// Cookie max age in seconds, aligned with the session timeout.
// (A value of -1 would make it a browser-session cookie instead.)
serializer.setCookieMaxAge((int) sessionTimeout.getSeconds());
// HttpOnly: not readable from JavaScript (mitigates XSS cookie theft).
serializer.setUseHttpOnlyCookie(true);
// Secure flag off — set true when serving over HTTPS.
serializer.setUseSecureCookie(false);
// Name of the request attribute that flags a remember-me request
// (when present, the cookie is written without an expiry).
serializer.setRememberMeRequestAttribute("remember-me");
return serializer;
}
/**
 * Explicit Redis-backed session repository.
 * NOTE(review): @EnableRedisHttpSession also creates one of these —
 * confirm only a single repository ends up in the context.
 */
@Bean
public RedisIndexedSessionRepository redisIndexedSessionRepository(
RedisConnectionFactory connectionFactory) {
RedisIndexedSessionRepository repository =
new RedisIndexedSessionRepository(connectionFactory);
// Default maximum inactive interval (seconds).
repository.setDefaultMaxInactiveInterval(
(int) sessionTimeout.getSeconds());
// Flush to Redis on save.
repository.setRedisFlushMode(RedisFlushMode.ON_SAVE);
// Persist only attributes changed via setAttribute.
repository.setSaveMode(SaveMode.ON_SET_ATTRIBUTE);
return repository;
}
}
/**
* application.yml配置
*
* spring:
* session:
* store-type: redis
* redis:
* namespace: spring:session
* flush-mode: on_save
* redis:
* host: localhost
* port: 6379
* password:
* database: 0
* timeout: 2000ms
* lettuce:
* pool:
* max-active: 8
* max-idle: 8
* min-idle: 0
*/
7.2 Session操作示例
java
import org.springframework.web.bind.annotation.*;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpSession;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
/**
 * REST endpoints for inspecting and manipulating the (Redis-backed) HTTP
 * session — useful for verifying that sessions are shared across nodes.
 */
@RestController
@RequestMapping("/api/session")
public class SessionController {
/**
 * Sets a session attribute and echoes back session metadata.
 */
@PostMapping("/set")
public Map<String, Object> setSessionAttribute(
@RequestParam String key,
@RequestParam String value,
HttpSession session) {
// Store the attribute in the session.
session.setAttribute(key, value);
// Session id (shared via Redis across the cluster).
String sessionId = session.getId();
// Creation timestamp (epoch millis).
long creationTime = session.getCreationTime();
// Last access timestamp (epoch millis).
long lastAccessedTime = session.getLastAccessedTime();
// Max inactive interval (seconds).
int maxInactiveInterval = session.getMaxInactiveInterval();
Map<String, Object> result = new HashMap<>();
result.put("sessionId", sessionId);
result.put("key", key);
result.put("value", value);
result.put("creationTime", creationTime);
result.put("lastAccessedTime", lastAccessedTime);
result.put("maxInactiveInterval", maxInactiveInterval);
return result;
}
/**
 * Reads one session attribute.
 */
@GetMapping("/get")
public Map<String, Object> getSessionAttribute(
@RequestParam String key,
HttpSession session) {
Object value = session.getAttribute(key);
Map<String, Object> result = new HashMap<>();
result.put("sessionId", session.getId());
result.put("key", key);
result.put("value", value);
return result;
}
/**
 * Dumps every attribute of the current session plus its metadata.
 */
@GetMapping("/all")
public Map<String, Object> getAllSessionAttributes(HttpSession session) {
Map<String, Object> attributes = new HashMap<>();
Enumeration<String> attributeNames = session.getAttributeNames();
while (attributeNames.hasMoreElements()) {
String name = attributeNames.nextElement();
attributes.put(name, session.getAttribute(name));
}
Map<String, Object> result = new HashMap<>();
result.put("sessionId", session.getId());
result.put("attributes", attributes);
result.put("creationTime", session.getCreationTime());
result.put("lastAccessedTime", session.getLastAccessedTime());
return result;
}
/**
 * Removes one attribute from the session.
 */
@DeleteMapping("/remove")
public String removeSessionAttribute(
@RequestParam String key,
HttpSession session) {
session.removeAttribute(key);
return "属性 " + key + " 已移除";
}
/**
 * Invalidates (destroys) the current session.
 */
@PostMapping("/invalidate")
public String invalidateSession(HttpSession session) {
String sessionId = session.getId();
session.invalidate();
return "Session " + sessionId + " 已销毁";
}
/**
 * Reports diagnostic information about the session associated with this
 * request without creating one (getSession(false)).
 */
@GetMapping("/info")
public Map<String, Object> getSessionInfo(HttpServletRequest request) {
HttpSession session = request.getSession(false);
Map<String, Object> info = new HashMap<>();
info.put("sessionId", session != null ? session.getId() : null);
info.put("hasSession", session != null);
info.put("requestedSessionId", request.getRequestedSessionId());
info.put("isRequestedSessionIdValid", request.isRequestedSessionIdValid());
info.put("isRequestedSessionIdFromCookie",
request.isRequestedSessionIdFromCookie());
info.put("isRequestedSessionIdFromURL",
request.isRequestedSessionIdFromURL());
return info;
}
}
/**
 * Login/logout endpoints demonstrating session-based authentication on top
 * of the distributed (Redis-backed) session.
 */
@RestController
@RequestMapping("/api/auth")
public class AuthController {
@Autowired
private UserService userService;
/**
 * Authenticates the user and stores the user object in the session.
 */
@PostMapping("/login")
public Result<UserVO> login(@RequestBody LoginDTO dto, HttpSession session) {
// Delegate credential verification to the user service.
User user = userService.authenticate(dto.getUsername(), dto.getPassword());
if (user != null) {
// Stash the authenticated user and login time in the session.
session.setAttribute("user", user);
session.setAttribute("loginTime", System.currentTimeMillis());
// Session timeout: 30 minutes of inactivity.
session.setMaxInactiveInterval(30 * 60);
UserVO userVO = UserVO.fromUser(user);
return Result.success(userVO);
} else {
return Result.error("用户名或密码错误");
}
}
/**
 * Returns the currently logged-in user, if any.
 */
@GetMapping("/current")
public Result<UserVO> getCurrentUser(HttpSession session) {
User user = (User) session.getAttribute("user");
if (user != null) {
return Result.success(UserVO.fromUser(user));
} else {
return Result.error("用户未登录");
}
}
/**
 * Logs out by invalidating the whole session.
 */
@PostMapping("/logout")
public Result<String> logout(HttpSession session) {
session.invalidate();
return Result.success("登出成功");
}
/**
 * Reports whether the current session carries an authenticated user.
 */
@GetMapping("/check")
public Result<Map<String, Object>> checkLogin(HttpSession session) {
User user = (User) session.getAttribute("user");
Map<String, Object> result = new HashMap<>();
result.put("loggedIn", user != null);
result.put("username", user != null ? user.getUsername() : null);
result.put("sessionId", session.getId());
return Result.success(result);
}
}
/**
 * Tracks session lifecycle and attribute changes, and maintains simple
 * counters / online-user state in Redis.
 *
 * NOTE(review): with Spring Session, container-level HttpSessionListener
 * callbacks require the listener to be registered with the session
 * repository (session events arrive via Redis keyspace notifications) —
 * verify a plain @Component registration actually receives these events.
 */
@Component
public class SessionListener implements HttpSessionListener,
HttpSessionAttributeListener {
private static final Logger logger = LoggerFactory.getLogger(SessionListener.class);
@Autowired
private RedisTemplate<String, Object> redisTemplate;
/**
 * Called when a session is created.
 */
@Override
public void sessionCreated(HttpSessionEvent se) {
HttpSession session = se.getSession();
logger.info("Session创建 - ID: {}, 时间: {}",
session.getId(), new Date(session.getCreationTime()));
// Bump the "sessions created" counter.
redisTemplate.opsForValue()
.increment("stat:session:created");
}
/**
 * Called when a session is invalidated or times out.
 */
@Override
public void sessionDestroyed(HttpSessionEvent se) {
HttpSession session = se.getSession();
logger.info("Session销毁 - ID: {}, 最后访问: {}",
session.getId(), new Date(session.getLastAccessedTime()));
// Bump the "sessions destroyed" counter.
redisTemplate.opsForValue()
.increment("stat:session:destroyed");
// If a user was logged in on this session, clean up their state.
User user = (User) session.getAttribute("user");
if (user != null) {
// Audit the implicit logout.
logger.info("用户登出 - 用户名: {}, Session: {}",
user.getUsername(), session.getId());
// Drop the user's online marker.
redisTemplate.delete("online:user:" + user.getId());
}
}
/**
 * Called when an attribute is added to a session.
 */
@Override
public void attributeAdded(HttpSessionBindingEvent se) {
logger.info("Session属性添加 - Name: {}, Value: {}",
se.getName(), se.getValue());
}
/**
 * Called when an attribute is removed from a session.
 */
@Override
public void attributeRemoved(HttpSessionBindingEvent se) {
logger.info("Session属性移除 - Name: {}", se.getName());
}
/**
 * Called when an attribute's value is replaced.
 */
@Override
public void attributeReplaced(HttpSessionBindingEvent se) {
logger.info("Session属性替换 - Name: {}, New Value: {}",
se.getName(), se.getValue());
}
}
7.3 Nginx负载均衡配置
nginx
# nginx.conf
# Nginx负载均衡配置示例
# 定义上游服务器(后端应用集群)
upstream backend_cluster {
# 负载均衡策略
# 1. 轮询(默认)
# 2. ip_hash:基于IP的会话保持
# 3. least_conn:最少连接
# 4. weight:权重
# IP哈希策略(确保同一IP的请求始终发送到同一台服务器;注:已用Spring Session集中存储会话时,会话共享不依赖ip_hash,可按需去掉)
ip_hash;
# 服务器列表
server 192.168.1.10:8080 weight=3 max_fails=3 fail_timeout=30s;
server 192.168.1.11:8080 weight=2 max_fails=3 fail_timeout=30s;
server 192.168.1.12:8080 weight=1 max_fails=3 fail_timeout=30s backup;
# 健康检查(需要nginx_upstream_check_module模块)
check interval=3000 rise=2 fall=5 timeout=1000 type=http;
check_http_send "HEAD /health HTTP/1.0\r\n\r\n";
check_http_expect_alive http_2xx http_3xx;
}
# 定义另一个上游服务器(只读服务)
upstream readonly_cluster {
server 192.168.1.20:8080;
server 192.168.1.21:8080;
server 192.168.1.22:8080;
}
# HTTP服务器配置
server {
listen 80;
server_name www.example.com;
# 访问日志
access_log /var/log/nginx/example_access.log;
error_log /var/log/nginx/example_error.log;
# 静态资源缓存
location ~* \.(jpg|jpeg|png|gif|ico|css|js)$ {
expires 30d;
add_header Cache-Control "public, no-transform";
root /var/www/static;
}
# 健康检查接口
location /health {
access_log off;
return 200 "healthy\n";
add_header Content-Type text/plain;
}
# 主应用路由
location / {
# 使用定义的上游服务器集群
proxy_pass http://backend_cluster;
# 设置代理请求头
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# 传递Session Cookie
proxy_set_header Cookie $http_cookie;
# 超时设置
proxy_connect_timeout 5s;
proxy_send_timeout 30s;
proxy_read_timeout 30s;
# 缓冲区设置
proxy_buffering on;
proxy_buffer_size 4k;
proxy_buffers 8 4k;
proxy_busy_buffers_size 8k;
# 重试机制(默认不会重试POST等非幂等请求;如需重试可在列表中加入 non_idempotent)
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503;
proxy_next_upstream_tries 3;
# 禁用重定向重写
proxy_redirect off;
}
# API路由
location /api/ {
proxy_pass http://backend_cluster/api/;
# CORS配置
add_header Access-Control-Allow-Origin *;
add_header Access-Control-Allow-Methods 'GET, POST, PUT, DELETE, OPTIONS';
add_header Access-Control-Allow-Headers 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
if ($request_method = 'OPTIONS') {
return 204;
}
}
# 只读API路由
location /api/readonly/ {
proxy_pass http://readonly_cluster/api/readonly/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
# WebSocket支持
location /ws/ {
proxy_pass http://backend_cluster;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
# WebSocket超时
proxy_read_timeout 3600s;
proxy_send_timeout 3600s;
}
# 错误页面
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
# HTTPS服务器配置
server {
listen 443 ssl http2;
server_name www.example.com;
# SSL证书配置
ssl_certificate /etc/nginx/ssl/example.com.crt;
ssl_certificate_key /etc/nginx/ssl/example.com.key;
# SSL协议配置
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
# SSL会话缓存
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
# 开启压缩
gzip on;
gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
# 其他配置同HTTP
location / {
proxy_pass http://backend_cluster;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
# 负载均衡配置示例(docker-compose.yml)
# version: '3'
# services:
# nginx:
# image: nginx:latest
# ports:
# - "80:80"
# - "443:443"
# volumes:
# - ./nginx.conf:/etc/nginx/nginx.conf:ro
# - ./ssl:/etc/nginx/ssl:ro
# depends_on:
# - app1
# - app2
# - app3
#
# app1:
# build: ./app
# environment:
# - SERVER_PORT=8080
#
# app2:
# build: ./app
# environment:
# - SERVER_PORT=8080
#
# app3:
# build: ./app
# environment:
# - SERVER_PORT=8080
#
# redis:
# image: redis:alpine
# ports:
# - "6379:6379"
7.4 Keepalived高可用配置
bash
# keepalived.conf
# Keepalived主备配置示例
# 全局配置
global_defs {
# 路由ID(通常为当前节点名称)
router_id LVS_MASTER
# 脚本执行用户
script_user root
# 启用脚本检测
enable_script_security
# 通知脚本
notification_email {
admin@example.com
}
notification_email_from keepalived@localhost
smtp_server 127.0.0.1
smtp_connect_timeout 30
}
# 脚本检测配置
vrrp_script chk_nginx {
# 检测Nginx服务是否运行
script "/usr/local/bin/check_nginx.sh"
# 检测间隔时间(秒)
interval 2
# 超时时间(秒)
timeout 2
# 权重(如果检测失败,优先级减少20)
weight -20
# 失败次数阈值
fall 3
# 成功次数阈值
rise 2
# 用户
user root
}
# VRRP实例配置
vrrp_instance VI_1 {
# 状态(MASTER/BACKUP)
state MASTER
# 网卡接口
interface eth0
# 虚拟路由ID(同一组保持一致)
virtual_router_id 51
# 优先级(数值越大越优先)
priority 100
# 心跳检测间隔(秒)
advert_int 1
# 认证配置
authentication {
auth_type PASS
auth_pass 1234
}
# 虚拟IP地址(VIP)
virtual_ipaddress {
192.168.1.100/24 dev eth0
192.168.1.101/24 dev eth0
}
# 使用脚本检测
track_script {
chk_nginx
}
# 通知脚本
notify_master "/usr/local/bin/notify.sh master"
notify_backup "/usr/local/bin/notify.sh backup"
notify_fault "/usr/local/bin/notify.sh fault"
# 追踪接口
track_interface {
eth0
eth1
}
# 单播模式配置(指定单播源地址与对端地址,替代默认的VRRP组播通告)
unicast_src_ip 192.168.1.10
unicast_peer {
192.168.1.11
192.168.1.12
}
}
# 虚拟服务器配置(LVS)
virtual_server 192.168.1.100 80 {
# 健康检查轮询间隔(秒)
delay_loop 6
# 负载均衡算法
lb_algo rr
# 负载均衡模式(NAT/DR/TUN)
lb_kind DR
# 持久性超时
persistence_timeout 50
# 协议
protocol TCP
# 真实服务器
real_server 192.168.1.10 8080 {
weight 3
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 8080
}
}
real_server 192.168.1.11 8080 {
weight 2
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 8080
}
}
real_server 192.168.1.12 8080 {
weight 1
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 8080
}
}
}
# BACKUP节点配置文件
# global_defs配置同上
#
# vrrp_instance VI_1 {
# state BACKUP
# interface eth0
# virtual_router_id 51
# priority 90 # 优先级低于MASTER
# advert_int 1
#
# authentication {
# auth_type PASS
# auth_pass 1234
# }
#
# virtual_ipaddress {
# 192.168.1.100/24 dev eth0
# }
#
# track_script {
# chk_nginx
# }
#
# unicast_src_ip 192.168.1.11
# unicast_peer {
# 192.168.1.10
# 192.168.1.12
# }
# }
bash
#!/bin/bash
# check_nginx.sh
# Nginx health-check script for the keepalived vrrp_script block.
# Exit 0 = healthy, 1 = unhealthy (keepalived then lowers this node's priority).
# Fix: the shebang must be the FIRST line of the file; the original placed a
# comment above it, which made the interpreter line ineffective.

# Is an nginx master process running?
if pgrep -x "nginx" > /dev/null
then
    # Is it actually listening on port 80?
    if netstat -tln | grep -q ":80 "
    then
        exit 0  # service healthy
    else
        exit 1  # process up but port not listening
    fi
else
    # One-shot recovery attempt before reporting failure.
    systemctl start nginx
    sleep 2
    # Re-check after the restart attempt.
    if pgrep -x "nginx" > /dev/null
    then
        exit 0
    else
        exit 1
    fi
fi
bash
#!/bin/bash
# notify.sh
# Keepalived state-transition notification hook.
# keepalived invokes notify scripts as: <script> <TYPE> <NAME> <STATE>
# Fixes: shebang moved to the first line; $STATE quoted against word
# splitting; deprecated backticks replaced with $( ).

TYPE=$1
NAME=$2
STATE=$3

case "$STATE" in
"MASTER")
    echo "$(date): 成为MASTER节点" >> /var/log/keepalived.log
    # Bring up VIP-dependent services (nginx, service registration, ...).
    systemctl start nginx
    # Push a notification to the monitoring endpoint.
    curl -X POST http://monitor.example.com/api/notify \
        -H "Content-Type: application/json" \
        -d "{\"type\":\"MASTER\",\"host\":\"$(hostname)\",\"time\":\"$(date)\"}"
    ;;
"BACKUP")
    echo "$(date): 成为BACKUP节点" >> /var/log/keepalived.log
    # Release VIP-dependent services on the backup node.
    systemctl stop nginx
    ;;
"FAULT")
    echo "$(date): 出现故障" >> /var/log/keepalived.log
    # Record the fault in syslog as well.
    logger -t keepalived "Keepalived fault state"
    ;;
esac
yaml
# docker-compose-keepalived.yml
version: '3'
services:
keepalived-master:
image: osixia/keepalived:latest
container_name: keepalived-master
cap_add:
- NET_ADMIN
- NET_BROADCAST
- NET_RAW
network_mode: host
volumes:
- ./keepalived-master.conf:/container/service/keepalived/assets/keepalived.conf
- ./check_nginx.sh:/usr/local/bin/check_nginx.sh
- ./notify.sh:/usr/local/bin/notify.sh
environment:
- KEEPALIVED_INTERFACE=eth0
- KEEPALIVED_PASSWORD=1234
- KEEPALIVED_PRIORITY=100
- KEEPALIVED_VIRTUAL_IPS=192.168.1.100
restart: always
keepalived-backup:
image: osixia/keepalived:latest
container_name: keepalived-backup
cap_add:
- NET_ADMIN
- NET_BROADCAST
- NET_RAW
network_mode: host
volumes:
- ./keepalived-backup.conf:/container/service/keepalived/assets/keepalived.conf
- ./check_nginx.sh:/usr/local/bin/check_nginx.sh
- ./notify.sh:/usr/local/bin/notify.sh
environment:
- KEEPALIVED_INTERFACE=eth0
- KEEPALIVED_PASSWORD=1234
- KEEPALIVED_PRIORITY=90
- KEEPALIVED_VIRTUAL_IPS=192.168.1.100
restart: always
7.5 完整的集群部署示例
java
/**
 * Spring Boot entry point for a cluster node with Redis-backed sessions.
 */
@SpringBootApplication
@EnableRedisHttpSession
public class ClusterApplication {

    public static void main(String[] args) {
        SpringApplication.run(ClusterApplication.class, args);
    }

    /**
     * Describes the current instance: hostname, IP and JVM process id.
     * Fix: the original did not handle the checked UnknownHostException
     * thrown by InetAddress.getLocalHost() and therefore did not compile;
     * we now fall back to loopback identifiers instead of failing startup.
     */
    @Bean
    public InstanceInfo instanceInfo() {
        // Container hostname takes precedence when running under Docker.
        String host = System.getenv("HOSTNAME");
        String ip;
        try {
            InetAddress local = InetAddress.getLocalHost();
            if (host == null) {
                host = local.getHostName();
            }
            ip = local.getHostAddress();
        } catch (UnknownHostException e) {
            if (host == null) {
                host = "unknown";
            }
            ip = "127.0.0.1";
        }
        // Runtime name has the form "pid@hostname".
        return new InstanceInfo(host, ip,
                ManagementFactory.getRuntimeMXBean().getName());
    }
}
/**
 * Identity of one cluster instance. Lombok generates the getters/setters,
 * equals/hashCode/toString (@Data) and the all-args constructor.
 */
@Data
@AllArgsConstructor
public class InstanceInfo {
// Machine or container hostname.
private String hostname;
// Resolved IP address of this instance.
private String ip;
// JVM runtime name, typically "pid@hostname".
private String processId;
}
/**
* 集群信息控制器
*/
@RestController
@RequestMapping("/api/cluster")
public class ClusterInfoController {
@Autowired
private InstanceInfo instanceInfo;
@Autowired
private RedisTemplate<String, Object> redisTemplate;
/**
* 获取当前实例信息
*/
@GetMapping("/instance")
public Result<InstanceInfo> getInstanceInfo() {
return Result.success(instanceInfo);
}
/**
* 获取所有活跃Session
*/
@GetMapping("/sessions")
public Result<Set<String>> getActiveSessions() {
// 从Redis获取所有Session
Set<String> sessions = redisTemplate.keys("spring:session:sessions:*");
return Result.success(sessions);
}
/**
* 获取集群健康状态
*/
@GetMapping("/health")
public Map<String, Object> health() {
Map<String, Object> health = new HashMap<>();
health.put("status", "UP");
health.put("instance", instanceInfo.getHostname());
health.put("timestamp", System.currentTimeMillis());
// Redis健康检查
try {
String pong = redisTemplate.getConnectionFactory()
.getConnection().ping();
health.put("redis", "UP");
} catch (Exception e) {
health.put("redis", "DOWN");
health.put("status", "DOWN");
}
return health;
}
}
八、本章总结
本章详细介绍了Redis在Java应用中的各种编程开发技术,涵盖了从基础连接到高级应用的全方位内容:
- Lettuce客户端基础:包括RedisClient连接管理、连接池配置、异步操作和响应式编程
- Spring Data Redis:完整的RedisTemplate操作示例、对象序列化处理
- Pipeline命令流水线:批量操作优化技术
- Spring Cache整合:缓存注解使用、多级缓存配置
- 分布式锁实现:可重入锁、自动续期等高级特性
- 接口幂等性:基于Redis的幂等性解决方案
- 集群与分布式Session:Spring Session集成、Nginx负载均衡、Keepalived高可用配置
每个知识点都提供了具体的代码示例和详细注释,能够帮助开发者快速掌握Redis在实际项目中的应用技术。