A Complete Example of Using Kafka in Spring Boot with Ordering Guarantees (Topic with 100 Partitions)

1. Project Dependencies

xml

<!-- pom.xml -->
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
    </dependency>
    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <optional>true</optional>
    </dependency>
</dependencies>

2. Configuration File

yaml

# application.yml
spring:
  kafka:
    bootstrap-servers: localhost:9092
    
    producer:
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      acks: all
      retries: 3
      batch-size: 16384
      buffer-memory: 33554432
      properties:
        enable.idempotence: true # enable idempotent writes
        max.in.flight.requests.per.connection: 1 # key setting for ordering (see section 10)
    
    consumer:
      group-id: order-group
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      enable-auto-commit: false # commit offsets manually
      auto-offset-reset: earliest
      properties:
        isolation.level: read_committed # only read committed messages

    listener:
      ack-mode: manual_immediate # manual offset commits
      concurrency: 100 # one consumer thread per partition (must match section 5)

3. Producer Configuration Class

java

import org.apache.kafka.clients.producer.ProducerConfig;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

import java.util.HashMap;
import java.util.Map;

@Configuration
public class KafkaProducerConfig {
    
    @Bean
    public ProducerFactory<String, String> producerFactory() {
        Map<String, Object> configProps = new HashMap<>();
        configProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        configProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, 
                       "org.apache.kafka.common.serialization.StringSerializer");
        configProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, 
                       "org.apache.kafka.common.serialization.StringSerializer");
        
        // Key settings for ordering guarantees
        configProps.put(ProducerConfig.ACKS_CONFIG, "all"); // wait for all in-sync replicas
        configProps.put(ProducerConfig.RETRIES_CONFIG, 3);
        // 1 is the conservative choice; with idempotence enabled, Kafka preserves
        // ordering for up to 5 in-flight requests per connection
        configProps.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 1);
        configProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true); // requires acks=all
        
        // Performance tuning
        configProps.put(ProducerConfig.LINGER_MS_CONFIG, 5);
        configProps.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        configProps.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
        
        return new DefaultKafkaProducerFactory<>(configProps);
    }
    
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}
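
Note that section 2's application.yml already declares most of these settings, so the hard-coded map above duplicates them. An alternative sketch (assuming Spring Boot 2.x, where KafkaProperties.buildProducerProperties() takes no arguments; use it instead of, not alongside, the factory above, and wire kafkaTemplate to it) builds on the YAML values and only pins the ordering-critical option in code:

java

import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.ProducerFactory;

@Configuration
public class KafkaProducerConfigFromYaml {

    @Bean
    public ProducerFactory<String, String> producerFactory(KafkaProperties kafkaProperties) {
        // Start from everything declared under spring.kafka.producer in application.yml
        Map<String, Object> props = kafkaProperties.buildProducerProperties();
        // Pin the ordering-critical setting so a YAML edit cannot silently drop it
        props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 1);
        return new DefaultKafkaProducerFactory<>(props);
    }
}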

4. Ordered-Message Producer

java

import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Component;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;

@Component
@Slf4j
public class OrderProducer {
    
    private static final String TOPIC = "order-topic";
    
    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;
    
    /**
     * Send an ordered message.
     * @param orderId order ID, used as the partition key so that all messages
     *                for the same order land in the same partition
     * @param message message payload
     * @param messageSeq message sequence number (used to verify ordering)
     */
    public void sendOrderedMessage(String orderId, String message, int messageSeq) {
        // Use the order ID as the key so messages for the same order go to one partition
        String key = orderId;
        String value = String.format("seq-%d|%s", messageSeq, message);
        
        // Send the message
        ListenableFuture<SendResult<String, String>> future = 
                kafkaTemplate.send(TOPIC, key, value);
        
        // Asynchronous callback
        future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
            @Override
            public void onSuccess(SendResult<String, String> result) {
                log.info("发送成功: orderId={}, seq={}, partition={}, offset={}", 
                        orderId, messageSeq, 
                        result.getRecordMetadata().partition(),
                        result.getRecordMetadata().offset());
            }
            
            @Override
            public void onFailure(Throwable ex) {
                log.error("发送失败: orderId={}, seq={}, error={}", 
                        orderId, messageSeq, ex.getMessage());
                // 这里可以添加重试逻辑
                retrySend(orderId, value);
            }
        });
    }
    
    /**
     * Send the steps of one order in sequence (they must be consumed in order).
     */
    public void sendOrderProcess(String orderId) {
        // Simulated order workflow; these messages must be consumed in order
        sendOrderedMessage(orderId, "order created", 1);
        sendOrderedMessage(orderId, "payment succeeded", 2);
        sendOrderedMessage(orderId, "stock deducted", 3);
        sendOrderedMessage(orderId, "order shipped", 4);
        sendOrderedMessage(orderId, "order completed", 5);
    }
    
    private void retrySend(String key, String value) {
        // Implement retry logic here
        // Note: retries must reuse the same key to stay in the same partition
    }
    
    /**
     * Synchronous send (blocks until the broker acknowledges the message).
     */
    public SendResult<String, String> sendSync(String orderId, String message, int seq) 
            throws Exception {
        String key = orderId;
        String value = String.format("seq-%d|%s", seq, message);
        
        return kafkaTemplate.send(TOPIC, key, value).get();
    }
}
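
A version note: the ListenableFuture callback above targets spring-kafka 2.x. On Spring Boot 3 (spring-kafka 3.x), KafkaTemplate.send() returns a CompletableFuture instead and ListenableFuture is gone. A minimal sketch of the equivalent callback under that assumption:

java

// spring-kafka 3.x variant of sendOrderedMessage()'s callback
// (assumes java.util.concurrent.CompletableFuture is imported)
CompletableFuture<SendResult<String, String>> future =
        kafkaTemplate.send(TOPIC, key, value);

future.whenComplete((result, ex) -> {
    if (ex == null) {
        log.info("Send succeeded: partition={}, offset={}",
                result.getRecordMetadata().partition(),
                result.getRecordMetadata().offset());
    } else {
        log.error("Send failed: {}", ex.getMessage());
    }
});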

5. Consumer Configuration Class

java

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;

import java.util.HashMap;
import java.util.Map;

@Configuration
public class KafkaConsumerConfig {
    
    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "order-consumer-group");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, 
                 "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, 
                 "org.apache.kafka.common.serialization.StringDeserializer");
        
        // Key settings for ordered consumption
        // Conservative: one record per poll. Per-partition ordering holds with larger
        // values too; this mainly limits how much work is repeated after a failure.
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false); // manual offset commits
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        
        return new DefaultKafkaConsumerFactory<>(props);
    }
    
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> 
            kafkaListenerContainerFactory() {
        
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        
        // Important: enough consumer threads so that each partition gets its own.
        // With 100 partitions, concurrency is set to match; extra threads would idle.
        factory.setConcurrency(100); // matches the partition count
        
        // Manual commit mode
        factory.getContainerProperties().setAckMode(
                org.springframework.kafka.listener.ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        
        // Disable batch listening (one record per listener invocation)
        factory.setBatchListener(false);
        
        return factory;
    }
}
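
The factory above does not say what happens when the listener throws. Publishing failures to a retry topic would break per-partition ordering, so for ordered consumption blocking in-place retries are the safer default. A sketch, assuming spring-kafka 2.8+ (where DefaultErrorHandler exists), to be added in kafkaListenerContainerFactory() before the return statement:

java

import org.springframework.kafka.listener.DefaultErrorHandler;
import org.springframework.util.backoff.FixedBackOff;

// On an exception the failed record is re-seeked and retried in place
// (3 retries, 1 second apart), so the partition never advances past it.
factory.setCommonErrorHandler(new DefaultErrorHandler(new FixedBackOff(1000L, 3)));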

6. Ordered-Message Consumer

java

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

@Component
@Slf4j
public class OrderConsumer {
    
    // Tracks the last consumed sequence number per order
    private final Map<String, Integer> orderSeqTracker = new ConcurrentHashMap<>();
    
    /**
     * Listens on all 100 partitions of the order topic.
     */
    @KafkaListener(
            topics = "order-topic",
            groupId = "order-consumer-group",
            containerFactory = "kafkaListenerContainerFactory"
    )
    public void consumeOrderMessage(ConsumerRecord<String, String> record, 
                                   Acknowledgment ack) {
        try {
            String orderId = record.key();
            String value = record.value();
            
            // Parse the message sequence number
            String[] parts = value.split("\\|", 2);
            int currentSeq = Integer.parseInt(parts[0].replace("seq-", ""));
            String message = parts.length > 1 ? parts[1] : "";
            
            // Last sequence number seen for this order
            Integer lastSeq = orderSeqTracker.get(orderId);
            
            // Duplicate check: a redelivered message has seq <= lastSeq
            if (lastSeq != null && currentSeq <= lastSeq) {
                log.warn("Duplicate message skipped: orderId={}, seq={}", orderId, currentSeq);
                ack.acknowledge(); // acknowledge so it is not redelivered again
                return;
            }
            
            // Ordering check: a gap means an earlier message has not arrived yet
            if (lastSeq != null && currentSeq != lastSeq + 1) {
                log.warn("Out-of-order message! orderId={}, expected seq={}, actual seq={}, re-seeking...", 
                        orderId, lastSeq + 1, currentSeq);
                
                // Simply returning without acknowledging would NOT trigger redelivery:
                // the container keeps advancing past the record. nack() seeks back so
                // the record is redelivered after the sleep (spring-kafka 2.3+;
                // 3.x takes a Duration). Redelivery implies possible duplicates,
                // hence the idempotency check above.
                ack.nack(500);
                return;
            }
            
            // Business logic
            processOrderMessage(orderId, currentSeq, message);
            
            // Update the tracked sequence number
            orderSeqTracker.put(orderId, currentSeq);
            
            // Commit the offset manually
            ack.acknowledge();
            
            log.info("消费成功: orderId={}, seq={}, partition={}, offset={}", 
                    orderId, currentSeq, record.partition(), record.offset());
            
        } catch (Exception e) {
            log.error("消费失败: partition={}, offset={}, error={}", 
                    record.partition(), record.offset(), e.getMessage());
            // 根据业务需求决定是否提交offset
        }
    }
    
    private void processOrderMessage(String orderId, int seq, String message) {
        // Business logic
        log.info("Processing order message: orderId={}, seq={}, message={}", orderId, seq, message);
        
        // Simulated business handling
        switch (seq) {
            case 1:
                // create order
                break;
            case 2:
                // handle payment
                break;
            case 3:
                // deduct stock
                break;
            case 4:
                // ship order
                break;
            case 5:
                // order completed
                // clear the sequence tracker for this order
                orderSeqTracker.remove(orderId);
                break;
        }
    }
    
    /**
     * Exposes consumption progress for monitoring.
     */
    public Map<String, Integer> getOrderSeqTracker() {
        return new ConcurrentHashMap<>(orderSeqTracker);
    }
}
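
The consumer notes several times that redelivery can cause duplicate processing, so the business layer must be idempotent. A minimal in-memory sketch of such a guard (a hypothetical helper; in production the processed-key set would live in Redis or a database with an expiry, since this one does not survive a restart):

java

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class IdempotencyGuard {
    
    // Keys of already-processed messages, e.g. "ORDER-1001:3"
    private final Set<String> processedKeys = ConcurrentHashMap.newKeySet();
    
    /**
     * Returns true exactly once per (orderId, seq) pair;
     * Set.add() returns false when the key is already present.
     */
    public boolean firstTime(String orderId, int seq) {
        return processedKeys.add(orderId + ":" + seq);
    }
}

In consumeOrderMessage(), processOrderMessage(...) would then be wrapped in if (guard.firstTime(orderId, currentSeq)) { ... }, with duplicates still acknowledged so they are not redelivered forever.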

7. Controller Layer

java

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;

@RestController
@RequestMapping("/api/kafka")
public class KafkaController {
    
    @Autowired
    private OrderProducer orderProducer;
    
    @Autowired
    private OrderConsumer orderConsumer;
    
    /**
     * Send the ordered messages for one order.
     */
    @PostMapping("/send-order")
    public String sendOrderMessage(@RequestParam String orderId) {
        orderProducer.sendOrderProcess(orderId);
        return "订单消息已发送: " + orderId;
    }
    
    /**
     * Batch send test.
     */
    @PostMapping("/send-batch")
    public String sendBatchMessages(@RequestParam int count) {
        for (int i = 1; i <= count; i++) {
            String orderId = "ORDER-" + System.currentTimeMillis() + "-" + i;
            orderProducer.sendOrderProcess(orderId);
        }
        return "批量发送完成,共" + count + "个订单";
    }
    
    /**
     * Inspect consumption status.
     */
    @GetMapping("/status")
    public Object getConsumeStatus() {
        return orderConsumer.getOrderSeqTracker();
    }
}

8. Main Application Class

java

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@SpringBootApplication
public class KafkaOrderApplication {
    
    public static void main(String[] args) {
        SpringApplication.run(KafkaOrderApplication.class, args);
    }
}

9. Test Class

java

import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;

@SpringBootTest
class OrderProducerTest {
    
    @Autowired
    private OrderProducer orderProducer;
    
    @Test
    void testOrderSequence() {
        // Verify ordering
        String orderId = "TEST-ORDER-001";
        
        // Send ordered messages
        orderProducer.sendOrderedMessage(orderId, "create order", 1);
        orderProducer.sendOrderedMessage(orderId, "pay", 2);
        orderProducer.sendOrderedMessage(orderId, "ship", 3);
        
        // Crude wait for asynchronous consumption to finish
        try {
            Thread.sleep(5000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}
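
Sleeping and reading the logs is a weak assertion. A sharper check, reusing sendSync() from section 4, is to assert that two messages with the same key actually land in the same partition (a sketch; it still needs a reachable broker):

java

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.kafka.support.SendResult;

@SpringBootTest
class SamePartitionTest {
    
    @Autowired
    private OrderProducer orderProducer;
    
    @Test
    void sameKeyLandsInSamePartition() throws Exception {
        SendResult<String, String> first = orderProducer.sendSync("TEST-ORDER-002", "create", 1);
        SendResult<String, String> second = orderProducer.sendSync("TEST-ORDER-002", "pay", 2);
        
        // Same key => same partition: the precondition for ordered consumption
        Assertions.assertEquals(
                first.getRecordMetadata().partition(),
                second.getRecordMetadata().partition());
        // Within a partition, offsets are strictly increasing
        Assertions.assertTrue(
                second.getRecordMetadata().offset() > first.getRecordMetadata().offset());
    }
}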

10. Key Configuration Notes

Key points for guaranteeing ordering:

  1. Producer side

    • max.in.flight.requests.per.connection=1: at most one request in flight per connection, so a retried batch can never overtake a later one (with idempotence enabled, Kafka in fact preserves ordering for values up to 5)

    • acks=all: the broker acknowledges only after all in-sync replicas have the message

    • enable.idempotence=true: idempotent writes, so producer retries cannot introduce duplicates or reordering

    • Use the order ID as the key so all messages for the same order land in the same partition (see the partitioner sketch after the topic-creation command below)

  2. Consumer side

    • One consumer thread per partition (concurrency=100)

    • Manual offset commits

    • Sequence validation and idempotent handling in the business layer

    • Within a single consumer group, each partition is consumed by one thread, which preserves per-partition order

  3. Topic creation (the topic must exist before producing):

bash

# Create the topic with 100 partitions
# (replication-factor 3 needs at least 3 brokers; use 1 on a single local broker)
kafka-topics.sh --create \
  --bootstrap-server localhost:9092 \
  --replication-factor 3 \
  --partitions 100 \
  --topic order-topic
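
As noted in point 1, the whole scheme rests on the key-to-partition mapping. For records with a key, Kafka's default partitioner hashes the key with murmur2 and takes the result modulo the partition count, so the target partition can be computed offline (a sketch using the public helpers from kafka-clients):

java

import java.nio.charset.StandardCharsets;

import org.apache.kafka.common.utils.Utils;

public class PartitionForKey {
    
    /** Same formula the default partitioner applies to keyed records. */
    public static int partitionFor(String key, int numPartitions) {
        byte[] keyBytes = key.getBytes(StandardCharsets.UTF_8);
        return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions;
    }
    
    public static void main(String[] args) {
        // All five messages for this order land in the same one of the 100 partitions
        System.out.println(partitionFor("ORDER-1001", 100));
    }
}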

11. Monitoring and Management

java

// Optional monitoring component
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

import java.util.Map;

@Component
@Slf4j
public class KafkaMetrics {
    
    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;
    
    // Periodically log producer metrics
    @Scheduled(fixedDelay = 60000)
    public void checkProducerHealth() {
        // metrics() returns Map<MetricName, ? extends Metric>, not Map<Object, Object>
        Map<MetricName, ? extends Metric> metrics = kafkaTemplate.metrics();
        log.info("Kafka Producer Metrics: {}", metrics);
    }
}
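
The @Scheduled method above only runs if scheduling is enabled somewhere in the context, which the main class in section 8 does not do. A minimal sketch of the required change:

java

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.scheduling.annotation.EnableScheduling;

@SpringBootApplication
@EnableScheduling // required for the @Scheduled health check above to fire
public class KafkaOrderApplication {
    
    public static void main(String[] args) {
        SpringApplication.run(KafkaOrderApplication.class, args);
    }
}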

This design relies on Kafka's partitioning: all messages with the same order ID are routed to the same partition, and within a partition Kafka preserves order. It also addresses production concerns such as concurrency, fault tolerance, and monitoring.
