Spring Boot 4.0 Kafka Integration: An Enterprise Application Guide

1. Environment Setup

1.1 Versions

  • Spring Boot: 4.0
  • Spring Kafka: 3.2
  • Kafka: 3.2
  • Java: 17

1.2 Project Dependencies

xml
<!-- pom.xml -->
<dependencies>
    <!-- Spring Boot Starter -->
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter</artifactId>
    </dependency>
    
    <!-- Spring Kafka -->
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
    </dependency>
    
    <!-- Jackson for message serialization -->
    <dependency>
        <groupId>com.fasterxml.jackson.core</groupId>
        <artifactId>jackson-databind</artifactId>
    </dependency>
    
    <!-- Configuration metadata processor -->
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-configuration-processor</artifactId>
        <optional>true</optional>
    </dependency>
    
    <!-- Testing -->
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka-test</artifactId>
        <scope>test</scope>
    </dependency>
</dependencies>

2. Core Kafka Configuration

2.1 Configuration Class

java
@Configuration
@EnableKafka
public class KafkaConfig {

    // Fields referenced below must actually be declared; inject them from the
    // application configuration (the original @ConfigurationProperties annotation
    // bound nothing and has been removed)
    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Value("${spring.kafka.consumer.group-id}")
    private String groupId;

    @Value("${spring.kafka.consumer.client-id}")
    private String clientId;
    
    // 1. Producer configuration
    @Bean
    public ProducerFactory<String, Object> producerFactory() {
        Map<String, Object> configProps = new HashMap<>();
        
        // Connection and serialization
        configProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        configProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        configProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
        
        // Reliability
        configProps.put(ProducerConfig.ACKS_CONFIG, "all");  // wait for all in-sync replicas
        configProps.put(ProducerConfig.RETRIES_CONFIG, 3);   // retry count
        configProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);  // idempotent producer
        
        // Performance
        configProps.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");  // compression
        configProps.put(ProducerConfig.LINGER_MS_CONFIG, 5);       // batching delay (ms)
        configProps.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);  // batch size (bytes)
        
        DefaultKafkaProducerFactory<String, Object> factory =
            new DefaultKafkaProducerFactory<>(configProps);
        
        // Transactions: set the transactional-id prefix on the factory (Spring appends
        // a unique suffix per producer) rather than putting TRANSACTIONAL_ID_CONFIG in
        // the map. Note: once this is set, every send must run inside a transaction.
        factory.setTransactionIdPrefix("txn-");
        
        return factory;
    }
    
    // 2. Consumer configuration
    @Bean
    public ConsumerFactory<String, Object> consumerFactory() {
        Map<String, Object> props = new HashMap<>();
        
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class);
        
        // Consumer group
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId);
        
        // Offset-commit strategy
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);     // manual commit
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");   // offset reset policy
        
        // Performance and reliability
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);      // minimum fetch size
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);  // max fetch wait (ms)
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 500);   // max records per poll
        
        // Trusted packages for JSON deserialization
        props.put(JsonDeserializer.TRUSTED_PACKAGES, "*");
        
        return new DefaultKafkaConsumerFactory<>(props);
    }
    
    // 3. KafkaTemplate
    @Bean
    public KafkaTemplate<String, Object> kafkaTemplate() {
        KafkaTemplate<String, Object> template = new KafkaTemplate<>(producerFactory());
        
        // Listener for send outcomes
        template.setProducerListener(new ProducerListener<String, Object>() {
            @Override
            public void onSuccess(
                ProducerRecord<String, Object> producerRecord,
                RecordMetadata recordMetadata) {
                // success callback
            }
            
            @Override
            public void onError(
                ProducerRecord<String, Object> producerRecord,
                RecordMetadata recordMetadata,  // may be null; required by the 3.x signature
                Exception exception) {
                // failure callback
            }
        });
        
        return template;
    }
    
    // 4. Concurrent listener container factory
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Object> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        
        // Concurrency: three consumer threads per listener container
        factory.setConcurrency(3);
        
        // Note: batch consumption is configured on the dedicated batchContainerFactory
        // (section 5.1); enabling it here would break the single-record listeners that
        // use this factory.
        
        // Error handling with retry: in Spring Kafka 3.x, DefaultErrorHandler replaces
        // SeekToCurrentErrorHandler and the removed setRetryTemplate/setStatefulRetry APIs.
        factory.setCommonErrorHandler(new DefaultErrorHandler(new FixedBackOff(1000L, 3)));
        
        return factory;
    }
    
    // 5. Dead-letter queue configuration
    @Bean
    public DeadLetterPublishingRecoverer dlqRecoverer() {
        return new DeadLetterPublishingRecoverer(kafkaTemplate(),
            (record, ex) -> {
                // dead-letter topic naming: original topic + ".DLT"
                return new TopicPartition(record.topic() + ".DLT", record.partition());
            });
    }
}
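
Because producerFactory() sets a transactional-id prefix, a KafkaTransactionManager can be declared so that @Transactional methods (such as the one in section 4.1) run their sends inside a Kafka transaction. A minimal sketch; note that a method spanning both a database transaction and a Kafka transaction gives only best-effort consistency unless the two are explicitly coordinated:

java
// Sketch: lets @Transactional methods participate in Kafka transactions
@Bean
public KafkaTransactionManager<String, Object> kafkaTransactionManager(
        ProducerFactory<String, Object> producerFactory) {
    return new KafkaTransactionManager<>(producerFactory);
}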

2.2 Application Configuration

yaml
# application.yml
spring:
  kafka:
    # Broker addresses
    bootstrap-servers: localhost:9092,localhost:9093,localhost:9094
    consumer:
      group-id: ${spring.application.name}-group
      client-id: ${spring.application.name}-${random.uuid}
      auto-offset-reset: latest
      enable-auto-commit: false
      max-poll-records: 500
      fetch-min-size: 1
      fetch-max-wait: 500
    producer:
      acks: all
      retries: 3
      compression-type: snappy
      batch-size: 16384
      properties:
        linger.ms: 5  # Boot has no first-class linger-ms key; set it under properties
    listener:
      type: batch
      concurrency: 3
      ack-mode: manual_immediate
      poll-timeout: 3000
    
    # Security settings (SASL; use SASL_SSL when combining with the SSL settings below)
    properties:
      security.protocol: SASL_PLAINTEXT
      sasl.mechanism: PLAIN
      sasl.jaas.config: org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";
    
    # SSL configuration
    ssl:
      key-password: password
      keystore-location: classpath:kafka.client.keystore.jks
      keystore-password: password
      truststore-location: classpath:kafka.client.truststore.jks
      truststore-password: password

3. Commonly Used Annotations

3.1 Core Annotations

java
@Service
@Slf4j
public class OrderMessageService {
    
    // 1. @KafkaListener - the core consumer annotation
    @KafkaListener(
        id = "order-consumer",  // listener id
        topics = "${kafka.topic.order}",  // topic(s) to listen on
        groupId = "${spring.kafka.consumer.group-id}",  // consumer group
        containerFactory = "kafkaListenerContainerFactory",  // container factory
        concurrency = "3",  // concurrency
        autoStartup = "true",  // start automatically
        properties = {
            "max.poll.records=500",  // per-listener overrides
            "fetch.min.bytes=1"
        }
    )
    public void consumeOrderMessage(
        // 2. @Payload - the message body
        @Payload OrderMessage message,
        
        // 3. @Header - individual headers
        @Header(KafkaHeaders.RECEIVED_TOPIC) String topic,
        @Header(KafkaHeaders.RECEIVED_PARTITION) int partition,
        @Header(KafkaHeaders.OFFSET) long offset,
        @Header(KafkaHeaders.RECEIVED_TIMESTAMP) long timestamp,
        @Header(KafkaHeaders.RECEIVED_KEY) String key,  // RECEIVED_MESSAGE_KEY was renamed in Spring Kafka 3.0
        
        // 4. @Headers - all headers at once
        @Headers MessageHeaders headers,
        
        // 5. Acknowledgment - manual offset commit
        Acknowledgment acknowledgment,
        
        // 6. Consumer - the underlying Kafka consumer
        Consumer<String, OrderMessage> consumer
    ) {
        try {
            log.info("Received order message: {}, topic: {}, partition: {}, offset: {}", 
                message, topic, partition, offset);
            
            // business processing
            processOrder(message);
            
            // commit the offset manually
            acknowledgment.acknowledge();
            
        } catch (Exception e) {
            log.error("Failed to process message", e);
            // route to the dead-letter queue
        }
    }
    
    // 7. Batch consumption
    @KafkaListener(
        topics = "${kafka.topic.order.batch}",
        containerFactory = "batchContainerFactory"
    )
    public void consumeBatchMessages(List<ConsumerRecord<String, OrderMessage>> records) {
        for (ConsumerRecord<String, OrderMessage> record : records) {
            processOrder(record.value());
        }
    }
    
    // 8. Filtering specific messages
    @KafkaListener(topics = "orders", containerFactory = "filterContainerFactory")
    public void listenWithFilter(OrderMessage message) {
        // only messages passing the factory's filter reach this method
    }
    
    // 9. Dynamic topic subscription
    @KafkaListener(topicPattern = "orders-.*")
    public void listenPattern(String message) {
        // matches every topic whose name starts with "orders-"
    }
}

3.2 Conditional Listeners

java
@Component
@Slf4j
public class ConditionalListener {
    
    // Profile-gated listener: @KafkaListener has no `condition` attribute, so gate
    // startup through autoStartup, which accepts a SpEL/property-placeholder expression
    @KafkaListener(
        topics = "orders",
        autoStartup = "#{'${spring.profiles.active:}' == 'prod'}"
    )
    public void listenInProd(OrderMessage message) {
        // production-only logic
    }
    
    // Content-based "conditions" are implemented with a filtering container factory
    // (see the sketch after this class) rather than an annotation attribute
    @KafkaListener(
        topics = "orders",
        containerFactory = "urgentFilterContainerFactory"
    )
    public void listenUrgentMessages(@Payload String message) {
        // handle urgent messages only
    }
}
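
The header-based case depends on a filtering container factory. A minimal sketch using RecordFilterStrategy; the urgentFilterContainerFactory name is an assumption introduced here:

java
// Sketch: container factory that discards records unless header messageType == "urgent"
@Bean
public ConcurrentKafkaListenerContainerFactory<String, Object> urgentFilterContainerFactory(
        ConsumerFactory<String, Object> consumerFactory) {
    ConcurrentKafkaListenerContainerFactory<String, Object> factory =
        new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory);
    // returning true FILTERS OUT (discards) the record
    factory.setRecordFilterStrategy(record -> {
        Header header = record.headers().lastHeader("messageType");
        return header == null
            || !"urgent".equals(new String(header.value(), StandardCharsets.UTF_8));
    });
    return factory;
}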

4. Producer Implementation

4.1 Basic Producer

java
@Component
@Slf4j
public class KafkaProducerService {
    
    @Autowired
    private KafkaTemplate<String, Object> kafkaTemplate;
    
    // used by sendTransactionMessage below
    @Autowired
    private OrderRepository orderRepository;
    
    /**
     * Send a message synchronously
     */
    public SendResult<String, Object> sendSync(String topic, Object data) {
        try {
            // build the record; use the order id as the key when available
            ProducerRecord<String, Object> record = new ProducerRecord<>(
                topic, 
                data instanceof Order ? ((Order) data).getId() : null,
                data
            );
            
            // add message headers
            record.headers().add("source", "order-service".getBytes());
            record.headers().add("timestamp", String.valueOf(System.currentTimeMillis()).getBytes());
            
            // block until the send completes
            return kafkaTemplate.send(record).get();
            
        } catch (Exception e) {
            log.error("Synchronous send failed", e);
            throw new RuntimeException("Failed to send message", e);
        }
    }
    
    /**
     * Send a message asynchronously
     */
    public void sendAsync(String topic, Object data) {
        // In Spring Kafka 3.x, send() returns a CompletableFuture;
        // the ListenableFuture/addCallback API was removed
        kafkaTemplate.send(topic, data)
            .whenComplete((result, ex) -> {
                if (ex == null) {
                    log.info("Message sent: topic={}, partition={}, offset={}",
                        result.getRecordMetadata().topic(),
                        result.getRecordMetadata().partition(),
                        result.getRecordMetadata().offset());
                } else {
                    log.error("Message send failed", ex);
                    // retry, or persist for later redelivery
                }
            });
    }
    
    /**
     * Send within a transaction
     */
    @Transactional
    public void sendTransactionMessage(String topic, Order order) {
        // 1. Persist to the database
        orderRepository.save(order);
        
        // 2. Send the Kafka message
        kafkaTemplate.send(topic, order);
        
        // 3. Further business operations
        // ...
        
        // The Kafka send is committed together with the transaction
    }
    
    /**
     * Send a batch of messages
     */
    public void sendBatch(List<ProducerRecord<String, Object>> records) {
        List<CompletableFuture<SendResult<String, Object>>> futures = new ArrayList<>();
        
        for (ProducerRecord<String, Object> record : records) {
            // send() already returns a CompletableFuture; no extra wrapping needed
            futures.add(kafkaTemplate.send(record));
        }
        
        // wait for all sends to complete
        CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
            .exceptionally(ex -> {
                log.error("Batch send failed", ex);
                return null;
            })
            .join();
    }
}
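
When only Kafka operations need atomicity (no database involved), KafkaTemplate.executeInTransaction runs a local Kafka transaction without a Spring transaction manager. A brief sketch; the method name is illustrative:

java
// Sketch: all sends in the callback commit or abort together
public void sendAllOrNothing(String topic, List<Order> orders) {
    kafkaTemplate.executeInTransaction(ops -> {
        for (Order order : orders) {
            ops.send(topic, order.getId(), order);
        }
        return true;
    });
}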

4.2 Producer Interceptor

java
// Renamed: a class named ProducerInterceptor cannot implement the interface of the
// same name. Kafka instantiates interceptors itself, so register this class via
// ProducerConfig.INTERCEPTOR_CLASSES_CONFIG rather than as a Spring bean.
@Slf4j
public class TracingProducerInterceptor implements ProducerInterceptor<String, Object> {
    
    @Override
    public ProducerRecord<String, Object> onSend(ProducerRecord<String, Object> record) {
        // runs before the record is sent
        record.headers().add("trace-id", UUID.randomUUID().toString().getBytes());
        record.headers().add("send-time", String.valueOf(System.currentTimeMillis()).getBytes());
        
        return record;
    }
    
    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        if (exception != null) {
            // send failed; metadata may be incomplete here
            log.error("Message send failed", exception);
        } else {
            log.info("Message sent: topic={}, partition={}, offset={}",
                metadata.topic(), metadata.partition(), metadata.offset());
        }
    }
    
    @Override
    public void close() {
        // release resources
    }
    
    @Override
    public void configure(Map<String, ?> configs) {
        // initialize from configuration
    }
}

5. Consumer Implementation

5.1 Advanced Consumer Configuration

java
@Configuration
@Slf4j
public class AdvancedConsumerConfig {
    
    // 1. Manual offset-commit factory; the ConsumerFactory is injected because it is
    // defined in another configuration class
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Object> manualAckContainerFactory(
            ConsumerFactory<String, Object> consumerFactory) {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory);
        
        // switch to manual, immediate acknowledgment
        ContainerProperties containerProperties = factory.getContainerProperties();
        containerProperties.setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        
        return factory;
    }
    
    // 2. Batch-consumption factory
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Object> batchContainerFactory(
            ConsumerFactory<String, Object> consumerFactory) {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory);
        factory.setBatchListener(true);
        factory.setConcurrency(3);
        factory.getContainerProperties().setPollTimeout(3000);
        
        return factory;
    }
    
    // 3. Retry factory
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Object> retryContainerFactory(
            ConsumerFactory<String, Object> consumerFactory) {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory);
        
        // Spring Kafka 3.x removed setRetryTemplate; configure retry through
        // DefaultErrorHandler with an exponential back-off:
        // 1s initial, doubling, capped at 10s, at most 3 retries
        ExponentialBackOffWithMaxRetries backOff = new ExponentialBackOffWithMaxRetries(3);
        backOff.setInitialInterval(1000L);
        backOff.setMultiplier(2.0);
        backOff.setMaxInterval(10000L);
        factory.setCommonErrorHandler(new DefaultErrorHandler(backOff));
        
        return factory;
    }
}

5.2 Consumer Service Implementation

java
@Service
@Slf4j
public class OrderConsumerService {
    
    // 1. Basic consumer
    @KafkaListener(
        topics = "${kafka.topic.order}",
        groupId = "${spring.kafka.consumer.group-id}",
        containerFactory = "kafkaListenerContainerFactory"
    )
    public void consumeOrder(OrderMessage message) {
        log.info("Received order message: {}", message);
        processOrder(message);
    }
    
    // 2. Manual offset commit
    @KafkaListener(
        topics = "orders",
        containerFactory = "manualAckContainerFactory"
    )
    public void consumeWithManualAck(
        @Payload OrderMessage message,
        Acknowledgment acknowledgment,
        @Header(KafkaHeaders.RECEIVED_PARTITION) int partition,
        @Header(KafkaHeaders.OFFSET) long offset) {
        
        try {
            processOrder(message);
            
            // commit the offset manually
            acknowledgment.acknowledge();
            
            log.info("Message processed: partition={}, offset={}", partition, offset);
            
        } catch (Exception e) {
            log.error("Failed to process message", e);
            // decide whether to acknowledge based on the exception type
        }
    }
    
    // 3. Batch consumption
    @KafkaListener(
        topics = "orders-batch",
        containerFactory = "batchContainerFactory"
    )
    public void consumeOrdersBatch(List<ConsumerRecord<String, OrderMessage>> records) {
        log.info("Received a batch of {} messages", records.size());
        
        for (ConsumerRecord<String, OrderMessage> record : records) {
            try {
                processOrder(record.value());
            } catch (Exception e) {
                log.error("Failed to process message: key={}", record.key(), e);
            }
        }
    }
    
    // 4. Consumption with retry
    @KafkaListener(
        topics = "orders-retry",
        containerFactory = "retryContainerFactory"
    )
    public void consumeWithRetry(OrderMessage message) {
        // a thrown exception triggers up to 3 retries (see retryContainerFactory)
        processOrder(message);
    }
    
    // 5. Dynamic topic listening
    @KafkaListener(id = "dynamicListener", topicPattern = "user-.*-events")
    public void listenDynamicTopics(String message) {
        // handle messages from pattern-matched topics
    }
    
    private void processOrder(OrderMessage message) {
        // business logic
        if (message.getAmount() > 10000) {
            // special handling for large orders
            processLargeOrder(message);
        } else {
            processNormalOrder(message);
        }
    }
}

6. Enterprise Features

6.1 Dead-Letter Queue Handling

java
@Configuration
@Slf4j
public class DLQConfiguration {
    
    @Bean
    public KafkaTemplate<String, Object> dlqKafkaTemplate() {
        Map<String, Object> configs = new HashMap<>();
        configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
        
        return new KafkaTemplate<>(new DefaultKafkaProducerFactory<>(configs));
    }
    
    // Renamed to avoid clashing with the dlqRecoverer bean in KafkaConfig (section 2.1)
    @Bean
    public DeadLetterPublishingRecoverer routingDlqRecoverer() {
        return new DeadLetterPublishingRecoverer(dlqKafkaTemplate(),
            (record, ex) -> {
                // route to a dead-letter topic by failure type
                if (ex instanceof SerializationException) {
                    return new TopicPartition(record.topic() + ".DLQ.SERIALIZATION", 0);
                } else if (ex instanceof BusinessException) {
                    return new TopicPartition(record.topic() + ".DLQ.BUSINESS", 0);
                } else {
                    return new TopicPartition(record.topic() + ".DLQ.ERROR", 0);
                }
            });
    }
    
    @Bean
    public DefaultErrorHandler dlqErrorHandler() {
        // DefaultErrorHandler replaces SeekToCurrentErrorHandler in Spring Kafka 3.x:
        // retry 3 times, one second apart, then publish to the dead-letter topic
        return new DefaultErrorHandler(routingDlqRecoverer(), new FixedBackOff(1000L, 3));
    }
}
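
Dead-lettered records can then be consumed for inspection or replay. A minimal sketch of such a monitor (topic and group names are illustrative); Spring adds headers such as KafkaHeaders.DLT_EXCEPTION_MESSAGE describing the original failure:

java
// Sketch: inspect records routed to the business dead-letter topic
@KafkaListener(topics = "orders.DLQ.BUSINESS", groupId = "dlq-monitor")
public void consumeDeadLetters(ConsumerRecord<String, Object> record) {
    Header reason = record.headers().lastHeader(KafkaHeaders.DLT_EXCEPTION_MESSAGE);
    log.warn("Dead letter: key={}, reason={}", record.key(),
        reason != null ? new String(reason.value(), StandardCharsets.UTF_8) : "unknown");
}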

6.2 Idempotent Message Handling

java
@Service
@Slf4j
public class IdempotentConsumer {
    
    @Autowired
    private RedisTemplate<String, String> redisTemplate;
    
    private static final String MESSAGE_IDEMPOTENT_KEY = "msg:idempotent:";
    
    @KafkaListener(topics = "orders")
    public void consumeWithIdempotent(
        @Payload OrderMessage message,
        @Header(KafkaHeaders.RECEIVED_KEY) String messageId) {  // RECEIVED_MESSAGE_KEY was renamed in 3.0
        
        // Atomically claim the message id: setIfAbsent avoids the race that a
        // separate get-then-set check would have between concurrent consumers
        Boolean firstTime = redisTemplate.opsForValue().setIfAbsent(
            MESSAGE_IDEMPOTENT_KEY + messageId, "processed", 1, TimeUnit.HOURS);
        if (!Boolean.TRUE.equals(firstTime)) {
            log.warn("Message already processed, skipping: messageId={}", messageId);
            return;
        }
        
        try {
            // process the message
            processOrder(message);
            
        } catch (Exception e) {
            // release the claim so a redelivery can retry the message
            redisTemplate.delete(MESSAGE_IDEMPOTENT_KEY + messageId);
            log.error("Failed to process message", e);
            throw e;
        }
    }
}

6.3 Message Tracing

java
@Component
@Slf4j
public class MessageTracer {
    
    @KafkaListener(topics = "orders")
    public void consumeWithTrace(
        @Payload OrderMessage message,
        @Header(name = "trace-id", required = false) String traceId) {
        
        String currentTraceId = traceId != null ? traceId : UUID.randomUUID().toString();
        
        MDC.put("traceId", currentTraceId);
        
        try {
            log.info("开始处理消息: traceId={}, message={}", currentTraceId, message);
            processOrder(message);
            log.info("消息处理完成: traceId={}", currentTraceId);
        } finally {
            MDC.remove("traceId");
        }
    }
}

7. Monitoring and Metrics

7.1 Metrics Configuration

java
@Configuration
public class KafkaMetricsConfig {
    
    // Spring Kafka publishes producer/consumer client metrics to Micrometer through
    // factory listeners; Micrometer has no binder that accepts a factory directly
    @Bean
    public MicrometerProducerListener<String, Object> micrometerProducerListener(
            MeterRegistry meterRegistry) {
        return new MicrometerProducerListener<>(meterRegistry);
    }
    
    @Bean
    public MicrometerConsumerListener<String, Object> micrometerConsumerListener(
            MeterRegistry meterRegistry) {
        return new MicrometerConsumerListener<>(meterRegistry);
    }
    
    // For manually built factories (section 2.1), attach the listeners:
    //   producerFactory.addListener(micrometerProducerListener);
    //   consumerFactory.addListener(micrometerConsumerListener);
}

7.2 Health Check

java
@Component
@Slf4j
public class KafkaHealthIndicator implements HealthIndicator {
    
    @Autowired
    private KafkaAdmin kafkaAdmin;
    
    @Override
    public Health health() {
        // KafkaAdmin exposes no public createAdminClient(); build an AdminClient from
        // its configuration and let try-with-resources close it
        try (AdminClient adminClient = AdminClient.create(kafkaAdmin.getConfigurationProperties())) {
            // fetch cluster information, bounded by a timeout
            DescribeClusterResult cluster = adminClient.describeCluster();
            
            int nodeCount = cluster.nodes().get(5, TimeUnit.SECONDS).size();
            String clusterId = cluster.clusterId().get(5, TimeUnit.SECONDS);
            
            return Health.up()
                .withDetail("clusterId", clusterId)
                .withDetail("nodeCount", nodeCount)
                .build();
                
        } catch (Exception e) {
            return Health.down()
                .withException(e)
                .build();
        }
    }
}

8. Testing

8.1 Integration Tests

java
// Avoid hard-coding broker ports: the embedded broker picks a free port and
// exposes it through the spring.embedded.kafka.brokers property
@SpringBootTest(properties = "spring.kafka.bootstrap-servers=${spring.embedded.kafka.brokers}")
@EmbeddedKafka(partitions = 1, topics = {"test-topic"})
@Slf4j
class KafkaIntegrationTest {
    
    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;
    
    @Autowired
    private EmbeddedKafkaBroker embeddedKafkaBroker;
    
    @Test
    void testMessageSendAndReceive() throws Exception {
        // send a message
        String message = "test message";
        kafkaTemplate.send("test-topic", message);
        
        // wait for the message to be consumed
        Thread.sleep(1000);
        
        // verify the processing result
        // ...
    }
}
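
Rather than sleeping and asserting nothing, delivery can be verified with a throwaway test consumer from spring-kafka-test. A minimal sketch using the injected embeddedKafkaBroker (group name is illustrative):

java
// Sketch: verify delivery with a dedicated test consumer
Map<String, Object> props =
    KafkaTestUtils.consumerProps("verify-group", "true", embeddedKafkaBroker);
try (Consumer<String, String> consumer =
         new DefaultKafkaConsumerFactory<>(props,
             new StringDeserializer(), new StringDeserializer()).createConsumer()) {
    embeddedKafkaBroker.consumeFromAnEmbeddedTopic(consumer, "test-topic");
    ConsumerRecord<String, String> received =
        KafkaTestUtils.getSingleRecord(consumer, "test-topic");
    assertThat(received.value()).isEqualTo("test message");
}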

8.2 Unit Tests

java
@ExtendWith(MockitoExtension.class)
class KafkaProducerServiceTest {
    
    @Mock
    private KafkaTemplate<String, Object> kafkaTemplate;
    
    @InjectMocks
    private KafkaProducerService producerService;
    
    @Test
    void testSendSync() throws Exception {
        // arrange
        String topic = "test-topic";
        String message = "test";
        SendResult<String, Object> expectedResult = mock(SendResult.class);
        when(kafkaTemplate.send(any(ProducerRecord.class)))
            .thenReturn(CompletableFuture.completedFuture(expectedResult));
        
        // act
        SendResult<String, Object> result = producerService.sendSync(topic, message);
        
        // assert
        assertThat(result).isEqualTo(expectedResult);
        verify(kafkaTemplate).send(any(ProducerRecord.class));
    }
}

9. Best-Practice Summary

9.1 Configuration Best Practices

  1. Producer configuration

    • Set acks=all for delivery reliability
    • Enable idempotence with enable.idempotence=true
    • Tune retry counts and timeouts sensibly
    • Use compression to reduce network transfer
  2. Consumer configuration

    • Disable auto-commit and commit offsets manually
    • Cap max.poll.records to avoid memory pressure
    • Configure appropriate heartbeat and session timeouts
    • Use consumer groups for load balancing
  3. Topic design (see the sketch after this list)

    • Partition topics by business domain
    • Choose partition and replica counts deliberately
    • Set a sensible message-retention policy
    • Use dead-letter queues for failed messages
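
Topic settings such as partition count, replication factor, and retention can also be declared in code: Spring's KafkaAdmin applies NewTopic beans at startup. A hedged sketch (topic name and sizes are illustrative):

java
// Sketch: declarative topic creation, applied by KafkaAdmin at startup
@Bean
public NewTopic ordersTopic() {
    return TopicBuilder.name("orders")
        .partitions(6)                                         // illustrative partition count
        .replicas(3)                                           // illustrative replication factor
        .config(TopicConfig.RETENTION_MS_CONFIG, "604800000")  // 7-day retention
        .build();
}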

9.2 Coding Best Practices

  1. Message design (see the sketch after this list)

    • Define explicit message contracts
    • Version the message format
    • Include essential metadata (traceId, timestamp, etc.)
  2. Error handling

    • Implement thorough exception handling
    • Use retries and dead-letter queues
    • Monitor key metrics and alert on them
  3. Performance

    • Send and consume in batches
    • Tune batch size and linger time
    • Use compression and efficient serialization
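
To make the first point concrete, here is a hedged sketch of a versioned message contract; the OrderEvent name and its fields are illustrative, not part of the code above:

java
// Sketch: an explicitly versioned message contract carrying tracing metadata
public record OrderEvent(
    int schemaVersion,            // bump on breaking changes so consumers can branch on it
    String orderId,
    java.math.BigDecimal amount,
    String traceId,               // propagated for end-to-end tracing (section 6.3)
    long timestamp                // producer-side event time, epoch millis
) {}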

9.3 Operations Best Practices

  1. Monitoring (a lag-checking sketch follows this list)

    • Monitor end-to-end message latency
    • Monitor consumer lag
    • Monitor error and retry rates
    • Monitor system resource usage
  2. Alerting

    • Alert on consumer-group lag
    • Alert on dead-letter queue growth
    • Alert on producer failure rate
  3. Scaling

    • Adjust partition counts as load changes
    • Size consumer concurrency appropriately
    • Watch for partition skew
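
Consumer lag can be computed directly with the AdminClient by comparing committed offsets against log-end offsets. A minimal sketch (group name and broker address are illustrative):

java
// Sketch: lag per partition = log-end offset - committed offset
public Map<TopicPartition, Long> consumerLag(String groupId) throws Exception {
    try (AdminClient admin = AdminClient.create(
            Map.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"))) {

        Map<TopicPartition, OffsetAndMetadata> committed = admin
            .listConsumerGroupOffsets(groupId)
            .partitionsToOffsetAndMetadata().get();

        Map<TopicPartition, OffsetSpec> latestSpec = committed.keySet().stream()
            .collect(Collectors.toMap(tp -> tp, tp -> OffsetSpec.latest()));

        var logEnd = admin.listOffsets(latestSpec).all().get();

        Map<TopicPartition, Long> lag = new HashMap<>();
        committed.forEach((tp, om) -> lag.put(tp, logEnd.get(tp).offset() - om.offset()));
        return lag;
    }
}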

10. Troubleshooting Common Issues

10.1 Troubleshooting Checklist

  1. Message send failures (see the connectivity sketch after this list)

    • Check network connectivity
    • Check authentication settings
    • Check that the topic exists
    • Check serializer configuration
  2. Message consumption failures

    • Check the consumer-group configuration
    • Check the offset-commit strategy
    • Check deserializer configuration
    • Check ACLs/permissions
  3. Performance problems

    • Tune batch sizes
    • Tune concurrency
    • Check network latency
    • Check disk I/O
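
The first two send-failure checks can be automated with the AdminClient. A brief sketch (broker address and topic name are illustrative):

java
// Sketch: first checks for a send failure: is the broker reachable, does the topic exist?
public void checkSendPreconditions() {
    try (AdminClient admin = AdminClient.create(
            Map.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"))) {
        Set<String> topics = admin.listTopics().names().get(5, TimeUnit.SECONDS);
        log.info("Broker reachable; topic 'orders' exists: {}", topics.contains("orders"));
    } catch (Exception e) {
        log.error("Broker unreachable or request timed out", e);
    }
}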

10.2 Debugging Tips

yaml
# application.yml: enable Kafka debug logging
logging:
  level:
    org.apache.kafka: DEBUG
    org.springframework.kafka: DEBUG

This guide has covered the main aspects of integrating Kafka with Spring Boot in enterprise applications: configuration, annotations, advanced features, monitoring, and best practices. Adapt the configuration and implementations to your actual business requirements.
