How to Use @KafkaListener to Dynamically Load Subscribed Topics from Nacos

1. Introduction

In scenarios where the Kafka topics to consume change frequently, the following approach can be used to subscribe to topics dynamically.

2. Usage Steps
2.1 Add the Dependency
XML
<dependency>
   <groupId>org.springframework.kafka</groupId>
   <artifactId>spring-kafka</artifactId>
   <version>2.8.1</version>
</dependency>
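
Because the topic list lives in Nacos and the Kafka beans are refresh-scoped, the project also needs the Spring Cloud Alibaba Nacos config starter. A minimal sketch, assuming Spring Cloud Alibaba 2021.0.5.0; match the version to your Spring Cloud release:

XML
<dependency>
   <groupId>com.alibaba.cloud</groupId>
   <artifactId>spring-cloud-starter-alibaba-nacos-config</artifactId>
   <version>2021.0.5.0</version>
</dependency>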
2.2 Nacos Configuration
YAML
# Kafka configuration
spring:
  kafka:
    bootstrap-servers: <ip-address>:9092
    topics: topic1,topic2
    producer:
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      enable-idempotence: true
      acks: all
      transactional-id: kafka-group
    consumer:
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      group-id: kafka-clickhouse-group
      auto-offset-reset: latest
      enable-auto-commit: false
      isolation-level: read_committed
      allow-auto-create-topics: true
    listener:
      ack-mode: MANUAL_IMMEDIATE
      concurrency: 3
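
For the application to pull the section above from Nacos at startup and refresh it at runtime, a bootstrap configuration is also required. A minimal bootstrap.yml sketch; the application name, server address, and data-id convention are assumptions to adapt:

YAML
spring:
  application:
    name: kafka-clickhouse    # data id in Nacos becomes kafka-clickhouse.yaml
  cloud:
    nacos:
      config:
        server-addr: 127.0.0.1:8848
        file-extension: yaml
        refresh-enabled: true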
2.3 Configuration Class
java
package org.aecsi.kafkadatatock.config;

import lombok.RequiredArgsConstructor;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.boot.ApplicationRunner;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.context.config.annotation.RefreshScope;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.transaction.KafkaTransactionManager;

import java.util.HashMap;
import java.util.Map;

@Configuration
@RequiredArgsConstructor
@EnableKafka
@RefreshScope
public class KafkaConfig {
    private final KafkaProperties kafkaProperties;

    @Bean
    public KafkaAdmin kafkaAdmin() {
        return new KafkaAdmin(kafkaProperties.buildAdminProperties());
    }

    @Bean
    public AdminClient adminClient(KafkaAdmin kafkaAdmin) {
        // Raw AdminClient for inspecting or creating topics on the broker at runtime
        return AdminClient.create(kafkaAdmin.getConfigurationProperties());
    }

    @Bean
    public ProducerFactory<String, String> producerFactory() {
        Map<String, Object> configProps = new HashMap<>(kafkaProperties.buildProducerProperties());
        configProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        configProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        configProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        configProps.put(ProducerConfig.ACKS_CONFIG, "all");
        configProps.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "kafka-clickhouse-producer");
        DefaultKafkaProducerFactory<String, String> factory = new DefaultKafkaProducerFactory<>(configProps);
        // Note: the prefix set here takes precedence over the transactional.id property above
        factory.setTransactionIdPrefix("kafka-clickhouse-producer-");
        return factory;
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate(ProducerFactory<String, String> producerFactory) {
        return new KafkaTemplate<>(producerFactory);
    }

    @Bean
    @RefreshScope
    public ConsumerFactory<String, String> consumerFactory() {
        // @RefreshScope lets this factory be rebuilt when the Nacos configuration changes
        Map<String, Object> configProps = new HashMap<>(kafkaProperties.buildConsumerProperties());
        configProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        configProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        configProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        configProps.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, true);
        configProps.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        return new DefaultKafkaConsumerFactory<>(configProps);
    }

    @Bean
    public KafkaTransactionManager<String, String> transactionManager(ProducerFactory<String, String> producerFactory) {
        return new KafkaTransactionManager<>(producerFactory);
    }

    @Bean
    @RefreshScope
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory(
            ConsumerFactory<String, String> consumerFactory,
            KafkaTransactionManager<String, String> transactionManager) {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory);
        // Manual-immediate acks, with each listener invocation wrapped in a Kafka transaction
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        factory.getContainerProperties().setTransactionManager(transactionManager);
        factory.setAutoStartup(true);
        return factory;
    }

    @Bean
    public ApplicationRunner kafkaListenerStarter(KafkaListenerEndpointRegistry registry) {
        return args -> {
            // Start all Kafka listener containers that were registered with autoStartup = "false"
            registry.start();
        };
    }
}
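
Because the producer factory above is transactional (idempotence enabled plus a transaction-id prefix), every KafkaTemplate send has to run inside a Kafka transaction. A minimal usage sketch; the MessageProducer class name and its send method are illustrative:

java
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;

@Service
public class MessageProducer {

    private final KafkaTemplate<String, String> kafkaTemplate;

    public MessageProducer(KafkaTemplate<String, String> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    public void send(String topic, String payload) {
        // The producer factory is transactional, so the send must be wrapped
        // in executeInTransaction (or run inside an existing Kafka transaction).
        kafkaTemplate.executeInTransaction(ops -> {
            ops.send(topic, payload);
            return true;
        });
    }
}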

Message listener class (shown wrapped in a minimal @Component sketch):

java
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;

@Slf4j
@Component
public class KafkaMessageListener {

    // The topic list is read from the Nacos-managed property spring.kafka.topics;
    // autoStartup = "false" defers startup to the ApplicationRunner in the config class.
    @KafkaListener(topics = "#{'${spring.kafka.topics}'.split(',')}", autoStartup = "false")
    @Transactional(transactionManager = "transactionManager")
    public void processMessage(ConsumerRecord<String, String> record,
                               Acknowledgment acknowledgment,
                               @Header(KafkaHeaders.RECEIVED_TOPIC) String topic,
                               @Header(KafkaHeaders.RECEIVED_TIMESTAMP) long timestamp) {
        try {
            log.info("Kafka received a message from topic: {}", topic);
            // process the message here
            acknowledgment.acknowledge();
        } catch (Exception e) {
            log.error("Error processing message for topic {}: {}", topic, e.getMessage());
            throw e;
        }
    }
}

Finally, add one annotation to the main application class:

java
@EnableConfigurationProperties(KafkaProperties.class)
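
For context, a minimal main-class sketch with the annotation in place (the class name is an assumption):

java
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;

@SpringBootApplication
@EnableConfigurationProperties(KafkaProperties.class)
public class KafkaDataApplication {
    public static void main(String[] args) {
        SpringApplication.run(KafkaDataApplication.class, args);
    }
}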
3. Summary

There are other ways to implement dynamic Kafka topic discovery; this is the only one the author has verified so far, and other approaches will be covered in a future update.
