Spring Boot集成Kafka:最佳实践与详细指南

文章目录

一、生产者

1.引入库

引入需要依赖的jar包,引入POM文件:

xml 复制代码
 <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
</dependency>

2.配置文件

配置Kafka的相关参数(或者在你项目的nacos或者yaml文件里添加)

以下是一个示例配置:application.properties

properties 复制代码
ccm.kafka.servers:192.168.1.95:9092,192.168.1.96:9092,192.168.1.97:9092
ccm.kafka.topics.xxx:xxx_content_dev

Tip:建议topic命名规则:租户简称+项目关键词+系统环境的方式,更容易区分

3.配置类

PublicConfig.java

java 复制代码
/**
 * Producer-side Kafka settings bound from the "ccm.kafka" configuration
 * prefix (typically supplied via Nacos or application.properties).
 */
@Data
@Configuration
@ConfigurationProperties(prefix = "ccm.kafka")
// Values are sourced from externalized configuration (e.g. Nacos).
public class PublicConfig {

    // Comma-separated Kafka bootstrap servers, e.g. "host1:9092,host2:9092".
    private String servers;

    // Topic that alert messages are published to.
    private String alertTopic;

}

MessageProducer.java

java 复制代码
@Slf4j
@Component
public class MessageProducer {

    private Producer producerKafka;

    @Autowired
    PublicConfig publicConfig;

    /**
     * 初始化方法
     */
    @PostConstruct
    public String init() {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, publicConfig.getServers());
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "PLAINTEXT");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, String.valueOf(30 * 1000));
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        producerKafka = new KafkaProducer(props);
        log.info("kafka message channel created successfully");
        return "OK";
    }

    public ResponseData send(String content, String topic) {

        long startTime = System.currentTimeMillis();
        try {
            String key = UUID.randomUUID().toString().replace("-", "");
            ProducerRecord<String, String> kafkaMessage = new ProducerRecord<>(topic, key, content);
            log.info("MessageProducer send key {},message{}", key, content);
            Future<RecordMetadata> send = producerKafka.send(kafkaMessage);
            send.get();
            log.info("MessageProducer send cost time:{}", System.currentTimeMillis() - startTime);
        } catch (Exception e) {
            log.error("MessageProducer Failed to push message:{}", e.getMessage());
            return ResponseData.errorWithMsg("MessageProducer Failed to push message:" + e.getMessage());
        }
        return null;
    }

}

4.业务处理类

示例代码的业务场景:定时生成预警消息发送给下游系统调用。

java 复制代码
// Application entry point. @EnableScheduling is required so that the
// @Scheduled jobs (see CrawlerService) are actually picked up.
@SpringBootApplication
@MapperScan(basePackages = {"com.xx.xx.mapper","com.xx.xx.crawler.mapper"})
@EnableScheduling
public class CATApp {
    public static void main(String[] args) {
        SpringApplication.run(CATApp.class, args);
    }
}

@Service
@Slf4j
public class CrawlerService {
     @Scheduled(cron = "${crawler.scheduled.cron:0 */1 * * * ?}") // 每5分钟执行一次
     //   @Scheduled(cron = "${crawler.scheduled.cron:0 0 0/1 * * ?}") // 每小时执行一次
    public void crawlAndSaveAlertInfos() {
        log.info(">>>>>>>>>>>>> crawlAndSaveAlertInfos  ");
        //替换成具体的业务场景 
        List<AlertInfo> alertInfos = fetchAlertInfoList();
        if (!alertInfos.isEmpty()) {
            for (AlertInfo alertInfo : alertInfos) {
                //发送预警信息到kafka供下游调用
                crawlerAlertSyncService.sendCrawlerAlertMsgKafka(alertInfo);
            }
        }
    }
/**
 * Propagates alert messages to other applications via Kafka
 * (fire-and-forget publish, asynchronous from the caller's perspective).
 */
public interface CrawlerAlertSyncService {

     /**
      * Serializes the alert and publishes it to the configured alert topic.
      *
      * @param alertInfo alert record to publish
      */
     void sendCrawlerAlertMsgKafka(AlertInfo alertInfo) ;

}

@Slf4j
@Service
public class CrawlerAlertSyncServiceImpl implements CrawlerAlertSyncService {

    // Injection style unified to @Resource (the original mixed @Autowired and @Resource).
    @Resource
    private MessageProducer messageProducer;

    @Resource
    private PublicConfig publicConfig;

    /**
     * Serializes the alert to JSON and publishes it to the configured topic.
     * When no topic is configured the alert is dropped with a warning
     * (previously it was dropped silently).
     */
    @Override
    public void sendCrawlerAlertMsgKafka(AlertInfo alertInfo) {
        String topic = publicConfig.getAlertTopic();
        // One consolidated log line instead of three separate ones per message.
        log.info("send publish msg to kafka, topic:{}, servers:{}, bizId:{}",
                topic, publicConfig.getServers(), alertInfo.getAlertid());
        String content = JSON.toJSONString(alertInfo);
        log.info("send publish msg to kafka, content:{}", content);
        if (StringUtils.isNotBlank(topic)) {
            messageProducer.send(content, topic);
        } else {
            log.warn("alertTopic not configured, dropping alert bizId:{}", alertInfo.getAlertid());
        }
    }
}

二、消费者

1.引入库

在消费者工程pom文件中配置依赖

xml 复制代码
 <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
</dependency>

2.配置类

同样根据该项目情况编写配置类,示例代码中仍为读取nacos配置

PublicConfig.java

java 复制代码
/**
 * Consumer-side Kafka settings bound from the "xman.kafka" prefix.
 */
@Data
@Configuration
@Slf4j
@ConfigurationProperties(prefix = "xman.kafka")
public class PublicConfig {

    // Comma-separated bootstrap servers for the consumer.
    private String servers;

    // appCode -> topic mapping.
    private Map<String,String> topics;

    // Topic carrying alert messages.
    private String alertTopic;

    // Consumer group id.
    private String group;

    /**
     * Looks up the topic configured for the given application code.
     *
     * @param appCode key into the topics map
     * @return the mapped topic, or null when no mapping exists
     */
    public String getTopic(String appCode) {
        Map<String, String> mapping = this.topics;
        if (mapping == null || mapping.isEmpty()) {
            return null;
        }
        return mapping.get(appCode);
    }
}

MessageConsumer.java

java 复制代码
@Slf4j
@Component
public abstract class MessageConsumer {

    // Single-threaded pool that runs the blocking poll loop.
    // (The unused second pool from the original was removed.)
    private ExecutorService threadPool;

    @Resource
    private PublicConfig publicConfig;

    /**
     * Builds the consumer from configuration and starts the poll loop.
     * Startup is skipped entirely when no Kafka servers are configured,
     * so the application can run without Kafka.
     */
    @PostConstruct
    public String init() {
        MessageConfigField messageConfig = MessageConfigField.builder()
                .servers(publicConfig.getServers())
                .topic(publicConfig.getAlertTopic())
                .group(publicConfig.getGroup())
                .build();

        if (StringUtils.isBlank(messageConfig.getServers())) {
            // Kafka is optional: without servers configured this bean is inert.
            return "OK";
        }
        initThreadPool();

        KafkaConsumer<String, String> instance = kafkaInstance(messageConfig.getServers(),
                messageConfig.getGroup(), messageConfig.getTopic(), messageConfig.getClientName(),
                messageConfig.getUsername(), messageConfig.getPassword());
        startListen(instance);
        log.info("ccm kafka消息订阅成功:clientId:" + messageConfig.getClientName());
        return "OK";

    }

    // Lazily creates the single-threaded listener pool.
    private void initThreadPool() {
        if (null == threadPool) {
            log.info("initThreadPool start");
            threadPool = Executors.newFixedThreadPool(1);
            log.info("initThreadPool done");
        }
    }

    /**
     * Runs an endless poll/dispatch loop on the listener thread. The loop
     * exits when the thread is interrupted, and the consumer is closed on
     * the polling thread itself (KafkaConsumer is not thread-safe).
     */
    private void startListen(KafkaConsumer<String, String> consumer) {
        threadPool.submit(() -> {
            TenantContext.setContextCode(CommonConstants.TENANT_CODE);
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    try {
                        ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(10));
                        if (records == null || records.isEmpty()) {
                            continue;
                        }
                        for (ConsumerRecord<String, String> record : records) {
                            String msg = record.value();
                            // isNotBlank is null-safe, so no Optional wrapping is needed.
                            if (StringUtils.isNotBlank(msg)) {
                                log.info("msgJson:" + msg);
                                consumeMsg(msg);
                            }
                        }
                    } catch (Exception e) {
                        log.error("consume error", e);
                        try {
                            // Brief back-off so a persistent failure does not spin hot.
                            TimeUnit.SECONDS.sleep(1);
                        } catch (InterruptedException ie) {
                            // Restore the interrupt flag so the while condition exits.
                            Thread.currentThread().interrupt();
                        }
                    }
                }
            } finally {
                consumer.close();
            }
        });
    }

    /**
     * Creates a subscribed string-deserializing consumer.
     * NOTE(review): username/password are accepted but not applied; add SASL
     * settings here if the cluster requires authentication.
     */
    public static KafkaConsumer<String, String> kafkaInstance(String servers, String group,
                                                              String topic, String clientId, String username, String password) {
        Properties props = new Properties();
        // Was ProducerConfig.BOOTSTRAP_SERVERS_CONFIG — same key string, but the
        // consumer-side constant is the correct one to reference here.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        if (StringUtils.isNotBlank(group)) {
            props.put(ConsumerConfig.GROUP_ID_CONFIG, group);
        }
        if (StringUtils.isNotBlank(clientId)) {
            // The clientId parameter was silently ignored before; it aids broker-side tracing.
            props.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId);
        }
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singletonList(topic));
        return consumer;
    }

    /**
     * 核心逻辑,由子类继承实现 (core logic, implemented by subclasses).
     *
     * @param msgData raw message value from Kafka
     * @throws Exception any processing failure; logged and retried-after-backoff by the loop
     */
    public abstract void consumeMsg(String msgData) throws Exception;

}

3.业务类

java 复制代码
@Slf4j
@Service
@RefreshScope
public class CmsInfoConsumer extends MessageConsumer {
    
    @Resource
    private InfoService infoService;

    @Override
    public void consumeMsg(String msgData) throws Exception {
        log.info("CmsWeatherConsumer收到mq消息message:{}", msgData);
        CcmAlertInfoDTO alertInfoDTO = JSONObject.parseObject(msgData, CcmAlertInfoDTO.class);
        try {
            //to_do 处理消费内容
            infoService.saveInfoContent(alertInfoDTO);
        } catch (Exception e) {
            e.printStackTrace();
            log.info("同步用户消息失败:" + e);
        }
    }
}

至此,一个简单的通过kafka同步预警消息的应用就开发完了。

相关推荐
utmhikari20 分钟前
【架构艺术】Go语言微服务monorepo的代码架构设计
后端·微服务·架构·golang·monorepo
蜡笔小新星23 分钟前
Flask项目框架
开发语言·前端·经验分享·后端·python·学习·flask
计算机学姐27 分钟前
基于Asp.net的驾校管理系统
vue.js·后端·mysql·sqlserver·c#·asp.net·.netcore
欢乐少年19042 小时前
SpringBoot集成Sentry日志收集-3 (Spring Boot集成)
spring boot·后端·sentry
夏天的味道٥3 小时前
使用 Java 执行 SQL 语句和存储过程
java·开发语言·sql
冰糖码奇朵5 小时前
大数据表高效导入导出解决方案,mysql数据库LOAD DATA命令和INTO OUTFILE命令详解
java·数据库·sql·mysql
好教员好5 小时前
【Spring】整合【SpringMVC】
java·spring
程序员的世界你不懂6 小时前
Kafka 推送消息,移动端自动化测试,数据驱动测试
分布式·kafka·linq
浪九天6 小时前
Java直通车系列13【Spring MVC】(Spring MVC常用注解)
java·后端·spring
堕落年代7 小时前
Maven匹配机制和仓库库设置
java·maven