Kafka consumer listening and consumption

1. POM dependency

```xml
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
```

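No version is given above; assuming the project inherits from `spring-boot-starter-parent` (or imports the Spring Boot dependency BOM), the matching spring-kafka version is managed automatically.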
2. Kafka listener consumption

When a batch is consumed successfully, call ack.acknowledge() to commit the offset. If processing fails, the offset is not committed and the batch is redelivered on a later poll.

```java
import com.xxx.gsc.sci.order.entity.SciMbgPsdHistoryEntity;
import com.xxx.gsc.sci.order.mapper.SciMbgPsdHistoryMapper;
import lombok.extern.slf4j.Slf4j;
import org.apache.ibatis.session.ExecutorType;
import org.apache.ibatis.session.SqlSession;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

import java.util.List;

/**
 * Synchronizes the SCI system table mbg_psd_partial_line_history.<p/>
 * The upstream pushes incremental data. PK: VERSION_NUM, SO, SO_ITEM, PSD, PLANNED_QTY
 *
 * @date 2022/08/05 18:06
 * @see org.springframework.kafka.listener.MessageListener
 */
@Component
@Slf4j
public class SyncSciMbgPsdHistory {

    @Autowired
    private SqlSessionTemplate sqlSessionTemplate;

    // The topic is resolved via SpEL: the second entry of the comma-separated
    // property customer.kafka.topics. Receiving a List requires a batch
    // listener (spring.kafka.listener.type: batch, see step 4).
    @KafkaListener(topics = "#{'${customer.kafka.topics}'.split(',')[1]}")
    public void sync(List<SciMbgPsdHistoryEntity> dataList, Acknowledgment ack) {
        SqlSession session = null;
        try {
            log.info("Starting to consume PSD data ...");
            long startTime = System.currentTimeMillis();
            // Open a batch-mode session with auto-commit off, so the whole
            // batch is flushed and committed as one unit.
            session = sqlSessionTemplate.getSqlSessionFactory().openSession(ExecutorType.BATCH, false);
            SciMbgPsdHistoryMapper mapper = session.getMapper(SciMbgPsdHistoryMapper.class);
            dataList.forEach(mapper::upsert);
            session.commit();
            // Commit the Kafka offset only after the DB commit succeeded.
            ack.acknowledge();
            long duration = System.currentTimeMillis() - startTime;
            log.info("Finished consuming PSD data! total count: {}, total time: {} s", dataList.size(), duration / 1000.0);
        } catch (Throwable e) {
            // No acknowledge here: the offset stays uncommitted, so the batch
            // will be redelivered on a later poll.
            log.error("Failed to consume PSD data", e);
        } finally {
            if (null != session) {
                session.close();
            }
        }
    }
}
```
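
For reference, a minimal sketch of the entity and mapper the listener uses. These are assumptions: the real classes live in com.xxx.gsc.sci.order, and only the PK columns named in the Javadoc are shown here.

```java
import java.math.BigDecimal;
import java.util.Date;

import lombok.Data;
import org.apache.ibatis.annotations.Mapper;

// Assumed field list, derived from the PK columns in the listener's Javadoc.
@Data
public class SciMbgPsdHistoryEntity {
    private String versionNum;
    private String so;
    private String soItem;
    private Date psd;
    private BigDecimal plannedQty;
}

// Normally in its own file (package-private here to keep the sketch compilable
// as one file). upsert is typically a MERGE / INSERT ... ON DUPLICATE KEY UPDATE
// statement in the MyBatis XML, keyed on the columns above.
@Mapper
interface SciMbgPsdHistoryMapper {
    int upsert(SciMbgPsdHistoryEntity entity);
}
```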

3. Configuration

```java
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.PropertyNamingStrategy;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.xxx.gsc.tech.framework.GscTechAutoConfiguration;
import com.xxx.gsc.tech.framework.jackson.deserializer.DateDeserializer;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FileUtils;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.IsolationLevel;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.boot.SpringBootConfiguration;
import org.springframework.boot.autoconfigure.AutoConfigureAfter;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.kafka.ConcurrentKafkaListenerContainerFactoryConfigurer;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.system.ApplicationHome;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Primary;
import org.springframework.core.io.FileSystemResource;
import org.springframework.core.io.Resource;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.kafka.support.converter.RecordMessageConverter;
import org.springframework.kafka.support.converter.StringJsonMessageConverter;
import org.springframework.kafka.support.serializer.JsonSerializer;
import org.springframework.kafka.transaction.ChainedKafkaTransactionManager;
import org.springframework.kafka.transaction.KafkaTransactionManager;

import java.io.File;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.*;

/**
 * Kafka auto-configuration class.<p/>
 * Initializes the Kafka factory configuration.
 *
 * @author zhengwei16
 * @date 2022/8/12 15:29
 * @version 1.0
 */
@SpringBootConfiguration
@AutoConfigureAfter(GscTechAutoConfiguration.class)
@ConditionalOnClass(KafkaTemplate.class)
@Slf4j
public class KafkaAutoConfiguration {
    private final KafkaProperties properties;

    public KafkaAutoConfiguration(KafkaProperties properties) throws IOException {
        Resource trustStoreLocation = properties.getSsl().getTrustStoreLocation();
        log.info("SSL file path:" + (Objects.isNull(trustStoreLocation) ? "" : trustStoreLocation.getURI().toString()));
        if (trustStoreLocation != null && !trustStoreLocation.isFile()) {
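            // Inside a packaged jar the classpath truststore is not a plain file,
            // but the Kafka client needs a real file path, so copy it next to the jar.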
            ApplicationHome applicationHome = new ApplicationHome(getClass());
            log.info("Application Home:" + applicationHome.getDir().getPath());
            File sslFile = new File(applicationHome.getSource().getParentFile(), Objects.requireNonNull(trustStoreLocation.getFilename()));
            FileUtils.copyInputStreamToFile(Objects.requireNonNull(trustStoreLocation.getInputStream(), "SSL File Not Exist"), sslFile);
            properties.getSsl().setTrustStoreLocation(new FileSystemResource(sslFile));
        }

        this.properties = properties;
    }

    @Bean
    @Primary
    public KafkaTemplate<?, ?> kafkaTemplate(ProducerFactory<Object, Object> kafkaProducerFactory,
                                             ProducerListener<Object, Object> kafkaProducerListener,
                                             RecordMessageConverter messageConverter) {
        KafkaTemplate<Object, Object> kafkaTemplate = new KafkaTemplate<>(kafkaProducerFactory);
        if (messageConverter != null) {
            kafkaTemplate.setMessageConverter(messageConverter);
        }
        kafkaTemplate.setProducerListener(kafkaProducerListener);
        kafkaTemplate.setDefaultTopic(this.properties.getTemplate().getDefaultTopic());
        return kafkaTemplate;
    }

    @Bean
    @Primary
    public ConsumerFactory<?, ?> kafkaConsumerFactory() {
        Map<String, Object> configs = this.properties.buildConsumerProperties();
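        // read_committed: only records from committed Kafka transactions are visible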
        configs.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, IsolationLevel.READ_COMMITTED.toString().toLowerCase(Locale.ROOT));
        return new DefaultKafkaConsumerFactory<>(configs);
    }

    @Bean
    @Primary
    public ProducerFactory<?, ?> kafkaProducerFactory() {
        // Keys are serialized as plain strings; values as JSON with nulls
        // omitted and a fixed date format.
        ObjectMapper om = new ObjectMapper();
        om.setSerializationInclusion(JsonInclude.Include.NON_NULL);
        om.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        om.setDateFormat(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"));
        Serializer<String> stringSerializer = new StringSerializer();
        Serializer<Object> jsonSerializer = new JsonSerializer<>(om);
        DefaultKafkaProducerFactory<String, Object> factory = new DefaultKafkaProducerFactory<>(this.properties.buildProducerProperties(), stringSerializer, jsonSerializer);
        String transactionIdPrefix = this.properties.getProducer().getTransactionIdPrefix();
        if (transactionIdPrefix != null) {
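            // Append a random UUID so each application instance uses a unique
            // transactional.id and instances do not fence each other.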
            factory.setTransactionIdPrefix(transactionIdPrefix + "_" + UUID.randomUUID());
        }
        return factory;
    }

    @Bean
    @Primary
    public ConcurrentKafkaListenerContainerFactory<?, ?> kafkaListenerContainerFactory(
            ConcurrentKafkaListenerContainerFactoryConfigurer configurer,
            ConsumerFactory<Object, Object> kafkaConsumerFactory) {
        ConcurrentKafkaListenerContainerFactory<Object, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
        configurer.configure(factory, kafkaConsumerFactory);
        return factory;
    }

    @Bean
    @Primary
    public RecordMessageConverter kafkaMessageConverter() {
        // A dedicated ObjectMapper (snake_case, NON_NULL, custom date handling)
        // so the application's global mapper is left untouched.
        ObjectMapper om = new ObjectMapper();
        return new StringJsonMessageConverter(om
                .setSerializationInclusion(JsonInclude.Include.NON_NULL)
                .setPropertyNamingStrategy(PropertyNamingStrategy.SNAKE_CASE)
                .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
                .registerModule(new SimpleModule().addDeserializer(Date.class, new DateDeserializer()))
                .setDateFormat(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")));
    }

    @Bean
    @ConditionalOnProperty(name = "spring.kafka.producer.transaction-id-prefix")
    public KafkaTransactionManager<?, ?> kafkaTransactionManager(ProducerFactory<?, ?> producerFactory) {
        KafkaTransactionManager<?, ?> kafkaTransactionManager = new KafkaTransactionManager<>(producerFactory);
        // kafkaTransactionManager.setTransactionSynchronization(KafkaTransactionManager.SYNCHRONIZATION_ON_ACTUAL_TRANSACTION);
        kafkaTransactionManager.setNestedTransactionAllowed(true);
        return kafkaTransactionManager;
    }

    @Bean
    @ConditionalOnBean(KafkaTransactionManager.class)
    public ChainedKafkaTransactionManager<?, ?> chainedKafkaTransactionManager(DataSourceTransactionManager dataSourceTransactionManager, KafkaTransactionManager<?, ?> kafkaTransactionManager) {
        return new ChainedKafkaTransactionManager<>(kafkaTransactionManager, dataSourceTransactionManager);
    }

}
```

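With the ChainedKafkaTransactionManager above, a database transaction and a Kafka producer transaction commit or roll back together (best-effort one-phase commit, not XA). A minimal usage sketch; OrderPublishService, saveAndPublish and order-topic are made-up names, and it assumes spring.kafka.producer.transaction-id-prefix is set so the Kafka transaction manager bean exists:

```java
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

@Service
public class OrderPublishService {

    @Autowired
    private KafkaTemplate<Object, Object> kafkaTemplate;

    // Runs under the chained transaction manager: the Kafka send is only
    // committed if the DB work commits, and vice versa.
    @Transactional(transactionManager = "chainedKafkaTransactionManager")
    public void saveAndPublish(Object order) {
        // ... persist the order via MyBatis here ...
        kafkaTemplate.send("order-topic", order);
    }
}
```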
4. Application configuration

```yaml
spring:
  kafka:
    # Test environment brokers
    bootstrap-servers: n1-mkt-sy.xxx.com:9092,n2-mkt-sy.xxx.com:9092,n3-mkt-sy.xxx.com:9092
    consumer:
      group-id: mbgcpfr
      auto-offset-reset: earliest
      enable-auto-commit: false # offsets are committed manually via ack.acknowledge()
      auto-commit-interval: 1000 # ignored while enable-auto-commit is false
      max-poll-records: 5000
      security:
        protocol: SASL_SSL
      properties:
        max.partition.fetch.bytes: 104857600
        fetch.min.bytes: 2108576
        fetch.max.wait.ms: 10000
        session.timeout.ms: 300000 # default 10000
        request.timeout.ms: 600000 # default 30000
        max.poll.interval.ms: 600000 # default 300000
        sasl:
          mechanism: SCRAM-SHA-512
          jaas:
            config: org.apache.kafka.common.security.scram.ScramLoginModule required username='kaf-username' password='passwd';
    ssl:
      trust-store-location: classpath:client_truststore.jks
      trust-store-password: PASSWD
    listener:
      #concurrency: 2 # number of listener threads in the container, to increase throughput
      #ack-count: # number of records between offset commits when ackMode is "COUNT" or "COUNT_TIME"
      ack-mode: manual_immediate # commit the offset immediately when ack.acknowledge() is called
      #ack-time: # time between offset commits when ackMode is "TIME" or "COUNT_TIME"
      #poll-timeout: # timeout to use when polling the consumer
      type: batch # deliver records to the listener in batches (List<...>)
      missing-topics-fatal: false
```
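
Note that the listener in step 2 reads its topic from a `customer.kafka.topics` property that is not shown above. It must be defined as a comma-separated list, from which the SpEL expression picks the second entry, e.g. `customer.kafka.topics: first-topic,psd-history-topic` (hypothetical values).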