Kafka consumer: listening and consuming messages

1. POM dependency

```xml
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>
```

2. Kafka listener consumption

When a batch is consumed successfully, call ack.acknowledge() to manually commit the offset.

```java
import com.xxx.gsc.sci.order.entity.SciMbgPsdHistoryEntity;
import com.xxx.gsc.sci.order.mapper.SciMbgPsdHistoryMapper;
import lombok.extern.slf4j.Slf4j;
import org.apache.ibatis.session.ExecutorType;
import org.apache.ibatis.session.SqlSession;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

import java.util.List;


/**
 * Synchronizes the SCI system table mbg_psd_partial_line_history.
 * <p>The upstream pushes incremental data. PK: VERSION_NUM, SO, SO_ITEM, PSD, PLANNED_QTY
 *
 * @date 2022/08/05 18:06
 * @see org.springframework.kafka.listener.MessageListener
 */
@Component
@Slf4j
public class SyncSciMbgPsdHistory {

    @Autowired
    private SqlSessionTemplate sqlSessionTemplate;

    @KafkaListener(topics = "#{'${customer.kafka.topics}'.split(',')[1]}")
    public void sync(List<SciMbgPsdHistoryEntity> dataList, Acknowledgment ack) {
        SqlSession session = null;
        try {
            log.info("Starting to consume of PSD data ...");
            long startTime = System.currentTimeMillis();
            session = sqlSessionTemplate.getSqlSessionFactory().openSession(ExecutorType.BATCH, false);
            SciMbgPsdHistoryMapper mapper = session.getMapper(SciMbgPsdHistoryMapper.class);
            dataList.forEach(v -> mapper.upsert(v));
            session.commit();
            ack.acknowledge();
            long duration = System.currentTimeMillis() - startTime;
            log.info("Finished to consume of PSD data! total count: {}条, total time: {} s", dataList.size(), duration / 1000.0);
        } catch (Throwable e) {
            e.printStackTrace();
            log.error(e.getMessage());
        } finally {
            if (null != session) {
                session.close();
            }
        }
    }
}
```
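
For reference, here is a minimal sketch of the mapper the listener relies on, assuming a MySQL-style upsert keyed on the columns listed in the Javadoc. The real SciMbgPsdHistoryMapper and its SQL live in the project; the table columns, entity property names, and the updated column below are illustrative only:

```java
import com.xxx.gsc.sci.order.entity.SciMbgPsdHistoryEntity;
import org.apache.ibatis.annotations.Insert;
import org.apache.ibatis.annotations.Mapper;

@Mapper
public interface SciMbgPsdHistoryMapper {

    /**
     * Hypothetical upsert: insert a row, or refresh it when the unique key
     * (VERSION_NUM, SO, SO_ITEM, PSD, PLANNED_QTY) already exists.
     */
    @Insert("INSERT INTO mbg_psd_partial_line_history " +
            "(version_num, so, so_item, psd, planned_qty) " +
            "VALUES (#{versionNum}, #{so}, #{soItem}, #{psd}, #{plannedQty}) " +
            "ON DUPLICATE KEY UPDATE update_time = NOW()")
    int upsert(SciMbgPsdHistoryEntity entity);
}
```

Because the listener opens the session with ExecutorType.BATCH, these statements are buffered and flushed together on session.commit(), which is what makes consuming up to max-poll-records messages per batch practical.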

3. Configuration

```java
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.PropertyNamingStrategy;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.xxx.gsc.tech.framework.GscTechAutoConfiguration;
import com.xxx.gsc.tech.framework.jackson.deserializer.DateDeserializer;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FileUtils;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.IsolationLevel;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.boot.SpringBootConfiguration;
import org.springframework.boot.autoconfigure.AutoConfigureAfter;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.kafka.ConcurrentKafkaListenerContainerFactoryConfigurer;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.system.ApplicationHome;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Primary;
import org.springframework.core.io.FileSystemResource;
import org.springframework.core.io.Resource;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.kafka.support.converter.RecordMessageConverter;
import org.springframework.kafka.support.converter.StringJsonMessageConverter;
import org.springframework.kafka.support.serializer.JsonSerializer;
import org.springframework.kafka.transaction.ChainedKafkaTransactionManager;
import org.springframework.kafka.transaction.KafkaTransactionManager;

import java.io.File;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.*;

/**
 * Kafka auto-configuration class.
 * <p>Initializes the Kafka configuration factories.
 *
 * @author zhengwei16
 * @date 2022/8/12 15:29
 * @version 1.0
 */
@SpringBootConfiguration
@AutoConfigureAfter(GscTechAutoConfiguration.class)
@ConditionalOnClass(KafkaTemplate.class)
@Slf4j
public class KafkaAutoConfiguration {

    private KafkaProperties properties;

    public KafkaAutoConfiguration(KafkaProperties properties) throws IOException {
        Resource trustStoreLocation = properties.getSsl().getTrustStoreLocation();
        log.info("SSL file path:" + (Objects.isNull(trustStoreLocation) ? "" : trustStoreLocation.getURI().toString()));
        if (trustStoreLocation != null && !trustStoreLocation.isFile()) {
            ApplicationHome applicationHome = new ApplicationHome(getClass());
            log.info("Application Home:" + applicationHome.getDir().getPath());
            File sslFile = new File(applicationHome.getSource().getParentFile(), Objects.requireNonNull(trustStoreLocation.getFilename()));
            FileUtils.copyInputStreamToFile(Objects.requireNonNull(trustStoreLocation.getInputStream(), "SSL File Not Exist"), sslFile);
            properties.getSsl().setTrustStoreLocation(new FileSystemResource(sslFile));
        }

        this.properties = properties;
    }

    @Bean
    @Primary
    public KafkaTemplate<?, ?> kafkaTemplate(ProducerFactory<Object, Object> kafkaProducerFactory,
                                             ProducerListener<Object, Object> kafkaProducerListener,
                                             RecordMessageConverter messageConverter) {
        KafkaTemplate<Object, Object> kafkaTemplate = new KafkaTemplate<>(kafkaProducerFactory);
        if (messageConverter != null) {
            kafkaTemplate.setMessageConverter(messageConverter);
        }
        kafkaTemplate.setProducerListener(kafkaProducerListener);
        kafkaTemplate.setDefaultTopic(this.properties.getTemplate().getDefaultTopic());
        return kafkaTemplate;
    }

    @Bean
    @Primary
    public ConsumerFactory<?, ?> kafkaConsumerFactory() {
        Map<String, Object> configs = this.properties.buildConsumerProperties();
        configs.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, IsolationLevel.READ_COMMITTED.toString().toLowerCase(Locale.ROOT));
        return new DefaultKafkaConsumerFactory<>(configs);
    }

    @Bean
    @Primary
    public ProducerFactory<?, ?> kafkaProducerFactory() {
        Serializer<String> stringSerializer = new StringSerializer();
        ObjectMapper om = new ObjectMapper();
        om.setSerializationInclusion(JsonInclude.Include.NON_NULL);
        om.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        om.setDateFormat(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"));
        Serializer<Object> jsonSerializer = new JsonSerializer<>(om);
        DefaultKafkaProducerFactory<?, ?> factory = new DefaultKafkaProducerFactory<>(this.properties.buildProducerProperties(), stringSerializer, jsonSerializer);
        String transactionIdPrefix = this.properties.getProducer().getTransactionIdPrefix();
        if (transactionIdPrefix != null) {
            factory.setTransactionIdPrefix(transactionIdPrefix + "_" + UUID.randomUUID());
        }
        return factory;
    }

    @Bean
    @Primary
    public ConcurrentKafkaListenerContainerFactory<?, ?> kafkaListenerContainerFactory(
            ConcurrentKafkaListenerContainerFactoryConfigurer configurer,
            ConsumerFactory<Object, Object> kafkaConsumerFactory) {
        ConcurrentKafkaListenerContainerFactory<Object, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
        configurer.configure(factory, kafkaConsumerFactory);
        return factory;
    }

    @Bean
    @Primary
    public RecordMessageConverter kafkaMessageConverter() {
        ObjectMapper om = new ObjectMapper();
        return new StringJsonMessageConverter(om
                .setSerializationInclusion(JsonInclude.Include.NON_NULL)
                .setPropertyNamingStrategy(PropertyNamingStrategy.SNAKE_CASE)
                .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
                .registerModule(new SimpleModule().addDeserializer(Date.class, new DateDeserializer()))
                .setDateFormat(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")));
    }

    @Bean
    @ConditionalOnProperty(name = "spring.kafka.producer.transaction-id-prefix")
    public KafkaTransactionManager<?, ?> kafkaTransactionManager(ProducerFactory<?, ?> producerFactory) {
        KafkaTransactionManager<?, ?> kafkaTransactionManager = new KafkaTransactionManager<>(producerFactory);
        // kafkaTransactionManager.setTransactionSynchronization(KafkaTransactionManager.SYNCHRONIZATION_ON_ACTUAL_TRANSACTION);
        kafkaTransactionManager.setNestedTransactionAllowed(true);
        return kafkaTransactionManager;
    }

    @Bean
    @ConditionalOnBean(KafkaTransactionManager.class)
    public ChainedKafkaTransactionManager<?, ?> chainedKafkaTransactionManager(DataSourceTransactionManager dataSourceTransactionManager, KafkaTransactionManager<?, ?> kafkaTransactionManager) {
        return new ChainedKafkaTransactionManager<>(kafkaTransactionManager, dataSourceTransactionManager);
    }

}
```
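
As an illustration of how these beans fit together on the producer side, here is a minimal sketch of a service that sends through the kafkaTemplate bean and joins the chained Kafka + datasource transaction. The service class, topic name, and payload type are hypothetical, and the transactional beans only exist when spring.kafka.producer.transaction-id-prefix is configured:

```java
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

@Service
public class PsdPublishService {

    private final KafkaTemplate<Object, Object> kafkaTemplate;

    public PsdPublishService(KafkaTemplate<Object, Object> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    /**
     * Database work done inside this method and the Kafka send are committed
     * (or rolled back) together by the chained transaction manager declared above.
     * "psd-history-out" is a placeholder topic name.
     */
    @Transactional("chainedKafkaTransactionManager")
    public void publish(Object payload) {
        kafkaTemplate.send("psd-history-out", payload);
    }
}
```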

4. application.yml configuration

```yaml
spring:
  kafka:
# Test environment
    bootstrap-servers: n1-mkt-sy.xxx.com:9092,n2-mkt-sy.xxx.com:9092,n3-mkt-sy.xxx.com:9092
    consumer:
      group-id: mbgcpfr
      auto-offset-reset: earliest
      enable-auto-commit: false
      auto-commit-interval: 1000
      max-poll-records: 5000
      security:
        protocol: SASL_SSL
      properties:
        max.partition.fetch.bytes: 104857600
        fetch.min.bytes: 2108576
        fetch.max.wait.ms: 10000
        session.timeout.ms: 300000  # default 10000
        request.timeout.ms: 600000 # default 30000
        max.poll.interval.ms: 600000 # default 300000
        sasl:
          mechanism: SCRAM-SHA-512
          jaas:
            config: org.apache.kafka.common.security.scram.ScramLoginModule required username='kaf-username' password='passwd';
    ssl:
      trust-store-location: classpath:client_truststore.jks
      trust-store-password: PASSWD
    listener:
      #concurrency: 2 # Number of consumer threads in the container, to increase throughput
      #      ack-count: # Number of records between offset commits when ackMode is "COUNT" or "COUNT_TIME".
      ack-mode: manual_immediate # Listener AckMode. See the spring-kafka documentation.
      #      ack-time: # Time between offset commits when ackMode is "TIME" or "COUNT_TIME".
      #poll-timeout: # Timeout to use when polling the consumer.
      type: batch # Listener type.
      missing-topics-fatal: false
```
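
The @KafkaListener in step 2 resolves its topic from a custom property via SpEL ("#{'${customer.kafka.topics}'.split(',')[1]}" picks the second entry). A hypothetical shape for that property, with placeholder topic names:

```yaml
customer:
  kafka:
    topics: topic-first,topic-psd-history,topic-third
```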