Kafka consumer: listening and consuming messages

1. POM dependency

```xml
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>
```
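
No `<version>` is given, so the project presumably inherits one from the Spring Boot BOM. A sketch of that assumption (the parent version shown is illustrative for the Spring Kafka 2.x era this post targets):

```xml
<!-- Assumed parent POM; the version is illustrative, not from the original post. -->
<parent>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-parent</artifactId>
    <version>2.7.18</version>
</parent>
```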

2. Kafka listener consumption

On successful consumption, call ack.acknowledge() to manually commit the offset (this requires the manual ack-mode configured in section 4).

```java
import com.xxx.gsc.sci.order.entity.SciMbgPsdHistoryEntity;
import com.xxx.gsc.sci.order.mapper.SciMbgPsdHistoryMapper;
import lombok.extern.slf4j.Slf4j;
import org.apache.ibatis.session.ExecutorType;
import org.apache.ibatis.session.SqlSession;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

import java.util.List;


/**
 * Synchronizes the SCI system's mbg_psd_partial_line_history table.
 * <p>
 * The upstream system pushes incremental data. PK: VERSION_NUM, SO, SO_ITEM, PSD, PLANNED_QTY
 *
 * @date 2022/08/05 18:06
 * @see org.springframework.kafka.listener.MessageListener
 */
@Component
@Slf4j
public class SyncSciMbgPsdHistory {

    @Autowired
    private SqlSessionTemplate sqlSessionTemplate;

    // SpEL: split the comma-separated ${customer.kafka.topics} property and listen on the second topic
    @KafkaListener(topics = "#{'${customer.kafka.topics}'.split(',')[1]}")
    public void sync(List<SciMbgPsdHistoryEntity> dataList, Acknowledgment ack) {
        SqlSession session = null;
        try {
            log.info("Starting to consume PSD data ...");
            long startTime = System.currentTimeMillis();
            // Open a batch-mode session with auto-commit off so all upserts flush as one JDBC batch
            session = sqlSessionTemplate.getSqlSessionFactory().openSession(ExecutorType.BATCH, false);
            SciMbgPsdHistoryMapper mapper = session.getMapper(SciMbgPsdHistoryMapper.class);
            dataList.forEach(mapper::upsert);
            session.commit();
            // Commit the Kafka offset only after the database commit succeeds; on failure the
            // offset is not acknowledged, so the batch will be redelivered
            ack.acknowledge();
            long duration = System.currentTimeMillis() - startTime;
            log.info("Finished consuming PSD data! total count: {}, total time: {} s", dataList.size(), duration / 1000.0);
        } catch (Throwable e) {
            log.error("Failed to consume PSD data", e);
        } finally {
            if (null != session) {
                session.close();
            }
        }
    }
}
```
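
The upsert statement referenced above is not shown in the post. A minimal sketch of what the mapper could look like, assuming annotation-based MyBatis and a MySQL-style upsert (the real project may map it in XML; column names are guessed from the PK listed in the class javadoc):

```java
import com.xxx.gsc.sci.order.entity.SciMbgPsdHistoryEntity;
import org.apache.ibatis.annotations.Insert;
import org.apache.ibatis.annotations.Mapper;

@Mapper
public interface SciMbgPsdHistoryMapper {

    // Hypothetical SQL: insert the row, and turn a duplicate-key conflict into a
    // no-op update so redelivered Kafka records stay idempotent.
    @Insert("INSERT INTO mbg_psd_partial_line_history " +
            "(version_num, so, so_item, psd, planned_qty) " +
            "VALUES (#{versionNum}, #{so}, #{soItem}, #{psd}, #{plannedQty}) " +
            "ON DUPLICATE KEY UPDATE planned_qty = VALUES(planned_qty)")
    int upsert(SciMbgPsdHistoryEntity entity);
}
```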

3. Configuration

```java
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.PropertyNamingStrategy;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.xxx.gsc.tech.framework.GscTechAutoConfiguration;
import com.xxx.gsc.tech.framework.jackson.deserializer.DateDeserializer;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FileUtils;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.IsolationLevel;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.boot.SpringBootConfiguration;
import org.springframework.boot.autoconfigure.AutoConfigureAfter;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.kafka.ConcurrentKafkaListenerContainerFactoryConfigurer;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.system.ApplicationHome;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Primary;
import org.springframework.core.io.FileSystemResource;
import org.springframework.core.io.Resource;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.kafka.support.converter.RecordMessageConverter;
import org.springframework.kafka.support.converter.StringJsonMessageConverter;
import org.springframework.kafka.support.serializer.JsonSerializer;
import org.springframework.kafka.transaction.ChainedKafkaTransactionManager;
import org.springframework.kafka.transaction.KafkaTransactionManager;

import java.io.File;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.*;

/**
 * Kafka auto-configuration class.
 * <p>
 * Initializes the Kafka producer/consumer factories, message converter, and transaction managers.
 *
 * @author zhengwei16
 * @date 2022/8/12 15:29
 * @version 1.0
 */
@SpringBootConfiguration
@AutoConfigureAfter(GscTechAutoConfiguration.class)
@ConditionalOnClass(KafkaTemplate.class)
@Slf4j
public class KafkaAutoConfiguration {

    private KafkaProperties properties;

    public KafkaAutoConfiguration(KafkaProperties properties) throws IOException {
        Resource trustStoreLocation = properties.getSsl().getTrustStoreLocation();
        log.info("SSL file path: " + (Objects.isNull(trustStoreLocation) ? "" : trustStoreLocation.getURI().toString()));
        // The Kafka client needs the truststore as a real file on disk; if it is packaged
        // inside the jar (a classpath resource), copy it next to the application first.
        if (trustStoreLocation != null && !trustStoreLocation.isFile()) {
            ApplicationHome applicationHome = new ApplicationHome(getClass());
            log.info("Application Home: " + applicationHome.getDir().getPath());
            File sslFile = new File(applicationHome.getSource().getParentFile(), Objects.requireNonNull(trustStoreLocation.getFilename()));
            FileUtils.copyInputStreamToFile(Objects.requireNonNull(trustStoreLocation.getInputStream(), "SSL File Not Exist"), sslFile);
            properties.getSsl().setTrustStoreLocation(new FileSystemResource(sslFile));
        }

        this.properties = properties;
    }

    @Bean
    @Primary
    public KafkaTemplate<?, ?> kafkaTemplate(ProducerFactory<Object, Object> kafkaProducerFactory,
                                             ProducerListener<Object, Object> kafkaProducerListener,
                                             RecordMessageConverter messageConverter) {
        KafkaTemplate<Object, Object> kafkaTemplate = new KafkaTemplate<>(kafkaProducerFactory);
        if (messageConverter != null) {
            kafkaTemplate.setMessageConverter(messageConverter);
        }
        kafkaTemplate.setProducerListener(kafkaProducerListener);
        kafkaTemplate.setDefaultTopic(this.properties.getTemplate().getDefaultTopic());
        return kafkaTemplate;
    }

    @Bean
    @Primary
    public ConsumerFactory<?, ?> kafkaConsumerFactory() {
        Map<String, Object> configs = this.properties.buildConsumerProperties();
        // Read only records from committed transactions
        configs.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, IsolationLevel.READ_COMMITTED.toString().toLowerCase(Locale.ROOT));
        return new DefaultKafkaConsumerFactory<>(configs);
    }

    @Bean
    @Primary
    public ProducerFactory<?, ?> kafkaProducerFactory() {
        // String keys, JSON-serialized values
        Serializer<String> stringSerializer = new StringSerializer();
        ObjectMapper om = new ObjectMapper();
        om.setSerializationInclusion(JsonInclude.Include.NON_NULL);
        om.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        om.setDateFormat(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"));
        Serializer<Object> jsonSerializer = new JsonSerializer<>(om);
        DefaultKafkaProducerFactory<String, Object> factory = new DefaultKafkaProducerFactory<>(this.properties.buildProducerProperties(), stringSerializer, jsonSerializer);
        String transactionIdPrefix = this.properties.getProducer().getTransactionIdPrefix();
        if (transactionIdPrefix != null) {
            // Unique suffix so each application instance gets its own transactional id
            factory.setTransactionIdPrefix(transactionIdPrefix + "_" + UUID.randomUUID());
        }
        return factory;
    }

    @Bean
    @Primary
    public ConcurrentKafkaListenerContainerFactory<?, ?> kafkaListenerContainerFactory(
            ConcurrentKafkaListenerContainerFactoryConfigurer configurer,
            ConsumerFactory<Object, Object> kafkaConsumerFactory) {
        ConcurrentKafkaListenerContainerFactory<Object, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
        configurer.configure(factory, kafkaConsumerFactory);
        return factory;
    }

    @Bean
    @Primary
    public RecordMessageConverter kafkaMessageConverter() {
        // JSON converter: snake_case property names, lenient about unknown fields, custom Date deserializer
        ObjectMapper om = new ObjectMapper();
        return new StringJsonMessageConverter(om
                .setSerializationInclusion(JsonInclude.Include.NON_NULL)
                .setPropertyNamingStrategy(PropertyNamingStrategy.SNAKE_CASE)
                .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
                .registerModule(new SimpleModule().addDeserializer(Date.class, new DateDeserializer()))
                .setDateFormat(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")));
    }

    @Bean
    @ConditionalOnProperty(name = "spring.kafka.producer.transaction-id-prefix")
    public KafkaTransactionManager<?, ?> kafkaTransactionManager(ProducerFactory<?, ?> producerFactory) {
        KafkaTransactionManager<?, ?> kafkaTransactionManager = new KafkaTransactionManager<>(producerFactory);
        kafkaTransactionManager.setNestedTransactionAllowed(true);
        return kafkaTransactionManager;
    }

    @Bean
    @ConditionalOnBean(KafkaTransactionManager.class)
    public ChainedKafkaTransactionManager<?, ?> chainedKafkaTransactionManager(DataSourceTransactionManager dataSourceTransactionManager, KafkaTransactionManager<?, ?> kafkaTransactionManager) {
        // Chain the Kafka and JDBC transaction managers so the send and the DB write commit together (best effort)
        return new ChainedKafkaTransactionManager<>(kafkaTransactionManager, dataSourceTransactionManager);
    }

}
```
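
Not part of the original post, but a minimal usage sketch under this configuration: with spring.kafka.producer.transaction-id-prefix set, a service can bind the Kafka send and a database write to the chained transaction manager defined above (the class, topic, and method names here are illustrative):

```java
import com.xxx.gsc.sci.order.entity.SciMbgPsdHistoryEntity;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

@Service
public class PsdHistoryPublisher {

    @Autowired
    private KafkaTemplate<Object, Object> kafkaTemplate;

    // The Kafka send and any JDBC writes commit together under the chained
    // transaction manager; "sci-mbg-psd-topic" is a placeholder topic name.
    @Transactional(transactionManager = "chainedKafkaTransactionManager")
    public void publish(SciMbgPsdHistoryEntity entity) {
        kafkaTemplate.send("sci-mbg-psd-topic", entity);
    }
}
```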

4. Application configuration (application.yml)

```yaml
spring:
  kafka:
    # test environment brokers
    bootstrap-servers: n1-mkt-sy.xxx.com:9092,n2-mkt-sy.xxx.com:9092,n3-mkt-sy.xxx.com:9092
    consumer:
      group-id: mbgcpfr
      auto-offset-reset: earliest
      enable-auto-commit: false # offsets are committed manually via ack.acknowledge()
      auto-commit-interval: 1000 # ignored while enable-auto-commit is false
      max-poll-records: 5000
      security:
        protocol: SASL_SSL
      properties:
        max.partition.fetch.bytes: 104857600
        fetch.min.bytes: 2108576
        fetch.max.wait.ms: 10000
        session.timeout.ms: 300000  # default 10000
        request.timeout.ms: 600000 # default 30000
        max.poll.interval.ms: 600000 # default 300000
        sasl:
          mechanism: SCRAM-SHA-512
          jaas:
            config: org.apache.kafka.common.security.scram.ScramLoginModule required username='kaf-username' password='passwd';
    ssl:
      trust-store-location: classpath:client_truststore.jks
      trust-store-password: PASSWD
    listener:
      #concurrency: 2 # number of consumer threads in the container, to increase parallelism
      #ack-count: # number of records between offset commits when ackMode is "COUNT" or "COUNT_TIME"
      ack-mode: manual_immediate # listener AckMode; see the spring-kafka documentation
      #ack-time: # time between offset commits when ackMode is "TIME" or "COUNT_TIME"
      #poll-timeout: # timeout to use when polling the consumer
      type: batch # batch listener: the @KafkaListener method receives a List
      missing-topics-fatal: false
```
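
One property the listener in section 2 depends on is not shown in the post: customer.kafka.topics, which the SpEL expression splits on ',' (index [1] picks the second topic). An assumed definition with illustrative topic names:

```yaml
customer:
  kafka:
    topics: sci-mbg-cpfr-topic,sci-mbg-psd-topic
```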