WebFlux: Custom Listeners for Multiple Kafka Sources

Dependencies

xml
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-stream-kafka</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-stream</artifactId>
        </dependency>
        <dependency>
            <groupId>io.projectreactor.kafka</groupId>
            <artifactId>reactor-kafka</artifactId>
            <version>1.3.11</version>
        </dependency>

Configuration

yaml
kafka:
  enabled:
    listen: # whether each listener is enabled
      root: ${KAFKA_ENABLED_LISTEN_ROOT:false} # master switch for all Kafka listeners
      consumer1: ${KAFKA_ENABLED_LISTEN_CONSUMER_1:true} # switch for listener 1
      consumer2: ${KAFKA_ENABLED_LISTEN_CONSUMER_2:true} # switch for listener 2
  reactive: # {@link com.kittlen.boot.distribute.kafka.config.properties.ReactiveKafkaConsumerProperties}
    # consumer settings
    consumer1: # first consumer
      canConsume: ${KAFKA_CONSUMER_1_CAN_CONSUME:true} # whether to consume messages
      bootstrap-servers: ${KAFKA_C1_CONSUMER_ADDR:192.168.1.52:9092} # Kafka broker address
      #bootstrap-servers: ${KAFKA_CONSUMER_1_CONSUMER_ADDR:127.0.0.1:9092} # alternative broker address (kept commented out)
      key-serializer: org.apache.kafka.common.serialization.ByteArraySerializer
      value-serializer: org.springframework.kafka.support.serializer.JsonSerializer
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      max-poll-records: ${KAFKA_CONSUMER_1_CONSUMER_MAX_POLL_RECORDS:100}
      auto-offset-reset: ${KAFKA_CONSUMER_1_CONSUMER_AUTO_OFFSET_RESET:latest} # earliest: start from the earliest unconsumed offset; latest: consume only newly arriving records
      enable-auto-commit: false
      group-id: ${KAFKA_CONSUMER_1_CONSUMER_GROUP_ID:mmcd}
      topics: ${KAFKA_CONSUMER_1_CONSUMER_TOPIC:t1}
    consumer2: # second consumer
      canConsume: ${KAFKA_CONSUMER_2_CAN_CONSUME:true} # whether to consume messages
      properties:
        security:
          protocol: SASL_PLAINTEXT
        sasl:
          mechanism: SCRAM-SHA-256
          jaas:
            config: 'org.apache.kafka.common.security.scram.ScramLoginModule required username="${KAFKA_CONSUMER_2_CONSUMER_USERNAME:user}" password="${KAFKA_CONSUMER_2_CONSUMER_PASSWORD:pwd}";'
      bootstrap-servers: ${KAFKA_CONSUMER_2_CONSUMER_ADDR:192.168.1.55:9092} # Kafka broker address
      key-serializer: org.apache.kafka.common.serialization.ByteArraySerializer
      value-serializer: org.springframework.kafka.support.serializer.JsonSerializer
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      max-poll-records: ${KAFKA_CONSUMER_2_CONSUMER_MAX_POLL_RECORDS:100}
      auto-offset-reset: ${KAFKA_CONSUMER_2_CONSUMER_AUTO_OFFSET_RESET:latest}
      group-id: ${KAFKA_CONSUMER_2_CONSUMER_GROUP_ID:mmcd}
      topics: ${KAFKA_CONSUMER_2_CONSUMER_TOPIC:t2}
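Under Spring Boot 2.x relaxed binding, the nested keys below consumer2's `properties` block flatten into dot-joined Kafka client settings that `buildProperties()` later merges in. A minimal sketch of the resulting flattened map, with placeholder credentials, assuming that binding behavior:

java
import java.util.HashMap;
import java.util.Map;

public class SaslPropsSketch {

    /**
     * Sketch: the flattened Kafka client settings produced from the nested
     * YAML keys under consumer2's "properties" block.
     */
    public static Map<String, Object> consumer2SecurityProps() {
        Map<String, Object> props = new HashMap<>();
        props.put("security.protocol", "SASL_PLAINTEXT");
        props.put("sasl.mechanism", "SCRAM-SHA-256");
        props.put("sasl.jaas.config",
                "org.apache.kafka.common.security.scram.ScramLoginModule required "
                        + "username=\"user\" password=\"pwd\";"); // placeholder credentials
        return props;
    }
}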

Code

Configuration properties classes

java
package com.kittlen.boot.distribute.kafka.ext;

import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Getter;
import lombok.Setter;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;

/**
 * @author kittlen
 * @version 1.0
 */
@Getter
@Setter
@ApiModel("Kafka consumer")
public class KafkaConsumerExt extends KafkaProperties.Consumer {

    /**
     * Topics to listen to; separate multiple topics with commas
     */
    @ApiModelProperty("Topics to listen to; separate multiple topics with commas")
    private String topics;

    /**
     * Whether to consume messages
     */
    @ApiModelProperty("Whether to consume messages")
    private boolean canConsume = true;
}
java
package com.kittlen.boot.distribute.kafka.config.properties;

import com.kittlen.boot.distribute.kafka.ext.KafkaConsumerExt;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Getter;
import lombok.Setter;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;

/**
 * @author kittlen
 * @version 1.0
 */
@Getter
@Setter
@Component
@ApiModel("Kafka consumer configuration properties")
@ConfigurationProperties(ReactiveKafkaConsumerProperties.PROPERTIES)
public class ReactiveKafkaConsumerProperties {

    public static final String PROPERTIES = "kafka.reactive";

    /**
     * Consumer 1 settings
     */
    @ApiModelProperty("Consumer 1 settings")
    private KafkaConsumerExt consumer1 = new KafkaConsumerExt();

    /**
     * Consumer 2 settings
     */
    @ApiModelProperty("Consumer 2 settings")
    private KafkaConsumerExt consumer2 = new KafkaConsumerExt();
}

Config classes

java
package com.kittlen.boot.distribute.kafka.config;

import com.kittlen.boot.distribute.kafka.config.properties.ReactiveKafkaConsumerProperties;
import com.kittlen.boot.distribute.kafka.ext.KafkaConsumerExt;
import com.kittlen.boot.distribute.kafka.ext.ReactiveKafkaConsumerTemplateExt;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.stereotype.Component;
import reactor.kafka.receiver.ReceiverOptions;

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * @author kittlen
 * @version 1.0
 */
@Slf4j
@Component
@ConditionalOnProperty(value = ReactiveConsumerConfig.CONSUMER_ROOT_ENABLED_PROPERTY, havingValue = "true")
public class ReactiveConsumerConfig {

    /**
     * Master switch for enabling Kafka consumers
     */
    public static final String CONSUMER_ROOT_ENABLED_PROPERTY = "kafka.enabled.listen.root";
    /**
     * Switch for enabling Kafka consumer 1
     */
    public static final String CONSUMER_CONSUMER1_ENABLED_PROPERTY = "kafka.enabled.listen.consumer1";
    /**
     * Switch for enabling Kafka consumer 2
     */
    public static final String CONSUMER_CONSUMER2_ENABLED_PROPERTY = "kafka.enabled.listen.consumer2";

    /**
     * Listener 1
     *
     * @param reactiveKafkaConsumerProperties consumer configuration properties
     * @return reactive consumer template for consumer1
     */
    @Bean
    @ConditionalOnProperty(value = ReactiveConsumerConfig.CONSUMER_CONSUMER1_ENABLED_PROPERTY, havingValue = "true")
    public ReactiveKafkaConsumerTemplateExt<String, String> consumer1ReactiveKafkaConsumerTemplate(ReactiveKafkaConsumerProperties reactiveKafkaConsumerProperties) {
        KafkaConsumerExt consumer1 = reactiveKafkaConsumerProperties.getConsumer1();
        List<String> topics = Stream.of(consumer1.getTopics().split(",")).collect(Collectors.toList());
        ReceiverOptions<String, String> receiverOptions = ReceiverOptions.create(consumer1.buildProperties());
        ReceiverOptions<String, String> subscription = receiverOptions.subscription(topics);
        ReactiveKafkaConsumerTemplateExt<String, String> template = new ReactiveKafkaConsumerTemplateExt<>(subscription, consumer1, "consumer1");
        log.info("loading consumer1 reactive kafka consumer completed");
        return template;
    }

    /**
     * Listener 2
     *
     * @param reactiveKafkaConsumerProperties consumer configuration properties
     * @return reactive consumer template for consumer2
     */
    @Bean
    @ConditionalOnProperty(value = ReactiveConsumerConfig.CONSUMER_CONSUMER2_ENABLED_PROPERTY, havingValue = "true")
    public ReactiveKafkaConsumerTemplateExt<String, String> consumer2ReactiveKafkaConsumerTemplate(ReactiveKafkaConsumerProperties reactiveKafkaConsumerProperties) {
        KafkaConsumerExt consumer2 = reactiveKafkaConsumerProperties.getConsumer2();
        List<String> topics = Stream.of(consumer2.getTopics().split(",")).collect(Collectors.toList());
        ReceiverOptions<String, String> receiverOptions = ReceiverOptions.create(consumer2.buildProperties());
        ReceiverOptions<String, String> subscription = receiverOptions.subscription(topics);
        ReactiveKafkaConsumerTemplateExt<String, String> template = new ReactiveKafkaConsumerTemplateExt<>(subscription, consumer2, "consumer2");
        log.info("loading consumer2 reactive kafka consumer completed");
        return template;
    }

}
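The ReactiveKafkaConsumerTemplateExt used above is not listed in the article. A minimal sketch reconstructed from how it is used here (the original may differ), extending spring-kafka's ReactiveKafkaConsumerTemplate:

java
package com.kittlen.boot.distribute.kafka.ext;

import org.springframework.kafka.core.reactive.ReactiveKafkaConsumerTemplate;
import reactor.kafka.receiver.ReceiverOptions;

/**
 * Sketch reconstructed from usage: carries the extended consumer settings and
 * a logical listener name alongside the reactive template.
 */
public class ReactiveKafkaConsumerTemplateExt<K, V> extends ReactiveKafkaConsumerTemplate<K, V> {

    /** Extended consumer settings (topics, canConsume, broker address). */
    private final KafkaConsumerExt kafkaConsumerExt;

    /** Logical name used to look up the matching AbstractConsumerHandler. */
    private final String templateConsumerName;

    public ReactiveKafkaConsumerTemplateExt(ReceiverOptions<K, V> receiverOptions,
                                            KafkaConsumerExt kafkaConsumerExt,
                                            String templateConsumerName) {
        super(receiverOptions);
        this.kafkaConsumerExt = kafkaConsumerExt;
        this.templateConsumerName = templateConsumerName;
    }

    public KafkaConsumerExt getKafkaConsumerExt() {
        return kafkaConsumerExt;
    }

    public String getTemplateConsumerName() {
        return templateConsumerName;
    }

    /** Convenience accessor consulted before dispatching each record. */
    public boolean isCanConsume() {
        return kafkaConsumerExt.isCanConsume();
    }
}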
java
package com.kittlen.boot.distribute.kafka.consumer;

import com.kittlen.boot.comm.exceptions.BizException;
import com.kittlen.boot.distribute.kafka.config.ReactiveConsumerConfig;
import com.kittlen.boot.distribute.kafka.consumer.handlers.AbstractConsumerHandler;
import com.kittlen.boot.distribute.kafka.ext.ReactiveKafkaConsumerTemplateExt;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.stereotype.Component;
import reactor.core.publisher.Mono;

import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

/**
 * @author kittlen
 * @version 1.0
 */
@Slf4j
@Component
@ConditionalOnBean(ReactiveConsumerConfig.class)
public class KafkaReactiveConsumer {

    private final Map<String, AbstractConsumerHandler> handlerMap;
    private final Duration defTimeOut = Duration.ofSeconds(240);

    public KafkaReactiveConsumer(List<ReactiveKafkaConsumerTemplateExt<String, String>> reactiveKafkaConsumerTemplateExtList, List<AbstractConsumerHandler> consumerHandlers) {
        handlerMap = consumerHandlers.stream().collect(Collectors.toMap(AbstractConsumerHandler::consumerName, a -> a, (a, b) -> {
            throw new BizException("duplicate consumerName: " + b.consumerName());
        }));
        for (ReactiveKafkaConsumerTemplateExt<String, String> template : reactiveKafkaConsumerTemplateExtList) {
            this.consumerRun(template);
        }
    }

    private void consumerRun(ReactiveKafkaConsumerTemplateExt<String, String> template) {
        if (template == null) {
            return;
        }
        log.info("consumer:{} listening on servers:{}, topics:{}", template.getTemplateConsumerName(), template.getKafkaConsumerExt().getBootstrapServers(), template.getKafkaConsumerExt().getTopics());
        template.receiveAutoAck().map(c -> {
            String templateConsumerName = template.getTemplateConsumerName();
            if (!template.isCanConsume()) {
                log.debug("consumer:{} not consuming topic:{} value:{}", templateConsumerName, c.topic(), c.value());
                return Mono.empty();
            }
            log.debug("consumer:{} handling topic:{} value:{}", templateConsumerName, c.topic(), c.value());
            AbstractConsumerHandler abstractConsumerHandler = handlerMap.get(templateConsumerName);
            try {
                if (abstractConsumerHandler != null) {
                    // Guard against handlers that never complete: fall back to false after the timeout.
                    return abstractConsumerHandler.handler(c).timeout(defTimeOut, Mono.just(false));
                } else {
                    log.info("no handler registered for consumer:{}", templateConsumerName);
                    return Mono.empty();
                }
            } catch (Exception e) {
                log.error("consumer:{} threw an exception while handling topic:{} payload:{}: {}", templateConsumerName, c.topic(), c.value(), e.getMessage(), e);
                return Mono.empty();
            }
        }).doOnError(throwable -> log.error("something bad happened while consuming : {}", throwable.getMessage()))
                // Each mapped Mono is subscribed individually (fire-and-forget dispatch).
                .subscribe(Mono::subscribe);
    }

}
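The BizException thrown in the constructor above is a project-local class that the article does not show; any unchecked exception works there. A minimal stand-in:

java
package com.kittlen.boot.comm.exceptions;

/**
 * Minimal stand-in (sketch): the article references this class but does not
 * list it; the original likely carries more context such as error codes.
 */
public class BizException extends RuntimeException {

    public BizException(String message) {
        super(message);
    }
}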

Message handling

Messages are handled by implementing the interface below; a sample implementation is shown after it.

java
package com.kittlen.boot.distribute.kafka.consumer.handlers;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import reactor.core.publisher.Mono;

/**
 * @author kittlen
 * @version 1.0
 */
public interface AbstractConsumerHandler {

    /**
     * Handler name.
     * When it matches a listener's templateConsumerName, this handler is used for that listener.
     *
     * @return the handler name
     */
    String consumerName();

    /**
     * Handle a listener message.
     * Processes records received by the listener whose name matches {@link #consumerName()}.
     *
     * @param consumerRecord the record received from Kafka
     * @return a Mono emitting true if the record was handled successfully
     */
    Mono<Boolean> handler(ConsumerRecord<String, String> consumerRecord);
}
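For example, a hypothetical handler for consumer1 (class name and body are illustrative, not from the original project):

java
package com.kittlen.boot.distribute.kafka.consumer.handlers;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.stereotype.Component;
import reactor.core.publisher.Mono;

/**
 * Hypothetical example: handles records received by the listener registered
 * with the name "consumer1".
 */
@Slf4j
@Component
public class Consumer1Handler implements AbstractConsumerHandler {

    @Override
    public String consumerName() {
        // Must match the templateConsumerName given when the listener bean was created.
        return "consumer1";
    }

    @Override
    public Mono<Boolean> handler(ConsumerRecord<String, String> consumerRecord) {
        return Mono.fromCallable(() -> {
            log.info("consumer1 received topic:{} value:{}", consumerRecord.topic(), consumerRecord.value());
            // ... business processing goes here ...
            return true; // true signals successful handling
        });
    }
}

Because KafkaReactiveConsumer collects every AbstractConsumerHandler bean through constructor injection, adding a new listener only requires a new handler bean whose consumerName() matches the template name passed in ReactiveConsumerConfig.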