Kafka 消费者模式

一、单线程消费者模式

java 复制代码
package nj.zb.kb23.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

/*
* 单线程
*/
/*
 * Single-threaded Kafka consumer demo: subscribes to topic "kb23", prints
 * every record it receives, and commits offsets manually via commitAsync().
 */
public class MyConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.91.11:9092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        /*
         * AUTO_OFFSET_RESET_CONFIG — where to start when no committed offset exists:
         *   earliest: resume from the committed offset; if none, read from the beginning
         *   latest:   resume from the committed offset; if none, read only new records
         *   none:     resume from the committed offset; if none, throw an exception
         */
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        /*
         * ENABLE_AUTO_COMMIT_CONFIG — false = commit offsets manually, true = auto-commit.
         * AUTO_COMMIT_INTERVAL_MS_CONFIG — auto-commit interval (1000 ms); ignored while
         * auto-commit is disabled.
         */
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // Consumer group id.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "group1");
        // Single consumer instance, driven entirely on the main thread.
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singleton("kb23"));
        while (true) {
            ConsumerRecords<String, String> batch = consumer.poll(Duration.ofMillis(100));
            batch.forEach(record -> System.out.println("topic:" + record.topic()
                    + " partition:" + record.partition()
                    + " 偏移量:" + record.offset()
                    + " value:" + record.value()
                    + " 时间戳:" + record.timestamp()));
            // Manual asynchronous commit: with earliest/latest the next run resumes
            // from this committed position unless new messages arrive.
            consumer.commitAsync();
        }
    }
}

二、多线程消费者模式

java 复制代码
package nj.zb.kb23.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

/*
* 多线程
*/
/*
 * Multi-threaded Kafka consumer demo: starts three threads, each owning its
 * own KafkaConsumer (KafkaConsumer is not thread-safe and must never be
 * shared across threads). All consumers join the same group, so the topic's
 * partitions are split between the threads.
 */
public class MyConsumer2 {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"192.168.91.11:9092");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class);
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,"earliest");
        // Manual offset commits; the interval setting is ignored while auto-commit is off.
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,"false");
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG,"1000");
        // Consumer group id.
        properties.put(ConsumerConfig.GROUP_ID_CONFIG,"心心威武");
        // Three worker threads. (Was `i <= 3`, which started a FOURTH consumer that
        // sat idle, since the topic only has three partitions — the run log shows
        // records delivered to Thread-1..3 only.)
        for (int i = 0; i < 3; i++) {
            new Thread(new Runnable() {
                @Override
                public void run() {
                    // One consumer per thread; sharing the Properties object is safe
                    // (java.util.Properties is synchronized).
                    KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties);
                    kafkaConsumer.subscribe(Collections.singleton("kb23"));
                    while (true) {
                        ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(100));
                        for (ConsumerRecord<String, String> record : records) {
                            System.out.println(Thread.currentThread().getName()+
                                    " topic: "+record.topic()+
                                    " partition: "+record.partition()+
                                    " 偏移量: "+record.offset()+
                                    " value: "+record.value()+
                                    " 时间戳: "+record.timestamp());
                        }
                        // Auto-commit is disabled, so commit manually; without this the
                        // group never stores offsets and every restart re-reads from the
                        // beginning (matches the manual-commit style of MyConsumer).
                        kafkaConsumer.commitAsync();
                    }
                }
            }).start();
        }
    }
}

|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| "C:\Program Files\Java\jdk1.8.0_144\bin\java.exe" "-javaagent:D:\Program Files\JetBrains\IntelliJ IDEA 。。。。。。。。。。。。。。。。。。。。。。。。 SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder". SLF4J: Defaulting to no-operation (NOP) logger implementation SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details. Thread-3 topic: kb23 partition: 0 偏移量: 0 value: hello java 时间戳: 1695173593009 Thread-3 topic: kb23 partition: 0 偏移量: 1 value: hello c== 时间戳: 1695173606546 Thread-2 topic: kb23 partition: 1 偏移量: 0 value: dufiudhifch 时间戳: 1695174679229 Thread-1 topic: kb23 partition: 2 偏移量: 0 value: hel 时间戳: 1695173599314 Thread-3 topic: kb23 partition: 0 偏移量: 2 value: djfhjsjkhfk 时间戳: 1695174683054 Thread-1 topic: kb23 partition: 2 偏移量: 1 value: hello world 时间戳: 1695173611446 Thread-2 topic: kb23 partition: 1 偏移量: 1 value: hsdakhskfhak 时间戳: 1695174686318 Thread-1 topic: kb23 partition: 2 偏移量: 2 value: hshcdshcdskc 时间戳: 1695174681057 Thread-3 topic: kb23 partition: 0 偏移量: 3 value: jkfdsajklfjalds 时间戳: 1695174689058 Thread-1 topic: kb23 partition: 2 偏移量: 3 value: dhjfhkshkf 时间戳: 1695174684802 |

三、消费者模式seek方法

java 复制代码
package nj.zb.kb23.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;

/*
* seek指定开始消费的位置
*/
/*
 * Kafka consumer demo using seek(): after partitions are assigned, position
 * each partition of topic "kb23" at an explicit starting offset before the
 * normal poll loop begins.
 */
public class MyConsumerSeek {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.91.11:9092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        /*
         * AUTO_OFFSET_RESET_CONFIG — where to start when no committed offset exists:
         *   earliest: resume from the committed offset; if none, read from the beginning
         *   latest:   resume from the committed offset; if none, read only new records
         *   none:     resume from the committed offset; if none, throw an exception
         */
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        /*
         * ENABLE_AUTO_COMMIT_CONFIG — false = commit offsets manually, true = auto-commit.
         * AUTO_COMMIT_INTERVAL_MS_CONFIG — auto-commit interval (1000 ms); ignored while
         * auto-commit is disabled.
         */
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // Consumer group id.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "group3");
        // Single consumer on the main thread.
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singleton("kb23"));

        // Poll until the coordinator has actually assigned partitions to this
        // consumer — seek() is only valid for partitions it owns. NOTE(review):
        // records fetched by these warm-up polls are discarded, which is
        // inherent to the subscribe-then-seek pattern.
        Set<TopicPartition> assigned = new HashSet<>();
        while (assigned.isEmpty()) {
            consumer.poll(Duration.ofMillis(1000));
            assigned = consumer.assignment();
        }
        for (TopicPartition tp : assigned) {
            // Topic kb23 offsets at the time of writing: p0=4, p1=5, p2=4.
            System.out.println(tp.topic() + "\t" + tp.partition());
            switch (tp.partition()) {
                case 0:
                    consumer.seek(tp, 4);
                    break;
                case 1:
                    consumer.seek(tp, 5);
                    break;
                case 2:
                    consumer.seek(tp, 4);
                    break;
                default:
                    break;
            }
        }
        while (true) {
            ConsumerRecords<String, String> batch = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> rec : batch) {
                System.out.println("topic:" + rec.topic()
                        + " partition:" + rec.partition()
                        + " 偏移量:" + rec.offset()
                        + " value:" + rec.value()
                        + " 时间戳:" + rec.timestamp());
            }
        }
    }
}

|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| "C:\Program Files\Java\jdk1.8.0_144\bin\java.exe" "-javaagent:D:\Program Files\JetBrains\IntelliJ IDEA.。。。。。。。。。。。。。 SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details. kb23 2 kb23 1 kb23 0 topic:kb23 partition:2 偏移量:4 value:sjhkdksahkdah 时间戳:1695174687827 topic:kb23 partition:2 偏移量:5 value:hhh1 时间戳:1695175898301 topic:kb23 partition:2 偏移量:6 value:2222 时间戳:1695176003767 topic:kb23 partition:2 偏移量:7 value:444 时间戳:1695176010084 topic:kb23 partition:2 偏移量:8 value:ppp 时间戳:1695177956251 topic:kb23 partition:2 偏移量:9 value:ppp1 时间戳:1695178017439 topic:kb23 partition:2 偏移量:10 value:ppp3 时间戳:1695178021374 topic:kb23 partition:2 偏移量:11 value:ananaq 时间戳:1695179560702 topic:kb23 partition:1 偏移量:5 value:qqq 时间戳:1695175970133 |

相关推荐
道一云黑板报2 小时前
Flink集群批作业实践:七析BI批作业执行
大数据·分布式·数据分析·flink·kubernetes
qq_5470261792 小时前
Kafka 常见问题
kafka
core5122 小时前
flink sink kafka
flink·kafka·sink
飞来又飞去4 小时前
kafka sasl和acl之间的关系
分布式·kafka
MZWeiei5 小时前
Zookeeper的监听机制
分布式·zookeeper
莹雨潇潇5 小时前
Hadoop完全分布式环境部署
大数据·hadoop·分布式
浩哲Zhe6 小时前
RabbitMQ
java·分布式·rabbitmq
明达技术6 小时前
分布式 IO 模块:赋能造纸业,革新高速纸机主传动
分布式
Allen Bright7 小时前
RabbitMQ中的Topic模式
分布式·rabbitmq
李洋-蛟龙腾飞公司8 小时前
HarmonyOS Next 应用元服务开发-分布式数据对象迁移数据权限与基础数据
分布式·华为·harmonyos