Kafka Consumer Patterns

1. Single-threaded consumer pattern

package nj.zb.kb23.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

/*
 * Single-threaded consumer
 */
public class MyConsumer {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"192.168.91.11:9092");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class);
        /*
         * earliest: if the partition has a committed offset, resume from it; otherwise consume from the beginning
         * latest:   if the partition has a committed offset, resume from it; otherwise consume only newly produced data
         * none:     if the partition has a committed offset, resume from it; otherwise throw an exception
         */
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,"earliest");
        /*
         * ENABLE_AUTO_COMMIT_CONFIG: whether offsets are committed automatically (true = auto, false = manual)
         * AUTO_COMMIT_INTERVAL_MS_CONFIG: auto-commit interval, 1000 ms here (only relevant when auto commit is enabled)
         */
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,"false");
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG,"1000");
        /**
         * Set the consumer group
         */
        properties.put(ConsumerConfig.GROUP_ID_CONFIG,"group1");
        // single-threaded: one KafkaConsumer in the main thread
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties);
        kafkaConsumer.subscribe(Collections.singleton("kb23"));
        while (true){
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("topic:" + record.topic()
                        + " partition:" + record.partition()
                        + " offset:" + record.offset()
                        + " value:" + record.value()
                        + " timestamp:" + record.timestamp());
            }
            // manual commit: with earliest/latest, reruns resume from the last committed offsets,
            // so nothing new prints unless fresh messages arrive
            kafkaConsumer.commitAsync();
        }
    }
}
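
The loop above ends with a bare commitAsync(), which is fire-and-forget: a failed commit goes unnoticed, and nothing is committed when the process exits. A common refinement, sketched below as a drop-in replacement for the poll loop in MyConsumer (the callback body and the shutdown handling are illustrative assumptions, not part of the original code), is to log failed async commits and to issue one blocking commitSync() before closing so the final offsets are not lost:

        try {
            while (true) {
                ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records) {
                    // ... process the record as above ...
                }
                // non-blocking commit; the callback only logs failures
                kafkaConsumer.commitAsync((offsets, exception) -> {
                    if (exception != null) {
                        System.err.println("async commit failed for " + offsets + ": " + exception);
                    }
                });
            }
        } finally {
            try {
                kafkaConsumer.commitSync(); // blocking: make the last processed offsets durable
            } finally {
                kafkaConsumer.close();
            }
        }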

2. Multi-threaded consumer pattern

package nj.zb.kb23.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

/*
 * Multi-threaded consumer
 */
public class MyConsumer2 {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"192.168.91.11:9092");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class);
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,"earliest");
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,"false");
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG,"1000");
        /**
         * Set the consumer group
         */
        properties.put(ConsumerConfig.GROUP_ID_CONFIG,"心心威武");
        // multi-threaded: four consumer threads (the kb23 topic has 3 partitions, so one thread gets no partition and stays idle)
        for (int i = 0; i <= 3; i++) {
            new Thread(new Runnable() {
                @Override
                public void run() {
                    // KafkaConsumer is not thread-safe, so each thread creates its own instance
                    KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties);
                    kafkaConsumer.subscribe(Collections.singleton("kb23"));
                    while(true){
                        ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(100));
                        for (ConsumerRecord<String, String> record : records) {
                            System.out.println(Thread.currentThread().getName() +
                                    " topic: " + record.topic() +
                                    " partition: " + record.partition() +
                                    " offset: " + record.offset() +
                                    " value: " + record.value() +
                                    " timestamp: " + record.timestamp());
                        }
                        // note: auto commit is disabled and no commit is ever issued,
                        // so this example never persists its offsets
                    }
                }
            }).start();
        }
    }
}

"C:\Program Files\Java\jdk1.8.0_144\bin\java.exe" "-javaagent:D:\Program Files\JetBrains\IntelliJ IDEA ...
SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
SLF4J: Defaulting to no-operation (NOP) logger implementation
SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
Thread-3 topic: kb23 partition: 0 offset: 0 value: hello java timestamp: 1695173593009
Thread-3 topic: kb23 partition: 0 offset: 1 value: hello c== timestamp: 1695173606546
Thread-2 topic: kb23 partition: 1 offset: 0 value: dufiudhifch timestamp: 1695174679229
Thread-1 topic: kb23 partition: 2 offset: 0 value: hel timestamp: 1695173599314
Thread-3 topic: kb23 partition: 0 offset: 2 value: djfhjsjkhfk timestamp: 1695174683054
Thread-1 topic: kb23 partition: 2 offset: 1 value: hello world timestamp: 1695173611446
Thread-2 topic: kb23 partition: 1 offset: 1 value: hsdakhskfhak timestamp: 1695174686318
Thread-1 topic: kb23 partition: 2 offset: 2 value: hshcdshcdskc timestamp: 1695174681057
Thread-3 topic: kb23 partition: 0 offset: 3 value: jkfdsajklfjalds timestamp: 1695174689058
Thread-1 topic: kb23 partition: 2 offset: 3 value: dhjfhkshkf timestamp: 1695174684802

3. Consumer seek method

package nj.zb.kb23.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;

/*
 * seek: choose the exact offset at which consumption starts
 */
public class MyConsumerSeek {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"192.168.91.11:9092");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class);
        /*
         * earliest: if the partition has a committed offset, resume from it; otherwise consume from the beginning
         * latest:   if the partition has a committed offset, resume from it; otherwise consume only newly produced data
         * none:     if the partition has a committed offset, resume from it; otherwise throw an exception
         */
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,"earliest");
        /*
         * ENABLE_AUTO_COMMIT_CONFIG: whether offsets are committed automatically (true = auto, false = manual)
         * AUTO_COMMIT_INTERVAL_MS_CONFIG: auto-commit interval, 1000 ms here (only relevant when auto commit is enabled)
         */
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,"false");
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG,"1000");
        /**
         * Set the consumer group
         */
        properties.put(ConsumerConfig.GROUP_ID_CONFIG,"group3");
        // single-threaded: one KafkaConsumer in the main thread
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties);
        kafkaConsumer.subscribe(Collections.singleton("kb23"));

        // seek() only works on partitions this consumer owns, so poll until the group coordinator has assigned some
        Set<TopicPartition> assignment = new HashSet<>();
        while (assignment.size() == 0) {
            kafkaConsumer.poll(Duration.ofMillis(1000));
            assignment = kafkaConsumer.assignment();
        }
        for (TopicPartition topicPartition : assignment) {
            // topic kb23: start partition 0 at offset 4, partition 1 at offset 5, partition 2 at offset 4
            System.out.println(topicPartition.topic() + "\t" + topicPartition.partition());
            if (topicPartition.partition() == 0) {
                kafkaConsumer.seek(topicPartition, 4);
            } else if (topicPartition.partition() == 1) {
                kafkaConsumer.seek(topicPartition, 5);
            } else if (topicPartition.partition() == 2) {
                kafkaConsumer.seek(topicPartition, 4);
            }
        }
        while (true){
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("topic:" + record.topic()
                        + " partition:" + record.partition()
                        + " offset:" + record.offset()
                        + " value:" + record.value()
                        + " timestamp:" + record.timestamp());
            }
        }
    }
}

"C:\Program Files\Java\jdk1.8.0_144\bin\java.exe" "-javaagent:D:\Program Files\JetBrains\IntelliJ IDEA ...
SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
kb23 2
kb23 1
kb23 0
topic:kb23 partition:2 offset:4 value:sjhkdksahkdah timestamp:1695174687827
topic:kb23 partition:2 offset:5 value:hhh1 timestamp:1695175898301
topic:kb23 partition:2 offset:6 value:2222 timestamp:1695176003767
topic:kb23 partition:2 offset:7 value:444 timestamp:1695176010084
topic:kb23 partition:2 offset:8 value:ppp timestamp:1695177956251
topic:kb23 partition:2 offset:9 value:ppp1 timestamp:1695178017439
topic:kb23 partition:2 offset:10 value:ppp3 timestamp:1695178021374
topic:kb23 partition:2 offset:11 value:ananaq timestamp:1695179560702
topic:kb23 partition:1 offset:5 value:qqq timestamp:1695175970133
