Kafka 消费者模式

一、单线程消费者模式

java
package nj.zb.kb23.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

/*
 * Single-threaded consumer: one KafkaConsumer polling in a loop on the
 * calling thread, with manual (asynchronous) offset commits.
 */
public class MyConsumer {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.91.11:9092");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        /*
         * auto.offset.reset — where to start when the group has NO committed offset:
         *   earliest: consume from the beginning of each partition
         *   latest:   consume only records produced after the consumer starts
         *   none:     throw an exception
         * (When a committed offset exists, all three resume from it.)
         */
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        /*
         * enable.auto.commit=false: offsets are committed manually via
         * commitAsync() below. Note: auto.commit.interval.ms is only honored
         * when auto commit is enabled, so it is intentionally NOT set here
         * (setting it alongside manual commit is dead, misleading config).
         */
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // Consumer group id; consumers sharing it split the topic's partitions.
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "group1");
        // Single consumer instance, driven entirely by the main thread.
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties);
        try {
            kafkaConsumer.subscribe(Collections.singleton("kb23"));
            while (true) {
                ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println("topic:" + record.topic()
                            + " partition:" + record.partition()
                            + " 偏移量:" + record.offset()
                            + " value:" + record.value()
                            + " 时间戳:" + record.timestamp());
                }
                // Manual async commit. The no-arg overload silently drops commit
                // failures; log them via the callback instead.
                kafkaConsumer.commitAsync((offsets, exception) -> {
                    if (exception != null) {
                        System.err.println("offset commit failed: " + exception);
                    }
                });
            }
        } finally {
            // Release network resources and leave the group cleanly on exit.
            kafkaConsumer.close();
        }
    }
}

二、多线程消费者模式

java
package nj.zb.kb23.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

/*
 * Multi-threaded consumer: one KafkaConsumer per thread. KafkaConsumer is
 * not thread-safe, so each thread constructs and uses its own instance;
 * consumers sharing a group id split the topic's partitions between them.
 */
public class MyConsumer2 {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.91.11:9092");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // earliest: start from the beginning when the group has no committed offset.
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // Manual commit mode; offsets are committed once per poll loop below.
        // (auto.commit.interval.ms only applies to auto commit, so it is omitted.)
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // Consumer group: the three threads below share it and split partitions.
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "心心威武");
        // Spawn exactly 3 consumer threads — matching the 3 partitions of topic
        // kb23. (The original `i <= 3` started 4 threads, one more than the
        // comment intended and one of them idle.)
        for (int i = 0; i < 3; i++) {
            new Thread(new Runnable() {
                @Override
                public void run() {
                    // One consumer per thread: KafkaConsumer is not thread-safe.
                    KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties);
                    kafkaConsumer.subscribe(Collections.singleton("kb23"));
                    while (true) {
                        ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(100));
                        for (ConsumerRecord<String, String> record : records) {
                            System.out.println(Thread.currentThread().getName()+
                                    " topic: "+record.topic()+
                                    " partition: "+record.partition()+
                                    " 偏移量: "+record.offset()+
                                    " value: "+record.value()+
                                    " 时间戳: "+record.timestamp());
                        }
                        // enable.auto.commit is false, so without this call the
                        // group's progress would never be persisted and every
                        // restart would re-read from the beginning.
                        kafkaConsumer.commitAsync();
                    }
                }
            }).start();
        }
    }
}

|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| "C:\Program Files\Java\jdk1.8.0_144\bin\java.exe" "-javaagent:D:\Program Files\JetBrains\IntelliJ IDEA 。。。。。。。。。。。。。。。。。。。。。。。。 SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder". SLF4J: Defaulting to no-operation (NOP) logger implementation SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details. Thread-3 topic: kb23 partition: 0 偏移量: 0 value: hello java 时间戳: 1695173593009 Thread-3 topic: kb23 partition: 0 偏移量: 1 value: hello c== 时间戳: 1695173606546 Thread-2 topic: kb23 partition: 1 偏移量: 0 value: dufiudhifch 时间戳: 1695174679229 Thread-1 topic: kb23 partition: 2 偏移量: 0 value: hel 时间戳: 1695173599314 Thread-3 topic: kb23 partition: 0 偏移量: 2 value: djfhjsjkhfk 时间戳: 1695174683054 Thread-1 topic: kb23 partition: 2 偏移量: 1 value: hello world 时间戳: 1695173611446 Thread-2 topic: kb23 partition: 1 偏移量: 1 value: hsdakhskfhak 时间戳: 1695174686318 Thread-1 topic: kb23 partition: 2 偏移量: 2 value: hshcdshcdskc 时间戳: 1695174681057 Thread-3 topic: kb23 partition: 0 偏移量: 3 value: jkfdsajklfjalds 时间戳: 1695174689058 Thread-1 topic: kb23 partition: 2 偏移量: 3 value: dhjfhkshkf 时间戳: 1695174684802 |

三、消费者模式seek方法

java
package nj.zb.kb23.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;

/*
 * Demonstrates KafkaConsumer.seek(): after the group assignment arrives,
 * reposition each assigned partition to an explicit starting offset before
 * entering the normal poll loop.
 */
public class MyConsumerSeek {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.91.11:9092");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        /*
         * auto.offset.reset — only consulted when the group has no committed
         * offset (earliest = from the beginning, latest = new records only,
         * none = throw). The explicit seek() calls below override the starting
         * position regardless.
         */
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // Manual commit mode; the interval setting only applies to auto commit.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // Consumer group id.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "group3");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singleton("kb23"));

        // Keep polling until the broker has assigned partitions to this
        // consumer — seek() may only target partitions actually assigned.
        Set<TopicPartition> assigned = new HashSet<>();
        while (assigned.size() == 0) {
            consumer.poll(Duration.ofMillis(1000));
            assigned = consumer.assignment();
        }

        // Reposition each partition to a hand-picked start offset
        // (topic kb23: partition 0 -> 4, partition 1 -> 5, partition 2 -> 4).
        for (TopicPartition tp : assigned) {
            System.out.println(tp.topic() + "\t" + tp.partition());
            switch (tp.partition()) {
                case 0:
                    consumer.seek(tp, 4);
                    break;
                case 1:
                    consumer.seek(tp, 5);
                    break;
                case 2:
                    consumer.seek(tp, 4);
                    break;
                default:
                    break;
            }
        }

        // Consume from the seeked positions onward.
        while (true) {
            ConsumerRecords<String, String> batch = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : batch) {
                System.out.println("topic:" + record.topic()
                        + " partition:" + record.partition()
                        + " 偏移量:" + record.offset()
                        + " value:" + record.value()
                        + " 时间戳:" + record.timestamp());
            }
        }
    }
}

|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| "C:\Program Files\Java\jdk1.8.0_144\bin\java.exe" "-javaagent:D:\Program Files\JetBrains\IntelliJ IDEA.。。。。。。。。。。。。。 SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details. kb23 2 kb23 1 kb23 0 topic:kb23 partition:2 偏移量:4 value:sjhkdksahkdah 时间戳:1695174687827 topic:kb23 partition:2 偏移量:5 value:hhh1 时间戳:1695175898301 topic:kb23 partition:2 偏移量:6 value:2222 时间戳:1695176003767 topic:kb23 partition:2 偏移量:7 value:444 时间戳:1695176010084 topic:kb23 partition:2 偏移量:8 value:ppp 时间戳:1695177956251 topic:kb23 partition:2 偏移量:9 value:ppp1 时间戳:1695178017439 topic:kb23 partition:2 偏移量:10 value:ppp3 时间戳:1695178021374 topic:kb23 partition:2 偏移量:11 value:ananaq 时间戳:1695179560702 topic:kb23 partition:1 偏移量:5 value:qqq 时间戳:1695175970133 |

相关推荐
传感器与混合集成电路1 小时前
面向储气库注采井的分布式光纤监测技术
分布式
ZTLJQ2 小时前
任务调度的艺术:Python分布式任务系统完全解析
开发语言·分布式·python
被摘下的星星2 小时前
Hadoop伪分布式集群搭建实验原理概要
大数据·hadoop·分布式
无名-CODING5 小时前
Java 爬虫高级技术:反反爬策略与分布式爬虫实战
java·分布式·爬虫
8Qi85 小时前
Redis哨兵模式(Sentinel)深度解析
java·数据库·redis·分布式·缓存·sentinel
爱学习的程序媛6 小时前
JWT签发全指南:从原理到安全实践
分布式·安全·web安全·安全架构·jwt签发·无状态认证
wanhengidc7 小时前
徐州服务器租用的优势
大数据·运维·服务器·分布式·智能手机
枫叶V8 小时前
Kafka 怎么保证消息的顺序性
kafka
wanzehongsheng8 小时前
分布式光伏电站的技术优势与智能运维实践:以WZ HELIO²双轴跟踪系统为例
运维·分布式
爱浦路 IPLOOK8 小时前
分布式UPF架构:让低时延与大带宽不再是难题
分布式·架构