Kafka review (26): implementing a per-message TTL with RecordHeaders and RecordHeader

The idea: the producer attaches a "ttl" header (in seconds) to each record, and a consumer interceptor compares each record's timestamp against the current time, dropping expired records before poll() returns them.

1. Define the producer and attach RecordHeaders to the messages

package com.cisdi.dsp.modules.metaAnalysis.rest.kafka2023;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Date;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

public class KafkaTest26 {
    public static void main(String[] args) {
        Properties properties= new Properties();

        properties.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"xx.xx.xx.xx:9092");
        KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(properties);

        // ttl = 1 second: almost certain to have expired by the time the
        // consumer interceptor sees it, so it will be dropped
        RecordHeaders recordHeaders1 = new RecordHeaders();
        recordHeaders1.add("ttl", BytesUtils.longToBytes(1));

        // ttl = 30 seconds
        RecordHeaders recordHeaders2 = new RecordHeaders();
        recordHeaders2.add("ttl", BytesUtils.longToBytes(30));

        // ttl = 60 seconds
        RecordHeaders recordHeaders3 = new RecordHeaders();
        recordHeaders3.add("ttl", BytesUtils.longToBytes(60));

        ProducerRecord<String, String> producerRecord1 = new ProducerRecord<>("ttl", 0,
                new Date().getTime(), "fff", "hello sister, now is: " + new Date(), recordHeaders1);
        ProducerRecord<String, String> producerRecord2 = new ProducerRecord<>("ttl", 0,
                new Date().getTime(), "fff", "hello sister, now is: " + new Date(), recordHeaders2);
        ProducerRecord<String, String> producerRecord3 = new ProducerRecord<>("ttl", 0,
                new Date().getTime(), "fff", "hello sister, now is: " + new Date(), recordHeaders3);


        Future<RecordMetadata> future = kafkaProducer.send(producerRecord1);
        Future<RecordMetadata> future2 = kafkaProducer.send(producerRecord2);
        Future<RecordMetadata> future3 = kafkaProducer.send(producerRecord3);

        try {
            future.get();
            future2.get();
            future3.get();
        } catch (InterruptedException | ExecutionException e) {
            e.printStackTrace();
        }
        System.out.println("ok");

        kafkaProducer.close();
    }
}
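Both classes call a BytesUtils helper that the original post does not show. A minimal sketch, assuming a plain big-endian long encoding via java.nio.ByteBuffer (the class name and the two method names come from the calls above; the implementation itself is an assumption):

package com.cisdi.dsp.modules.metaAnalysis.rest.kafka2023;

import java.nio.ByteBuffer;

// Helper assumed by the producer above and the interceptor below: encodes a
// long as 8 big-endian bytes and decodes it back. This is a sketch, not
// necessarily the original author's implementation.
public class BytesUtils {
    public static byte[] longToBytes(long value) {
        return ByteBuffer.allocate(Long.BYTES).putLong(value).array();
    }

    public static long bytesToLong(byte[] bytes) {
        return ByteBuffer.wrap(bytes).getLong();
    }
}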

2. Define the consumer interceptor:

package com.cisdi.dsp.modules.metaAnalysis.rest.kafka2023;

import org.apache.kafka.clients.consumer.ConsumerInterceptor;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Header;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TtlConsumerInterceptor implements ConsumerInterceptor<String, String> {
    @Override
    public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {
        long now = System.currentTimeMillis();
        Map<TopicPartition, List<ConsumerRecord<String, String>>> newRecords = new HashMap<>();
        for (TopicPartition tp : records.partitions()) {
            List<ConsumerRecord<String, String>> tpRecords = records.records(tp);
            List<ConsumerRecord<String, String>> newTpRecords = new ArrayList<>();
            for (ConsumerRecord<String, String> record : tpRecords) {
                long ttl = -1;
                for (Header header : record.headers()) {
                    if (header.key().equals("ttl")) {
                        ttl = BytesUtils.bytesToLong(header.value());
                    }
                }
                // Keep the record if it carries no ttl header (ttl stays -1),
                // or if its TTL has not yet elapsed; otherwise drop it as expired.
                if (ttl <= 0 || now - record.timestamp() < ttl * 1000) {
                    newTpRecords.add(record);
                }
            }
            if (!newTpRecords.isEmpty()) {
                newRecords.put(tp, newTpRecords);
            }
        }
        return new ConsumerRecords<>(newRecords);
    }


    @Override
    public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {

    }

    @Override
    public void close() {

    }

    @Override
    public void configure(Map<String, ?> configs) {

    }
}
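Two details of onConsume are worth noting. A record without a ttl header leaves ttl at -1 and always passes the filter. And because the interceptor runs inside poll() after the fetch, filtered records still advance the consumer's position: their offsets get committed along with everything else, so an expired message is skipped for good rather than redelivered.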

3. Define the consumer and configure the interceptor above

package com.cisdi.dsp.modules.metaAnalysis.rest.kafka2023;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

public class KafkaTest27 {

    private static Properties getProperties(){
        Properties properties=new Properties();

        properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"xx.xx.xx.xx:9092");
        properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG,"testGroup");
        properties.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, TtlConsumerInterceptor.class.getName());
        return properties;
    }
    public static void main(String[] args) {

        KafkaConsumer<String, String> myConsumer = new KafkaConsumer<>(getProperties());
        String topic = "ttl";
        myConsumer.subscribe(Arrays.asList(topic));

        while (true) {
            ConsumerRecords<String, String> consumerRecords = myConsumer.poll(Duration.ofMillis(5000));
            for (ConsumerRecord<String, String> record : consumerRecords) {
                System.out.println(record.value());
                System.out.println("record offset is: " + record.offset());
            }
        }
    }
}
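To see what survives the interceptor, the loop body above can also print each record's ttl header. A small drop-in sketch for the poll loop (Header is org.apache.kafka.common.header.Header, and BytesUtils is the hypothetical helper sketched earlier):

            for (ConsumerRecord<String, String> record : consumerRecords) {
                // lastHeader returns the most recently added header with this key, or null
                Header ttlHeader = record.headers().lastHeader("ttl");
                long ttl = (ttlHeader == null) ? -1 : BytesUtils.bytesToLong(ttlHeader.value());
                System.out.println("offset=" + record.offset() + ", ttl=" + ttl + "s, value=" + record.value());
            }

With the TTLs produced by KafkaTest26 (1s, 30s, 60s), a consumer that polls within 30 seconds of production should print only the 30-second and 60-second records; the 1-second record is dropped by the interceptor.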