Flink: consuming Kafka data starting from a specified time

Start consuming from Kafka at a given timestamp, with a separate starting timestamp per topic.
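
For the common case of a single timestamp shared by all topics, the connector's built-in `OffsetsInitializer.timestamp(...)` is already enough. Below is a minimal sketch of the standard `KafkaSource` wiring with it; the broker address, topic, group id, and timestamp are placeholders. The custom initializer that follows generalizes this to one timestamp per topic.

```java
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class TimestampStartDemo {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Built-in initializer: every subscribed partition starts at the first
        // record whose timestamp is >= the given epoch-millisecond value.
        long startMillis = 1700000000000L; // placeholder: desired start time
        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers("localhost:9092") // placeholder
                .setTopics("demo-topic")               // placeholder
                .setGroupId("demo-group")              // placeholder
                .setStartingOffsets(OffsetsInitializer.timestamp(startMillis))
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        env.fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-source").print();
        env.execute("start-from-timestamp");
    }
}
```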

```java
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
// The shaded paths below match the flink-sql-connector-kafka uber jar; with the plain
// flink-connector-kafka dependency, import org.apache.kafka.clients.consumer.OffsetResetStrategy
// and org.apache.kafka.common.TopicPartition instead.
import org.apache.flink.kafka.shaded.org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.flink.kafka.shaded.org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;

/**
 * OffsetsInitializer that supports a per-topic starting consumption timestamp.
 */
public class KafkaOffsetsInitializer implements OffsetsInitializer {

    private static final long serialVersionUID = 1L;

    // static so the Logger is not dragged into Java serialization
    // (OffsetsInitializer instances are serialized with the job graph)
    private static final Logger logger = LoggerFactory.getLogger(KafkaOffsetsInitializer.class);

    /**
     * key: topic, value: starting consumption timestamp in epoch milliseconds
     */
    private final Map<String, Long> topicStartingTimestamps;
    private final ParameterTool parameters;

    /**
     * @param topicStartingTimestamps per-topic starting timestamps in epoch milliseconds
     * @param parameters              job parameters (stored for extension, unused here)
     */
    public KafkaOffsetsInitializer(Map<String, Long> topicStartingTimestamps, ParameterTool parameters) {
        this.topicStartingTimestamps = topicStartingTimestamps;
        this.parameters = parameters;
    }

    @Override
    public Map<TopicPartition, Long> getPartitionOffsets(Collection<TopicPartition> partitions,
                                                         PartitionOffsetsRetriever partitionOffsetsRetriever) {
        // starting timestamp and resolved initial offset per partition
        Map<TopicPartition, Long> startingTimestamps = new HashMap<>();
        Map<TopicPartition, Long> initialOffsets = new HashMap<>();

        // committed offsets of the consumer group
        Map<TopicPartition, Long> committedOffsets = partitionOffsetsRetriever.committedOffsets(partitions);

        // beginningOffsets: the first available offset of each given partition
        Map<TopicPartition, Long> beginningOffsets = partitionOffsetsRetriever.beginningOffsets(partitions);
        // endOffsets: the end offset of each given partition
        Map<TopicPartition, Long> endOffsets = partitionOffsetsRetriever.endOffsets(partitions);

        final long now = System.currentTimeMillis();
        partitions.forEach(tp -> {
            // starting timestamp for this topic, as loaded (for example from Redis) into the map
            Long startingTimestamp = topicStartingTimestamps.get(tp.topic());
            if (startingTimestamp == null) {
                // no timestamp configured: fall back to the job start time
                startingTimestamp = now;
                logger.info("no timestamp found in Redis, topic:{}, partition:{}, using current time:{}, {}", tp.topic(), tp.partition(), now, new Date(now));
            }
            logger.info("resolved starting timestamp, topic:{}, partition:{}, timestamp:{}, {}", tp.topic(), tp.partition(), startingTimestamp, new Date(startingTimestamp));
            startingTimestamps.put(tp, startingTimestamp);
        });
        partitionOffsetsRetriever.offsetsForTimes(startingTimestamps).forEach((tp, offsetMetadata) -> {
            // default both candidates to the earliest available offset
            long offsetForTime = beginningOffsets.get(tp);
            long offsetForCommit = beginningOffsets.get(tp);
            if (offsetMetadata != null) {
                offsetForTime = offsetMetadata.offset();
                logger.info("offset resolved from timestamp, topic:{}, partition:{}, offset:{}", tp.topic(), tp.partition(), offsetForTime);
            }

            Long committedOffset = committedOffsets.get(tp);
            if (committedOffset != null) {
                offsetForCommit = committedOffset;
                logger.info("offset resolved from committed offsets, topic:{}, partition:{}, offset:{}", tp.topic(), tp.partition(), offsetForCommit);
            }
            logger.info("starting offset set, topic:{}, partition:{}, offset:{}, endOffset:{}", tp.topic(), tp.partition(), Math.max(offsetForTime, offsetForCommit), endOffsets.get(tp));
            // take the larger of the timestamp-based offset and the committed offset,
            // so a fresh group still honors the timestamp instead of starting from the beginning
            initialOffsets.put(tp, Math.max(offsetForTime, offsetForCommit));
        });
        return initialOffsets;
    }

    @Override
    public OffsetResetStrategy getAutoOffsetResetStrategy() {
        // NONE: fail fast if a resolved offset is out of range instead of silently resetting
        return OffsetResetStrategy.NONE;
    }
}
```
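
A minimal usage sketch, wiring the custom initializer into a `KafkaSource`. The timestamp map is hard-coded here for illustration (the original setup loads it from Redis); the broker address, topics, group id, and timestamps are placeholders.

```java
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import java.util.HashMap;
import java.util.Map;

public class KafkaOffsetsInitializerJob {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        ParameterTool parameters = ParameterTool.fromArgs(args);

        // per-topic starting timestamps; hard-coded here, loaded from Redis in the original setup
        Map<String, Long> topicStartingTimestamps = new HashMap<>();
        topicStartingTimestamps.put("topic-a", 1700000000000L); // placeholder values
        topicStartingTimestamps.put("topic-b", 1700003600000L);

        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers("localhost:9092") // placeholder
                .setTopics("topic-a", "topic-b")
                .setGroupId("per-topic-timestamp-group")
                .setStartingOffsets(new KafkaOffsetsInitializer(topicStartingTimestamps, parameters))
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        env.fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-source").print();
        env.execute("kafka-start-from-per-topic-timestamp");
    }
}
```

Note that `setStartingOffsets` only takes effect when the job starts without saved state; when restoring from a checkpoint or savepoint, the source resumes from the offsets stored in state.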