Kafka官方提供的RoundRobinPartitioner出现奇偶数据不均匀

Kafka官方提供的RoundRobinPartitioner出现奇偶数据不均匀

参考:

https://www.cnblogs.com/cbc-onne/p/18140043

  1. 使用RoundRobinPartitioner
java 复制代码
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer;

import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * The "Round-Robin" partitioner - MODIFIED TO WORK PROPERLY WITH STICKY PARTITIONING (KIP-480).
 * <p>
 * This partitioning strategy can be used when the user wants to distribute writes to all
 * partitions equally, regardless of record key hash. Unlike the stock implementation, when the
 * producer opens a new batch ({@link #onNewBatch}), the partition that triggered the new batch is
 * re-enqueued and handed out again on the next {@link #partition} call, so a batch boundary does
 * not silently skip a partition (the cause of the odd/even imbalance in the stock partitioner).
 * <p>
 * Thread-safety: all shared state lives in concurrent collections, so this class is safe for use
 * by the producer's multiple sender/application threads.
 */
public class RoundRobinPartitioner implements Partitioner {
    private static final Logger LOGGER = LoggerFactory.getLogger(RoundRobinPartitioner.class);

    /** Per-topic monotonically increasing counter driving the round-robin rotation. */
    private final ConcurrentMap<String, AtomicInteger> topicCounterMap = new ConcurrentHashMap<>();

    /** Per-topic queue of partitions re-enqueued by {@link #onNewBatch} to be reused first. */
    private final ConcurrentMap<String, Queue<Integer>> topicPartitionQueueMap = new ConcurrentHashMap<>();

    @Override
    public void configure(Map<String, ?> configs) {
        // No configuration is required for this partitioner.
    }

    /**
     * Compute the partition for the given record.
     * <p>
     * A partition previously re-enqueued by {@link #onNewBatch} takes priority; otherwise the
     * next partition in round-robin order over the available partitions is chosen.
     *
     * @param topic      The topic name
     * @param key        The key to partition on (or null if no key)
     * @param keyBytes   serialized key to partition on (or null if no key)
     * @param value      The value to partition on or null
     * @param valueBytes serialized value to partition on or null
     * @param cluster    The current cluster metadata
     * @return the chosen partition id
     */
    @Override
    public int partition(
        String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        // Reuse a partition handed back by onNewBatch before advancing the round-robin counter,
        // so a batch boundary does not skip that partition's turn.
        Queue<Integer> partitionQueue = partitionQueueComputeIfAbsent(topic);
        Integer queuedPartition = partitionQueue.poll();
        if (queuedPartition != null) {
            LOGGER.trace("Partition chosen from queue: {}", queuedPartition);
            return queuedPartition;
        } else {
            List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
            int numPartitions = partitions.size();
            int nextValue = nextValue(topic);
            List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic);
            if (!availablePartitions.isEmpty()) {
                // toPositive guards against the counter wrapping negative on overflow.
                int part = Utils.toPositive(nextValue) % availablePartitions.size();
                int partition = availablePartitions.get(part).partition();
                LOGGER.trace("Partition chosen: {}", partition);
                return partition;
            } else {
                // No partitions are available, give a non-available partition.
                return Utils.toPositive(nextValue) % numPartitions;
            }
        }
    }

    /** Returns the next round-robin counter value for {@code topic}, creating the counter lazily. */
    private int nextValue(String topic) {
        AtomicInteger counter = topicCounterMap.computeIfAbsent(topic, k -> new AtomicInteger(0));
        return counter.getAndIncrement();
    }

    /** Returns the re-enqueue queue for {@code topic}, creating it lazily. */
    private Queue<Integer> partitionQueueComputeIfAbsent(String topic) {
        return topicPartitionQueueMap.computeIfAbsent(topic, k -> new ConcurrentLinkedQueue<>());
    }

    @Override
    public void close() {
        // No resources to release.
    }

    /**
     * Notifies the partitioner a new batch is about to be created. When using the sticky
     * partitioner, this method can change the chosen sticky partition for the new batch. Here the
     * previously selected partition is re-enqueued so it is reused by the next {@link #partition}
     * call instead of being skipped.
     *
     * @param topic         The topic name
     * @param cluster       The current cluster metadata
     * @param prevPartition The partition previously selected for the record that triggered a new
     *                      batch
     */
    @Override
    public void onNewBatch(String topic, Cluster cluster, int prevPartition) {
        LOGGER.trace("New batch so enqueuing partition {} for topic {}", prevPartition, topic);
        Queue<Integer> partitionQueue = partitionQueueComputeIfAbsent(topic);
        partitionQueue.add(prevPartition);
    }
}
相关推荐
Hello.Reader1 小时前
用 Kafka 打通实时数据总线Flink CDC Pipeline 的 Kafka Sink 实战
flink·kafka·linq
周杰伦_Jay2 小时前
【日志处理方案大比拼】 Filebeat+Kafka+Flink+Spark+ES+HDFS VS ELK/AOP/RocketMQ/大厂方案
flink·spark·kafka
回家路上绕了弯2 小时前
高并发订单去重:布隆过滤器过滤已存在订单号的实战方案
分布式·后端
Slow菜鸟4 小时前
Java后端常用技术选型 |(三)分布式篇
java·分布式
q***65695 小时前
Spring Boot集成Kafka:最佳实践与详细指南
spring boot·kafka·linq
大飞哥~BigFei7 小时前
RabbitMq消费延迟衰减重试实现思路
java·分布式·rabbitmq
小泊客17 小时前
使用讯飞星火 Spark X1-32K 打造本地知识助手
大数据·分布式·spark·大模型应用·本地知识助手
百***79461 天前
Spring集成kafka的最佳方式
spring·kafka·linq
Ace_31750887761 天前
京东关键字搜索接口逆向:从动态签名破解到分布式请求调度
分布式·python
❀͜͡傀儡师1 天前
使用DelayQueue 分布式延时队列,干掉定时任务!
java·分布式·delayqueue·spingboot