Flink example: reading an entire large MongoDB collection and aggregating statistics by group

Overview

  1. Read tens of millions of records from a MongoDB database.
  2. Aggregate the data by a chosen field.
  3. Write the aggregated results to Redis to obtain the final statistics.

Maven

The POM snippet below assumes that ${flink.version} is defined in the project's <properties> and points at a 1.18.x release, matching flink-connector-mongodb 1.1.0-1.18.

xml
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-mongodb</artifactId>
            <version>1.1.0-1.18</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-base</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-scala_2.12</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-java</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-core</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-java</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-clients</artifactId>
            <version>${flink.version}</version>
        </dependency>   
        <dependency>
            <groupId>org.apache.bahir</groupId>
            <artifactId>flink-connector-redis_2.11</artifactId>
            <version>1.1.0</version>
        </dependency>     

Code

RedisExampleMapper

java
package com.wfg.flink.connector.redis;

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommand;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommandDescription;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisMapper;

/**
 * Writes (name, count) tuples into a Redis hash: every result becomes a
 * field/value pair inside the hash "HASH_NAME".
 *
 * @author wfg
 */
public class RedisExampleMapper implements RedisMapper<Tuple2<String, Integer>> {

    @Override
    public RedisCommandDescription getCommandDescription() {
        // Use HSET; "HASH_NAME" is the key of the hash that holds all counts
        return new RedisCommandDescription(RedisCommand.HSET, "HASH_NAME");
    }

    @Override
    public String getKeyFromData(Tuple2<String, Integer> data) {
        // The user name becomes the hash field
        return data.f0;
    }

    @Override
    public String getValueFromData(Tuple2<String, Integer> data) {
        // The running count becomes the hash value
        return data.f1.toString();
    }
}
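
Once the job has finished, the aggregated counts live in the Redis hash HASH_NAME and can be read back with any Redis client. A minimal verification sketch using Jedis; the host and port (127.0.0.1:6379) are assumptions matching the sink configuration further down:

java
package com.wfg.flink.connector.redis;

import redis.clients.jedis.Jedis;

import java.util.Map;

/**
 * Reads back the per-name counts written by the Flink job.
 * Host/port and the hash key "HASH_NAME" are assumed to match the mapper above.
 */
public class RedisResultReader {

    public static void main(String[] args) {
        try (Jedis jedis = new Jedis("127.0.0.1", 6379)) {
            // Fetch all field/value pairs of the result hash
            Map<String, String> counts = jedis.hgetAll("HASH_NAME");
            counts.forEach((name, count) -> System.out.println(name + " -> " + count));
        }
    }
}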

Counting visits per user name

java
package com.wfg.flink.connector.mongodb;


import cn.hutool.core.date.DateUtil;
import com.alibaba.fastjson2.JSONObject;
import com.wfg.flink.connector.dto.KafkaPvDto;
import com.wfg.flink.connector.redis.RedisExampleMapper;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.connector.mongodb.source.MongoSource;
import org.apache.flink.connector.mongodb.source.enumerator.splitter.PartitionStrategy;
import org.apache.flink.connector.mongodb.source.reader.deserializer.MongoDeserializationSchema;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.redis.RedisSink;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig;
import org.bson.BsonDocument;

import static com.wfg.flink.connector.constants.Constants.MONGO_TEST_PV_COLLECTION;

/**
 * Counts visits per user name: reads every document from MongoDB, keys the
 * stream by user name, sums a count per key, and writes the totals to Redis.
 *
 * @author wfg
 */
public class MongoAllNameRedisCounts {

    public static void main(String[] args) throws Exception {
        String startTime = DateUtil.format(DateUtil.date(), "yyyy-MM-dd HH:mm:ss");
        System.out.println("StartTime:" + startTime);
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Enable checkpointing with a 30s interval
        env.enableCheckpointing(30000);
        // Exactly-once checkpoint semantics
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Minimum pause between two checkpoints
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        // Allow at most one concurrent checkpoint
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        // Use the heap-based HashMapStateBackend as the state backend
        env.setStateBackend(new HashMapStateBackend());
        env.setParallelism(10);

        // Configure the MongoDB source
        MongoSource<String> mongoSource = MongoSource.<String>builder()
                .setUri("mongodb://root:123456@127.0.0.1:27017,127.0.0.1:27018,127.0.0.1:27019/admin?replicaSet=rs0&authSource=admin")
                .setDatabase("sjzz")
                .setCollection(MONGO_TEST_PV_COLLECTION)
                .setFetchSize(2048)
//                .setLimit(1000)
                .setNoCursorTimeout(true)
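                // SINGLE reads the whole collection as one split, so the source itself is not
                // parallelized; for very large collections a strategy such as SAMPLE or
                // SPLIT_VECTOR lets the connector spread the read across parallel readers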
                .setPartitionStrategy(PartitionStrategy.SINGLE)
                .setPartitionSize(MemorySize.ofMebiBytes(64))
//                .setSamplesPerPartition(10)
                .setDeserializationSchema(new MongoDeserializationSchema<>() {
                    @Override
                    public String deserialize(BsonDocument document) {
                        return document.toJson();
                    }

                    @Override
                    public TypeInformation<String> getProducedType() {
                        return BasicTypeInfo.STRING_TYPE_INFO;
                    }
                })
                .build();
        // Create the MongoDB data stream
        DataStream<String> sourceStream = env.fromSource(mongoSource, WatermarkStrategy.noWatermarks(), "Mongo Source");
        // Extract the user name as the key and emit a count of 1 per record
        DataStream<Tuple2<String, Integer>> nameCountStream = sourceStream
                .map(new MapFunction<String, Tuple2<String, Integer>>() {
                    @Override
                    public Tuple2<String, Integer> map(String value) throws Exception {
                        KafkaPvDto data = JSONObject.parseObject(value, KafkaPvDto.class);
                        return Tuple2.of(data.getUserName(), 1);
                    }
                })
                .keyBy(value -> value.f0)
                .reduce(new ReduceFunction<>() {
                    @Override
                    public Tuple2<String, Integer> reduce(Tuple2<String, Integer> value1, Tuple2<String, Integer> value2) {
                        return new Tuple2<>(value1.f0, value1.f1 + value2.f1);
                    }
                });
        // Sink the per-name counts into Redis via the bahir Redis connector
        FlinkJedisPoolConfig conf = new FlinkJedisPoolConfig.Builder().setHost("127.0.0.1").build();
        RedisSink<Tuple2<String, Integer>> sink = new RedisSink<>(conf, new RedisExampleMapper());
        nameCountStream.addSink(sink);
        // Run the job
        env.execute("Flink MongoDB Name Count");
        System.out.println("-----------------------------------");
        System.out.println("startTime: " + startTime);
        System.out.println("EndTime: " + DateUtil.format(DateUtil.date(), "yyyy-MM-dd HH:mm:ss"));
    }
}
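
The job above imports KafkaPvDto and MONGO_TEST_PV_COLLECTION, which are not shown in this post. MONGO_TEST_PV_COLLECTION is simply a String constant in com.wfg.flink.connector.constants.Constants holding the collection name. A minimal sketch of the DTO follows; only userName is actually used by the job, and any other field name is an assumption:

java
package com.wfg.flink.connector.dto;

/**
 * Minimal DTO matching the JSON documents read from MongoDB.
 * Only userName is required by the counting job; createTime is an assumed extra field.
 */
public class KafkaPvDto {
    private String userName;
    private String createTime;

    public String getUserName() {
        return userName;
    }

    public void setUserName(String userName) {
        this.userName = userName;
    }

    public String getCreateTime() {
        return createTime;
    }

    public void setCreateTime(String createTime) {
        this.createTime = createTime;
    }
}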

Note:

Bulk test data generation reference: batch data generation (a minimal sketch follows below).

Middleware deployment reference:
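
For local testing, the kind of bulk data this job reads can be produced with the MongoDB Java sync driver. A minimal sketch, assuming the same connection string and database as above; the collection name "test_pv" and the document fields are placeholders:

java
package com.wfg.flink.connector.mongodb;

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import org.bson.Document;

import java.util.ArrayList;
import java.util.List;

/**
 * Inserts simple PV documents in batches of 1,000 for local testing.
 * URI, database and collection name are placeholders, not the author's setup.
 */
public class MongoTestDataGenerator {

    public static void main(String[] args) {
        String uri = "mongodb://root:123456@127.0.0.1:27017/admin?authSource=admin";
        try (MongoClient client = MongoClients.create(uri)) {
            MongoCollection<Document> coll = client.getDatabase("sjzz").getCollection("test_pv");
            List<Document> batch = new ArrayList<>();
            for (int i = 0; i < 100_000; i++) {
                batch.add(new Document("userName", "user-" + (i % 1000))
                        .append("createTime", System.currentTimeMillis()));
                if (batch.size() == 1_000) {
                    // Flush a full batch to MongoDB
                    coll.insertMany(batch);
                    batch.clear();
                }
            }
            if (!batch.isEmpty()) {
                coll.insertMany(batch);
            }
        }
    }
}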
