Flink Quick Start

Batch Processing

Maven pom.xml configuration

xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>org.example</groupId>
    <artifactId>com.atguigu</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <flink.version>1.17.0</flink.version>
    </properties>


    <dependencies>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-java</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-clients</artifactId>
            <version>${flink.version}</version>
        </dependency>
    </dependencies>

</project>
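
Two dependencies are enough here: flink-streaming-java provides the DataStream API, and flink-clients is required to actually launch jobs from the IDE. Since Flink 1.15 these artifacts no longer carry a Scala-version suffix.

The file-based examples below read input/word.txt relative to the project root. The original does not show its contents, so the sample outputs later assume this hypothetical file:

text
hello flink
hello world
hello java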

Writing the WordCount code in Java

Based on the DataSet API (outdated, not recommended)

The DataStream API is used from here on; since Flink 1.12 it can also run bounded jobs with batch semantics, as sketched below.
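
A minimal sketch (not part of the original code; the class name is illustrative): the same streaming environment can execute bounded input in batch mode via setRuntimeMode, or the mode can be set at submission time with bin/flink run -Dexecution.runtime-mode=BATCH.

java
package com.atguigu.wc;

import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class BatchModeDemo {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Run this DataStream job with batch semantics on bounded input (Flink 1.12+)
        env.setRuntimeMode(RuntimeExecutionMode.BATCH);
        env.fromElements("hello world", "hello flink").print();
        env.execute();
    }
}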

java
package com.atguigu.wc;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.AggregateOperator;
import org.apache.flink.api.java.operators.DataSource;
import org.apache.flink.api.java.operators.FlatMapOperator;
import org.apache.flink.api.java.operators.UnsortedGrouping;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

public class WordCountBatchDemo {
    public static void main(String[] args) throws Exception {
        // 1. Create the execution environment
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        // 2. Read the data from a file
        DataSource<String> lineDS = env.readTextFile("input/word.txt");
        // 3. Split each line and map to (word, 1)
        FlatMapOperator<String, Tuple2<String, Integer>> wordAndOne = lineDS.flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public void flatMap(String value, Collector<Tuple2<String, Integer>> out) throws Exception {
                // TODO 3.1 Split the line into words on spaces
                String[] words = value.split(" ");
                // TODO 3.2 Map each word to (word, 1)
                for (String word : words) {
                    Tuple2<String, Integer> wordTuple2 = Tuple2.of(word, 1);
                    // TODO 3.3 Use the Collector to emit the tuple downstream
                    out.collect(wordTuple2);
                }
            }
        });
        // 4. Group by word
        UnsortedGrouping<Tuple2<String, Integer>> wordAndOneGroupBy = wordAndOne.groupBy(0);
        // 5. Aggregate within each group
        AggregateOperator<Tuple2<String, Integer>> sum = wordAndOneGroupBy.sum(1);
        // 6. Print the result
        sum.print();
    }
}
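
With the hypothetical word.txt above, the batch job prints one final count per word; the order of the groups is not guaranteed:

text
(flink,1)
(world,1)
(hello,3)
(java,1)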

Bounded stream processing

java
package com.atguigu.wc;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

public class WordCountStreamDemo {
    public static void main(String[] args) throws Exception {
        // TODO 1. Create the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // TODO 2. Read the data
        DataStreamSource<String> lineDS = env.readTextFile("input/word.txt");
        // TODO 3. Process the data: split / map / key-by / aggregate
        // TODO 3.1 Split and map to (word, 1)
        SingleOutputStreamOperator<Tuple2<String, Integer>> wordAndOneDS = lineDS.flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public void flatMap(String value, Collector<Tuple2<String, Integer>> out) throws Exception {
                // Split the line
                String[] words = value.split(" ");
                for (String word : words) {
                    // Map to the tuple (word, 1)
                    Tuple2<String, Integer> wordAndOne = Tuple2.of(word, 1);
                    // Emit it downstream via the Collector
                    out.collect(wordAndOne);
                }
            }
        });
        // TODO 3.2 Key the stream by word
        KeyedStream<Tuple2<String, Integer>, String> wordAndOneKS = wordAndOneDS.keyBy(
                new KeySelector<Tuple2<String, Integer>, String>() {
            @Override
            public String getKey(Tuple2<String, Integer> value) throws Exception {
                return value.f0;
            }
        });
        // TODO 3.3 Aggregate within each key
        SingleOutputStreamOperator<Tuple2<String, Integer>> sumDS = wordAndOneKS.sum(1);

        // TODO 4. Print the result
        sumDS.print();

        // TODO 5. Trigger execution (analogous to ssc.start() at the end of a Spark Streaming job)
        env.execute();
    }
}
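
Two differences from the batch version: env.execute() is mandatory, because a DataStream program only declares a dataflow graph and nothing runs until it is called; and the job emits an updated count for every record, with print() prefixing each line with the index of the parallel subtask that produced it. With the hypothetical input above, the output might look like this (prefixes depend on parallelism and key hashing):

text
3> (hello,1)
9> (flink,1)
3> (hello,2)
5> (world,1)
3> (hello,3)
4> (java,1)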

Unbounded stream processing

Event-driven

java
package com.atguigu.wc;

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

public class WordCountSocketStream {
    public static void main(String[] args) throws Exception {
        // TODO 1. Create the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
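        // TODO 2. Read text lines from a socket source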
        DataStreamSource<String> socketDS = env.socketTextStream("hadoop102", 7777);
        SingleOutputStreamOperator<Tuple2<String, Integer>> sum = socketDS
                .flatMap((String value, Collector<Tuple2<String, Integer>> out) -> {
                            // Split the line
                            String[] words = value.split(" ");
                            for (String word : words) {
                                // Map to the tuple (word, 1)
                                // Emit it downstream via the Collector
                                out.collect(Tuple2.of(word, 1));
                            }
                        }
                )
                .returns(Types.TUPLE(Types.STRING, Types.INT))
                .keyBy(value -> value.f0)
                .sum(1);
        sum.print();
        env.execute();
    }
}
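
To test this, start a text server on the hadoop102 host before launching the job, e.g. with nc -lk 7777, then type lines into the nc session and watch the counts update one record at a time. Also note the .returns(Types.TUPLE(Types.STRING, Types.INT)) call: Java erases the generic types of lambdas at compile time, so Flink cannot infer the flatMap output type, and the job would fail at startup without this explicit hint. The anonymous-class versions above do not need it, because their type information survives compilation.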

Event-triggered

Each record is processed the moment it arrives; unlike Spark Streaming's micro-batches, Flink handles the stream one event at a time.
