Cumulative Windows (CUMULATE) in Flink SQL

eventTime
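A cumulative window is declared as CUMULATE(TABLE source, DESCRIPTOR(time_attr), step, size). All windows in one cycle share the same start, and the window end grows by one step at a time until it reaches the maximum size, so the query emits a running total every step. With a 10-second step and a 60-second size, the 12:12 minute is sliced into [12:12:00, 12:12:10), [12:12:00, 12:12:20), ..., [12:12:00, 12:13:00); a record stamped 12:12:43 is counted in every slice whose end lies beyond it (those ending 12:12:50 and 12:13:00).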

package com.bigdata.day08;


import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;


public class _05_flinkSql_Cumulate_eventTime {
    /**
     * Cumulative window + eventTime
     * 1-minute window, fired every 10 seconds, with a 3-second watermark
     * Data format:
     * {"username":"zs","price":20,"event_time":"2023-07-18 12:12:43.000"}
     * {"username":"zs","price":20,"event_time":"2023-07-18 12:12:53.000"}
     * {"username":"zs","price":20,"event_time":"2023-07-18 12:13:03.000"}
     * {"username":"zs","price":20,"event_time":"2023-07-18 12:13:13.000"}
     */

    public static void main(String[] args) throws Exception {

        //1. env - set up the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);

        //2. create the Kafka source table
        tenv.executeSql("CREATE TABLE table1 (\n" +
                "  `username` String,\n" +
                "  `price` int,\n" +
                "  `event_time` TIMESTAMP(3),\n" +
                "   watermark for event_time as event_time - interval '3' second\n" +
                ") WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 'topic1',\n" +
                "  'properties.bootstrap.servers' = 'bigdata01:9092,bigdata02:9092,bigdata03:9092',\n" +
                "  'properties.group.id' = 'testGroup1',\n" +
                "  'scan.startup.mode' = 'latest-offset',\n" +
                "  'format' = 'json'\n" +
                ")");
        //3. run the cumulative-window aggregation in SQL

        tenv.executeSql("select \n" +
                "   window_start,\n" +
                "   window_end,\n" +
                "   username,\n" +
                "   count(1) zongNum,\n" +
                "   sum(price) totalMoney \n" +
                "   from table(CUMULATE(TABLE table1, DESCRIPTOR(event_time), INTERVAL '10' second ,INTERVAL '60' second))\n" +
                "group by window_start,window_end,username").print();
        //4. sink - the .print() above already serves as the sink



        //5. execute - executeSql(...).print() above submits the job and blocks on the
        //   unbounded stream, so control never actually reaches this call
        env.execute();
    }
}
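Assuming the four sample records arrive in order, each slice fires once the watermark (max event time minus 3 seconds) reaches its end, and slices containing no rows produce no output. A sketch of the expected rows (dates abbreviated):

window_start  window_end  username  zongNum  totalMoney
12:12:00      12:12:50    zs        1        20
12:12:00      12:13:00    zs        2        40
12:13:00      12:13:10    zs        1        20

The 12:12:53 record pushes the watermark to 12:12:50 and fires the first slice; the 12:13:03 record pushes it to 12:13:00 and seals the full 12:12 minute.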

processTime
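This variant differs from the previous one only in the time attribute: event_time is now a computed column defined with proctime(), so windows are driven by the operator's wall clock. No watermark is declared, slices fire every 10 seconds of machine time regardless of the data, and results are not reproducible across runs because they depend on when records happen to arrive.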

package com.bigdata.day08;


import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;


public class _06_flinkSql_Cumulate_processTime {
    /**
     * Cumulative window + processTime
     * 1-minute window, fired every 10 seconds
     * Data format:
     * {"username":"zs","price":20}
     * {"username":"lisi","price":15}
     * {"username":"lisi","price":20}
     * {"username":"zs","price":20}
     * {"username":"zs","price":20}
     * {"username":"zs","price":20}
     * {"username":"zs","price":20}
     */

    public static void main(String[] args) throws Exception {

        //1. env - set up the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);

        //2. create the Kafka source table
        tenv.executeSql("CREATE TABLE table1 (\n" +
                "  `username` String,\n" +
                "  `price` int,\n" +
                "  `event_time` as proctime()\n" +
                ") WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 'topic1',\n" +
                "  'properties.bootstrap.servers' = 'bigdata01:9092,bigdata02:9092,bigdata03:9092',\n" +
                "  'properties.group.id' = 'testGroup1',\n" +
                "  'scan.startup.mode' = 'latest-offset',\n" +
                "  'format' = 'json'\n" +
                ")");
        //3. run the cumulative-window aggregation in SQL

        tenv.executeSql("select \n" +
                "   window_start,\n" +
                "   window_end,\n" +
                "   username,\n" +
                "   count(1) zongNum,\n" +
                "   sum(price) totalMoney \n" +
                "   from table(CUMULATE(TABLE table1, DESCRIPTOR(event_time), INTERVAL '10' second ,INTERVAL '60' second))\n" +
                "group by window_start,window_end,username").print();
        //4. sink - the .print() above already serves as the sink



        //5. execute - executeSql(...).print() above submits the job and blocks on the
        //   unbounded stream, so control never actually reaches this call
        env.execute();
    }
}

Top-N example

Requirement: within each one-minute window, find the Top 3 pages by click count.

Approach: tumbling window (1 minute) + eventTime + 3-second watermark

Hive SQL

with t1 as (
    select page_id, sum(clicks) totalSum
    from table1
    group by page_id
), t2 as (
    select page_id, totalSum,
           row_number() over (order by totalSum desc) px
    from t1
)
select * from t2 where px <= 3
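The Hive version can only produce one global Top 3, because a batch table has no window to scope the ranking to. The streaming version keeps the same two-CTE shape but adds window_start and window_end to both the aggregation and the row_number() partition, turning it into a Top 3 per minute.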


Flink SQL

with t1 as (
    select window_start, window_end, page_id, sum(clicks) totalSum
    from table(tumble(table table1, DESCRIPTOR(ts), INTERVAL '60' second))
    group by window_start, window_end, page_id
), t2 as (
    select window_start, window_end, page_id, totalSum,
           row_number() over (partition by window_start, window_end order by totalSum desc) px
    from t1
)
select * from t2 where px <= 3
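Flink recognizes a row_number() over (partition by window_start, window_end order by ...) on top of a windowed aggregate, followed by a filter on the rank, as the Window Top-N pattern: the ranking for each window is computed once, when that window closes, and emitted as append-only rows rather than a continuously updating ranking.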


Data format:
{"ts": "2023-09-05 12:00:10", "page_id": 1, "clicks": 100}
{"ts": "2023-09-05 12:00:20", "page_id": 2, "clicks": 90}
{"ts": "2023-09-05 12:00:30", "page_id": 3, "clicks": 110}
{"ts": "2023-09-05 12:00:40", "page_id": 4, "clicks": 23}
{"ts": "2023-09-05 12:00:50", "page_id": 5, "clicks": 456}
{"ts": "2023-09-05 12:00:55", "page_id": 5, "clicks": 456}
// triggering record
{"ts": "2023-09-05 12:01:03", "page_id": 5, "clicks": 456}

package com.bigdata.day08;


import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;


public class _07_flinkSql_topN {


    public static void main(String[] args) throws Exception {

        //1. env - set up the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);

        //2. create the Kafka source table

        tenv.executeSql("CREATE TABLE table1 (\n" +
                "    `page_id` INT,\n" +
                "    `clicks` INT,\n" +
                "  `ts` TIMESTAMP(3) ,\n" +
                "   watermark for ts as ts - interval '3' second \n" +
                ") WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 'topic1',\n" +
                "  'properties.bootstrap.servers' = 'bigdata01:9092,bigdata02:9092,bigdata03:9092',\n" +
                "  'properties.group.id' = 'testGroup1',\n" +
                "  'scan.startup.mode' = 'latest-offset',\n" +
                "  'format' = 'json'\n" +
                ")");

        tenv.executeSql("with t1 as (\n" +
                "\tselect window_start,window_end,page_id,sum(clicks)  totalSum  from table(tumble(table table1,DESCRIPTOR(ts), INTERVAL '60' second )) group by window_start,window_end,page_id\n" +
                "), t2 as(\n" +
                "\tselect window_start,window_end,page_id,totalSum,row_number() over (partition by window_start,window_end order by totalSum desc) px from t1 \n" +
                ") select  * from t2 where px <=3").print();
        //4. sink - the .print() above already serves as the sink


        //5. execute - executeSql(...).print() above submits the job and blocks on the
        //   unbounded stream, so control never actually reaches this call
        env.execute();
    }
}
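A quick sanity check on the sample data, assuming all six records land in the [12:00:00, 12:01:00) window and the 12:01:03 record closes it: page 5 totals 456 + 456 = 912, page 3 has 110, and page 1 has 100, so the expected output is:

window_start         window_end           page_id  totalSum  px
2023-09-05 12:00:00  2023-09-05 12:01:00  5        912       1
2023-09-05 12:00:00  2023-09-05 12:01:00  3        110       2
2023-09-05 12:00:00  2023-09-05 12:01:00  1        100       3

Pages 2 (90 clicks) and 4 (23 clicks) fall outside px <= 3; the triggering record itself belongs to the next window and only surfaces if a later event pushes the watermark past 12:02:00.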