Writing Kafka data into Hive using Flink's HiveCatalog

java
package com.atguigu.flink.test_hk;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;

public class KafkaToHive3 {

    public static void main(String[] args) {

        System.setProperty("HADOOP_USER_NAME", "atguigu");

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        env.setParallelism(1);

        // Checkpointing is required: the streaming Hive sink only commits files when a checkpoint completes.
        env.enableCheckpointing(6000);

        // Use the Hive dialect so the DDL below is parsed as Hive SQL.
        tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);

        // Register the Hive catalog; the third argument is the directory containing hive-site.xml.
        HiveCatalog hiveCatalog = new HiveCatalog("hive", "default", "conf");
        tableEnv.registerCatalog("hive", hiveCatalog);

        // Create the target table in the Hive catalog.
        tableEnv.useCatalog("hive");

        String hiveSql = "CREATE TABLE IF NOT EXISTS hive_table7 (\n" +
                "  id int\n" +
                ")";

        tableEnv.executeSql(hiveSql);

//        tableEnv.sqlQuery("select * from hive_table7").execute().print();

        // Switch back to the default Flink catalog and dialect to create the Kafka source table.
        tableEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT);
        tableEnv.useCatalog("default_catalog");

        String readSql = " create table t2 (" +
                " id int " +
                ") WITH (" +
                " 'connector' = 'kafka', " +
                " 'topic' = 'topicA', " +
                " 'properties.bootstrap.servers' = 'hadoop102:9092', " +
                " 'properties.group.id' = 'flink_" + System.currentTimeMillis() + "', " +
                " 'scan.startup.mode' = 'latest-offset', " +
                " 'value.format' = 'csv' " +
                ")";
        tableEnv.executeSql(readSql);

//        tableEnv.sqlQuery("select * from t2").execute().print();

        // Switch to the Hive catalog again and submit the continuous INSERT job (executeSql submits it asynchronously).
        tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        tableEnv.useCatalog("hive");

        tableEnv.executeSql("INSERT INTO hive_table7 select id from default_catalog.default_database.t2");
    }
}
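
To smoke-test the pipeline, a few CSV rows can be pushed into topicA while the job above is running; after the next checkpoint completes they should be queryable in hive_table7. The class below is a minimal producer sketch for that purpose (the class name TopicAProducer and the loop of ten ids are illustrative only); it assumes the kafka-clients library is on the classpath, which normally arrives transitively with flink-connector-kafka, and it reuses the broker address and topic from the Kafka DDL above.

java

package com.atguigu.flink.test_hk;

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class TopicAProducer {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "hadoop102:9092");
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // t2 is declared with a single int column and 'value.format' = 'csv',
            // so each message value is just one integer.
            for (int id = 1; id <= 10; id++) {
                producer.send(new ProducerRecord<>("topicA", String.valueOf(id)));
            }
            producer.flush();
        }
    }
}

Each produced value then becomes one row of hive_table7 once the following checkpoint commits; the commented-out sqlQuery calls in the job can be re-enabled to check the data from within Flink as well.
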
XML (pom.xml)
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <artifactId>BigData230201</artifactId>
        <groupId>com.atguigu</groupId>
        <version>1.0-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>

    <artifactId>Flink</artifactId>

    <properties>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <flink.version>1.17.0</flink.version>
        <scala.version>2.12</scala.version>
    </properties>

    <!--
      Dependency scopes:
        1. compile (default) : available to both main and test code; included in the packaged artifact.
        2. test              : available to test code only; excluded from the packaged artifact.
        3. provided          : available to both main and test code; excluded from the packaged artifact.
                               Only effective at compile time; not available at runtime.
    -->


    <dependencies>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-java</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-clients</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
            <version>1.7.25</version>
            <scope>provided</scope>
        </dependency>

        <!-- lombok -->
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <version>1.18.26</version>
        </dependency>

        <!-- WEB UI -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-runtime-web</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>

        <!-- DataGen connector-->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-datagen</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>


        <!-- file connector -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-files</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>

        <!-- hadoop client -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>3.3.4</version>
            <scope>provided</scope>
        </dependency>

        <!-- kafka connector -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-kafka</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>

        <!-- fastJson -->
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.83</version>
            <scope>provided</scope>
        </dependency>

        <!-- jdbc connector -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-jdbc</artifactId>
            <version>3.1.0-1.17</version>
            <scope>provided</scope>
        </dependency>

        <!-- mysql connector -->
        <dependency>
            <groupId>com.mysql</groupId>
            <artifactId>mysql-connector-j</artifactId>
            <version>8.0.32</version>
            <scope>provided</scope>
        </dependency>

        <!-- RocksDB stateBackend -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-statebackend-rocksdb</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>

        <!-- changelog -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-statebackend-changelog</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>

        <!-- flink SQL -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-api-java-uber</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-runtime</artifactId>
            <version>${flink.version}</version>
        </dependency>


<!--        <dependency>-->
<!--            <groupId>org.apache.flink</groupId>-->
<!--            <artifactId>flink-table-planner-loader</artifactId>-->
<!--            <version>${flink.version}</version>-->
<!--        </dependency>-->


        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-planner_${scala.version}</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <!-- Flink-csv -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-csv</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <!-- Flink-json -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-json</artifactId>
            <version>${flink.version}</version>
        </dependency>

        <!-- hive connector -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-hive_2.12</artifactId>
            <version>${flink.version}</version>
            <scope>provided</scope>
        </dependency>

        <dependency>
            <groupId>org.apache.hive</groupId>
            <artifactId>hive-exec</artifactId>
            <version>3.1.3</version>
            <scope>provided</scope>
        </dependency>






    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>3.2.4</version>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <artifactSet>
                                <excludes>
                                    <exclude>com.google.code.findbugs:jsr305</exclude>
                                    <exclude>org.slf4j:*</exclude>
                                    <exclude>log4j:*</exclude>
                                </excludes>
                            </artifactSet>
                            <filters>
                                <filter>
                                    <!-- Do not copy the signatures in the META-INF folder.
                                    Otherwise, this might cause SecurityExceptions when using the JAR. -->
                                    <artifact>*:*</artifact>
                                    <excludes>
                                        <exclude>META-INF/*.SF</exclude>
                                        <exclude>META-INF/*.DSA</exclude>
                                        <exclude>META-INF/*.RSA</exclude>
                                    </excludes>
                                </filter>
                            </filters>
                            <transformers combine.children="append">
                                <transformer
                                        implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer">
                                </transformer>
                            </transformers>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>




</project>

hive-site.xml (this file goes in the conf directory passed to the HiveCatalog constructor in the job above)

XML
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- MySQL JDBC URL used by Hive to store its metadata -->
    <property>
        <name>javax.jdo.option.ConnectionURL</name>
        <value>jdbc:mysql://hadoop102:3306/metastore?useSSL=false&amp;useUnicode=true&amp;characterEncoding=UTF-8&amp;allowPublicKeyRetrieval=true</value>
    </property>

    <!-- Fully qualified class name of the MySQL JDBC driver used by Hive -->
    <property>
        <name>javax.jdo.option.ConnectionDriverName</name>
        <value>com.mysql.cj.jdbc.Driver</value>
    </property>

    <!-- MySQL username for the Hive metastore -->
    <property>
        <name>javax.jdo.option.ConnectionUserName</name>
        <value>root</value>
    </property>

    <!-- MySQL password for the Hive metastore -->
    <property>
        <name>javax.jdo.option.ConnectionPassword</name>
        <value>000000</value>
    </property>

    <property>
        <name>hive.metastore.warehouse.dir</name>
        <value>/user/hive/warehouse</value>
    </property>

    <property>
        <name>hive.metastore.schema.verification</name>
        <value>false</value>
    </property>

    <property>
        <name>hive.server2.thrift.port</name>
        <value>10000</value>
    </property>

    <property>
        <name>hive.server2.thrift.bind.host</name>
        <value>hadoop102</value>
    </property>

    <property>
        <name>hive.metastore.event.db.notification.api.auth</name>
        <value>false</value>
    </property>
    
    <property>
        <name>hive.cli.print.header</name>
        <value>true</value>
    </property>

    <property>
        <name>hive.cli.print.current.db</name>
        <value>true</value>
    </property>
    <!-- Location of the Spark jars (note: port 8020 must match the NameNode port) -->
    <property>
        <name>spark.yarn.jars</name>
        <value>hdfs://hadoop102:8020/spark-jars/*</value>
    </property>

    <!-- Hive execution engine -->
    <property>
        <name>hive.execution.engine</name>
        <value>spark</value>
    </property>

    <property>
        <name>metastore.storage.schema.reader.impl</name>
        <value>org.apache.hadoop.hive.metastore.SerDeStorageSchemaReader</value>
    </property>

    <property>
        <name>hive.metastore.uris</name>
        <value>thrift://hadoop102:9083</value>
    </property>



</configuration>