Reference: "FlinkSQL 1.12: Using DDL to Transfer Data from Kafka to MySQL, with Conditional Filtering before Writing" (CSDN blog by 旧城里的阳光)
Following that article, I wrote my own Kafka-to-MySQL example. Since I changed the table schema, I ran the code below:
```java
package org.test.flink;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

//TODO Use DDL to move data from Kafka to MySQL
public class FlinkSQL15_SQL_DDL_Kafka_MySQL {
    public static void main(String[] args) throws Exception {
        //1. Create the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        //2. Register the source table via DDL (legacy connector properties)
        tableEnv.executeSql("create table source_sensor(account_id BIGINT) " +
                "with (" +
                "'connector.type' = 'kafka'," +
                "'connector.version' = 'universal'," +
                "'connector.topic' = 'testtopic'," +
                "'connector.properties.bootstrap.servers' = '11.0.24.216:9092'," +
                "'connector.properties.group.id' = 'bigdata1109'," +
                "'format.type' = 'json'" +
                ")");
        Table table = tableEnv.sqlQuery("select * from source_sensor");

        //3. Register the sink table: MySQL
        tableEnv.executeSql("CREATE TABLE spend_report (\n" +
                "  account_id BIGINT,\n" +
                "  PRIMARY KEY (account_id) NOT ENFORCED) " +
                "with (" +
                "'connector' = 'jdbc'," +
                "'url' = 'jdbc:mysql://11.0.24.216:4306/test?serverTimezone=Asia/Shanghai&useUnicode=true&characterEncoding=utf8&useSSL=false'," +
                "'table-name' = 'spend_report'," +
                "'username' = 'root'," +
                "'password' = '123456'" +
                ")");

        //4. Query the Kafka data
        // Table source_sensor = tableEnv.from("source_sensor");
        // //5. Write the data to MySQL
        // source_sensor.executeInsert("sink_sensor");
        //
        table.executeInsert("sink_sensor");

        //6. Execute the job
        env.execute();
    }
}
```
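A side note on the two DDL styles mixed above: the source table uses the legacy connector.type properties while the sink already uses the new 'connector' = 'jdbc' options. In Flink 1.12 the legacy descriptor properties still work but are deprecated; below is a minimal sketch of an equivalent source DDL in the new style (same topic and broker as above; 'scan.startup.mode' is my own assumption, pick whichever offset behaviour you need):

```java
// Equivalent Kafka source DDL with the new-style connector options (Flink 1.11+).
// 'scan.startup.mode' = 'latest-offset' is an assumed choice, not from the original code.
tableEnv.executeSql("CREATE TABLE source_sensor (account_id BIGINT) " +
        "WITH (" +
        "'connector' = 'kafka'," +
        "'topic' = 'testtopic'," +
        "'properties.bootstrap.servers' = '11.0.24.216:9092'," +
        "'properties.group.id' = 'bigdata1109'," +
        "'scan.startup.mode' = 'latest-offset'," +
        "'format' = 'json'" +
        ")");
```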
It failed with the following error:
```
Exception in thread "main" org.apache.flink.table.api.TableException: Sink `default_catalog`.`default_database`.`sink_sensor` does not exists
at org.apache.flink.table.planner.delegation.PlannerBase.translateToRel(PlannerBase.scala:247)
at org.apache.flink.table.planner.delegation.PlannerBase.$anonfun$translate$1(PlannerBase.scala:159)
at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)
at scala.collection.Iterator.foreach(Iterator.scala:943)
at scala.collection.Iterator.foreach$(Iterator.scala:943)
at scala.collection.AbstractIterator.foreach(Iterator.scala:1431)
at scala.collection.IterableLike.foreach(IterableLike.scala:74)
at scala.collection.IterableLike.foreach$(IterableLike.scala:73)
at scala.collection.AbstractIterable.foreach(Iterable.scala:56)
at scala.collection.TraversableLike.map(TraversableLike.scala:286)
at scala.collection.TraversableLike.map$(TraversableLike.scala:279)
at scala.collection.AbstractTraversable.map(Traversable.scala:108)
at org.apache.flink.table.planner.delegation.PlannerBase.translate(PlannerBase.scala:159)
at org.apache.flink.table.api.internal.TableEnvironmentImpl.translate(TableEnvironmentImpl.java:1329)
at org.apache.flink.table.api.internal.TableEnvironmentImpl.executeInternal(TableEnvironmentImpl.java:676)
at org.apache.flink.table.api.internal.TableImpl.executeInsert(TableImpl.java:572)
at org.apache.flink.table.api.internal.TableImpl.executeInsert(TableImpl.java:554)
at org.test.flink.FlinkSQL15_SQL_DDL_Kafka_MySQL.main(FlinkSQL15_SQL_DDL_Kafka_MySQL.java:50)
```
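The message shows the fully qualified path `default_catalog`.`default_database`.`sink_sensor`: Flink looked the name up in the current catalog and database and found nothing. A quick way to check what actually got registered (a hypothetical debug snippet; listTables() returns the tables of the current catalog/database):

```java
// Debug aid: print every table registered in the current catalog/database.
// Here it prints source_sensor and spend_report -- there is no sink_sensor.
for (String name : tableEnv.listTables()) {
    System.out.println(name);
}
```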
Clicking into table.executeInsert to look at the source:
```java
/**
* Writes the {@link Table} to a {@link TableSink} that was registered under the specified path,
* and then execute the insert operation.
*
* <p>See the documentation of {@link TableEnvironment#useDatabase(String)} or {@link
* TableEnvironment#useCatalog(String)} for the rules on the path resolution.
*
* <p>A batch {@link Table} can only be written to a {@code
* org.apache.flink.table.sinks.BatchTableSink}, a streaming {@link Table} requires a {@code
* org.apache.flink.table.sinks.AppendStreamTableSink}, a {@code
* org.apache.flink.table.sinks.RetractStreamTableSink}, or an {@code
* org.apache.flink.table.sinks.UpsertStreamTableSink}.
*
* <p>Example:
*
* <pre>{@code
* Table table = tableEnv.fromQuery("select * from MyTable");
* TableResult tableResult = table.executeInsert("MySink");
* tableResult...
* }</pre>
*
* @param tablePath The path of the registered TableSink to which the Table is written.
* @return The insert operation execution result.
*/
TableResult executeInsert(String tablePath);
```
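The javadoc points at useCatalog/useDatabase for path resolution, which explains the fully qualified name in the error message. As a small illustration (my assumption: executeInsert parses its argument as a SQL identifier, so the fully qualified form should resolve to the same sink):

```java
// Short-form path, resolved against the current catalog/database:
table.executeInsert("spend_report");
// Fully qualified form of the same path:
// table.executeInsert("`default_catalog`.`default_database`.`spend_report`");
```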
So the tablePath parameter of executeInsert must name a sink table that is already registered in the catalog, and that table is where the data gets written. No table called sink_sensor was ever created here; the MySQL sink registered by the DDL above is spend_report, so the insert has to target that name. Changing:

```java
table.executeInsert("sink_sensor");
```

to:

```java
table.executeInsert("spend_report");
```

makes the job run successfully and the Kafka records are written into the MySQL spend_report table.
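One more fix is needed: in Flink 1.12, executeInsert() builds and submits its own job, so the trailing env.execute() is unnecessary and will actually throw "No operators defined in streaming topology", because no DataStream operators were ever added to env. A minimal sketch of the end of main (the JobClient printout is just an illustration):

```java
// executeInsert() submits the INSERT job by itself; do not call env.execute() afterwards.
TableResult result = table.executeInsert("spend_report");
// Optional: inspect the submitted job via its JobClient.
result.getJobClient().ifPresent(client ->
        System.out.println("Submitted job: " + client.getJobID()));
```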
Complete demo code for Flink 1.12 — flink-java-1.12.7: a Flink 1.12.7 Java demo, including a Flink WordCount example and how to connect to Kafka.