SparkSql

pom.xml

xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>org.example</groupId>
    <artifactId>spark_sql</artifactId>
    <version>1.0-SNAPSHOT</version>
    <dependencies>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.12</artifactId>
            <version>3.0.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.12</artifactId>
            <version>3.0.0</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.27</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <!-- This plugin compiles Scala code into class files -->
            <plugin>
                <groupId>net.alchim31.maven</groupId>
                <artifactId>scala-maven-plugin</artifactId>
                <version>3.2.2</version>
                <executions>
                    <execution>
                        <!-- Bind to Maven's compile phase -->
                        <goals>
                            <goal>compile</goal>
                            <goal>testCompile</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-assembly-plugin</artifactId>
                <version>3.1.0</version>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>single</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

</project>

SparkSQL01_Demo

scala
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

object SparkSQL01_Demo {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkSQL")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    val df = spark.read
      .format("jdbc")
      .option("url", "jdbc:mysql://hadoop102:3306/localstreamdata")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "000000")
      .option("dbtable", "normal_data")
      .load()
    df.show()

    spark.close()
  }

}
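
Equivalently, the connection options can be packed into a `java.util.Properties` object and passed to `spark.read.jdbc`; the sketch below also registers the result as a temp view to show a plain SQL query. Host, credentials, and table name simply repeat the values above.

scala
import java.util.Properties

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

object SparkSQL01_DemoProps {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkSQL")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    // Same connection settings as above, carried in a Properties object
    val props = new Properties()
    props.setProperty("driver", "com.mysql.jdbc.Driver")
    props.setProperty("user", "root")
    props.setProperty("password", "000000")

    val df = spark.read.jdbc("jdbc:mysql://hadoop102:3306/localstreamdata", "normal_data", props)

    // Register the DataFrame as a temp view and query it with SQL
    df.createOrReplaceTempView("normal_data")
    spark.sql("SELECT COUNT(*) FROM normal_data").show()

    spark.close()
  }
}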

Writing to MySQL with SparkSQL

Create the target table in MySQL ahead of time:

sql
use localstreamdata;
DESCRIBE normal_data;
CREATE TABLE IF NOT EXISTS gpb2 (
    stream_id varchar(20),
    stream_time datetime,
    stream_user_id bigint(20),
    stream_money int(11),
    stream_consume_type int(11),
    stream_consume_location varchar(50),
    stream_sign_location varchar(50),
    stream_time_date int(11),
    stream_time_minute varchar(20),
    stream_seconds int(11),
    stream_is_new int(3),
    stream_is_normal varchar(20)
);
DESCRIBE gpb2;
ALTER TABLE gpb2 CHANGE stream_consume_location stream_consume_location varchar(100) CHARACTER SET utf8;
ALTER TABLE gpb2 CHANGE stream_sign_location stream_sign_location varchar(100) CHARACTER SET utf8;
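
Pre-creating the table by hand is one option; alternatively, `SaveMode.Overwrite` lets Spark create the target table itself, and the `createTableColumnTypes` write option overrides the default column types it would use. A minimal self-contained sketch, assuming a hypothetical target table `gpb2_auto`:

scala
import org.apache.spark.SparkConf
import org.apache.spark.sql.{SaveMode, SparkSession}

object SparkSQL02_AutoCreate {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .config(new SparkConf().setMaster("local[*]").setAppName("sparkSQL"))
      .getOrCreate()

    val url = "jdbc:mysql://hadoop102:3306/localstreamdata?characterEncoding=utf8&useSSL=false"

    val df = spark.read.format("jdbc")
      .option("url", url)
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "000000")
      .option("dbtable", "normal_data")
      .load()

    // gpb2_auto is a hypothetical table name; Overwrite lets Spark create it,
    // and createTableColumnTypes pins the two location columns to VARCHAR(100)
    // (the same widths the ALTER statements above set by hand).
    df.write.format("jdbc")
      .option("url", url)
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "000000")
      .option("dbtable", "gpb2_auto")
      .option("createTableColumnTypes", "stream_consume_location VARCHAR(100), stream_sign_location VARCHAR(100)")
      .mode(SaveMode.Overwrite)
      .save()

    spark.close()
  }
}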
scala
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Column, SaveMode, SparkSession}
import org.apache.spark.sql.functions._

object SparkSQL01_Demo {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkSQL")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    val df = spark.read
      .format("jdbc")
      .option("url", "jdbc:mysql://hadoop102:3306/localstreamdata?characterEncoding=utf8&useSSL=false")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "000000")
      .option("dbtable", "normal_data")
      .load()

    df.show()

    import spark.implicits._
    val cleanedDF = df.withColumn("stream_consume_location", your_clean_function(col("stream_consume_location")))

    cleanedDF.write
      .format("jdbc")
      .option("url", "jdbc:mysql://hadoop102:3306/localstreamdata?characterEncoding=utf8&useSSL=false")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "000000")
      .option("dbtable", "gpb2")
      .mode(SaveMode.Append)
      .save()

    spark.close()
  }

  def your_clean_function(str: Column): Column = {
    // Implement the cleaning/transformation logic as needed
    // and return the cleaned string column.
    // Placeholder example: return the column unchanged.
    str
  }
}
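
One possible body for the cleaning hook, as a drop-in replacement for the placeholder above: trim surrounding whitespace and strip control characters with the built-in `trim` and `regexp_replace` functions. The exact rules here are an assumption, not something the original code specifies.

scala
import org.apache.spark.sql.Column
import org.apache.spark.sql.functions.{regexp_replace, trim}

// Hypothetical cleaning rules: trim whitespace, then remove ASCII control
// characters that tend to break utf8 round-trips; adjust to the real data.
def your_clean_function(str: Column): Column =
  regexp_replace(trim(str), "[\\x00-\\x1F]", "")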
The final version writes the DataFrame through unchanged, with the cleaning hook left commented out for later use:

scala
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Column, SaveMode, SparkSession}
import org.apache.spark.sql.functions._

object SparkSQL01_Demo {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkSQL")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    val df = spark.read
      .format("jdbc")
      .option("url", "jdbc:mysql://hadoop102:3306/localstreamdata?characterEncoding=utf8&useSSL=false")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "000000")
      .option("dbtable", "normal_data")
      .load()

    df.show()

    import spark.implicits._
    //val cleanedDF = df.withColumn("stream_consume_location", your_clean_function(col("stream_consume_location")))

    df.write
      .format("jdbc")
      .option("url", "jdbc:mysql://hadoop102:3306/localstreamdata?characterEncoding=utf8&useSSL=false")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "000000")
      .option("dbtable", "gpb2")
      .mode(SaveMode.Append)
      .save()

    spark.close()
  }
/*
  def your_clean_function(str: Column): Column = {
    // Implement the cleaning/transformation logic as needed
    // and return the cleaned string column.
    // Placeholder example: return the column unchanged.
    str
  }

 */
}
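
For larger tables, the JDBC read can be parallelized with the standard `partitionColumn` / `lowerBound` / `upperBound` / `numPartitions` options. A minimal sketch reusing the `spark` session from above; the bounds are illustrative assumptions, and the partition column must be numeric, date, or timestamp (the numeric `stream_user_id` is assumed suitable here):

scala
// Assumes stream_user_id is numeric and its values fall roughly in [1, 1000000].
val partitionedDF = spark.read
  .format("jdbc")
  .option("url", "jdbc:mysql://hadoop102:3306/localstreamdata?characterEncoding=utf8&useSSL=false")
  .option("driver", "com.mysql.jdbc.Driver")
  .option("user", "root")
  .option("password", "000000")
  .option("dbtable", "normal_data")
  .option("partitionColumn", "stream_user_id") // must be numeric, date, or timestamp
  .option("lowerBound", "1")
  .option("upperBound", "1000000")
  .option("numPartitions", "4")
  .load()
partitionedDF.show()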