SparkSQL

pom.xml

xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>org.example</groupId>
    <artifactId>spark_sql</artifactId>
    <version>1.0-SNAPSHOT</version>
    <dependencies>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.12</artifactId>
            <version>3.0.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.12</artifactId>
            <version>3.0.0</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.27</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <!-- This plugin compiles Scala code into class files -->
            <plugin>
                <groupId>net.alchim31.maven</groupId>
                <artifactId>scala-maven-plugin</artifactId>
                <version>3.2.2</version>
                <executions>
                    <execution>
                        <!-- Bind the Scala compiler to Maven's compile and test-compile phases -->
                        <goals>
                            <goal>compile</goal>
                            <goal>testCompile</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-assembly-plugin</artifactId>
                <version>3.1.0</version>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>single</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

</project>

SparkSQL01_Demo

scala
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

object SparkSQL01_Demo {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkSQL")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    // Read the normal_data table from MySQL over JDBC
    val df = spark.read
      .format("jdbc")
      .option("url", "jdbc:mysql://hadoop102:3306/localstreamdata")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "000000")
      .option("dbtable", "normal_data")
      .load()
    df.show()

    spark.close()
  }

}
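
The option-by-option reader above has an equivalent shortcut on DataFrameReader: jdbc(url, table, properties). A minimal sketch assuming the same host, database, and credentials (the object name SparkSQL01_JdbcShortcut is made up for this sketch):

scala
import java.util.Properties

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

object SparkSQL01_JdbcShortcut {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkSQL")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    // Connection properties replace the individual .option(...) calls
    val props = new Properties()
    props.put("driver", "com.mysql.jdbc.Driver")
    props.put("user", "root")
    props.put("password", "000000")

    val df = spark.read.jdbc("jdbc:mysql://hadoop102:3306/localstreamdata", "normal_data", props)
    df.show()

    spark.close()
  }
}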

Writing from SparkSQL to MySQL

Create the target table in MySQL ahead of time:

sql
use localstreamdata;
DESCRIBE normal_data;
CREATE TABLE IF NOT EXISTS gpb2 (
    stream_id varchar(20),
    stream_time datetime,
    stream_user_id bigint(20),
    stream_money int(11),
    stream_consume_type int(11),
    stream_consume_location varchar(50),
    stream_sign_location varchar(50),
    stream_time_date int(11),
    stream_time_minute varchar(20),
    stream_seconds int(11),
    stream_is_new int(3),
    stream_is_normal varchar(20)
);
DESCRIBE gpb2;
-- Switch the two location columns to utf8 so non-ASCII location strings are stored correctly
alter table gpb2 change stream_consume_location stream_consume_location varchar(100) character set utf8;
alter table gpb2 change stream_sign_location stream_sign_location varchar(100) character set utf8;

The program below reads normal_data and appends it to gpb2, with an optional column-cleaning step:

scala
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Column, SaveMode, SparkSession}
import org.apache.spark.sql.functions._

object SparkSQL01_Demo {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkSQL")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    val df = spark.read
      .format("jdbc")
      .option("url", "jdbc:mysql://hadoop102:3306/localstreamdata?characterEncoding=utf8&useSSL=false")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "000000")
      .option("dbtable", "normal_data")
      .load()

    df.show()

    import spark.implicits._
    val cleanedDF = df.withColumn("stream_consume_location", your_clean_function(col("stream_consume_location")))

    cleanedDF.write
      .format("jdbc")
      .option("url", "jdbc:mysql://hadoop102:3306/localstreamdata?characterEncoding=utf8&useSSL=false")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "000000")
      .option("dbtable", "gpb2")
      .mode(SaveMode.Append)
      .save()

    spark.close()
  }

  def your_clean_function(str: Column): Column = {
    // Implement the cleaning/transformation logic you need here
    // and return the cleaned string column. Placeholder: pass through unchanged.
    str
  }
}
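
If real cleaning is needed, your_clean_function can be composed from org.apache.spark.sql.functions. A minimal sketch, assuming a made-up rule of trimming whitespace and mapping empty strings to NULL:

scala
import org.apache.spark.sql.Column
import org.apache.spark.sql.functions.{length, trim, when}

// Assumed cleaning rule: strip surrounding whitespace and turn
// empty strings into NULL so they are easy to filter out later.
def your_clean_function(str: Column): Column = {
  val trimmed = trim(str)
  when(length(trimmed) === 0, null).otherwise(trimmed)
}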

The same program with the cleaning step commented out, writing normal_data to gpb2 unchanged:

scala
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Column, SaveMode, SparkSession}
import org.apache.spark.sql.functions._

object SparkSQL01_Demo {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkSQL")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    val df = spark.read
      .format("jdbc")
      .option("url", "jdbc:mysql://hadoop102:3306/localstreamdata?characterEncoding=utf8&useSSL=false")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "000000")
      .option("dbtable", "normal_data")
      .load()

    df.show()

    import spark.implicits._
    //val cleanedDF = df.withColumn("stream_consume_location", your_clean_function(col("stream_consume_location")))

    df.write
      .format("jdbc")
      .option("url", "jdbc:mysql://hadoop102:3306/localstreamdata?characterEncoding=utf8&useSSL=false")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "000000")
      .option("dbtable", "gpb2")
      .mode(SaveMode.Append)
      .save()

    spark.close()
  }
/*
  def your_clean_function(str: Column): Column = {
    // Implement the cleaning/transformation logic you need here
    // and return the cleaned string column. Placeholder: pass through unchanged.
    str
  }
 */
}
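
SaveMode.Append adds the rows again on every run. If a run should replace gpb2's contents instead, SaveMode.Overwrite works, but by default it drops and recreates the table (which would discard the utf8 column changes made above); the JDBC writer's truncate option keeps the existing schema. A sketch of just the changed writer calls:

scala
df.write
  .format("jdbc")
  .option("url", "jdbc:mysql://hadoop102:3306/localstreamdata?characterEncoding=utf8&useSSL=false")
  .option("driver", "com.mysql.jdbc.Driver")
  .option("user", "root")
  .option("password", "000000")
  .option("dbtable", "gpb2")
  .option("truncate", "true") // TRUNCATE TABLE instead of DROP + CREATE on overwrite
  .mode(SaveMode.Overwrite)
  .save()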