创建Spark SQL环境、使用Spark DSL以及Spark on Hive

相关代码展示:

Spark SQL

scala 复制代码
package com.shujia.spark.sql

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object Demo1DataFrame {
  def main(args: Array[String]): Unit = {

    // 1. Build the Spark SQL session (local mode for this demo).
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("df")
      // Number of partitions produced after a shuffle
      // (the default of 200 is wasteful when running locally).
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._

    // 2. Read the data.
    // A DataFrame is an RDD plus a table schema, which enables SQL queries.
    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,sex STRING,clazz STRING")
      .load("data/students.txt")

    // Inspect the data.
    studentDF.show()

    // Register a temporary view so the data can be queried with SQL.
    studentDF.createOrReplaceTempView("students")

    // Process the data with SQL.
    val clazzNumDF: DataFrame = spark.sql(
      """
        |select clazz,count(1) as num
        |from students
        |group by clazz
        |""".stripMargin)

    clazzNumDF.show()

    import org.apache.spark.sql.functions._
    // The same aggregation expressed with the DSL.
    val clazzNum: DataFrame = studentDF
      .groupBy("clazz")
      .agg(count("id") as "num")

    // Save the result (save() left commented out so reruns do not
    // fail on an already-existing output directory).
    clazzNum
      .write
      .format("csv")
      .option("sep", "\t")
    //.save("data/clazz_num")

    // The same aggregation expressed with the RDD API.
    // NOTE: renamed from kvDS — this is an RDD, not a Dataset.
    val kvRDD: RDD[(String, Int)] = studentDF
      // Convert to an RDD of Row objects.
      .rdd
      // getAs is safer than a Row(...) pattern match: the original match
      // was non-exhaustive and bound the other columns as untyped Any.
      .map(row => (row.getAs[String]("clazz"), 1))

    kvRDD
      .reduceByKey(_ + _)
      .foreach(println)

    // Release the session and its resources.
    spark.stop()
  }
}

Spark DSL

scala 复制代码
package com.shujia.spark.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo2DSL {
  def main(args: Array[String]): Unit = {
    // Build the Spark SQL session.
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("dsl")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    // Read the data and create the DataFrames.
    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,sex STRING,clazz STRING")
      .load("data/students.txt")

    val scoreDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,cid STRING,score DOUBLE")
      .load("data/score.txt")

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // 1. select
    studentDF.select("name", "age").show()
    // $"age" creates a Column object.
    studentDF.select($"name", $"age" + 1 as "age").show()
    // Functions can be used inside select.
    studentDF.select(substring($"clazz", 1, 2) as "type").show()
    studentDF.selectExpr("age+1 as age").show()

    // 2. where
    studentDF.where($"sex" =!= "男" and $"age" === 23).show()
    studentDF.where(substring($"clazz", 1, 2) === "文科").show()
    studentDF.where($"name" isin("葛德曜", "符半双", "羿彦昌")).show()

    // 3. groupBy followed by aggregation inside agg
    studentDF
      .groupBy($"clazz")
      .agg(count($"id") as "count", round(avg($"age"), 2) as "avgAge")
      .show()

    // 4. Filtering after aggregation (the SQL HAVING clause)
    /**
     * select clazz,count(1) as count from
     * students
     * group by clazz
     * having count > 80
     */
    studentDF
      .groupBy($"clazz")
      .agg(count($"id") as "count", round(avg($"age"), 2) as "avgAge")
      .where($"count" > 80)
      .show()

    // 5. order by
    studentDF
      .groupBy($"clazz")
      .agg(count($"id") as "count", round(avg($"age"), 2) as "avgAge")
      .orderBy($"count".desc)
      .show()

    // 6. limit
    studentDF
      .groupBy($"clazz")
      .agg(count($"id") as "count", round(avg($"age"), 2) as "avgAge")
      .orderBy($"count".desc)
      .limit(10)
      .show()

    // 7. show — behaves like an action
    // (renumbered: the original listing used "6" twice)
    studentDF.show()
    studentDF.show(10)
    studentDF.show(10, truncate = false)


    // 8. join
    studentDF.as("a").join(scoreDF.as("b"), $"a.id" === $"b.id", "inner").show()

    studentDF
      .as("a") // alias the left side
      .join(scoreDF.as("b"), $"a.id" === $"b.id", "inner")
      .groupBy($"name")
      .agg(sum($"score") as "sumScore")
      .show()

    // 9. row_number
    /**
     * select * from (
     *    select *,row_number() over(partition by clazz order by sumScore desc) as r from(
     *       select a.id,name,clazz,sum(score) as sumScore from
     *       student as a
     *       join
     *       score as b
     *       on a.id=b.id
     *       group by a.id,name,clazz
     *    ) as c
     * ) as d
     * where r<=10
     */
    studentDF
      .as("a") // alias the left side
      .join(scoreDF.as("b"), $"a.id" === $"b.id", "inner")
      .groupBy($"a.id", $"name", $"clazz")
      .agg(sum($"score") as "sumScore")
      //.select($"id", $"name", $"clazz", $"sumScore", row_number() over Window.partitionBy($"clazz").orderBy($"sumScore".desc) as "r")
      // withColumn: adds a new column on top of the DataFrame above.
      .withColumn("r", row_number() over Window.partitionBy($"clazz").orderBy($"sumScore".desc))
      .where($"r" <= 10)
      .show()

    // Release the session and its resources.
    spark.stop()
  }
}

Spark on Hive

在代码里整合 Spark SQL 和 Hive。

如果需要将代码提交到服务器运行

bash 复制代码
spark-submit --master yarn --deploy-mode client --num-executors 2 --executor-cores 1 --executor-memory 2G --class com.shujia.spark.sql.Demo4SparkOnHive spark-1.0.jar

代码展示:

scala 复制代码
package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo4SparkOnHive {
  def main(args: Array[String]): Unit = {
    // No .master() here: the master is supplied by spark-submit (yarn).
    val spark: SparkSession = SparkSession
      .builder()
      .appName("dsl")
      .config("spark.sql.shuffle.partitions", 1)
      // Enable Hive metastore support.
      .enableHiveSupport()
      .getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // List the Hive tables visible to this session.
    spark.sql(
      """
        |show tables
        |""".stripMargin).show()

    // Query a Hive table with SQL.
    spark.sql(
      """
        |select clazz,count(1) as num from
        |students
        |group by clazz
        |""".stripMargin).show()

    // Load a Hive table as a DataFrame.
    val studentDF: DataFrame = spark.table("students")

    // Cache the DataFrame since it is used more than once below.
    studentDF.cache()

    studentDF
      .groupBy($"clazz")
      .agg(count($"id") as "num")
      .show()

    studentDF
      .groupBy($"sex")
      .agg(count($"id") as "num")
      .show()

    // Drop the cached data.
    studentDF.unpersist()

    // Release the session; on YARN this frees the executors.
    spark.stop()

    // Submit to the cluster with:
    //spark-submit --master yarn --deploy-mode client --num-executors 2 --executor-cores 1 --executor-memory 2G --class com.shujia.spark.sql.Demo4SparkOnHive spark-1.0.jar
  }
}
相关推荐
阿里云大数据AI技术3 小时前
用 SQL 调大模型?Hologres + 百炼,让数据开发直接“对话”AI
sql·llm
字节跳动数据平台9 小时前
代码量减少 70%、GPU 利用率达 95%:火山引擎多模态数据湖如何释放模思智能的算法生产力
大数据
得物技术10 小时前
深入剖析Spark UI界面:参数与界面详解|得物技术
大数据·后端·spark
武子康12 小时前
大数据-238 离线数仓 - 广告业务 Hive分析实战:ADS 点击率、购买率与 Top100 排名避坑
大数据·后端·apache hive
武子康1 天前
大数据-237 离线数仓 - Hive 广告业务实战:ODS→DWD 事件解析、广告明细与转化分析落地
大数据·后端·apache hive
大大大大晴天1 天前
Flink生产问题排障-Kryo serializer scala extensions are not available
大数据·flink
武子康3 天前
大数据-236 离线数仓 - 会员指标验证、DataX 导出与广告业务 ODS/DWD/ADS 全流程
大数据·后端·apache hive
肌肉娃子4 天前
20260227.spark.Spark 性能刺客:千万别在 for 循环里写 withColumn
spark
武子康4 天前
大数据-235 离线数仓 - 实战:Flume+HDFS+Hive 搭建 ODS/DWD/DWS/ADS 会员分析链路
大数据·后端·apache hive
DianSan_ERP5 天前
电商API接口全链路监控:构建坚不可摧的线上运维防线
大数据·运维·网络·人工智能·git·servlet