Creating a Spark SQL environment, using the Spark DSL, and Spark on Hive

Code examples:

Spark SQL

scala
package com.shujia.spark.sql

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object Demo1DataFrame {
  def main(args: Array[String]): Unit = {

    //1. Create the Spark SQL session
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("df")
      //Set the number of partitions after a shuffle
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._

    //2. Read data
    //DataFrame: an RDD with a table schema on top, so SQL can be written against it
    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,sex STRING,clazz STRING")
      .load("data/students.txt")

    //Preview the data
    studentDF.show()

    //Register a temporary view
    studentDF.createOrReplaceTempView("students")

    //Process the data with SQL
    val clazzNumDF: DataFrame = spark.sql(
      """
        |select clazz,count(1) as num
        |from students
        |group by clazz
        |""".stripMargin)

    clazzNumDF.show()

    import org.apache.spark.sql.functions._
    //Process the data with the DSL
    val clazzNum: DataFrame = studentDF
      .groupBy("clazz")
      .agg(count("id") as "num")

    //Save the result
    clazzNum
      .write
      .format("csv")
      .option("sep", "\t")
    //.save("data/clazz_num")

    //Process the data with the RDD API
    val kvRDD: RDD[(String, Int)] = studentDF
      //Convert to an RDD
      .rdd
      .map {
        //Each row of the DF is a Row object
        case Row(id, name, age, sex, clazz: String) => (clazz, 1)
      }

    kvRDD
      .reduceByKey(_ + _)
      .foreach(println)

  }
}
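
The `Dataset` type is imported in the example above but never used. As a complement, below is a minimal sketch of the typed Dataset API; the `Student` case class and the typed operations are illustrative assumptions, not part of the original code (define the case class at the top level, outside `main`, so Spark can derive an encoder).

scala
case class Student(id: String, name: String, age: Int, sex: String, clazz: String)

//inside main, after studentDF is created and spark.implicits._ is imported
val studentDS: Dataset[Student] = studentDF.as[Student]

studentDS
  //typed lambdas instead of Column expressions
  .filter(s => s.age > 22)
  //typed grouping, then count per class
  .groupByKey(s => s.clazz)
  .count()
  .show()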

Spark DSL

scala
package com.shujia.spark.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo2DSL {
  def main(args: Array[String]): Unit = {
    //Create the Spark SQL session
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("dsl")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    //Read data and create DFs
    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,sex STRING,clazz STRING")
      .load("data/students.txt")

    val scoreDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,cid STRING,score DOUBLE")
      .load("data/score.txt")

    import spark.implicits._
    import org.apache.spark.sql.functions._

    //1. select
    studentDF.select("name", "age").show()
    //$"age": 获取列对象
    studentDF.select($"name", $"age" + 1 as "age").show()
    //Use a built-in function inside select
    studentDF.select(substring($"clazz", 1, 2) as "type").show()
    studentDF.selectExpr("age+1 as age").show()

    //2. where
    studentDF.where($"sex" =!= "男" and $"age" === 23).show()
    studentDF.where(substring($"clazz", 1, 2) === "文科").show()
    studentDF.where($"name" isin("葛德曜", "符半双", "羿彦昌")).show()

    //3. Aggregate inside agg after groupBy
    studentDF
      .groupBy($"clazz")
      .agg(count($"id") as "count", round(avg($"age"), 2) as "avgAge")
      .show()

    //4. Filter after aggregation (having)
    /**
     * select clazz,count(1) as count from
     * students
     * group by clazz
     * having count > 80
     */
    studentDF
      .groupBy($"clazz")
      .agg(count($"id") as "count", round(avg($"age"), 2) as "avgAge")
      .where($"count" > 80)
      .show()

    //5. order by
    studentDF
      .groupBy($"clazz")
      .agg(count($"id") as "count", round(avg($"age"), 2) as "avgAge")
      .orderBy($"count".desc)
      .show()

    //6. limit
    studentDF
      .groupBy($"clazz")
      .agg(count($"id") as "count", round(avg($"age"), 2) as "avgAge")
      .orderBy($"count".desc)
      .limit(10)
      .show()

    //7. show: behaves like an action operator
    studentDF.show()
    studentDF.show(10)
    studentDF.show(10, truncate = false)


    //8. join
    studentDF.as("a").join(scoreDF.as("b"), $"a.id" === $"b.id", "inner").show()

    studentDF
      .as("a") //取别名
      .join(scoreDF.as("b"), $"a.id" === $"b.id", "inner")
      .groupBy($"name")
      .agg(sum($"score") as "sumScore")
      .show()

    //9. row_number
    /**
     * select * from (
     *    select *,row_number() over(partition by clazz order by sumScore desc) as r from(
     *       select a.id,name,clazz,sum(score) as sumScore from
     *       student as a
     *       join
     *       score as b
     *       on a.id=b.id
     *       group by a.id,name,clazz
     *    ) as c
     * ) as d
     * where r<=10
     */
    studentDF
      .as("a") //取别名
      .join(scoreDF.as("b"), $"a.id" === $"b.id", "inner")
      .groupBy($"a.id", $"name", $"clazz")
      .agg(sum($"score") as "sumScore")
      //.select($"id", $"name", $"clazz", $"sumScore", row_number() over Window.partitionBy($"clazz").orderBy($"sumScore".desc) as "r")
      //withColumn: add a new column on top of the DF above
      .withColumn("r", row_number() over Window.partitionBy($"clazz").orderBy($"sumScore".desc))
      .where($"r" <= 10)
      .show()
  }
}
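
The SQL written in the comment above can also be executed directly with `spark.sql` once the two DataFrames are registered as temporary views; a minimal sketch, assuming the view names `students` and `score`:

scala
studentDF.createOrReplaceTempView("students")
scoreDF.createOrReplaceTempView("score")

spark.sql(
  """
    |select * from (
    |  select *, row_number() over(partition by clazz order by sumScore desc) as r from (
    |    select a.id, name, clazz, sum(score) as sumScore
    |    from students as a
    |    join score as b on a.id = b.id
    |    group by a.id, name, clazz
    |  ) as c
    |) as d
    |where r <= 10
    |""".stripMargin).show()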

Spark on Hive

Integrate Spark SQL with Hive in code by enabling Hive support on the SparkSession.

If the code needs to be submitted to the cluster, run:

bash
spark-submit --master yarn --deploy-mode client --num-executors 2 --executor-cores 1 --executor-memory 2G --class com.shujia.spark.sql.Demo4SparkOnHive spark-1.0.jar

Code:

scala
package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo4SparkOnHive {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName("dsl")
      .config("spark.sql.shuffle.partitions", 1)
      //Enable Hive metastore support
      .enableHiveSupport()
      .getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._

    //Use tables from Hive
    spark.sql(
      """
        |show tables
        |""".stripMargin).show()

    //Process Hive tables with SQL
    spark.sql(
      """
        |select clazz,count(1) as num from
        |students
        |group by clazz
        |""".stripMargin).show()

    //Load a Hive table as a DF
    val studentDF: DataFrame = spark.table("students")

    //Cache the DF when it is used more than once
    studentDF.cache()

    studentDF
      .groupBy($"clazz")
      .agg(count($"id") as "num")
      .show()

    studentDF
      .groupBy($"sex")
      .agg(count($"id") as "num")
      .show()

    //Release the cache
    studentDF.unpersist()

    //To run, submit the code to the cluster:
    //spark-submit --master yarn --deploy-mode client --num-executors 2 --executor-cores 1 --executor-memory 2G --class com.shujia.spark.sql.Demo4SparkOnHive spark-1.0.jar
  }
}
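
With Hive support enabled, results can also be written back to the metastore via `saveAsTable`; a minimal sketch that continues the code above, where the target table name `clazz_num` and the overwrite mode are assumptions, not part of the original code:

scala
//aggregate as above, then persist the result as a Hive table
val clazzNumDF: DataFrame = studentDF
  .groupBy($"clazz")
  .agg(count($"id") as "num")

clazzNumDF
  .write
  //replace the table if it already exists (assumed desired behaviour)
  .mode("overwrite")
  .saveAsTable("clazz_num")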