Spark SQL: An Example of Processing a JSON File

This post demonstrates how to process JSON data with Spark SQL. It first sets the log level, then creates a SparkConf and a SparkContext. Next, it reads data from the `json.txt` file and defines the schema. The `get_json_object` function is used to parse the JSON fields and display the first and second nesting levels. Finally, the `explode` operation unnests the event array, and the resulting data is shown. The post also shows how to register the result as a temporary view and run a SQL query against it.
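Based on how the code below splits each line on `|` and then pulls the `cm`, `ap`, and `et` fields out of the JSON payload, a line of `json.txt` is assumed to look roughly like the following. The values here are made up purely for illustration; the real log format may differ.

1654006400000|{"cm":{"ln":"-55.0","sv":"V2.9.6","os":"8.0.4","g":"test@gmail.com","mid":"489","nw":"4G","l":"es","vc":"4","hw":"640*960","ar":"MX","uid":"489","t":"1654006300000","la":"5.2","md":"sumsung-18","vn":"1.3.4","ba":"Sumsung","sr":"I"},"ap":"app","et":[{"ett":"1654006401000","en":"loading","kv":{"action":"1","loading_time":"10"}}]}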


package org.example

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.functions._

object json {
  def main(args: Array[String]): Unit = {
    Logger.getLogger("org.apache.spark").setLevel(Level.OFF)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

    val conf = new SparkConf().setAppName("json").setMaster("local")
    val sc = new SparkContext(conf)
    val spark = SparkSession.builder().config(conf).getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.types._

    // Read the file into an RDD
    val RDD = sc.textFile("file/input/json.txt")

    // Create the schema
    val schemaString = "time content"
    val fields = schemaString.split(" ").map(x => StructField(x, StringType, nullable = true))
    val schema = StructType(fields)

    println("+++++++++++++++++++++++++++DF:创建 DateFrame+++++++++++++++++++++++")
    // 注意:如果用createDataFrame(rowRDD,schema) 则RDD需要 Row
    val rowRDD = RDD.map(_.split("\\|")).map(x=>Row(x(0).trim(),x(1).trim()))
    val opDF = spark.createDataFrame(rowRDD, schema)
    /*
    val rowRDD = RDD.map(_.split("\\|")).map(x => (x(0).trim(), x(1).trim()))
    val opDF = rowRDD.toDF("time", "content")   // or: val opDF = spark.createDataFrame(rowRDD).toDF("time", "content")
    */

    opDF.show(1, false)


    // Inspect the first JSON level
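    // get_json_object(col, "$.field") evaluates a JSONPath expression against a JSON string column
    // and returns the matched value as a string (null if the path does not match), so nested objects
    // such as cm come back as JSON strings that can be parsed again below.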
    val opDF1 = opDF.select($"time", get_json_object($"content", "$.cm").alias("cm"), get_json_object($"content", "$.ap").alias("ap"),
      get_json_object($"content", "$.et").alias("et"))
    println("+++++++++++++++++++++++++++DF1: 第一层 +++++++++++++++++++++")
    opDF1.printSchema()
    opDF1.show(10, false)

    // Inspect the second JSON level: the fields nested under cm, plus the et event array
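    // from_json parses the et string into an array of structs according to the given schema,
    // which is what later allows explode to turn each event into its own row.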
    val opDF2 = opDF1.select($"time", $"ap", get_json_object($"cm", "$.ln").alias("ln"), get_json_object($"cm", "$.sv").alias("sv"),
      get_json_object($"cm", "$.os").alias("os"), get_json_object($"cm", "$.g").alias("g"), get_json_object($"cm", "$.mid").alias("mid"),
      get_json_object($"cm", "$.nw").alias("nw"), get_json_object($"cm", "$.l").alias("l"), get_json_object($"cm", "$.vc").alias("vc"),
      get_json_object($"cm", "$.hw").alias("hw"), get_json_object($"cm", "$.ar").alias("ar"), get_json_object($"cm", "$.uid").alias("uid"),
      get_json_object($"cm", "$.t").alias("t"), get_json_object($"cm", "$.la").alias("la"), get_json_object($"cm", "$.md").alias("md"),
      get_json_object($"cm", "$.vn").alias("vn"), get_json_object($"cm", "$.ba").alias("ba"), get_json_object($"cm", "$.sr").alias("sr"),
      from_json($"et", ArrayType(StructType(StructField("ett", StringType) :: StructField("en", StringType) :: StructField("kv", StringType) :: Nil))).as("events"))
    println("+++++++++++++++++++++++++++++++++++DF2:+++++++++++++++++++++++++")
    opDF2.printSchema()
    opDF2.show(10, false)

    //explode
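    // explode produces one output row per element of the events array, duplicating the other columns.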
    val opDF3 = opDF2.select($"time", $"ap", $"ln", $"sv", $"os", $"g", $"mid", $"nw", $"l", $"vc", $"hw", $"ar", $"uid", $"t", $"la",
      $"md", $"vn", $"ba", $"sr", explode($"events").as("eventcontent"))
    println("++++++++++++++++++++++++++++++++DF3:++++++++++++++++++++++++++++")
    opDF3.printSchema()
    opDF3.show(10, false)

    // Extract the third level: the individual fields of each event
    val opDF4 = opDF3.select($"time", $"ap", $"ln", $"sv", $"os", $"g", $"mid", $"nw", $"l", $"vc", $"hw", $"ar", $"uid", $"t", $"la",
      $"md", $"vn", $"ba", $"sr", $"eventcontent.ett", $"eventcontent.en", $"eventcontent.kv")
    println("+++++++++++++++++++++++++++++++++DF4:+++++++++++++++++++++++++++")
    opDF4.printSchema()
    opDF4.show(10, false)


    opDF4.createTempView("cai")
    println("创建临时表以后的数据查询")
    spark.sql("select time,hw from cai").show(10, false)
  }
}
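
A possible variation, sketched under the assumption that `et` is stored as a real JSON array inside the payload (rather than as an embedded string): the whole payload can be parsed with a single `from_json` call instead of repeated `get_json_object` calls. The snippet below reuses `spark`, its implicits, and `opDF` from the listing above; the field names are taken from the code and should be checked against the actual `json.txt` before relying on it.

// Sketch only: schema-first parsing of the same payload in one pass.
val payloadSchema = StructType(Seq(
  StructField("cm", MapType(StringType, StringType)),   // nested common fields as a string map
  StructField("ap", StringType),
  StructField("et", ArrayType(StructType(Seq(
    StructField("ett", StringType),
    StructField("en", StringType),
    StructField("kv", StringType)))))
))

val flat = opDF
  .select($"time", from_json($"content", payloadSchema).as("p"))
  .select($"time", $"p.ap".as("ap"), $"p.cm".as("cm"), explode($"p.et").as("event"))
  .select($"time", $"ap", $"cm".getItem("mid").as("mid"), $"event.ett", $"event.en", $"event.kv")
flat.show(10, false)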