xml_utils.py file:
```python
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET
def parse_xml(file_path):
    """Parse a Hadoop/Hive-style XML config file into a {name: value} dict."""
    properties = {}
    tree = ET.parse(file_path)
    root = tree.getroot()
    props = root.findall("property")
    for prop in props:
        name = prop.find('name').text
        value = prop.find('value').text
        properties[name] = value
    return properties


# test only
if __name__ == '__main__':
    xml_file = r'D:\hive\conf\hive-site.xml'
    conf_map = parse_xml(xml_file)
    print(conf_map)
```
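parse_xml expects the standard Hadoop-style layout used by hive-site.xml: a `<configuration>` root containing `<property>` elements, each with `<name>` and `<value>` children. A minimal sketch of what it returns (the property values are placeholders, and since ET.parse also accepts file-like objects, a StringIO stands in for the file here):

```python
# Hypothetical hive-site.xml content; the values are placeholders, not from a real cluster.
import io

from src.xml_utils import parse_xml

sample = io.StringIO("""
<configuration>
    <property>
        <name>hive.metastore.uris</name>
        <value>thrift://metastore-host:9083</value>
    </property>
    <property>
        <name>hive.metastore.sasl.enabled</name>
        <value>true</value>
    </property>
</configuration>
""")

# ET.parse accepts file objects as well as paths, so this runs without touching disk.
print(parse_xml(sample))
# {'hive.metastore.uris': 'thrift://metastore-host:9083', 'hive.metastore.sasl.enabled': 'true'}
```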
spark_utils.py file:
```python
# -*- coding: utf-8 -*-
import os

from pyspark.sql import SparkSession
from pyspark import SparkContext, SparkConf

from src.xml_utils import parse_xml

metastore_uris_key = "hive.metastore.uris"
metastore_principal_key = "hive.metastore.kerberos.principal"
metastore_sasl_enabled_key = "hive.metastore.sasl.enabled"


class SparkUtils(object):
    def __init__(self, hadoop_home,
                 hadoop_conf_dir,
                 principal,
                 keytab_path,
                 krb5_realm,
                 krb5_kdc,
                 krb5_conf,
                 hive_conf_dir,
                 log_level="INFO"):
        self.hadoop_home = hadoop_home
        self.hadoop_conf_dir = hadoop_conf_dir
        self.principal = principal
        self.keytab_path = keytab_path
        self.krb5_realm = krb5_realm
        self.krb5_kdc = krb5_kdc
        self.krb5_conf = krb5_conf
        self.hive_conf_dir = hive_conf_dir
        # Read the metastore connection settings straight out of hive-site.xml
        hive_site_map = parse_xml(f"{hive_conf_dir}/hive-site.xml")
        self.metastore_uris = hive_site_map.get(metastore_uris_key)
        self.metastore_principal = hive_site_map.get(metastore_principal_key)
        self.metastore_sasl_enabled = hive_site_map.get(metastore_sasl_enabled_key)
        # Point the driver and executor JVMs at the Kerberos configuration
        self.java_options = f"-Djava.security.krb5.conf={krb5_conf} -Djava.security.krb5.realm={krb5_realm} -Djava.security.krb5.kdc={krb5_kdc}"
        self.log_level = log_level

    def get_spark(self):
        os.environ['HADOOP_HOME'] = self.hadoop_home
        os.environ['HADOOP_CONF_DIR'] = self.hadoop_conf_dir
        conf = SparkConf().setAppName("pyspark-sql") \
            .setMaster("local[*]") \
            .set("spark.sql.catalogImplementation", "hive") \
            .set(metastore_uris_key, self.metastore_uris) \
            .set(metastore_principal_key, self.metastore_principal) \
            .set(metastore_sasl_enabled_key, self.metastore_sasl_enabled) \
            .set("spark.driver.extraJavaOptions", self.java_options) \
            .set("spark.executor.extraJavaOptions", self.java_options) \
            .set("spark.yarn.keytab", self.keytab_path) \
            .set("spark.yarn.principal", self.principal)
        sc = SparkContext(conf=conf)
        sc.setLogLevel(self.log_level)
        spark = SparkSession(sc)
        return spark
```
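Before running a real job it is worth confirming that the session can actually authenticate and reach the metastore. A minimal smoke test, assuming a SparkUtils instance built exactly as in the driver script below:

```python
# Smoke test for the session returned by SparkUtils.get_spark().
# `spark_utils` is assumed to be constructed as in the driver script below.
spark = spark_utils.get_spark()

spark.sql("show databases").show()         # succeeds only if the Kerberized metastore is reachable
spark.sql("describe default.tbl1").show()  # example table used in the driver script
spark.stop()
```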
Main script:
```python
# -*- coding: utf-8 -*-
import pandas as pd

from pyspark.sql.types import StructType, StructField, StringType, LongType

from src.spark_utils import SparkUtils

if __name__ == '__main__':
    keytab_path = '/D:/kerberos/user.keytab'  # keytab path
    principal = 'user@XXXXX.COM'
    hadoop_home = r'/D:\env\components\hadoop'
    hadoop_conf_dir = r'D:\env\components\hadoop\etc\hadoop-exp'
    krb5_realm = "XXXXX.COM"
    krb5_kdc = "kdc.xxx.com"
    krb5_conf = r"/D:\xxx\krb5.conf"
    hive_conf_dir = r"D:\hive\conf"
    log_level = "INFO"

    spark_utils = SparkUtils(hadoop_home,
                             hadoop_conf_dir,
                             principal,
                             keytab_path,
                             krb5_realm,
                             krb5_kdc,
                             krb5_conf,
                             hive_conf_dir,
                             log_level)
    spark = spark_utils.get_spark()

    # Pull both tables onto the driver as pandas DataFrames
    pd01 = spark.sql('select id, name, score from default.tbl1').toPandas()
    pd02 = spark.sql('select id, name, score from default.tbl2').toPandas()
    union_pd = pd.concat([pd01, pd02], ignore_index=True)

    # Sum the score for each (id, name) pair
    agg_pd = union_pd.groupby(['id', 'name']).agg({'score': 'sum'}) \
        .reset_index().rename(columns={'score': 'total_score'})

    agg_pd_schema = StructType([
        StructField('id', StringType(), True),
        StructField('name', StringType(), True),
        StructField('total_score', LongType(), True)
    ])
    agg_df = spark.createDataFrame(agg_pd, schema=agg_pd_schema)
    # agg_df.show()
    agg_df.write.mode('overwrite').saveAsTable('default.tbl3')
    spark.stop()
```
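Because toPandas() collects both tables onto the driver, this approach only suits small result sets. The same union and aggregation can also stay entirely in Spark; a sketch of that alternative, reusing the example table names from above:

```python
# Pure-Spark alternative to the pandas round trip above; table names are the
# same example names (default.tbl1/tbl2/tbl3) used in the driver script.
from pyspark.sql import functions as F

df = spark.table('default.tbl1').select('id', 'name', 'score') \
    .unionByName(spark.table('default.tbl2').select('id', 'name', 'score'))

agg_df = df.groupBy('id', 'name').agg(F.sum('score').alias('total_score'))
agg_df.write.mode('overwrite').saveAsTable('default.tbl3')
```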