文章目录
大数据Spark范式
python
from pyspark.sql import SparkSession
import json
def filter_debate(x):
    """Parse one JSON-encoded record and return its "subject" field.

    Args:
        x: A JSON string representing a single record; must contain a
           top-level "subject" key.

    Returns:
        The value of the record's "subject" field.
    """
    record = json.loads(x)
    return record["subject"]
if __name__ == '__main__':
    # Driver entry point: read JSON-lines records, extract each record's
    # "subject" field via filter_debate, and write the results out as text.
    spark = SparkSession.builder.appName("NonDebateFilter").getOrCreate()
    sc = spark.sparkContext

    # NOTE(review): "${DATA_ID:00001}" appears to be a placeholder/path
    # prefix that must be substituted with a concrete location before running.
    input_path = "${DATA_ID:00001}:Auser/data_en/*"
    rdd = sc.textFile(input_path)

    # BUG FIX: the original called rdd.map(debate), but no `debate` function
    # exists in this file — the function defined above is `filter_debate`.
    filtered_rdd = rdd.map(filter_debate)

    # Save all transformed records rather than only a 100-record sample.
    output_path = "${DATA_ID:00001}:Auser/data_en/01"
    filtered_rdd.saveAsTextFile(output_path)

    # If only the first 100 records are needed for inspection, handle that
    # separately after saving:
    # for i in filtered_rdd.take(100):
    #     print(i)

    sc.stop()