#!/usr/bin/python
# coding=utf-8

import sys
from pyspark.sql import SparkSession

# 创建SparkSession实例
# Entry point: create (or reuse) the SparkSession for this application.
spark = SparkSession.builder.appName("PySparkExample").getOrCreate()

# The input CSV path is required as the first command-line argument;
# fail with a usage message instead of an IndexError on sys.argv[1].
if len(sys.argv) < 2:
    sys.exit("Usage: spark-submit <script> <input-csv-path>")

# Load the CSV file into a DataFrame. inferSchema makes numeric columns
# (e.g. "salary") real numbers instead of strings, so the sum aggregation
# below works on typed values rather than Spark's implicit string cast.
df = (
    spark.read.option("delimiter", ",")
    .option("header", True)
    .option("inferSchema", True)
    .csv(sys.argv[1])
)

# Preview the first few rows of the DataFrame.
df.show(5)

# Aggregate: total salary per department.
# NOTE: DataFrame.show() returns None, so the original code assigned None
# to this variable; keep the aggregated DataFrame, then display it.
sum_salary_per_department = df.groupBy("department").agg({"salary": "sum"})
sum_salary_per_department.show()

# Stop the SparkSession to release cluster resources.
spark.stop()
