Here is my PySpark code for this:
from pyspark.sql.functions import *
from pyspark.sql.types import *
values = [
    (2019, "Jan", 10, 10),
    (2019, "Jan", 20, 20),
    (2019, "FEB", 30, 30),
    (2019, "FEB", 10, 10)
]
rdd = sc.parallelize(values)
schema = StructType([
    StructField("Year", IntegerType(), True),
    StructField("Mon", StringType(), True),
    StructField("value1", IntegerType(), True),
    StructField("value2", IntegerType(), True)
])
data = spark.createDataFrame(rdd, schema)
valu1Pivoted = data.groupBy("Year") \
.pivot("Mon") \
.sum("value1") \
.select(col("Year"), \
col("Jan").alias("JAN_VALUE1"), \
col("FEB").alias("FEB_VALUE1"))
valu2Pivoted = data.groupBy("Year") \
.pivot("Mon") \
.sum("value2") \
.select(col("Year"), \
col("Jan").alias("JAN_VALUE2"), \
col("FEB").alias("FEB_VALUE2"))
finalData = valu1Pivoted.join(valu2Pivoted, valu1Pivoted.Year == valu2Pivoted.Year, "inner") \
.drop(valu1Pivoted.Year) \
.select("Year", "JAN_VALUE1","FEB_VALUE1","JAN_VALUE2","FEB_VALUE2")
finalData.show(20,False)
Results:
+----+----------+----------+----------+----------+
|Year|JAN_VALUE1|FEB_VALUE1|JAN_VALUE2|FEB_VALUE2|
+----+----------+----------+----------+----------+
|2019|30 |40 |30 |40 |
+----+----------+----------+----------+----------+
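For reference, the same output can be produced without the join by pivoting once with two aggregations. This is only a minimal sketch under the same imports and data DataFrame as above; the singlePass name is just for illustration, and it relies on Spark naming the result columns {pivotValue}_{aggAlias} when a pivot carries more than one aggregation:

# one pivot, two aggregations; columns come out as Jan_VALUE1, Jan_VALUE2, FEB_VALUE1, FEB_VALUE2
singlePass = data.groupBy("Year") \
    .pivot("Mon", ["Jan", "FEB"]) \
    .agg(sum("value1").alias("VALUE1"),
         sum("value2").alias("VALUE2")) \
    .select(col("Year"),
            col("Jan_VALUE1").alias("JAN_VALUE1"),
            col("FEB_VALUE1"),
            col("Jan_VALUE2").alias("JAN_VALUE2"),
            col("FEB_VALUE2"))
singlePass.show(20, False)

Listing the pivot values explicitly ("Jan", "FEB") also saves Spark the extra job it would otherwise run just to collect the distinct values of Mon.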