Вот исходный датафрейм (DataFrame) с данными:
# Sample data: per-employee time (in seconds) spent in each status on a given date.
df = spark.createDataFrame(
    [
        ('1', '2019-11-02', '1', 50),
        ('1', '2019-11-02', '2', 10),
        ('1', '2019-11-01', '1', 110),
        ('1', '2019-11-01', '2', 80),
        ('2', '2019-11-02', '1', 280),
        ('2', '2019-11-02', '2', 95),
        ('2', '2019-11-01', '1', 3000),
        ('2', '2019-11-01', '2', 130),
    ],
    ['Employee_ID', 'Created_Date', 'status', 'Time_in_Seconds'],
)

# Pivot the status values into columns, summing Time_in_Seconds per employee.
# Passing the pivot values explicitly (they are known from the data above)
# avoids the extra Spark job that would otherwise scan the DataFrame to
# discover the distinct statuses, and makes the output column order
# deterministic. Output columns: Employee_ID, 1, 2.
df.groupBy('Employee_ID').pivot('status', ['1', '2']).agg(
    {'Time_in_Seconds': 'sum'}
).show()