This is how I solved my problem. The answer provided by @WaqarAhmed is also another way to solve the same problem.
from pyspark.sql.functions import avg
from pyspark.sql.window import Window

# One sliding window per look-back horizon, partitioned per device and
# ordered by date. rowsBetween(-n, 0) covers the current row plus the n
# preceding rows, which corresponds to n days only if there is exactly
# one row per device per day.
w_1day = Window.partitionBy('device').orderBy('date').rowsBetween(-1, 0)
w_2day = Window.partitionBy('device').orderBy('date').rowsBetween(-2, 0)
w_5day = Window.partitionBy('device').orderBy('date').rowsBetween(-5, 0)
w_7day = Window.partitionBy('device').orderBy('date').rowsBetween(-7, 0)
w_14day = Window.partitionBy('device').orderBy('date').rowsBetween(-14, 0)
w_21day = Window.partitionBy('device').orderBy('date').rowsBetween(-21, 0)
w_30day = Window.partitionBy('device').orderBy('date').rowsBetween(-30, 0)

# Add one rolling-average column per window.
df_avg = df.withColumn('rolling_1_day_average_q', avg('quality').over(w_1day))
df_avg = df_avg.withColumn('rolling_2_day_average_q', avg('quality').over(w_2day))
df_avg = df_avg.withColumn('rolling_5_day_average_q', avg('quality').over(w_5day))
df_avg = df_avg.withColumn('rolling_7_day_average_q', avg('quality').over(w_7day))
df_avg = df_avg.withColumn('rolling_14_day_average_q', avg('quality').over(w_14day))
df_avg = df_avg.withColumn('rolling_21_day_average_q', avg('quality').over(w_21day))
df_avg = df_avg.withColumn('rolling_30_day_average_q', avg('quality').over(w_30day))
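For what it's worth, the same columns can also be built in a loop so all the look-back horizons live in one place. This is just a compact sketch of the code above, assuming the same df with 'device', 'date', and 'quality' columns; the horizons list and the add_rolling_average helper are names I introduced for illustration:

from functools import reduce
from pyspark.sql.functions import avg
from pyspark.sql.window import Window

horizons = [1, 2, 5, 7, 14, 21, 30]

def add_rolling_average(frame, days):
    # rows-based frame: current row plus the preceding `days` rows per device
    w = Window.partitionBy('device').orderBy('date').rowsBetween(-days, 0)
    return frame.withColumn(f'rolling_{days}_day_average_q', avg('quality').over(w))

df_avg = reduce(add_rolling_average, horizons, df)

Note that a rows-based frame will span more than the intended number of calendar days if a device has missing dates; if that matters for your data, a range-based frame over a numeric day/timestamp offset (rangeBetween) is the usual alternative.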