For this case, use crossJoin or row_number.
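Both snippets below assume an active SparkSession bound to spark; if you need one for a quick local test, a minimal sketch (not part of the answer's own code) is:
from pyspark.sql import SparkSession

# Hypothetical local session, only for trying the snippets below
spark = SparkSession.builder.master("local[*]").appName("combine-single-row-dfs").getOrCreate()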
Example:
# Three single-row DataFrames to combine column-wise
df = spark.createDataFrame([('orange',)], ['name'])
df1 = spark.createDataFrame([(10,)], ['age'])
df2 = spark.createDataFrame([('delhi',)], ['place'])

# A cross join of single-row DataFrames yields exactly one combined row
df.crossJoin(df1).crossJoin(df2).show()
#+------+---+-----+
#| name|age|place|
#+------+---+-----+
#|orange| 10|delhi|
#+------+---+-----+
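The same crossJoin idea extends to any number of single-row DataFrames; a minimal sketch with functools.reduce (the dfs list is just illustrative):
from functools import reduce

dfs = [df, df1, df2]                 # each DataFrame holds exactly one row
reduce(lambda a, b: a.crossJoin(b), dfs).show()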
# Using a window: give each DataFrame a matching row number and join on it
from pyspark.sql import Window
from pyspark.sql.functions import lit, row_number

# Ordering by a constant puts all rows in a single partition; fine for tiny DataFrames
w = Window.orderBy(lit(1))

df = df.withColumn("rn", row_number().over(w))
df1 = df1.withColumn("rn", row_number().over(w))
df2 = df2.withColumn("rn", row_number().over(w))

df.join(df1, ['rn'], 'inner').join(df2, ['rn'], 'inner').drop('rn').show()
#+------+---+-----+
#| name|age|place|
#+------+---+-----+
#|orange| 10|delhi|
#+------+---+-----+