You can do this by grouping and counting.
from pyspark.sql import functions as F
from pyspark.sql.functions import when
from pyspark.sql.types import StructType, StructField, StringType

def relation_type(df, fromCol, toCol):
    # For each value of fromCol, count the distinct toCol values it maps to,
    # then keep only the maximum of those counts.
    df2 = df.groupBy(fromCol)\
            .agg(F.countDistinct(toCol).alias('val_count'))\
            .agg(F.max('val_count').alias('max_rel_count'))
    # If any fromCol value maps to more than one toCol value, the relation in
    # this direction is one-to-many ('OM'), otherwise one-to-one ('OO').
    return df2.withColumn('mapping', when(df2['max_rel_count'] > 1, 'OM')
                                     .otherwise('OO'))\
              .drop('max_rel_count')

def relation_types(df, cols):
    # Build a schema with one label column plus one column per input column.
    schemaArr = [StructField('ColName', StringType(), True)]
    for i in cols:
        schemaArr.append(StructField(i, StringType(), True))
    schema = StructType(schemaArr)

    # sqlContext and sc are the SQLContext and SparkContext of the session.
    result = sqlContext.createDataFrame(sc.emptyRDD(), schema)

    # Fill the matrix: one row per column, one mapping value per column pair.
    for i in cols:
        rowDict = [i]
        for j in cols:
            val = relation_type(df, i, j).collect()[0]
            rowDict.append(val['mapping'])
        row = sqlContext.createDataFrame([rowDict])
        result = result.union(row)

    return result
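For a single pair of columns you can also call relation_type on its own; a minimal sketch (df and the column names are just the ones used in the example below):

relation_type(df, 'id', 't').show()
# returns a single-row DataFrame with one column, 'mapping',
# holding either 'OO' or 'OM' for the id -> t direction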
Then call it with the columns you need:
relation_types(df, ['id', 't']).show()
The result:
+-------+---+---+
|ColName| id| t|
+-------+---+---+
| id| OO| OM|
| t| OM| OO|
+-------+---+---+
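For reference, a small test DataFrame that should produce a matrix of this shape can be built like this (a sketch; the data is made up and sqlContext is assumed to be available, as above):

test_df = sqlContext.createDataFrame(
    [(1, 'a'), (1, 'b'), (2, 'a')],  # id 1 maps to two t values, t 'a' maps to two ids
    ['id', 't'])
relation_types(test_df, ['id', 't']).show()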