I believe you can achieve this with the approach below.
// assumes an active SparkSession named `spark` (e.g. in spark-shell); the implicits import is needed for toDF
import org.apache.spark.sql.DataFrame
import spark.implicits._

val input_rdd = spark.sparkContext.parallelize(List(("A1", "A10"), ("A1", "A2"), ("A2", "A3"), ("A3", "A4"), ("A5", "A7"), ("A7", "A6"), ("A8", "A9"), ("A4", "A11"), ("A11", "A12"), ("A6", "A13")))
val input_df = input_rdd.toDF("Parent", "Child")
input_df.createOrReplaceTempView("TABLE1")
input_df.show()
Input
+------+-----+
|Parent|Child|
+------+-----+
| A1| A10|
| A1| A2|
| A2| A3|
| A3| A4|
| A5| A7|
| A7| A6|
| A8| A9|
| A4| A11|
| A11| A12|
| A6| A13|
+------+-----+
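As a side note (not part of the original answer): the "roots" that the queries below rely on are simply the parents that never appear as a child, which can be checked directly against TABLE1:

// for the sample input this returns A1, A5 and A8
spark.sql("select distinct parent from TABLE1 where parent not in (select distinct child from TABLE1)").show()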
// linkchild function: joins the table with itself to push each child one level closer to its root
def linkchild(df: DataFrame): DataFrame = {
  df.createOrReplaceTempView("TEMP")
  val link_child_df = spark.sql("""select distinct a.parent, b.child from TEMP a inner join TEMP b on a.parent = b.parent or a.child = b.parent""")
  link_child_df
}
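For illustration (this snippet is not part of the original answer and assumes the input_df defined above), a single call to linkchild can be inspected on its own:

// one propagation step: the original pairs survive, and e.g. (A1, A3) appears because A1 -> A2 and A2 -> A3
val one_step = linkchild(input_df)
one_step.show()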
// findroot function: validates whether every child is already linked to a root and generates the output
def findroot(rdf: DataFrame): Unit = {
  val link_child_df = linkchild(rdf)
  link_child_df.createOrReplaceTempView("TEMP1")
  // count the children from TABLE1 that are not yet linked to a root (a parent that never appears as a child)
  val cnt = spark.sql("""select * from TABLE1 where child not in (select child from (select * from (select distinct a.parent, b.child from TEMP1 a inner join TEMP1 b on a.parent = b.parent or a.child = b.parent
    where a.parent not in (select distinct child from TABLE1))))""").count()
  if (cnt == 0) {
    // every child is reachable from a root, so print the final (root, child) pairs
    spark.sql("""select * from (select distinct a.parent, b.child from TEMP1 a inner join TEMP1 b on a.parent = b.parent or a.child = b.parent
      where a.parent not in (select distinct child from TABLE1)) order by parent, child""").show()
  } else {
    // otherwise recurse on the partially expanded links until the closure is complete
    findroot(link_child_df)
  }
}
// Calling the findroot function for the first time with input_df, which in turn calls linkchild until it reaches the target
findroot(input_df)
Output
+------+-----+
|parent|child|
+------+-----+
| A1| A10|
| A1| A11|
| A1| A12|
| A1| A14|
| A1| A15|
| A1| A16|
| A1| A17|
| A1| A18|
| A1| A2|
| A1| A3|
| A1| A4|
| A5| A13|
| A5| A6|
| A5| A7|
| A8| A9|
+------+-----+
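For comparison, here is a hedged, non-recursive sketch of the same transitive-closure idea written with the DataFrame API instead of SQL. It is not part of the original answer; the name rootsIteratively is mine, and it assumes the same SparkSession `spark` and the input_df defined above.

// Not from the original answer: repeatedly self-join the edge list until the set of
// (ancestor, descendant) pairs stops growing, then keep only the pairs whose parent is a root.
import org.apache.spark.sql.functions.col

def rootsIteratively(edges: DataFrame): DataFrame = {
  var current = edges.toDF("parent", "child").distinct()
  var growing = true
  while (growing) {
    // push every child one level further away from its ancestor
    val stepped = current.as("a")
      .join(current.as("b"), col("a.child") === col("b.parent"))
      .select(col("a.parent").as("parent"), col("b.child").as("child"))
    val next = current.union(stepped).distinct()
    growing = next.count() > current.count()
    current = next
  }
  // keep only rows whose parent never appears as a child, i.e. the roots
  val knownChildren = current.select(col("child").as("known_child")).distinct()
  current.join(knownChildren, col("parent") === col("known_child"), "left_anti")
}

rootsIteratively(input_df).orderBy("parent", "child").show()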