Since df1 and df2 share no join key, we have to do a crossJoin. Check out this RDD-based solution:
scala> val df1 = Seq(
| ("001",Array(123,124,125)),
| ("002",Array(501,502,503,504)),
| ("003",Array(123,501,125)) ).toDF("receipt_no","items_no_set")
df1: org.apache.spark.sql.DataFrame = [receipt_no: string, items_no_set: array<int>]
scala> val df2 = Seq(
| ("909",Array(123,124)),
| ("908",Array(501,502,503)),
| ("907",Array(123,501,125)) ).toDF("product_no","product_items_set")
df2: org.apache.spark.sql.DataFrame = [product_no: string, product_items_set: array<int>]
scala> import org.apache.spark.sql.types._
import org.apache.spark.sql.types._
scala> import org.apache.spark.sql.Row
import org.apache.spark.sql.Row
scala> val df3 = df1.crossJoin(df2)
df3: org.apache.spark.sql.DataFrame = [receipt_no: string, items_no_set: array<int> ... 2 more fields]
scala> val rdd2 = df3.rdd.filter { x =>
| val items = x.getAs[Seq[Int]]("items_no_set")
| val prds = x.getAs[Seq[Int]]("product_items_set")
| // keep the pair only when every product item occurs in the receipt's item set
| prds.intersect(items).length == prds.length
| }.map(x => Row(x(0), x(1), x(2)))
rdd2: org.apache.spark.rdd.RDD[org.apache.spark.sql.Row] = MapPartitionsRDD[76] at map at <console>:50
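The predicate inside filter is just a subset test: a row survives when every element of product_items_set also appears in items_no_set. On plain Scala collections it behaves like this (a quick illustration with made-up values, not Spark-specific):

val items = Seq(123, 124, 125)               // a receipt's item set
val prds  = Seq(123, 124)                    // a product's item set
prds.intersect(items).length == prds.length  // true: every product item is on the receipt
Seq(123, 999).intersect(items).length == 2   // false: 999 is not on the receipt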
scala> val schema = df1.schema.add(StructField("product_no",StringType))
schema: org.apache.spark.sql.types.StructType = StructType(StructField(receipt_no,StringType,true), StructField(items_no_set,ArrayType(IntegerType,false),true), StructField(product_no,StringType,true))
scala> spark.createDataFrame(rdd2,schema).show(false)
+----------+--------------------+----------+
|receipt_no|items_no_set |product_no|
+----------+--------------------+----------+
|001 |[123, 124, 125] |909 |
|002 |[501, 502, 503, 504]|908 |
|003 |[123, 501, 125] |907 |
+----------+--------------------+----------+
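For completeness, the same subset check can stay entirely in the DataFrame API on Spark 2.4+, where array_intersect is available. This is a sketch under the assumption that product_items_set contains no duplicate items (array_intersect deduplicates); it avoids the detour through RDD[Row] and a hand-built schema:

import org.apache.spark.sql.functions.{array_intersect, broadcast, col, size}

// keep a (receipt, product) pair when every product item occurs in the receipt's items
val result = df1.crossJoin(broadcast(df2))   // broadcast is optional; useful when df2 is small
  .where(size(array_intersect(col("product_items_set"), col("items_no_set")))
           === size(col("product_items_set")))
  .select("receipt_no", "items_no_set", "product_no")

result.show(false)

Broadcasting the smaller side keeps the cross join from shuffling both tables; drop it if df2 is large.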