Можно использовать arrays_zip
и transform
from pyspark.sql.functions import expr

# One-row DataFrame with two parallel (equal-length) integer arrays.
df = spark.createDataFrame(
[([5, 4, 2, 4, 3], [4, 3, 1, 2, 1])], ("array1", "array2")
)
# arrays_zip pairs up the i-th elements of both columns into structs,
# and the transform higher-order function subtracts within each pair.
# Both are available in the SQL expression language since Spark 2.4,
# so this runs natively in Spark SQL with no Python udf round-trip.
df.withColumn(
"array3",
expr("transform(arrays_zip(array1, array2), x -> x.array1 - x.array2)")
).show()
# +---------------+---------------+---------------+
# | array1| array2| array3|
# +---------------+---------------+---------------+
# |[5, 4, 2, 4, 3]|[4, 3, 1, 2, 1]|[1, 1, 1, 2, 2]|
# +---------------+---------------+---------------+
Для настоящего Python udf
потребуется эквивалентная логика, например:
from pyspark.sql.functions import udf

@udf("array<double>")
def differencer(xs, ys):
    """Element-wise difference of two array columns as a Python udf.

    Returns null when either input column is null; otherwise a list of
    float(x - y) over the paired elements (zip truncates to the shorter
    array, matching the original behavior).
    """
    # Explicit None test instead of `if xs and ys`: the truthiness check
    # also treated *empty* arrays as missing, returning null where an
    # empty result array [] is the correct answer.
    if xs is None or ys is None:
        return None
    return [float(x - y) for x, y in zip(xs, ys)]

df.withColumn("array3", differencer("array1", "array2")).show()
# +---------------+---------------+--------------------+
# | array1| array2| array3|
# +---------------+---------------+--------------------+
# |[5, 4, 2, 4, 3]|[4, 3, 1, 2, 1]|[1.0, 1.0, 1.0, 2...|
# +---------------+---------------+--------------------+