Here is one approach: iterate over the schema and generate the Hive DDL:
from pyspark.sql.types import StructType, StructField, StringType, LongType

# The last two fields (date, subid) will become the Hive partition columns.
schema = StructType([
    StructField('name', StringType()),
    StructField('age', StringType()),
    StructField('value', LongType()),
    StructField('date', StringType()),
    StructField('subid', StringType())
])
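# dataType.simpleString() yields the Hive-compatible type name,
# e.g. StringType -> 'string', LongType -> 'bigint'.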
hiveCols = ""
hivePartitionCols = ""
for idx, c in enumerate(schema):
# populate hive schema
if(idx < len(schema[:-2])):
hiveCols += "{0} {1}".format(c.name, c.dataType.simpleString())
if(idx < len(schema[:-2]) - 1):
hiveCols += ","
# populate hive partition
if(idx >= len(schema) - 2):
hivePartitionCols += "{0} {1}".format(c.name, c.dataType.simpleString())
if(idx < len(schema) - 1):
hivePartitionCols += ","
hiveCreateSql = "create table schema.final({0}) partitioned by ({1}) stored as parquet".format(hiveCols, hivePartitionCols)
# Produces:
# create table schema.final(name string,age string,value bigint) partitioned by (date string,subid string) stored as parquet
spark.sql(hiveCreateSql)
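Once the table exists, you can load a DataFrame into it. A minimal sketch, assuming a hypothetical DataFrame `df` with the same five columns (the partition columns must come last in the select) and that your Hive setup allows dynamic partitioning:

# Enable dynamic partitioning so Hive derives partitions from the data.
spark.conf.set("hive.exec.dynamic.partition", "true")
spark.conf.set("hive.exec.dynamic.partition.mode", "nonstrict")

# df is assumed to exist; order columns to match the table, partition columns last.
df.select("name", "age", "value", "date", "subid") \
    .write.insertInto("schema.final")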