I have the following code:
import java.io.File

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.{SparkConf, SparkContext}

object RDFBenchVerticalPartionedTables {

  def main(args: Array[String]): Unit = {
    println("Start of programm .... ")

    val conf = new SparkConf().setMaster("local").setAppName("SQLSPARK")
    Logger.getLogger("org").setLevel(Level.OFF)
    Logger.getLogger("akka").setLevel(Level.OFF)
    val sc = new SparkContext(conf)
    sc.setLogLevel("ERROR")
    println("Conf and SC declared... ")

    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("SparkConversionSingleTable")
      .getOrCreate()
    println("SparkSession declared... ")

    println("Before Agrs..... ")
    // args(0): input CSV directory; args(1)-(3): AVRO, ORC and Parquet output directories
    val filePathCSV = args(0)
    val filePathAVRO = args(1)
    val filePathORC = args(2)
    val filePathParquet = args(3)
    println("After Agrs..... ")

    val csvFiles = new File(filePathCSV).list()
    println("After List of Files Agrs..... " + csvFiles.length)

    println("Before the foreach ... ")
    csvFiles.foreach { verticalTableName =>
      println("inside the foreach ... ")
      // strip the ".csv" extension to get the table name
      val verticalTableName2 = verticalTableName.dropRight(4)
      val RDFVerticalTableDF = spark.read.format("csv")
        .option("header", "true")
        .option("inferSchema", "true")
        .load(filePathCSV + "/" + verticalTableName)
        .toDF()
      RDFVerticalTableDF.write.format("com.databricks.spark.avro").save(filePathAVRO + "/" + verticalTableName2 + ".avro")
      RDFVerticalTableDF.write.parquet(filePathParquet + "/" + verticalTableName2 + ".parquet")
      RDFVerticalTableDF.write.orc(filePathORC + "/" + verticalTableName2 + ".orc")
      println("Vertical Table: '" + verticalTableName2 + "' Has been Successfully Converted to AVRO, PARQUET and ORC !")
    }
  }
}
This class takes the list of CSV files in the directory given as args(0) and saves each of them in three other formats (Avro, ORC and Parquet) to the three output directories given as args(1), args(2) and args(3).
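(As an aside: the program indexes args(0) through args(3) without checking args.length, so a missing argument would die with an ArrayIndexOutOfBoundsException. A minimal guard, as a sketch, with an illustrative usage string that is not part of the original program:

// Sketch: validate that all four expected arguments are present before using them.
if (args.length < 4) {
  System.err.println("Usage: RDFBenchVerticalPartionedTables <csvDir> <avroDir> <orcDir> <parquetDir>")
  sys.exit(1)
}
)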
I tried to submit this job with spark-submit on Windows and it works, but when I run the same job on Ubuntu it fails with this error:
ubuntu@ragab:~$ spark-submit --class RDFBenchVerticalPartionedTables --master local[*] /home/ubuntu/testjar/rdfschemaconversion_2.11-0.1.jar "/data/RDFBench4/VerticalPartionnedTables/VerticalPartitionedTables100" "/data/RDFBench3/ConvertedData/SP2Bench100/AVRO/VerticalTables" "/data/RDFBench3/ConvertedData/SP2Bench100/ORC/VerticalTables" "/data/RDFBench3/ConvertedData/SP2Bench100/Parquet"
19/05/04 18:10:06 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Start of programm ....
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
Conf and SC declared...
SparkSession declared...
Before Agrs.....
After Agrs.....
After List of Files Agrs..... 25
Before the foreach ...
Exception in thread "main" java.lang.NoSuchMethodError: scala.Predef$.refArrayOps([Ljava/lang/Object;)Lscala/collection/mutable/ArrayOps;
at RDFBenchVerticalPartionedTables$.main(RDFBenchVerticalPartionedTables.scala:45)
at RDFBenchVerticalPartionedTables.main(RDFBenchVerticalPartionedTables.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:849)
at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:167)
at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:195)
at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:86)
at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:924)
at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:933)
at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
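As far as I can tell, scala.Predef$.refArrayOps returning scala.collection.mutable.ArrayOps is the Scala 2.11 signature (the return type changed in Scala 2.12), and the failing frame at RDFBenchVerticalPartionedTables.scala:45 presumably matches the csvFiles.foreach call, since foreach on an Array goes through that implicit conversion. So this looks like a Scala binary version mismatch between my 2.11 jar and the Scala version the Ubuntu Spark build was compiled against. One way to check is spark-submit --version, which prints both versions; alternatively, a quick sketch to paste into spark-shell on the Ubuntu machine:

// Prints the Scala version the running Spark was built against,
// plus the Spark version itself.
println(scala.util.Properties.versionString) // e.g. "version 2.12.8"
println(org.apache.spark.SPARK_VERSION)      // e.g. "2.4.2"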
This is my sbt file:
name := "RDFSchemaConversion"
version := "0.1"
scalaVersion := "2.11.12"
mainClass in (Compile, run) := Some("RDFBenchVerticalPartionedTables")
mainClass in (Compile, packageBin) := Some("RDFBenchVerticalPartionedTables")
libraryDependencies += "org.apache.spark" %% "spark-core" % "2.3.0"
libraryDependencies += "org.apache.spark" %% "spark-sql" % "2.3.0"
libraryDependencies += "com.typesafe" % "config" % "1.3.1"
libraryDependencies += "com.databricks" %% "spark-avro" % "4.0.0"