If-else condition in Spark to find a value from a CSV file
0 votes
/ 27 September 2019

I want to read a CSV file into dfTRUEcsv.

In the example below, how do I get the values (03, 05) and 11 as strings? I want to pass those strings as parameters to pick up the files from the corresponding folders:

I will pass (03, 05) and 11 as parameters:
if TRUE, loop over each folder: Folder\03 ;
                                Folder\05 ;
else: Folder\11

+-------------+--------------+--------------------+-----------------+--------+
|Calendar_year|Calendar_month|EDAP_Data_Load_Statu|lake_refined_date|isreload|
+-------------+--------------+--------------------+-----------------+--------+
|         2019|             2|                HIST|         20190829|   FALSE|
|         2019|             3|                HIST|         20190829|    TRUE|
|         2019|             4|                HIST|         20190829|   FALSE|
|         2019|             5|                HIST|         20190829|    TRUE|
|         2019|            11|                HIST|         20190829|   FALSE|
+-------------+--------------+--------------------+-----------------+--------+

       if the file has rows with isreload == 'TRUE'
            var Foldercolumn = Calendar_month of those rows
            Foldercolumn = 03
            Foldercolumn = 05
       else
            var Foldercolumn = max(Calendar_year), max(Calendar_month)
            Foldercolumn = 11
       end if

Below is my Spark code for the above requirement:

val destinationContainerPath = "Finance/Data"
val dfCSVLogs = readCSV(s"$destinationContainerPath/sourcecsv.csv")

val dfTRUEcsv = dfCSVLogs.filter("isreload == 'TRUE'")
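In Scala/Spark terms, the pseudocode above would look roughly like this driver-side sketch, assuming the control file is already loaded into dfCSVLogs with the schema shown (the lpad zero-padding and the two-step max are illustrative assumptions, not part of the original requirement):

import org.apache.spark.sql.functions._

val trueRows = dfCSVLogs.filter(col("isreload") === "TRUE")

val folderColumns: Seq[String] =
  if (!trueRows.isEmpty) {          // Dataset.isEmpty needs Spark 2.4+
    // TRUE rows exist: take their Calendar_month values, zero-padded ("3" -> "03")
    trueRows.select(lpad(col("Calendar_month"), 2, "0")).collect().map(_.getString(0)).toSeq
  } else {
    // no TRUE rows: fall back to the month of the latest (year, month) pair
    val maxYear = dfCSVLogs.agg(max(col("Calendar_year").cast("int"))).head.getInt(0)
    val maxMonth = dfCSVLogs
      .filter(col("Calendar_year").cast("int") === maxYear)
      .agg(max(col("Calendar_month").cast("int"))).head.getInt(0)
    Seq(f"$maxMonth%02d")
  }

// folderColumns (e.g. Seq("03", "05") or Seq("11")) can then drive a loop
// over paths such as s"Folder/$m"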

1 Answer

0 votes
/ 27 September 2019
//read input control CSV file 
    scala> val df = spark.read.format("csv").option("header", "true").load("file.csv")
    scala> df.show(false)
    +-------------+--------------+--------------------+-----------------+--------+
    |Calendar_year|Calendar_month|EDAP_Data_Load_Statu|lake_refined_date|isreload|
    +-------------+--------------+--------------------+-----------------+--------+
    |2018         |12            |HIST                |20190829         |FALSE   |
    |2019         |2             |HIST                |20190829         |FALSE   |
    |2019         |3             |HIST                |20190829         |TRUE    |
    |2019         |4             |HIST                |20190829         |FALSE   |
    |2019         |11            |HIST                |20190829         |FALSE   |
    |2019         |5             |HIST                |20190829         |TRUE    |
    +-------------+--------------+--------------------+-----------------+--------+
    //initialize a variable for the max year and month
    //note: the execution below can be modified per your requirement; simply use filter to get the max under a particular condition

    scala> val maxYearMonth = df
         |   .select(struct(col("Calendar_year").cast("Int"), col("Calendar_month").cast("Int")) as "ym")
         |   .agg(max("ym") as "max")
         |   .selectExpr("stack(1, max.col1, max.col2) as (year, month)")
         |   .select(concat(col("year"), lit("/"), col("month")))
         |   .rdd.collect.map(r => r(0)).mkString
    maxYearMonth: String = 2019/11
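    Why the casts matter: max over a struct compares its fields left to right, and on string fields the comparison is lexicographic, so "9" would beat "11". A quick illustration (my example, not from the original answer):

    import spark.implicits._   // pre-imported in spark-shell

    Seq(("2019", "9"), ("2019", "11")).toDF("y", "m")
      .agg(max(struct(col("y"), col("m"))))
      .show()
    // prints [2019, 9]: without the Int casts, "9" > "11" as strings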

    //Add a temporary column to the input DataFrame
    scala> val df2 = df.withColumn("strFoldercolumn", when(col("isreload") === "TRUE", concat(col("Calendar_year"), lit("/"),col("Calendar_month"))).otherwise(lit(maxYearMonth)))
    scala> df2.show(false)
    +-------------+--------------+--------------------+-----------------+--------+---------------+
    |Calendar_year|Calendar_month|EDAP_Data_Load_Statu|lake_refined_date|isreload|strFoldercolumn|
    +-------------+--------------+--------------------+-----------------+--------+---------------+
    |2018         |12            |HIST                |20190829         |FALSE   |2019/11        |
    |2019         |2             |HIST                |20190829         |FALSE   |2019/11        |
    |2019         |3             |HIST                |20190829         |TRUE    |2019/3         |
    |2019         |4             |HIST                |20190829         |FALSE   |2019/11        |
    |2019         |11            |HIST                |20190829         |FALSE   |2019/11        |
    |2019         |5             |HIST                |20190829         |TRUE    |2019/5         |
    +-------------+--------------+--------------------+-----------------+--------+---------------+


    //collect the distinct values of column strFoldercolumn into a list
    //(extract the string from each Row, so the paths below don't pick up the Row's surrounding brackets)
    scala> val strFoldercolumn = df2.select("strFoldercolumn").distinct.rdd.collect.map(_.getString(0)).toList
    strFoldercolumn: List[String] = List(2019/5, 2019/11, 2019/3)

    //loop over each value
    scala> strFoldercolumn.foreach { x =>
         |   val csvPath = "folder/" + x + "/*.csv"
         |   val srcdf = spark.read.format("csv").option("header", "true").load(csvPath)
         |   // Write logic to copy or write srcdf to your destination folder
         | }
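
    To fill in the copy step, one option is a plain DataFrame write per folder. A minimal sketch, assuming a destination root of "destination/" (the path and the overwrite mode are my assumptions, not from the original answer):

    strFoldercolumn.foreach { x =>
      val srcdf = spark.read.format("csv").option("header", "true").load("folder/" + x + "/*.csv")
      // "destination/" is an assumed root; overwrite replaces any earlier output for that month
      srcdf.write.mode("overwrite").option("header", "true").csv("destination/" + x)
    }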