Есть ли способ сгладить вложенные JSON в потоковой искре? - PullRequest
0 голосов
/ 27 апреля 2020

Я написал код пакетного (Batch) задания для выравнивания данных, который работает нормально, но когда я попытался использовать тот же фрагмент кода в задании Spark Structured Streaming, он выдаёт следующую ошибку: «Запросы с источниками потоковой передачи должны выполняться с помощью writeStream.start();».

Так есть ли способ выравнивания вложенного JSON в потоковых заданиях? Пример ввода (вложенный JSON): {"name":"Akash","age":26,"watches":{"name":"Apple","models":["Apple Watch Series 5","Apple Watch Nike"]},"phones":[{"name":"Apple","models":["iphone X","iphone XR","iphone XS","iphone 11","iphone 11 Pro"]},{"name":"Samsung","models":["Galaxy Note10","Galaxy Note10+","Galaxy S10e","Galaxy S10","Galaxy S10+"]},{"name":"Google","models":["Pixel 3","Pixel 3a"]}]}

Ожидаемый результат: выровненный (плоский) вывод после выравнивания.

ниже приведен фрагмент кода.

/**
 * Recursively flattens a Dataset whose schema contains nested struct and/or
 * array columns into a Dataset of scalar columns only.
 *
 * On each pass the first nested column found is flattened and the method
 * recurses on the result:
 *  - an array column is replaced by {@code explode_outer(col)} (one row per
 *    element; null/empty arrays still produce a row);
 *  - a struct column is replaced by one column per child field, renamed
 *    {@code parent_child} so the dot disappears from the name.
 *
 * @param ds dataset to flatten (batch or streaming)
 * @return a dataset with no array or struct columns left
 */
private static org.apache.spark.sql.Dataset<org.apache.spark.sql.Row> flattenJSONdf(
        org.apache.spark.sql.Dataset<org.apache.spark.sql.Row> ds) {
    org.apache.spark.sql.types.StructField[] fields = ds.schema().fields();

    for (org.apache.spark.sql.types.StructField field : fields) {
        org.apache.spark.sql.types.DataType fieldType = field.dataType();
        String fieldName = field.name();

        if (fieldType instanceof org.apache.spark.sql.types.ArrayType) {
            // Keep every other column as-is; replace the array column with
            // explode_outer(...) under its original name.
            java.util.List<String> exprs = new java.util.ArrayList<>();
            for (org.apache.spark.sql.types.StructField s : fields) {
                if (!fieldName.equals(s.name())) {
                    // Back-ticks guard against names with special characters.
                    exprs.add(String.format("`%s`", s.name()));
                }
            }
            exprs.add(String.format("explode_outer(`%s`) as `%s`", fieldName,
                    fieldName));

            return flattenJSONdf(ds.selectExpr(exprs.toArray(new String[0])));
        }

        if (fieldType instanceof org.apache.spark.sql.types.StructType) {
            // Keep every other column, then append one renamed column per
            // child field of the struct (parent.child -> parent_child).
            java.util.List<org.apache.spark.sql.Column> cols = new java.util.ArrayList<>();
            for (org.apache.spark.sql.types.StructField s : fields) {
                if (!fieldName.equals(s.name())) {
                    cols.add(org.apache.spark.sql.functions.col(s.name()));
                }
            }
            for (String child : ((org.apache.spark.sql.types.StructType) fieldType)
                    .fieldNames()) {
                String nested = fieldName + "." + child;
                cols.add(org.apache.spark.sql.functions.col(nested)
                        .as(nested.replace(".", "_")));
            }

            // Dataset.select(Column...) takes the array directly; no raw
            // scala.collection.Seq conversion via JavaConverters is needed.
            return flattenJSONdf(ds.select(
                    cols.toArray(new org.apache.spark.sql.Column[0])));
        }
    }

    // No array or struct columns remain: the schema is flat.
    return ds;
}

1 Ответ

0 голосов
/ 28 апреля 2020

Примечание: приведённый ниже код написан на Scala, и я использовал Spark Structured Streaming.

Вы можете использовать функцию org.apache.spark.sql.functions.explode для выравнивания столбцов массива. Пожалуйста, проверьте код ниже.


scala> import org.apache.spark.sql.types._
import org.apache.spark.sql.types._

scala>  val schema = DataType.fromJson("""{"type":"struct","fields":[{"name":"age","type":"long","nullable":true,"metadata":{}},{"name":"name","type":"string","nullable":true,"metadata":{}},{"name":"phones","type":{"type":"array","elementType":{"type":"struct","fields":[{"name":"models","type":{"type":"array","elementType":"string","containsNull":true},"nullable":true,"metadata":{}},{"name":"name","type":"string","nullable":true,"metadata":{}}]},"containsNull":true},"nullable":true,"metadata":{}},{"name":"watches","type":{"type":"struct","fields":[{"name":"models","type":{"type":"array","elementType":"string","containsNull":true},"nullable":true,"metadata":{}},{"name":"name","type":"string","nullable":true,"metadata":{}}]},"nullable":true,"metadata":{}}]}""").asInstanceOf[StructType]
schema: org.apache.spark.sql.types.StructType = StructType(StructField(age,LongType,true), StructField(name,StringType,true), StructField(phones,ArrayType(StructType(StructField(models,ArrayType(StringType,true),true), StructField(name,StringType,true)),true),true), StructField(watches,StructType(StructField(models,ArrayType(StringType,true),true), StructField(name,StringType,true)),true))

scala> val streamDF = spark.readStream.format("json").schema(schema).load("/tmp/jdata")
streamDF: org.apache.spark.sql.DataFrame = [age: bigint, name: string ... 2 more fields]

scala> :paste
// Entering paste mode (ctrl-D to finish)

// Flatten the streaming DataFrame column by column.
// `watches` is a struct: explode its `models` array and copy its `name`.
streamDF
.withColumn("watches_models",explode($"watches.models")).withColumn("watches_name",$"watches.name")
// `phones` is an array of structs, so $"phones.models" is an array of
// arrays: the first explode unnests the outer array, the second the inner.
.withColumn("phones_models",explode($"phones.models")).withColumn("phones_models",explode($"phones_models"))
// NOTE(review): `phones.name` is exploded independently of `phones.models`,
// so every model row is cross-joined with every phone name — visible in the
// Batch 0 output below, where each model appears once per Apple/Samsung/
// Google. To keep model/name pairs aligned, explode the `phones` array
// once and then select both fields from the exploded struct.
.withColumn("phones_name",explode($"phones.name"))
// Drop the original nested columns now that they have been flattened.
.drop("watches","phones")
// Streaming queries must be started via writeStream.start(); the console
// sink prints each micro-batch, and "append" mode emits only new rows.
.writeStream
.format("console")
.outputMode("append")
.start()
.awaitTermination()

// Exiting paste mode, now interpreting.

-------------------------------------------
Batch: 0
-------------------------------------------
+---+------+--------------------+------------+--------------+-----------+
|age|  name|      watches_models|watches_name| phones_models|phones_name|
+---+------+--------------------+------------+--------------+-----------+
| 26| Akash|Apple Watch Series 5|       Apple|      iphone X|      Apple|
| 26| Akash|Apple Watch Series 5|       Apple|      iphone X|    Samsung|
| 26| Akash|Apple Watch Series 5|       Apple|      iphone X|     Google|
| 26| Akash|Apple Watch Series 5|       Apple|     iphone XR|      Apple|
| 26| Akash|Apple Watch Series 5|       Apple|     iphone XR|    Samsung|
| 26| Akash|Apple Watch Series 5|       Apple|     iphone XR|     Google|
| 26| Akash|Apple Watch Series 5|       Apple|     iphone XS|      Apple|
| 26| Akash|Apple Watch Series 5|       Apple|     iphone XS|    Samsung|
| 26| Akash|Apple Watch Series 5|       Apple|     iphone XS|     Google|
| 26| Akash|Apple Watch Series 5|       Apple|     iphone 11|      Apple|
| 26| Akash|Apple Watch Series 5|       Apple|     iphone 11|    Samsung|
| 26| Akash|Apple Watch Series 5|       Apple|     iphone 11|     Google|
| 26| Akash|Apple Watch Series 5|       Apple| iphone 11 Pro|      Apple|
| 26| Akash|Apple Watch Series 5|       Apple| iphone 11 Pro|    Samsung|
| 26| Akash|Apple Watch Series 5|       Apple| iphone 11 Pro|     Google|
| 26| Akash|Apple Watch Series 5|       Apple| Galaxy Note10|      Apple|
| 26| Akash|Apple Watch Series 5|       Apple| Galaxy Note10|    Samsung|
| 26| Akash|Apple Watch Series 5|       Apple| Galaxy Note10|     Google|
| 26| Akash|Apple Watch Series 5|       Apple|Galaxy Note10+|      Apple|
| 26| Akash|Apple Watch Series 5|       Apple|Galaxy Note10+|    Samsung|
+---+------+--------------------+------------+--------------+-----------+
only showing top 20 rows

...