NVIDIA / spark-rapids

Spark RAPIDS plugin - accelerate Apache Spark with GPUs
https://nvidia.github.io/spark-rapids
Apache License 2.0

[BUG] Spark UT framework: select one deep nested complex field after outer join, IOException parsing parquet #11629

Open · Feng-Jiang28 opened this issue 1 month ago

Feng-Jiang28 commented 1 month ago

The contacts parquet data is defined as follows and is attached here: contacts.zip

This issue is similar to https://github.com/NVIDIA/spark-rapids/issues/11628.
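
For context, the attached data appears to match the fixtures used by Apache Spark's SchemaPruningSuite. Below is a minimal sketch that generates comparable test data, including the p=1/p=2 partition layout seen in the stack trace further down; the case classes mirror the declared schema, but the row values are illustrative assumptions, not necessarily the attachment's exact contents:

case class FullName(first: String, middle: String, last: String)
case class Company(name: String, address: String)
case class Employer(id: Int, company: Company)
case class Contact(
    id: Int,
    name: FullName,
    address: String,
    pets: Int,
    friends: Seq[FullName] = Seq.empty,
    relatives: Map[String, FullName] = Map.empty,
    employer: Employer = null,
    relations: Map[FullName, String] = Map.empty)
case class Department(depId: Int, depName: String, contactId: Int, employer: Employer)

import spark.implicits._
val base = "/home/fejiang/Desktop"
val employer = Employer(0, Company("abc", "123 Business Street"))

// p is a partition column: it is not stored in the data files, so each
// partition's rows are written to their own p=<value> directory.
Seq(
  Contact(0, FullName("Jane", "X.", "Doe"), "123 Main Street", 1, employer = employer),
  Contact(1, FullName("John", "Y.", "Doe"), "321 Wall Street", 3, employer = employer))
  .toDF().write.mode("overwrite").parquet(base + "/contacts/p=1")
Seq(Contact(2, FullName("Janet", "Z.", "Smith"), "567 Maple Drive", 0))
  .toDF().write.mode("overwrite").parquet(base + "/contacts/p=2")

// contactId 4 has no matching contact, which produces the null middle
// name in the CPU output of the outer join below.
Seq(
  Department(0, "Engineering", 0, employer),
  Department(1, "Marketing", 1, employer),
  Department(2, "Sales", 4, employer))
  .toDF().write.mode("overwrite").parquet(base + "/departments")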

Reproduce:

val dataSourceName = "parquet" 
val path = "/home/fejiang/Desktop"
spark.conf.set("spark.sql.parquet.enableVectorizedReader", "true")
val schema = ("`id` INT,`name` STRUCT<`first`: STRING, `middle`: STRING, `last`: STRING>, " +
  "`address` STRING,`pets` INT,`friends` ARRAY<STRUCT<`first`: STRING, `middle`: STRING, " +
  "`last`: STRING>>,`relatives` MAP<STRING, STRUCT<`first`: STRING, `middle`: STRING, " +
  "`last`: STRING>>,`employer` STRUCT<`id`: INT, `company`: STRUCT<`name`: STRING, " +
  "`address`: STRING>>,`relations` MAP<STRUCT<`first`: STRING, `middle`: STRING, " +
  "`last`: STRING>,STRING>,`p` INT")
spark.read.format(dataSourceName).schema(schema).load(path + "/contacts").createOrReplaceTempView("contacts")

val departmentSchema = "`depId` INT,`depName` STRING,`contactId` INT,`employer` STRUCT<`id`: INT, `company`: STRUCT<`name`: STRING, `address`: STRING>>"
spark.read.format(dataSourceName).schema(departmentSchema).load(path + "/departments").createOrReplaceTempView("departments")
val query = spark.sql("select departments.contactId, contacts.name.middle from departments left outer join contacts on departments.contactId = contacts.id")
query.show()
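
The CPU and GPU transcripts below presumably differ only in whether the RAPIDS plugin is active. A typical spark-shell invocation for the GPU run looks like the following (the jar version is an assumption; substitute the build under test):

spark-shell \
  --jars rapids-4-spark_2.12-24.10.0.jar \
  --conf spark.plugins=com.nvidia.spark.SQLPlugin \
  --conf spark.rapids.sql.enabled=true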

CPU:

scala> val dataSourceName = "parquet" 
dataSourceName: String = parquet

scala> val path = "/home/fejiang/Desktop"
path: String = /home/fejiang/Desktop

scala> spark.conf.set("spark.sql.parquet.enableVectorizedReader", "true")

scala> val schema = ("`id` INT,`name` STRUCT<`first`: STRING, `middle`: STRING, `last`: STRING>, " +
     |   "`address` STRING,`pets` INT,`friends` ARRAY<STRUCT<`first`: STRING, `middle`: STRING, " +
     |   "`last`: STRING>>,`relatives` MAP<STRING, STRUCT<`first`: STRING, `middle`: STRING, " +
     |   "`last`: STRING>>,`employer` STRUCT<`id`: INT, `company`: STRUCT<`name`: STRING, " +
     |   "`address`: STRING>>,`relations` MAP<STRUCT<`first`: STRING, `middle`: STRING, " +
     |   "`last`: STRING>,STRING>,`p` INT")
schema: String = `id` INT,`name` STRUCT<`first`: STRING, `middle`: STRING, `last`: STRING>, `address` STRING,`pets` INT,`friends` ARRAY<STRUCT<`first`: STRING, `middle`: STRING, `last`: STRING>>,`relatives` MAP<STRING, STRUCT<`first`: STRING, `middle`: STRING, `last`: STRING>>,`employer` STRUCT<`id`: INT, `company`: STRUCT<`name`: STRING, `address`: STRING>>,`relations` MAP<STRUCT<`first`: STRING, `middle`: STRING, `last`: STRING>,STRING>,`p` INT

scala> spark.read.format(dataSourceName).schema(schema).load(path + "/contacts").createOrReplaceTempView("contacts")

scala> 

scala> val departmentSchema = "`depId` INT,`depName` STRING,`contactId` INT,`employer` STRUCT<`id`: INT, `company`: STRUCT<`name`: STRING, `address`: STRING>>"
departmentSchema: String = `depId` INT,`depName` STRING,`contactId` INT,`employer` STRUCT<`id`: INT, `company`: STRUCT<`name`: STRING, `address`: STRING>>

scala> spark.read.format(dataSourceName).schema(departmentSchema).load(path + "/departments")
res21: org.apache.spark.sql.DataFrame = [depId: int, depName: string ... 2 more fields]

scala>      .createOrReplaceTempView("departments")  

scala> val query = spark.sql("select departments.contactId, contacts.name.middle from departments left outer join contacts on departments.contactId = contacts.id")
query: org.apache.spark.sql.DataFrame = [contactId: int, middle: string]

scala> query.show()
+---------+------+
|contactId|middle|
+---------+------+
|        0|    X.|
|        1|    Y.|
|        4|  null|
+---------+------+

GPU:

scala> val dataSourceName = "parquet" 
dataSourceName: String = parquet

scala> val path = "/home/fejiang/Desktop"
path: String = /home/fejiang/Desktop

scala> spark.conf.set("spark.sql.parquet.enableVectorizedReader", "true")

scala> val schema = ("`id` INT,`name` STRUCT<`first`: STRING, `middle`: STRING, `last`: STRING>, " +
     |   "`address` STRING,`pets` INT,`friends` ARRAY<STRUCT<`first`: STRING, `middle`: STRING, " +
     |   "`last`: STRING>>,`relatives` MAP<STRING, STRUCT<`first`: STRING, `middle`: STRING, " +
     |   "`last`: STRING>>,`employer` STRUCT<`id`: INT, `company`: STRUCT<`name`: STRING, " +
     |   "`address`: STRING>>,`relations` MAP<STRUCT<`first`: STRING, `middle`: STRING, " +
     |   "`last`: STRING>,STRING>,`p` INT")
schema: String = `id` INT,`name` STRUCT<`first`: STRING, `middle`: STRING, `last`: STRING>, `address` STRING,`pets` INT,`friends` ARRAY<STRUCT<`first`: STRING, `middle`: STRING, `last`: STRING>>,`relatives` MAP<STRING, STRUCT<`first`: STRING, `middle`: STRING, `last`: STRING>>,`employer` STRUCT<`id`: INT, `company`: STRUCT<`name`: STRING, `address`: STRING>>,`relations` MAP<STRUCT<`first`: STRING, `middle`: STRING, `last`: STRING>,STRING>,`p` INT

scala> spark.read.format(dataSourceName).schema(schema).load(path + "/contacts").createOrReplaceTempView("contacts")

scala> 

scala> val departmentSchema = "`depId` INT,`depName` STRING,`contactId` INT,`employer` STRUCT<`id`: INT, `company`: STRUCT<`name`: STRING, `address`: STRING>>"
departmentSchema: String = `depId` INT,`depName` STRING,`contactId` INT,`employer` STRUCT<`id`: INT, `company`: STRUCT<`name`: STRING, `address`: STRING>>

scala> spark.read.format(dataSourceName).schema(departmentSchema).load(path + "/departments")
res7: org.apache.spark.sql.DataFrame = [depId: int, depName: string ... 2 more fields]

scala>      .createOrReplaceTempView("departments")  

scala> val query = spark.sql("select departments.contactId, contacts.name.middle from departments left outer join contacts on departments.contactId = contacts.id")
query: org.apache.spark.sql.DataFrame = [contactId: int, middle: string]

scala> query.show()
24/10/18 17:53:22 WARN GpuOverrides: 
!Exec <CollectLimitExec> cannot run on GPU because the Exec CollectLimitExec has been disabled, and is disabled by default because Collect Limit replacement can be slower on the GPU, if huge number of rows in a batch it could help by limiting the number of rows transferred from GPU to CPU. Set spark.rapids.sql.exec.CollectLimitExec to true if you wish to enable it
  @Partitioning <SinglePartition$> could run on GPU
  *Exec <ProjectExec> will run on GPU
    *Expression <Alias> cast(contactId#63 as string) AS contactId#74 will run on GPU
      *Expression <Cast> cast(contactId#63 as string) will run on GPU
    *Expression <Alias> _extract_middle#78 AS middle#75 will run on GPU
    *Exec <BroadcastHashJoinExec> will run on GPU
      *Exec <LocalLimitExec> will run on GPU
        *Exec <FileSourceScanExec> will run on GPU
      *Exec <BroadcastExchangeExec> will run on GPU
        *Exec <ProjectExec> will run on GPU
          *Expression <Alias> name#44.middle AS _extract_middle#78 will run on GPU
            *Expression <GetStructField> name#44.middle will run on GPU
          *Exec <FilterExec> will run on GPU
            *Expression <IsNotNull> isnotnull(id#43) will run on GPU
            *Exec <FileSourceScanExec> will run on GPU

24/10/18 17:53:22 WARN GpuOverrides: 
*Exec <BroadcastExchangeExec> will run on GPU
  *Exec <ProjectExec> will run on GPU
    *Expression <Alias> name#44.middle AS _extract_middle#78 will run on GPU
      *Expression <GetStructField> name#44.middle will run on GPU
    *Exec <FilterExec> will run on GPU
      *Expression <IsNotNull> isnotnull(id#43) will run on GPU
      *Exec <FileSourceScanExec> will run on GPU

24/10/18 17:53:22 ERROR Executor: Exception in task 2.0 in stage 3.0 (TID 8)
java.io.IOException: Error when processing path: file:///home/fejiang/Desktop/contacts/p=2/part-00000-000fbc57-9d4a-4d07-a5fe-1c8c0815d1f8-c000.snappy.parquet, range: 0-991, partition values: [empty row]
    at com.nvidia.spark.rapids.ParquetTableReader.$anonfun$next$1(GpuParquetScan.scala:2709)
    at com.nvidia.spark.rapids.Arm$.withResource(Arm.scala:30)
    at com.nvidia.spark.rapids.ParquetTableReader.next(GpuParquetScan.scala:2696)
    at com.nvidia.spark.rapids.ParquetTableReader.next(GpuParquetScan.scala:2668)
    at com.nvidia.spark.rapids.CachedGpuBatchIterator$.$anonfun$apply$1(GpuDataProducer.scala:159)
    at com.nvidia.spark.rapids.Arm$.withResource(Arm.scala:30)
    at com.nvidia.spark.rapids.CachedGpuBatchIterator$.apply(GpuDataProducer.scala:156)
    at com.nvidia.spark.rapids.MultiFileCoalescingPartitionReaderBase.$anonfun$readBatch$4(GpuMultiFileReader.scala:1066)
    at com.nvidia.spark.rapids.RmmRapidsRetryIterator$AutoCloseableAttemptSpliterator.next(RmmRapidsRetryIterator.scala:477)
    at com.nvidia.spark.rapids.RmmRapidsRetryIterator$RmmRapidsRetryIterator.next(RmmRapidsRetryIterator.scala:613)
    at com.nvidia.spark.rapids.RmmRapidsRetryIterator$RmmRapidsRetryAutoCloseableIterator.next(RmmRapidsRetryIterator.scala:517)
    at com.nvidia.spark.rapids.RmmRapidsRetryIterator$.drainSingleWithVerification(RmmRapidsRetryIterator.scala:291)
    at com.nvidia.spark.rapids.RmmRapidsRetryIterator$.withRetryNoSplit(RmmRapidsRetryIterator.scala:132)
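
A possible workaround until the root cause in GpuParquetScan is addressed, assuming the failure is confined to the GPU Parquet read path, is to let Parquet scans fall back to the CPU while the rest of the plan stays on the GPU:

// Sketch of a fallback: disable GPU-accelerated Parquet reads only,
// bypassing ParquetTableReader where the IOException is thrown.
spark.conf.set("spark.rapids.sql.format.parquet.read.enabled", "false")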