diff --git a/sql-plugin/src/main/scala/org/apache/spark/sql/catalyst/json/rapids/GpuJsonScan.scala b/sql-plugin/src/main/scala/org/apache/spark/sql/catalyst/json/rapids/GpuJsonScan.scala
index dbd23c31a78..61ee9ca27fa 100644
--- a/sql-plugin/src/main/scala/org/apache/spark/sql/catalyst/json/rapids/GpuJsonScan.scala
+++ b/sql-plugin/src/main/scala/org/apache/spark/sql/catalyst/json/rapids/GpuJsonScan.scala
@@ -325,7 +325,7 @@ object JsonPartitionReader {
     withResource(new NvtxWithMetrics(formatName + " decode", NvtxColor.DARK_GREEN,
         decodeTime)) { _ =>
       try {
-        Table.readJSON(cudfSchema, jsonOpts, dataBuffer, 0, dataSize, dataBufferer.getNumLines)
+        Table.readJSON(cudfSchema, jsonOpts, dataBuffer, 0, dataSize)
       } catch {
         case e: AssertionError if e.getMessage == "CudfColumns can't be null or empty" =>
           // this happens when every row in a JSON file is invalid (or we are
diff --git a/sql-plugin/src/main/scala/org/apache/spark/sql/rapids/GpuJsonToStructs.scala b/sql-plugin/src/main/scala/org/apache/spark/sql/rapids/GpuJsonToStructs.scala
index a62aba24760..4138d56715d 100644
--- a/sql-plugin/src/main/scala/org/apache/spark/sql/rapids/GpuJsonToStructs.scala
+++ b/sql-plugin/src/main/scala/org/apache/spark/sql/rapids/GpuJsonToStructs.scala
@@ -103,9 +103,7 @@ case class GpuJsonToStructs(
     // Step 3: Have cudf parse the JSON data
     try {
       cudf.Table.readJSON(cudfSchema,
-        jsonOptionBuilder.withLineDelimiter(concatenated.delimiter).build(),
-        ds,
-        numRows)
+        jsonOptionBuilder.withLineDelimiter(concatenated.delimiter).build(), ds)
     } catch {
       case e: RuntimeException =>
         throw new JsonParsingException("Currently some JsonToStructs cases " +