      org.scalatest.exceptions.TestFailedException: 
Exception thrown while executing query:
== Parsed Logical Plan ==
Sort [index#164782 ASC], true
+- Relation[index#164782,col#164783] ParquetRelation

== Analyzed Logical Plan ==
index: int, col: tinyint
Sort [index#164782 ASC], true
+- Relation[index#164782,col#164783] ParquetRelation

== Optimized Logical Plan ==
Sort [index#164782 ASC], true
+- Relation[index#164782,col#164783] ParquetRelation

== Physical Plan ==
Sort [index#164782 ASC], true, 0
+- Exchange rangepartitioning(index#164782 ASC, 2), None
   +- WholeStageCodegen
      :  +- Scan ParquetRelation[index#164782,col#164783] InputPaths: file:/home/jenkins/workspace/spark-master-test-maven-hadoop-2.7/sql/hive/target/tmp/spark-27cf3a35-c998-410c-8b14-077eecc80a43
== Exception ==
org.apache.spark.sql.catalyst.errors.package$TreeNodeException: execute, tree:
Exchange rangepartitioning(index#164782 ASC, 2), None
+- WholeStageCodegen
   :  +- Scan ParquetRelation[index#164782,col#164783] InputPaths: file:/home/jenkins/workspace/spark-master-test-maven-hadoop-2.7/sql/hive/target/tmp/spark-27cf3a35-c998-410c-8b14-077eecc80a43

org.apache.spark.sql.catalyst.errors.package$TreeNodeException: execute, tree:
Exchange rangepartitioning(index#164782 ASC, 2), None
+- WholeStageCodegen
   :  +- Scan ParquetRelation[index#164782,col#164783] InputPaths: file:/home/jenkins/workspace/spark-master-test-maven-hadoop-2.7/sql/hive/target/tmp/spark-27cf3a35-c998-410c-8b14-077eecc80a43

	at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:47)
	at org.apache.spark.sql.execution.exchange.ShuffleExchange.doExecute(ShuffleExchange.scala:106)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:116)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:116)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:135)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
	at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:132)
	at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:115)
	at org.apache.spark.sql.execution.Sort.doExecute(Sort.scala:60)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:116)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:116)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:135)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
	at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:132)
	at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:115)
	at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:223)
	at org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:231)
	at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1499)
	at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1499)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:53)
	at org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:1748)
	at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$execute$1(DataFrame.scala:1498)
	at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$collect$1.apply(DataFrame.scala:1503)
	at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$collect$1.apply(DataFrame.scala:1503)
	at org.apache.spark.sql.DataFrame.withCallback(DataFrame.scala:1761)
	at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$collect(DataFrame.scala:1503)
	at org.apache.spark.sql.DataFrame.collect(DataFrame.scala:1480)
	at org.apache.spark.sql.QueryTest$.checkAnswer(QueryTest.scala:315)
	at org.apache.spark.sql.QueryTest.checkAnswer(QueryTest.scala:143)
	at org.apache.spark.sql.QueryTest.checkAnswer(QueryTest.scala:154)
	at org.apache.spark.sql.sources.HadoopFsRelationTest$$anonfun$32$$anonfun$apply$1$$anonfun$apply$mcV$sp$19.apply(hadoopFsRelationSuites.scala:154)
	at org.apache.spark.sql.sources.HadoopFsRelationTest$$anonfun$32$$anonfun$apply$1$$anonfun$apply$mcV$sp$19.apply(hadoopFsRelationSuites.scala:121)
	at org.apache.spark.sql.test.SQLTestUtils$class.withTempPath(SQLTestUtils.scala:125)
	at org.apache.spark.sql.sources.HadoopFsRelationTest.withTempPath(hadoopFsRelationSuites.scala:38)
	at org.apache.spark.sql.sources.HadoopFsRelationTest$$anonfun$32$$anonfun$apply$1.apply$mcV$sp(hadoopFsRelationSuites.scala:121)
	at org.apache.spark.sql.sources.HadoopFsRelationTest$$anonfun$32$$anonfun$apply$1.apply(hadoopFsRelationSuites.scala:121)
	at org.apache.spark.sql.sources.HadoopFsRelationTest$$anonfun$32$$anonfun$apply$1.apply(hadoopFsRelationSuites.scala:121)
	at org.scalatest.Transformer$$anonfun$apply$1.apply$mcV$sp(Transformer.scala:22)
	at org.scalatest.OutcomeOf$class.outcomeOf(OutcomeOf.scala:85)
	at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
	at org.scalatest.Transformer.apply(Transformer.scala:22)
	at org.scalatest.Transformer.apply(Transformer.scala:20)
	at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:166)
	at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:54)
	at org.scalatest.FunSuiteLike$class.invokeWithFixture$1(FunSuiteLike.scala:163)
	at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
	at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
	at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
	at org.scalatest.FunSuiteLike$class.runTest(FunSuiteLike.scala:175)
	at org.scalatest.FunSuite.runTest(FunSuite.scala:1555)
	at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
	at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
	at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:413)
	at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:401)
	at scala.collection.immutable.List.foreach(List.scala:381)
	at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
	at org.scalatest.SuperEngine.org$scalatest$SuperEngine$$runTestsInBranch(Engine.scala:396)
	at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:483)
	at org.scalatest.FunSuiteLike$class.runTests(FunSuiteLike.scala:208)
	at org.scalatest.FunSuite.runTests(FunSuite.scala:1555)
	at org.scalatest.Suite$class.run(Suite.scala:1424)
	at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1555)
	at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
	at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
	at org.scalatest.SuperEngine.runImpl(Engine.scala:545)
	at org.scalatest.FunSuiteLike$class.run(FunSuiteLike.scala:212)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:26)
	at org.scalatest.BeforeAndAfterAll$class.liftedTree1$1(BeforeAndAfterAll.scala:257)
	at org.scalatest.BeforeAndAfterAll$class.run(BeforeAndAfterAll.scala:256)
	at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:26)
	at org.scalatest.Suite$class.callExecuteOnSuite$1(Suite.scala:1492)
	at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1528)
	at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1526)
	at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
	at org.scalatest.Suite$class.runNestedSuites(Suite.scala:1526)
	at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:29)
	at org.scalatest.Suite$class.run(Suite.scala:1421)
	at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:29)
	at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:55)
	at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2563)
	at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2557)
	at scala.collection.immutable.List.foreach(List.scala:381)
	at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:2557)
	at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1044)
	at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1043)
	at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:2722)
	at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1043)
	at org.scalatest.tools.Runner$.main(Runner.scala:860)
	at org.scalatest.tools.Runner.main(Runner.scala)
Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 12684.0 failed 1 times, most recent failure: Lost task 0.0 in stage 12684.0 (TID 124627, localhost): java.lang.NullPointerException
	at org.apache.spark.sql.execution.vectorized.OnHeapColumnVector.putInt(OnHeapColumnVector.java:193)
	at org.apache.spark.sql.execution.datasources.parquet.VectorizedPlainValuesReader.readBytes(VectorizedPlainValuesReader.java:88)
	at org.apache.spark.sql.execution.datasources.parquet.VectorizedRleValuesReader.readBytes(VectorizedRleValuesReader.java:283)
	at org.apache.spark.sql.execution.datasources.parquet.UnsafeRowParquetRecordReader$ColumnReader.readIntBatch(UnsafeRowParquetRecordReader.java:763)
	at org.apache.spark.sql.execution.datasources.parquet.UnsafeRowParquetRecordReader$ColumnReader.readBatch(UnsafeRowParquetRecordReader.java:640)
	at org.apache.spark.sql.execution.datasources.parquet.UnsafeRowParquetRecordReader$ColumnReader.access$000(UnsafeRowParquetRecordReader.java:461)
	at org.apache.spark.sql.execution.datasources.parquet.UnsafeRowParquetRecordReader.nextBatch(UnsafeRowParquetRecordReader.java:224)
	at org.apache.spark.sql.execution.datasources.parquet.UnsafeRowParquetRecordReader.nextKeyValue(UnsafeRowParquetRecordReader.java:174)
	at org.apache.spark.rdd.SqlNewHadoopRDD$$anon$1.hasNext(SqlNewHadoopRDD.scala:203)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(generated.java:32)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:40)
	at org.apache.spark.sql.execution.WholeStageCodegen$$anonfun$5$$anon$1.hasNext(WholeStageCodegen.scala:305)
	at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:369)
	at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:369)
	at org.apache.spark.util.random.SamplingUtils$.reservoirSampleAndCount(SamplingUtils.scala:41)
	at org.apache.spark.RangePartitioner$$anonfun$9.apply(Partitioner.scala:261)
	at org.apache.spark.RangePartitioner$$anonfun$9.apply(Partitioner.scala:259)
	at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsWithIndex$1$$anonfun$apply$24.apply(RDD.scala:755)
	at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsWithIndex$1$$anonfun$apply$24.apply(RDD.scala:755)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:313)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:277)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:69)
	at org.apache.spark.scheduler.Task.run(Task.scala:82)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	at java.lang.Thread.run(Thread.java:745)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1452)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1440)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1439)
	at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1439)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:802)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:802)
	at scala.Option.foreach(Option.scala:257)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:802)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1661)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1620)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1609)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:623)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:1773)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:1786)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:1799)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:1813)
	at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:847)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:323)
	at org.apache.spark.rdd.RDD.collect(RDD.scala:846)
	at org.apache.spark.RangePartitioner$.sketch(Partitioner.scala:264)
	at org.apache.spark.RangePartitioner.<init>(Partitioner.scala:126)
	at org.apache.spark.sql.execution.exchange.ShuffleExchange$.prepareShuffleDependency(ShuffleExchange.scala:211)
	at org.apache.spark.sql.execution.exchange.ShuffleExchange.prepareShuffleDependency(ShuffleExchange.scala:83)
	at org.apache.spark.sql.execution.exchange.ShuffleExchange$$anonfun$doExecute$1.apply(ShuffleExchange.scala:113)
	at org.apache.spark.sql.execution.exchange.ShuffleExchange$$anonfun$doExecute$1.apply(ShuffleExchange.scala:107)
	at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:46)
	... 89 more
Caused by: java.lang.NullPointerException
	at org.apache.spark.sql.execution.vectorized.OnHeapColumnVector.putInt(OnHeapColumnVector.java:193)
	at org.apache.spark.sql.execution.datasources.parquet.VectorizedPlainValuesReader.readBytes(VectorizedPlainValuesReader.java:88)
	at org.apache.spark.sql.execution.datasources.parquet.VectorizedRleValuesReader.readBytes(VectorizedRleValuesReader.java:283)
	at org.apache.spark.sql.execution.datasources.parquet.UnsafeRowParquetRecordReader$ColumnReader.readIntBatch(UnsafeRowParquetRecordReader.java:763)
	at org.apache.spark.sql.execution.datasources.parquet.UnsafeRowParquetRecordReader$ColumnReader.readBatch(UnsafeRowParquetRecordReader.java:640)
	at org.apache.spark.sql.execution.datasources.parquet.UnsafeRowParquetRecordReader$ColumnReader.access$000(UnsafeRowParquetRecordReader.java:461)
	at org.apache.spark.sql.execution.datasources.parquet.UnsafeRowParquetRecordReader.nextBatch(UnsafeRowParquetRecordReader.java:224)
	at org.apache.spark.sql.execution.datasources.parquet.UnsafeRowParquetRecordReader.nextKeyValue(UnsafeRowParquetRecordReader.java:174)
	at org.apache.spark.rdd.SqlNewHadoopRDD$$anon$1.hasNext(SqlNewHadoopRDD.scala:203)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(generated.java:32)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:40)
	at org.apache.spark.sql.execution.WholeStageCodegen$$anonfun$5$$anon$1.hasNext(WholeStageCodegen.scala:305)
	at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:369)
	at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:369)
	at org.apache.spark.util.random.SamplingUtils$.reservoirSampleAndCount(SamplingUtils.scala:41)
	at org.apache.spark.RangePartitioner$$anonfun$9.apply(Partitioner.scala:261)
	at org.apache.spark.RangePartitioner$$anonfun$9.apply(Partitioner.scala:259)
	at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsWithIndex$1$$anonfun$apply$24.apply(RDD.scala:755)
	at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsWithIndex$1$$anonfun$apply$24.apply(RDD.scala:755)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:313)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:277)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:69)
	at org.apache.spark.scheduler.Task.run(Task.scala:82)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	at java.lang.Thread.run(Thread.java:745)

          
      at org.scalatest.Assertions$class.newAssertionFailedException(Assertions.scala:495)
      at org.scalatest.FunSuite.newAssertionFailedException(FunSuite.scala:1555)
      at org.scalatest.Assertions$class.fail(Assertions.scala:1328)
      at org.scalatest.FunSuite.fail(FunSuite.scala:1555)
      at org.apache.spark.sql.QueryTest.checkAnswer(QueryTest.scala:144)
      at org.apache.spark.sql.QueryTest.checkAnswer(QueryTest.scala:154)
      at org.apache.spark.sql.sources.HadoopFsRelationTest$$anonfun$32$$anonfun$apply$1$$anonfun$apply$mcV$sp$19.apply(hadoopFsRelationSuites.scala:154)
      at org.apache.spark.sql.sources.HadoopFsRelationTest$$anonfun$32$$anonfun$apply$1$$anonfun$apply$mcV$sp$19.apply(hadoopFsRelationSuites.scala:121)
      at org.apache.spark.sql.test.SQLTestUtils$class.withTempPath(SQLTestUtils.scala:125)
      at org.apache.spark.sql.sources.HadoopFsRelationTest.withTempPath(hadoopFsRelationSuites.scala:38)
      at org.apache.spark.sql.sources.HadoopFsRelationTest$$anonfun$32$$anonfun$apply$1.apply$mcV$sp(hadoopFsRelationSuites.scala:121)
      at org.apache.spark.sql.sources.HadoopFsRelationTest$$anonfun$32$$anonfun$apply$1.apply(hadoopFsRelationSuites.scala:121)
      at org.apache.spark.sql.sources.HadoopFsRelationTest$$anonfun$32$$anonfun$apply$1.apply(hadoopFsRelationSuites.scala:121)
      at org.scalatest.Transformer$$anonfun$apply$1.apply$mcV$sp(Transformer.scala:22)
      at org.scalatest.OutcomeOf$class.outcomeOf(OutcomeOf.scala:85)
      at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
      at org.scalatest.Transformer.apply(Transformer.scala:22)
      at org.scalatest.Transformer.apply(Transformer.scala:20)
      at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:166)
      at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:54)
      at org.scalatest.FunSuiteLike$class.invokeWithFixture$1(FunSuiteLike.scala:163)
      at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
      at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
      at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
      at org.scalatest.FunSuiteLike$class.runTest(FunSuiteLike.scala:175)
      at org.scalatest.FunSuite.runTest(FunSuite.scala:1555)
      at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
      at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
      at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:413)
      at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:401)
      at scala.collection.immutable.List.foreach(List.scala:381)
      at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
      at org.scalatest.SuperEngine.org$scalatest$SuperEngine$$runTestsInBranch(Engine.scala:396)
      at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:483)
      at org.scalatest.FunSuiteLike$class.runTests(FunSuiteLike.scala:208)
      at org.scalatest.FunSuite.runTests(FunSuite.scala:1555)
      at org.scalatest.Suite$class.run(Suite.scala:1424)
      at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1555)
      at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
      at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
      at org.scalatest.SuperEngine.runImpl(Engine.scala:545)
      at org.scalatest.FunSuiteLike$class.run(FunSuiteLike.scala:212)
      at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:26)
      at org.scalatest.BeforeAndAfterAll$class.liftedTree1$1(BeforeAndAfterAll.scala:257)
      at org.scalatest.BeforeAndAfterAll$class.run(BeforeAndAfterAll.scala:256)
      at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:26)
      at org.scalatest.Suite$class.callExecuteOnSuite$1(Suite.scala:1492)
      at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1528)
      at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1526)
      at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
      at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
      at org.scalatest.Suite$class.runNestedSuites(Suite.scala:1526)
      at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:29)
      at org.scalatest.Suite$class.run(Suite.scala:1421)
      at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:29)
      at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:55)
      at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2563)
      at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2557)
      at scala.collection.immutable.List.foreach(List.scala:381)
      at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:2557)
      at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1044)
      at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1043)
      at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:2722)
      at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1043)
      at org.scalatest.tools.Runner$.main(Runner.scala:860)
      at org.scalatest.tools.Runner.main(Runner.scala)
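
The failure above is a TestFailedException raised by checkAnswer: the Sort's range-partitioning Exchange samples its input (RangePartitioner.sketch -> reservoirSampleAndCount), which drives the vectorized Parquet reader, and OnHeapColumnVector.putInt throws a NullPointerException while decoding the tinyint column. The following is a hypothetical minimal sketch of the scenario the analyzed plan describes (index: int, col: tinyint, sorted on index), not the actual HadoopFsRelationTest code; the local master, app name, and temp path are illustrative assumptions.

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext
    import org.apache.spark.sql.functions.col

    object TinyintSortRepro {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(
          new SparkConf().setMaster("local[2]").setAppName("tinyint-sort-repro"))
        val sqlContext = new SQLContext(sc)

        // Illustrative temp location for the Parquet output.
        val path = java.nio.file.Files
          .createTempDirectory("tinyint-repro").resolve("data").toString

        // Write a two-column DataFrame matching the analyzed schema
        // (index: int, col: tinyint) to Parquet.
        val df = sqlContext.range(0, 1000)
          .select(col("id").cast("int").as("index"),
                  (col("id") % 100).cast("tinyint").as("col"))
        df.write.parquet(path)

        // Reading back with ORDER BY inserts the range-partitioning Exchange
        // seen in the physical plan; its sampling pass runs the vectorized
        // Parquet read path (readBytes -> putInt) where the NPE originated.
        sqlContext.read.parquet(path).orderBy("index").collect()
      }
    }

Running this merely exercises the same read path (Scan ParquetRelation -> WholeStageCodegen -> Exchange sampling); it does not guarantee reproducing the NPE, which the trace suggests is a state issue inside the on-heap column vector rather than a property of the data itself.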