execute, tree:
HashAggregate(keys=[key#279494], functions=[count(if ((gid#280004 = 2)) default.agg2.`value1`#280006 else null), sum(if ((gid#280004 = 1)) CAST(default.agg2.`value1` AS BIGINT)#280005L else null), count(if ((gid#280004 = 6)) default.agg2.`value2`#280008 else null), sum(if ((gid#280004 = 3)) CAST(default.agg2.`value2` AS BIGINT)#280007L else null), count(if ((gid#280004 = 5)) default.agg2.`value1`#280006 else null, if ((gid#280004 = 5)) default.agg2.`value2`#280008 else null), longproductsum(if ((gid#280004 = 4)) CAST(default.agg2.`value1` AS BIGINT)#280005L else null, if ((gid#280004 = 4)) CAST(default.agg2.`value2` AS BIGINT)#280007L else null, org.apache.spark.sql.hive.execution.LongProductSum@262c8dec, 0, 0), first(if ((gid#280004 = 0)) count(default.agg2.`value1`)#280015L else null, true), first(if ((gid#280004 = 0)) sum(CAST(default.agg2.`value1` AS BIGINT))#280017L else null, true), first(if ((gid#280004 = 0)) count(default.agg2.`value2`)#280019L else null, true), first(if ((gid#280004 = 0)) sum(CAST(default.agg2.`value2` AS BIGINT))#280021L else null, true), first(if ((gid#280004 = 0)) longproductsum(CAST(default.agg2.`value1` AS BIGINT), CAST(default.agg2.`value2` AS BIGINT))#280025L else null, true), first(if ((gid#280004 = 0)) count(1)#280027L else null, true), first(if ((gid#280004 = 0)) count(1)#280029L else null, true)], output=[key#279494, count(DISTINCT value1)#279881L, sum(DISTINCT value1)#279882L, count(DISTINCT value2)#279883L, sum(DISTINCT value2)#279884L, count(DISTINCT value1, value2)#279885L, longproductsum(DISTINCT CAST(value1 AS BIGINT), CAST(value2 AS BIGINT))#279898L, count(value1)#279886L, sum(value1)#279887L, count(value2)#279888L, sum(value2)#279889L, longproductsum(CAST(value1 AS BIGINT), CAST(value2 AS BIGINT))#279901L, count(1)#279890L, count(1)#279891L])
+- Exchange hashpartitioning(key#279494, 5), true, [id=#205709]
 +- HashAggregate(keys=[key#279494], functions=[partial_count(if ((gid#280004 = 2)) default.agg2.`value1`#280006 else null), partial_sum(if ((gid#280004 = 1)) CAST(default.agg2.`value1` AS BIGINT)#280005L else null), partial_count(if ((gid#280004 = 6)) default.agg2.`value2`#280008 else null), partial_sum(if ((gid#280004 = 3)) CAST(default.agg2.`value2` AS BIGINT)#280007L else null), partial_count(if ((gid#280004 = 5)) default.agg2.`value1`#280006 else null, if ((gid#280004 = 5)) default.agg2.`value2`#280008 else null), partial_longproductsum(if ((gid#280004 = 4)) CAST(default.agg2.`value1` AS BIGINT)#280005L else null, if ((gid#280004 = 4)) CAST(default.agg2.`value2` AS BIGINT)#280007L else null, org.apache.spark.sql.hive.execution.LongProductSum@262c8dec, 0, 0), partial_first(if ((gid#280004 = 0)) count(default.agg2.`value1`)#280015L else null, true), partial_first(if ((gid#280004 = 0)) sum(CAST(default.agg2.`value1` AS BIGINT))#280017L else null, true), partial_first(if ((gid#280004 = 0)) count(default.agg2.`value2`)#280019L else null, true), partial_first(if ((gid#280004 = 0)) sum(CAST(default.agg2.`value2` AS BIGINT))#280021L else null, true), partial_first(if ((gid#280004 = 0)) longproductsum(CAST(default.agg2.`value1` AS BIGINT), CAST(default.agg2.`value2` AS BIGINT))#280025L else null, true), partial_first(if ((gid#280004 = 0)) count(1)#280027L else null, true), partial_first(if ((gid#280004 = 0)) count(1)#280029L else null, true)], output=[key#279494, count#280052L, sum#280053L, count#280054L, sum#280055L, count#280056L, product#280010L, first#280057L, valueSet#280058, first#280059L, valueSet#280060, first#280061L, valueSet#280062, first#280063L, valueSet#280064, first#280065L, valueSet#280066, first#280067L, valueSet#280068, first#280069L, valueSet#280070])
 +- HashAggregate(keys=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004], functions=[count(default.agg2.`value1`#280011), sum(CAST(default.agg2.`value1` AS BIGINT)#280012L), count(default.agg2.`value2`#280013), sum(CAST(default.agg2.`value2` AS BIGINT)#280014L), longproductsum(CAST(default.agg2.`value1` AS BIGINT)#280012L, CAST(default.agg2.`value2` AS BIGINT)#280014L, org.apache.spark.sql.hive.execution.LongProductSum@262c8dec, 0, 0), count(1)], output=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, count(default.agg2.`value1`)#280015L, sum(CAST(default.agg2.`value1` AS BIGINT))#280017L, count(default.agg2.`value2`)#280019L, sum(CAST(default.agg2.`value2` AS BIGINT))#280021L, longproductsum(CAST(default.agg2.`value1` AS BIGINT), CAST(default.agg2.`value2` AS BIGINT))#280025L, count(1)#280027L, count(1)#280029L])
 +- Exchange hashpartitioning(key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, 5), true, [id=#205706]
 +- HashAggregate(keys=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004], functions=[partial_count(default.agg2.`value1`#280011), partial_sum(CAST(default.agg2.`value1` AS BIGINT)#280012L), partial_count(default.agg2.`value2`#280013), partial_sum(CAST(default.agg2.`value2` AS BIGINT)#280014L), partial_longproductsum(CAST(default.agg2.`value1` AS BIGINT)#280012L, CAST(default.agg2.`value2` AS BIGINT)#280014L, org.apache.spark.sql.hive.execution.LongProductSum@262c8dec, 0, 0), partial_count(1)], output=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, count#280077L, sum#280078L, count#280079L, sum#280080L, product#280024L, count#280002L])
 +- *(1) Expand [ArrayBuffer(key#279494, null, null, null, null, 0, value1#279495, cast(value1#279495 as bigint), value2#279496, cast(value2#279496 as bigint)), ArrayBuffer(key#279494, cast(value1#279495 as bigint), null, null, null, 1, null, null, null, null), ArrayBuffer(key#279494, null, value1#279495, null, null, 2, null, null, null, null), ArrayBuffer(key#279494, null, null, cast(value2#279496 as bigint), null, 3, null, null, null, null), ArrayBuffer(key#279494, cast(value1#279495 as bigint), null, cast(value2#279496 as bigint), null, 4, null, null, null, null), ArrayBuffer(key#279494, null, value1#279495, null, value2#279496, 5, null, null, null, null), ArrayBuffer(key#279494, null, null, null, value2#279496, 6, null, null, null, null)], [key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, default.agg2.`value1`#280011, CAST(default.agg2.`value1` AS BIGINT)#280012L, default.agg2.`value2`#280013, CAST(default.agg2.`value2` AS BIGINT)#280014L]
 +- *(1) ColumnarToRow
 +- FileScan parquet default.agg2[key#279494,value1#279495,value2#279496] Batched: true, DataFilters: [], Format: Parquet, Location: InMemoryFileIndex[file:/home/jenkins/workspace/spark-master-test-maven-hadoop-2.7/sql/hive/target..., PartitionFilters: [], PushedFilters: [], ReadSchema: struct<key:int,value1:int,value2:int>
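
For context, the plan above is the Expand/gid rewrite Spark applies when a GROUP BY combines several DISTINCT aggregate groups with ordinary aggregates. The following is a minimal, hedged sketch of the kind of query that yields this plan shape; it is inferred from the plan's output columns, not copied from AggregationQuerySuite, and it assumes an active SparkSession named spark, the table default.agg2(key INT, value1 INT, value2 INT), and the suite's LongProductSum UDAF registered as "longProductSum".

    // Assumption: LongProductSum is the UserDefinedAggregateFunction defined in
    // the hive test suite (its class name appears in the plan above).
    import org.apache.spark.sql.hive.execution.LongProductSum

    spark.udf.register("longProductSum", new LongProductSum)

    val result = spark.sql(
      """
        |SELECT
        |  key,
        |  count(DISTINCT value1),
        |  sum(DISTINCT value1),
        |  count(DISTINCT value2),
        |  sum(DISTINCT value2),
        |  count(DISTINCT value1, value2),
        |  longProductSum(DISTINCT value1, value2),
        |  count(value1),
        |  sum(value1),
        |  count(value2),
        |  sum(value2),
        |  longProductSum(value1, value2),
        |  count(*),
        |  count(1)
        |FROM agg2
        |GROUP BY key
      """.stripMargin)

    // Mixing DISTINCT groups over different column sets with non-DISTINCT
    // aggregates is what makes the planner emit the Expand node with a gid
    // column and the two shuffle Exchanges seen in the failing plan.
    result.collect()
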

org.apache.spark.sql.catalyst.errors.package$TreeNodeException: execute, tree:
HashAggregate(keys=[key#279494], functions=[count(if ((gid#280004 = 2)) default.agg2.`value1`#280006 else null), sum(if ((gid#280004 = 1)) CAST(default.agg2.`value1` AS BIGINT)#280005L else null), count(if ((gid#280004 = 6)) default.agg2.`value2`#280008 else null), sum(if ((gid#280004 = 3)) CAST(default.agg2.`value2` AS BIGINT)#280007L else null), count(if ((gid#280004 = 5)) default.agg2.`value1`#280006 else null, if ((gid#280004 = 5)) default.agg2.`value2`#280008 else null), longproductsum(if ((gid#280004 = 4)) CAST(default.agg2.`value1` AS BIGINT)#280005L else null, if ((gid#280004 = 4)) CAST(default.agg2.`value2` AS BIGINT)#280007L else null, org.apache.spark.sql.hive.execution.LongProductSum@262c8dec, 0, 0), first(if ((gid#280004 = 0)) count(default.agg2.`value1`)#280015L else null, true), first(if ((gid#280004 = 0)) sum(CAST(default.agg2.`value1` AS BIGINT))#280017L else null, true), first(if ((gid#280004 = 0)) count(default.agg2.`value2`)#280019L else null, true), first(if ((gid#280004 = 0)) sum(CAST(default.agg2.`value2` AS BIGINT))#280021L else null, true), first(if ((gid#280004 = 0)) longproductsum(CAST(default.agg2.`value1` AS BIGINT), CAST(default.agg2.`value2` AS BIGINT))#280025L else null, true), first(if ((gid#280004 = 0)) count(1)#280027L else null, true), first(if ((gid#280004 = 0)) count(1)#280029L else null, true)], output=[key#279494, count(DISTINCT value1)#279881L, sum(DISTINCT value1)#279882L, count(DISTINCT value2)#279883L, sum(DISTINCT value2)#279884L, count(DISTINCT value1, value2)#279885L, longproductsum(DISTINCT CAST(value1 AS BIGINT), CAST(value2 AS BIGINT))#279898L, count(value1)#279886L, sum(value1)#279887L, count(value2)#279888L, sum(value2)#279889L, longproductsum(CAST(value1 AS BIGINT), CAST(value2 AS BIGINT))#279901L, count(1)#279890L, count(1)#279891L])
+- Exchange hashpartitioning(key#279494, 5), true, [id=#205709]
+- HashAggregate(keys=[key#279494], functions=[partial_count(if ((gid#280004 = 2)) default.agg2.`value1`#280006 else null), partial_sum(if ((gid#280004 = 1)) CAST(default.agg2.`value1` AS BIGINT)#280005L else null), partial_count(if ((gid#280004 = 6)) default.agg2.`value2`#280008 else null), partial_sum(if ((gid#280004 = 3)) CAST(default.agg2.`value2` AS BIGINT)#280007L else null), partial_count(if ((gid#280004 = 5)) default.agg2.`value1`#280006 else null, if ((gid#280004 = 5)) default.agg2.`value2`#280008 else null), partial_longproductsum(if ((gid#280004 = 4)) CAST(default.agg2.`value1` AS BIGINT)#280005L else null, if ((gid#280004 = 4)) CAST(default.agg2.`value2` AS BIGINT)#280007L else null, org.apache.spark.sql.hive.execution.LongProductSum@262c8dec, 0, 0), partial_first(if ((gid#280004 = 0)) count(default.agg2.`value1`)#280015L else null, true), partial_first(if ((gid#280004 = 0)) sum(CAST(default.agg2.`value1` AS BIGINT))#280017L else null, true), partial_first(if ((gid#280004 = 0)) count(default.agg2.`value2`)#280019L else null, true), partial_first(if ((gid#280004 = 0)) sum(CAST(default.agg2.`value2` AS BIGINT))#280021L else null, true), partial_first(if ((gid#280004 = 0)) longproductsum(CAST(default.agg2.`value1` AS BIGINT), CAST(default.agg2.`value2` AS BIGINT))#280025L else null, true), partial_first(if ((gid#280004 = 0)) count(1)#280027L else null, true), partial_first(if ((gid#280004 = 0)) count(1)#280029L else null, true)], output=[key#279494, count#280052L, sum#280053L, count#280054L, sum#280055L, count#280056L, product#280010L, first#280057L, valueSet#280058, first#280059L, valueSet#280060, first#280061L, valueSet#280062, first#280063L, valueSet#280064, first#280065L, valueSet#280066, first#280067L, valueSet#280068, first#280069L, valueSet#280070])
+- HashAggregate(keys=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004], functions=[count(default.agg2.`value1`#280011), sum(CAST(default.agg2.`value1` AS BIGINT)#280012L), count(default.agg2.`value2`#280013), sum(CAST(default.agg2.`value2` AS BIGINT)#280014L), longproductsum(CAST(default.agg2.`value1` AS BIGINT)#280012L, CAST(default.agg2.`value2` AS BIGINT)#280014L, org.apache.spark.sql.hive.execution.LongProductSum@262c8dec, 0, 0), count(1)], output=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, count(default.agg2.`value1`)#280015L, sum(CAST(default.agg2.`value1` AS BIGINT))#280017L, count(default.agg2.`value2`)#280019L, sum(CAST(default.agg2.`value2` AS BIGINT))#280021L, longproductsum(CAST(default.agg2.`value1` AS BIGINT), CAST(default.agg2.`value2` AS BIGINT))#280025L, count(1)#280027L, count(1)#280029L])
+- Exchange hashpartitioning(key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, 5), true, [id=#205706]
+- HashAggregate(keys=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004], functions=[partial_count(default.agg2.`value1`#280011), partial_sum(CAST(default.agg2.`value1` AS BIGINT)#280012L), partial_count(default.agg2.`value2`#280013), partial_sum(CAST(default.agg2.`value2` AS BIGINT)#280014L), partial_longproductsum(CAST(default.agg2.`value1` AS BIGINT)#280012L, CAST(default.agg2.`value2` AS BIGINT)#280014L, org.apache.spark.sql.hive.execution.LongProductSum@262c8dec, 0, 0), partial_count(1)], output=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, count#280077L, sum#280078L, count#280079L, sum#280080L, product#280024L, count#280002L])
+- *(1) Expand [ArrayBuffer(key#279494, null, null, null, null, 0, value1#279495, cast(value1#279495 as bigint), value2#279496, cast(value2#279496 as bigint)), ArrayBuffer(key#279494, cast(value1#279495 as bigint), null, null, null, 1, null, null, null, null), ArrayBuffer(key#279494, null, value1#279495, null, null, 2, null, null, null, null), ArrayBuffer(key#279494, null, null, cast(value2#279496 as bigint), null, 3, null, null, null, null), ArrayBuffer(key#279494, cast(value1#279495 as bigint), null, cast(value2#279496 as bigint), null, 4, null, null, null, null), ArrayBuffer(key#279494, null, value1#279495, null, value2#279496, 5, null, null, null, null), ArrayBuffer(key#279494, null, null, null, value2#279496, 6, null, null, null, null)], [key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, default.agg2.`value1`#280011, CAST(default.agg2.`value1` AS BIGINT)#280012L, default.agg2.`value2`#280013, CAST(default.agg2.`value2` AS BIGINT)#280014L]
+- *(1) ColumnarToRow
+- FileScan parquet default.agg2[key#279494,value1#279495,value2#279496] Batched: true, DataFilters: [], Format: Parquet, Location: InMemoryFileIndex[file:/home/jenkins/workspace/spark-master-test-maven-hadoop-2.7/sql/hive/target..., PartitionFilters: [], PushedFilters: [], ReadSchema: struct<key:int,value1:int,value2:int>
at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:56)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.doExecute(HashAggregateExec.scala:104)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.DeserializeToObjectExec.doExecute(objects.scala:96)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:110)
at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:109)
at org.apache.spark.sql.Dataset.rdd$lzycompute(Dataset.scala:3001)
at org.apache.spark.sql.Dataset.rdd(Dataset.scala:2999)
at org.apache.spark.sql.QueryTest$.$anonfun$checkAnswer$1(QueryTest.scala:256)
at scala.runtime.java8.JFunction0$mcJ$sp.apply(JFunction0$mcJ$sp.java:23)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
at org.apache.spark.sql.QueryTest$.checkAnswer(QueryTest.scala:256)
at org.apache.spark.sql.QueryTest.checkAnswer(QueryTest.scala:153)
at org.apache.spark.sql.hive.execution.AggregationQuerySuite.$anonfun$new$50(AggregationQuerySuite.scala:692)
at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
at org.scalatest.Transformer.apply(Transformer.scala:22)
at org.scalatest.Transformer.apply(Transformer.scala:20)
at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:186)
at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:149)
at org.scalatest.FunSuiteLike.invokeWithFixture$1(FunSuiteLike.scala:184)
at org.scalatest.FunSuiteLike.$anonfun$runTest$1(FunSuiteLike.scala:196)
at org.scalatest.SuperEngine.runTestImpl(Engine.scala:289)
at org.scalatest.FunSuiteLike.runTest(FunSuiteLike.scala:196)
at org.scalatest.FunSuiteLike.runTest$(FunSuiteLike.scala:178)
at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:56)
at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:221)
at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:214)
at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:56)
at org.scalatest.FunSuiteLike.$anonfun$runTests$1(FunSuiteLike.scala:229)
at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:396)
at scala.collection.immutable.List.foreach(List.scala:392)
at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:384)
at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:379)
at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:461)
at org.scalatest.FunSuiteLike.runTests(FunSuiteLike.scala:229)
at org.scalatest.FunSuiteLike.runTests$(FunSuiteLike.scala:228)
at org.scalatest.FunSuite.runTests(FunSuite.scala:1560)
at org.scalatest.Suite.run(Suite.scala:1147)
at org.scalatest.Suite.run$(Suite.scala:1129)
at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1560)
at org.scalatest.FunSuiteLike.$anonfun$run$1(FunSuiteLike.scala:233)
at org.scalatest.SuperEngine.runImpl(Engine.scala:521)
at org.scalatest.FunSuiteLike.run(FunSuiteLike.scala:233)
at org.scalatest.FunSuiteLike.run$(FunSuiteLike.scala:232)
at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:56)
at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:56)
at org.scalatest.Suite.callExecuteOnSuite$1(Suite.scala:1210)
at org.scalatest.Suite.$anonfun$runNestedSuites$1(Suite.scala:1257)
at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
at org.scalatest.Suite.runNestedSuites(Suite.scala:1255)
at org.scalatest.Suite.runNestedSuites$(Suite.scala:1189)
at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:30)
at org.scalatest.Suite.run(Suite.scala:1144)
at org.scalatest.Suite.run$(Suite.scala:1129)
at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:30)
at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:45)
at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13(Runner.scala:1346)
at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13$adapted(Runner.scala:1340)
at scala.collection.immutable.List.foreach(List.scala:392)
at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:1340)
at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24(Runner.scala:1031)
at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24$adapted(Runner.scala:1010)
at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:1506)
at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1010)
at org.scalatest.tools.Runner$.main(Runner.scala:827)
at org.scalatest.tools.Runner.main(Runner.scala)
Cause: org.apache.spark.sql.catalyst.errors.package$TreeNodeException: execute, tree:
Exchange hashpartitioning(key#279494, 5), true, [id=#205709]
+- HashAggregate(keys=[key#279494], functions=[partial_count(if ((gid#280004 = 2)) default.agg2.`value1`#280006 else null), partial_sum(if ((gid#280004 = 1)) CAST(default.agg2.`value1` AS BIGINT)#280005L else null), partial_count(if ((gid#280004 = 6)) default.agg2.`value2`#280008 else null), partial_sum(if ((gid#280004 = 3)) CAST(default.agg2.`value2` AS BIGINT)#280007L else null), partial_count(if ((gid#280004 = 5)) default.agg2.`value1`#280006 else null, if ((gid#280004 = 5)) default.agg2.`value2`#280008 else null), partial_longproductsum(if ((gid#280004 = 4)) CAST(default.agg2.`value1` AS BIGINT)#280005L else null, if ((gid#280004 = 4)) CAST(default.agg2.`value2` AS BIGINT)#280007L else null, org.apache.spark.sql.hive.execution.LongProductSum@262c8dec, 0, 0), partial_first(if ((gid#280004 = 0)) count(default.agg2.`value1`)#280015L else null, true), partial_first(if ((gid#280004 = 0)) sum(CAST(default.agg2.`value1` AS BIGINT))#280017L else null, true), partial_first(if ((gid#280004 = 0)) count(default.agg2.`value2`)#280019L else null, true), partial_first(if ((gid#280004 = 0)) sum(CAST(default.agg2.`value2` AS BIGINT))#280021L else null, true), partial_first(if ((gid#280004 = 0)) longproductsum(CAST(default.agg2.`value1` AS BIGINT), CAST(default.agg2.`value2` AS BIGINT))#280025L else null, true), partial_first(if ((gid#280004 = 0)) count(1)#280027L else null, true), partial_first(if ((gid#280004 = 0)) count(1)#280029L else null, true)], output=[key#279494, count#280052L, sum#280053L, count#280054L, sum#280055L, count#280056L, product#280010L, first#280057L, valueSet#280058, first#280059L, valueSet#280060, first#280061L, valueSet#280062, first#280063L, valueSet#280064, first#280065L, valueSet#280066, first#280067L, valueSet#280068, first#280069L, valueSet#280070])
+- HashAggregate(keys=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004], functions=[count(default.agg2.`value1`#280011), sum(CAST(default.agg2.`value1` AS BIGINT)#280012L), count(default.agg2.`value2`#280013), sum(CAST(default.agg2.`value2` AS BIGINT)#280014L), longproductsum(CAST(default.agg2.`value1` AS BIGINT)#280012L, CAST(default.agg2.`value2` AS BIGINT)#280014L, org.apache.spark.sql.hive.execution.LongProductSum@262c8dec, 0, 0), count(1)], output=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, count(default.agg2.`value1`)#280015L, sum(CAST(default.agg2.`value1` AS BIGINT))#280017L, count(default.agg2.`value2`)#280019L, sum(CAST(default.agg2.`value2` AS BIGINT))#280021L, longproductsum(CAST(default.agg2.`value1` AS BIGINT), CAST(default.agg2.`value2` AS BIGINT))#280025L, count(1)#280027L, count(1)#280029L])
+- Exchange hashpartitioning(key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, 5), true, [id=#205706]
+- HashAggregate(keys=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004], functions=[partial_count(default.agg2.`value1`#280011), partial_sum(CAST(default.agg2.`value1` AS BIGINT)#280012L), partial_count(default.agg2.`value2`#280013), partial_sum(CAST(default.agg2.`value2` AS BIGINT)#280014L), partial_longproductsum(CAST(default.agg2.`value1` AS BIGINT)#280012L, CAST(default.agg2.`value2` AS BIGINT)#280014L, org.apache.spark.sql.hive.execution.LongProductSum@262c8dec, 0, 0), partial_count(1)], output=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, count#280077L, sum#280078L, count#280079L, sum#280080L, product#280024L, count#280002L])
+- *(1) Expand [ArrayBuffer(key#279494, null, null, null, null, 0, value1#279495, cast(value1#279495 as bigint), value2#279496, cast(value2#279496 as bigint)), ArrayBuffer(key#279494, cast(value1#279495 as bigint), null, null, null, 1, null, null, null, null), ArrayBuffer(key#279494, null, value1#279495, null, null, 2, null, null, null, null), ArrayBuffer(key#279494, null, null, cast(value2#279496 as bigint), null, 3, null, null, null, null), ArrayBuffer(key#279494, cast(value1#279495 as bigint), null, cast(value2#279496 as bigint), null, 4, null, null, null, null), ArrayBuffer(key#279494, null, value1#279495, null, value2#279496, 5, null, null, null, null), ArrayBuffer(key#279494, null, null, null, value2#279496, 6, null, null, null, null)], [key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, default.agg2.`value1`#280011, CAST(default.agg2.`value1` AS BIGINT)#280012L, default.agg2.`value2`#280013, CAST(default.agg2.`value2` AS BIGINT)#280014L]
+- *(1) ColumnarToRow
+- FileScan parquet default.agg2[key#279494,value1#279495,value2#279496] Batched: true, DataFilters: [], Format: Parquet, Location: InMemoryFileIndex[file:/home/jenkins/workspace/spark-master-test-maven-hadoop-2.7/sql/hive/target..., PartitionFilters: [], PushedFilters: [], ReadSchema: struct<key:int,value1:int,value2:int>
at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:56)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.doExecute(ShuffleExchangeExec.scala:90)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.$anonfun$doExecute$1(HashAggregateExec.scala:111)
at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:52)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.doExecute(HashAggregateExec.scala:104)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.DeserializeToObjectExec.doExecute(objects.scala:96)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:110)
at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:109)
at org.apache.spark.sql.Dataset.rdd$lzycompute(Dataset.scala:3001)
at org.apache.spark.sql.Dataset.rdd(Dataset.scala:2999)
at org.apache.spark.sql.QueryTest$.$anonfun$checkAnswer$1(QueryTest.scala:256)
at scala.runtime.java8.JFunction0$mcJ$sp.apply(JFunction0$mcJ$sp.java:23)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
at org.apache.spark.sql.QueryTest$.checkAnswer(QueryTest.scala:256)
at org.apache.spark.sql.QueryTest.checkAnswer(QueryTest.scala:153)
at org.apache.spark.sql.hive.execution.AggregationQuerySuite.$anonfun$new$50(AggregationQuerySuite.scala:692)
at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
at org.scalatest.Transformer.apply(Transformer.scala:22)
at org.scalatest.Transformer.apply(Transformer.scala:20)
at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:186)
at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:149)
at org.scalatest.FunSuiteLike.invokeWithFixture$1(FunSuiteLike.scala:184)
at org.scalatest.FunSuiteLike.$anonfun$runTest$1(FunSuiteLike.scala:196)
at org.scalatest.SuperEngine.runTestImpl(Engine.scala:289)
at org.scalatest.FunSuiteLike.runTest(FunSuiteLike.scala:196)
at org.scalatest.FunSuiteLike.runTest$(FunSuiteLike.scala:178)
at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:56)
at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:221)
at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:214)
at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:56)
at org.scalatest.FunSuiteLike.$anonfun$runTests$1(FunSuiteLike.scala:229)
at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:396)
at scala.collection.immutable.List.foreach(List.scala:392)
at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:384)
at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:379)
at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:461)
at org.scalatest.FunSuiteLike.runTests(FunSuiteLike.scala:229)
at org.scalatest.FunSuiteLike.runTests$(FunSuiteLike.scala:228)
at org.scalatest.FunSuite.runTests(FunSuite.scala:1560)
at org.scalatest.Suite.run(Suite.scala:1147)
at org.scalatest.Suite.run$(Suite.scala:1129)
at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1560)
at org.scalatest.FunSuiteLike.$anonfun$run$1(FunSuiteLike.scala:233)
at org.scalatest.SuperEngine.runImpl(Engine.scala:521)
at org.scalatest.FunSuiteLike.run(FunSuiteLike.scala:233)
at org.scalatest.FunSuiteLike.run$(FunSuiteLike.scala:232)
at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:56)
at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:56)
at org.scalatest.Suite.callExecuteOnSuite$1(Suite.scala:1210)
at org.scalatest.Suite.$anonfun$runNestedSuites$1(Suite.scala:1257)
at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
at org.scalatest.Suite.runNestedSuites(Suite.scala:1255)
at org.scalatest.Suite.runNestedSuites$(Suite.scala:1189)
at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:30)
at org.scalatest.Suite.run(Suite.scala:1144)
at org.scalatest.Suite.run$(Suite.scala:1129)
at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:30)
at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:45)
at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13(Runner.scala:1346)
at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13$adapted(Runner.scala:1340)
at scala.collection.immutable.List.foreach(List.scala:392)
at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:1340)
at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24(Runner.scala:1031)
at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24$adapted(Runner.scala:1010)
at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:1506)
at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1010)
at org.scalatest.tools.Runner$.main(Runner.scala:827)
at org.scalatest.tools.Runner.main(Runner.scala)
Cause: org.apache.spark.sql.catalyst.errors.package$TreeNodeException: execute, tree:
HashAggregate(keys=[key#279494], functions=[partial_count(if ((gid#280004 = 2)) default.agg2.`value1`#280006 else null), partial_sum(if ((gid#280004 = 1)) CAST(default.agg2.`value1` AS BIGINT)#280005L else null), partial_count(if ((gid#280004 = 6)) default.agg2.`value2`#280008 else null), partial_sum(if ((gid#280004 = 3)) CAST(default.agg2.`value2` AS BIGINT)#280007L else null), partial_count(if ((gid#280004 = 5)) default.agg2.`value1`#280006 else null, if ((gid#280004 = 5)) default.agg2.`value2`#280008 else null), partial_longproductsum(if ((gid#280004 = 4)) CAST(default.agg2.`value1` AS BIGINT)#280005L else null, if ((gid#280004 = 4)) CAST(default.agg2.`value2` AS BIGINT)#280007L else null, org.apache.spark.sql.hive.execution.LongProductSum@262c8dec, 0, 0), partial_first(if ((gid#280004 = 0)) count(default.agg2.`value1`)#280015L else null, true), partial_first(if ((gid#280004 = 0)) sum(CAST(default.agg2.`value1` AS BIGINT))#280017L else null, true), partial_first(if ((gid#280004 = 0)) count(default.agg2.`value2`)#280019L else null, true), partial_first(if ((gid#280004 = 0)) sum(CAST(default.agg2.`value2` AS BIGINT))#280021L else null, true), partial_first(if ((gid#280004 = 0)) longproductsum(CAST(default.agg2.`value1` AS BIGINT), CAST(default.agg2.`value2` AS BIGINT))#280025L else null, true), partial_first(if ((gid#280004 = 0)) count(1)#280027L else null, true), partial_first(if ((gid#280004 = 0)) count(1)#280029L else null, true)], output=[key#279494, count#280052L, sum#280053L, count#280054L, sum#280055L, count#280056L, product#280010L, first#280057L, valueSet#280058, first#280059L, valueSet#280060, first#280061L, valueSet#280062, first#280063L, valueSet#280064, first#280065L, valueSet#280066, first#280067L, valueSet#280068, first#280069L, valueSet#280070])
+- HashAggregate(keys=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004], functions=[count(default.agg2.`value1`#280011), sum(CAST(default.agg2.`value1` AS BIGINT)#280012L), count(default.agg2.`value2`#280013), sum(CAST(default.agg2.`value2` AS BIGINT)#280014L), longproductsum(CAST(default.agg2.`value1` AS BIGINT)#280012L, CAST(default.agg2.`value2` AS BIGINT)#280014L, org.apache.spark.sql.hive.execution.LongProductSum@262c8dec, 0, 0), count(1)], output=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, count(default.agg2.`value1`)#280015L, sum(CAST(default.agg2.`value1` AS BIGINT))#280017L, count(default.agg2.`value2`)#280019L, sum(CAST(default.agg2.`value2` AS BIGINT))#280021L, longproductsum(CAST(default.agg2.`value1` AS BIGINT), CAST(default.agg2.`value2` AS BIGINT))#280025L, count(1)#280027L, count(1)#280029L])
+- Exchange hashpartitioning(key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, 5), true, [id=#205706]
+- HashAggregate(keys=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004], functions=[partial_count(default.agg2.`value1`#280011), partial_sum(CAST(default.agg2.`value1` AS BIGINT)#280012L), partial_count(default.agg2.`value2`#280013), partial_sum(CAST(default.agg2.`value2` AS BIGINT)#280014L), partial_longproductsum(CAST(default.agg2.`value1` AS BIGINT)#280012L, CAST(default.agg2.`value2` AS BIGINT)#280014L, org.apache.spark.sql.hive.execution.LongProductSum@262c8dec, 0, 0), partial_count(1)], output=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, count#280077L, sum#280078L, count#280079L, sum#280080L, product#280024L, count#280002L])
+- *(1) Expand [ArrayBuffer(key#279494, null, null, null, null, 0, value1#279495, cast(value1#279495 as bigint), value2#279496, cast(value2#279496 as bigint)), ArrayBuffer(key#279494, cast(value1#279495 as bigint), null, null, null, 1, null, null, null, null), ArrayBuffer(key#279494, null, value1#279495, null, null, 2, null, null, null, null), ArrayBuffer(key#279494, null, null, cast(value2#279496 as bigint), null, 3, null, null, null, null), ArrayBuffer(key#279494, cast(value1#279495 as bigint), null, cast(value2#279496 as bigint), null, 4, null, null, null, null), ArrayBuffer(key#279494, null, value1#279495, null, value2#279496, 5, null, null, null, null), ArrayBuffer(key#279494, null, null, null, value2#279496, 6, null, null, null, null)], [key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, default.agg2.`value1`#280011, CAST(default.agg2.`value1` AS BIGINT)#280012L, default.agg2.`value2`#280013, CAST(default.agg2.`value2` AS BIGINT)#280014L]
+- *(1) ColumnarToRow
+- FileScan parquet default.agg2[key#279494,value1#279495,value2#279496] Batched: true, DataFilters: [], Format: Parquet, Location: InMemoryFileIndex[file:/home/jenkins/workspace/spark-master-test-maven-hadoop-2.7/sql/hive/target..., PartitionFilters: [], PushedFilters: [], ReadSchema: struct<key:int,value1:int,value2:int>
at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:56)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.doExecute(HashAggregateExec.scala:104)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.inputRDD$lzycompute(ShuffleExchangeExec.scala:64)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.inputRDD(ShuffleExchangeExec.scala:64)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.shuffleDependency$lzycompute(ShuffleExchangeExec.scala:74)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.shuffleDependency(ShuffleExchangeExec.scala:72)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.createShuffledRDD(ShuffleExchangeExec.scala:82)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.$anonfun$doExecute$1(ShuffleExchangeExec.scala:93)
at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:52)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.doExecute(ShuffleExchangeExec.scala:90)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.$anonfun$doExecute$1(HashAggregateExec.scala:111)
at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:52)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.doExecute(HashAggregateExec.scala:104)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.DeserializeToObjectExec.doExecute(objects.scala:96)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:110)
at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:109)
at org.apache.spark.sql.Dataset.rdd$lzycompute(Dataset.scala:3001)
at org.apache.spark.sql.Dataset.rdd(Dataset.scala:2999)
at org.apache.spark.sql.QueryTest$.$anonfun$checkAnswer$1(QueryTest.scala:256)
at scala.runtime.java8.JFunction0$mcJ$sp.apply(JFunction0$mcJ$sp.java:23)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
at org.apache.spark.sql.QueryTest$.checkAnswer(QueryTest.scala:256)
at org.apache.spark.sql.QueryTest.checkAnswer(QueryTest.scala:153)
at org.apache.spark.sql.hive.execution.AggregationQuerySuite.$anonfun$new$50(AggregationQuerySuite.scala:692)
at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
at org.scalatest.Transformer.apply(Transformer.scala:22)
at org.scalatest.Transformer.apply(Transformer.scala:20)
at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:186)
at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:149)
at org.scalatest.FunSuiteLike.invokeWithFixture$1(FunSuiteLike.scala:184)
at org.scalatest.FunSuiteLike.$anonfun$runTest$1(FunSuiteLike.scala:196)
at org.scalatest.SuperEngine.runTestImpl(Engine.scala:289)
at org.scalatest.FunSuiteLike.runTest(FunSuiteLike.scala:196)
at org.scalatest.FunSuiteLike.runTest$(FunSuiteLike.scala:178)
at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:56)
at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:221)
at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:214)
at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:56)
at org.scalatest.FunSuiteLike.$anonfun$runTests$1(FunSuiteLike.scala:229)
at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:396)
at scala.collection.immutable.List.foreach(List.scala:392)
at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:384)
at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:379)
at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:461)
at org.scalatest.FunSuiteLike.runTests(FunSuiteLike.scala:229)
at org.scalatest.FunSuiteLike.runTests$(FunSuiteLike.scala:228)
at org.scalatest.FunSuite.runTests(FunSuite.scala:1560)
at org.scalatest.Suite.run(Suite.scala:1147)
at org.scalatest.Suite.run$(Suite.scala:1129)
at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1560)
at org.scalatest.FunSuiteLike.$anonfun$run$1(FunSuiteLike.scala:233)
at org.scalatest.SuperEngine.runImpl(Engine.scala:521)
at org.scalatest.FunSuiteLike.run(FunSuiteLike.scala:233)
at org.scalatest.FunSuiteLike.run$(FunSuiteLike.scala:232)
at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:56)
at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:56)
at org.scalatest.Suite.callExecuteOnSuite$1(Suite.scala:1210)
at org.scalatest.Suite.$anonfun$runNestedSuites$1(Suite.scala:1257)
at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
at org.scalatest.Suite.runNestedSuites(Suite.scala:1255)
at org.scalatest.Suite.runNestedSuites$(Suite.scala:1189)
at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:30)
at org.scalatest.Suite.run(Suite.scala:1144)
at org.scalatest.Suite.run$(Suite.scala:1129)
at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:30)
at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:45)
at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13(Runner.scala:1346)
at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13$adapted(Runner.scala:1340)
at scala.collection.immutable.List.foreach(List.scala:392)
at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:1340)
at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24(Runner.scala:1031)
at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24$adapted(Runner.scala:1010)
at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:1506)
at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1010)
at org.scalatest.tools.Runner$.main(Runner.scala:827)
at org.scalatest.tools.Runner.main(Runner.scala)
Cause: org.apache.spark.sql.catalyst.errors.package$TreeNodeException: execute, tree:
HashAggregate(keys=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004], functions=[count(default.agg2.`value1`#280011), sum(CAST(default.agg2.`value1` AS BIGINT)#280012L), count(default.agg2.`value2`#280013), sum(CAST(default.agg2.`value2` AS BIGINT)#280014L), longproductsum(CAST(default.agg2.`value1` AS BIGINT)#280012L, CAST(default.agg2.`value2` AS BIGINT)#280014L, org.apache.spark.sql.hive.execution.LongProductSum@262c8dec, 0, 0), count(1)], output=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, count(default.agg2.`value1`)#280015L, sum(CAST(default.agg2.`value1` AS BIGINT))#280017L, count(default.agg2.`value2`)#280019L, sum(CAST(default.agg2.`value2` AS BIGINT))#280021L, longproductsum(CAST(default.agg2.`value1` AS BIGINT), CAST(default.agg2.`value2` AS BIGINT))#280025L, count(1)#280027L, count(1)#280029L])
+- Exchange hashpartitioning(key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, 5), true, [id=#205706]
+- HashAggregate(keys=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004], functions=[partial_count(default.agg2.`value1`#280011), partial_sum(CAST(default.agg2.`value1` AS BIGINT)#280012L), partial_count(default.agg2.`value2`#280013), partial_sum(CAST(default.agg2.`value2` AS BIGINT)#280014L), partial_longproductsum(CAST(default.agg2.`value1` AS BIGINT)#280012L, CAST(default.agg2.`value2` AS BIGINT)#280014L, org.apache.spark.sql.hive.execution.LongProductSum@262c8dec, 0, 0), partial_count(1)], output=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, count#280077L, sum#280078L, count#280079L, sum#280080L, product#280024L, count#280002L])
+- *(1) Expand [ArrayBuffer(key#279494, null, null, null, null, 0, value1#279495, cast(value1#279495 as bigint), value2#279496, cast(value2#279496 as bigint)), ArrayBuffer(key#279494, cast(value1#279495 as bigint), null, null, null, 1, null, null, null, null), ArrayBuffer(key#279494, null, value1#279495, null, null, 2, null, null, null, null), ArrayBuffer(key#279494, null, null, cast(value2#279496 as bigint), null, 3, null, null, null, null), ArrayBuffer(key#279494, cast(value1#279495 as bigint), null, cast(value2#279496 as bigint), null, 4, null, null, null, null), ArrayBuffer(key#279494, null, value1#279495, null, value2#279496, 5, null, null, null, null), ArrayBuffer(key#279494, null, null, null, value2#279496, 6, null, null, null, null)], [key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, default.agg2.`value1`#280011, CAST(default.agg2.`value1` AS BIGINT)#280012L, default.agg2.`value2`#280013, CAST(default.agg2.`value2` AS BIGINT)#280014L]
+- *(1) ColumnarToRow
+- FileScan parquet default.agg2[key#279494,value1#279495,value2#279496] Batched: true, DataFilters: [], Format: Parquet, Location: InMemoryFileIndex[file:/home/jenkins/workspace/spark-master-test-maven-hadoop-2.7/sql/hive/target..., PartitionFilters: [], PushedFilters: [], ReadSchema: struct<key:int,value1:int,value2:int>
at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:56)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.doExecute(HashAggregateExec.scala:104)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.$anonfun$doExecute$1(HashAggregateExec.scala:111)
at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:52)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.doExecute(HashAggregateExec.scala:104)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.inputRDD$lzycompute(ShuffleExchangeExec.scala:64)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.inputRDD(ShuffleExchangeExec.scala:64)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.shuffleDependency$lzycompute(ShuffleExchangeExec.scala:74)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.shuffleDependency(ShuffleExchangeExec.scala:72)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.createShuffledRDD(ShuffleExchangeExec.scala:82)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.$anonfun$doExecute$1(ShuffleExchangeExec.scala:93)
at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:52)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.doExecute(ShuffleExchangeExec.scala:90)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.$anonfun$doExecute$1(HashAggregateExec.scala:111)
at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:52)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.doExecute(HashAggregateExec.scala:104)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.DeserializeToObjectExec.doExecute(objects.scala:96)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:110)
at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:109)
at org.apache.spark.sql.Dataset.rdd$lzycompute(Dataset.scala:3001)
at org.apache.spark.sql.Dataset.rdd(Dataset.scala:2999)
at org.apache.spark.sql.QueryTest$.$anonfun$checkAnswer$1(QueryTest.scala:256)
at scala.runtime.java8.JFunction0$mcJ$sp.apply(JFunction0$mcJ$sp.java:23)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
at org.apache.spark.sql.QueryTest$.checkAnswer(QueryTest.scala:256)
at org.apache.spark.sql.QueryTest.checkAnswer(QueryTest.scala:153)
at org.apache.spark.sql.hive.execution.AggregationQuerySuite.$anonfun$new$50(AggregationQuerySuite.scala:692)
at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
at org.scalatest.Transformer.apply(Transformer.scala:22)
at org.scalatest.Transformer.apply(Transformer.scala:20)
at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:186)
at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:149)
at org.scalatest.FunSuiteLike.invokeWithFixture$1(FunSuiteLike.scala:184)
at org.scalatest.FunSuiteLike.$anonfun$runTest$1(FunSuiteLike.scala:196)
at org.scalatest.SuperEngine.runTestImpl(Engine.scala:289)
at org.scalatest.FunSuiteLike.runTest(FunSuiteLike.scala:196)
at org.scalatest.FunSuiteLike.runTest$(FunSuiteLike.scala:178)
at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:56)
at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:221)
at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:214)
at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:56)
at org.scalatest.FunSuiteLike.$anonfun$runTests$1(FunSuiteLike.scala:229)
at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:396)
at scala.collection.immutable.List.foreach(List.scala:392)
at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:384)
at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:379)
at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:461)
at org.scalatest.FunSuiteLike.runTests(FunSuiteLike.scala:229)
at org.scalatest.FunSuiteLike.runTests$(FunSuiteLike.scala:228)
at org.scalatest.FunSuite.runTests(FunSuite.scala:1560)
at org.scalatest.Suite.run(Suite.scala:1147)
at org.scalatest.Suite.run$(Suite.scala:1129)
at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1560)
at org.scalatest.FunSuiteLike.$anonfun$run$1(FunSuiteLike.scala:233)
at org.scalatest.SuperEngine.runImpl(Engine.scala:521)
at org.scalatest.FunSuiteLike.run(FunSuiteLike.scala:233)
at org.scalatest.FunSuiteLike.run$(FunSuiteLike.scala:232)
at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:56)
at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:56)
at org.scalatest.Suite.callExecuteOnSuite$1(Suite.scala:1210)
at org.scalatest.Suite.$anonfun$runNestedSuites$1(Suite.scala:1257)
at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
at org.scalatest.Suite.runNestedSuites(Suite.scala:1255)
at org.scalatest.Suite.runNestedSuites$(Suite.scala:1189)
at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:30)
at org.scalatest.Suite.run(Suite.scala:1144)
at org.scalatest.Suite.run$(Suite.scala:1129)
at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:30)
at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:45)
at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13(Runner.scala:1346)
at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13$adapted(Runner.scala:1340)
at scala.collection.immutable.List.foreach(List.scala:392)
at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:1340)
at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24(Runner.scala:1031)
at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24$adapted(Runner.scala:1010)
at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:1506)
at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1010)
at org.scalatest.tools.Runner$.main(Runner.scala:827)
at org.scalatest.tools.Runner.main(Runner.scala)
Cause: org.apache.spark.sql.catalyst.errors.package$TreeNodeException: execute, tree:
Exchange hashpartitioning(key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, 5), true, [id=#205706]
+- HashAggregate(keys=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004], functions=[partial_count(default.agg2.`value1`#280011), partial_sum(CAST(default.agg2.`value1` AS BIGINT)#280012L), partial_count(default.agg2.`value2`#280013), partial_sum(CAST(default.agg2.`value2` AS BIGINT)#280014L), partial_longproductsum(CAST(default.agg2.`value1` AS BIGINT)#280012L, CAST(default.agg2.`value2` AS BIGINT)#280014L, org.apache.spark.sql.hive.execution.LongProductSum@262c8dec, 0, 0), partial_count(1)], output=[key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, count#280077L, sum#280078L, count#280079L, sum#280080L, product#280024L, count#280002L])
   +- *(1) Expand [ArrayBuffer(key#279494, null, null, null, null, 0, value1#279495, cast(value1#279495 as bigint), value2#279496, cast(value2#279496 as bigint)), ArrayBuffer(key#279494, cast(value1#279495 as bigint), null, null, null, 1, null, null, null, null), ArrayBuffer(key#279494, null, value1#279495, null, null, 2, null, null, null, null), ArrayBuffer(key#279494, null, null, cast(value2#279496 as bigint), null, 3, null, null, null, null), ArrayBuffer(key#279494, cast(value1#279495 as bigint), null, cast(value2#279496 as bigint), null, 4, null, null, null, null), ArrayBuffer(key#279494, null, value1#279495, null, value2#279496, 5, null, null, null, null), ArrayBuffer(key#279494, null, null, null, value2#279496, 6, null, null, null, null)], [key#279494, CAST(default.agg2.`value1` AS BIGINT)#280005L, default.agg2.`value1`#280006, CAST(default.agg2.`value2` AS BIGINT)#280007L, default.agg2.`value2`#280008, gid#280004, default.agg2.`value1`#280011, CAST(default.agg2.`value1` AS BIGINT)#280012L, default.agg2.`value2`#280013, CAST(default.agg2.`value2` AS BIGINT)#280014L]
      +- *(1) ColumnarToRow
         +- FileScan parquet default.agg2[key#279494,value1#279495,value2#279496] Batched: true, DataFilters: [], Format: Parquet, Location: InMemoryFileIndex[file:/home/jenkins/workspace/spark-master-test-maven-hadoop-2.7/sql/hive/target..., PartitionFilters: [], PushedFilters: [], ReadSchema: struct<key:int,value1:int,value2:int>
at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:56)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.doExecute(ShuffleExchangeExec.scala:90)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.$anonfun$doExecute$1(HashAggregateExec.scala:111)
at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:52)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.doExecute(HashAggregateExec.scala:104)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.$anonfun$doExecute$1(HashAggregateExec.scala:111)
at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:52)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.doExecute(HashAggregateExec.scala:104)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.inputRDD$lzycompute(ShuffleExchangeExec.scala:64)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.inputRDD(ShuffleExchangeExec.scala:64)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.shuffleDependency$lzycompute(ShuffleExchangeExec.scala:74)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.shuffleDependency(ShuffleExchangeExec.scala:72)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.createShuffledRDD(ShuffleExchangeExec.scala:82)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.$anonfun$doExecute$1(ShuffleExchangeExec.scala:93)
at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:52)
at org.apache.spark.sql.execution.exchange.ShuffleExchangeExec.doExecute(ShuffleExchangeExec.scala:90)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.$anonfun$doExecute$1(HashAggregateExec.scala:111)
at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:52)
at org.apache.spark.sql.execution.aggregate.HashAggregateExec.doExecute(HashAggregateExec.scala:104)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.DeserializeToObjectExec.doExecute(objects.scala:96)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:227)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:224)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:185)
at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:110)
at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:109)
at org.apache.spark.sql.Dataset.rdd$lzycompute(Dataset.scala:3001)
at org.apache.spark.sql.Dataset.rdd(Dataset.scala:2999)
at org.apache.spark.sql.QueryTest$.$anonfun$checkAnswer$1(QueryTest.scala:256)
at scala.runtime.java8.JFunction0$mcJ$sp.apply(JFunction0$mcJ$sp.java:23)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
at org.apache.spark.sql.QueryTest$.checkAnswer(QueryTest.scala:256)
at org.apache.spark.sql.QueryTest.checkAnswer(QueryTest.scala:153)
at org.apache.spark.sql.hive.execution.AggregationQuerySuite.$anonfun$new$50(AggregationQuerySuite.scala:692)
at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
at org.scalatest.Transformer.apply(Transformer.scala:22)
at org.scalatest.Transformer.apply(Transformer.scala:20)
at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:186)
at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:149)
at org.scalatest.FunSuiteLike.invokeWithFixture$1(FunSuiteLike.scala:184)
at org.scalatest.FunSuiteLike.$anonfun$runTest$1(FunSuiteLike.scala:196)
at org.scalatest.SuperEngine.runTestImpl(Engine.scala:289)
at org.scalatest.FunSuiteLike.runTest(FunSuiteLike.scala:196)
at org.scalatest.FunSuiteLike.runTest$(FunSuiteLike.scala:178)
at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:56)
at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:221)
at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:214)
at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:56)
at org.scalatest.FunSuiteLike.$anonfun$runTests$1(FunSuiteLike.scala:229)
at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:396)
at scala.collection.immutable.List.foreach(List.scala:392)
at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:384)
at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:379)
at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:461)
at org.scalatest.FunSuiteLike.runTests(FunSuiteLike.scala:229)
at org.scalatest.FunSuiteLike.runTests$(FunSuiteLike.scala:228)
at org.scalatest.FunSuite.runTests(FunSuite.scala:1560)
at org.scalatest.Suite.run(Suite.scala:1147)
at org.scalatest.Suite.run$(Suite.scala:1129)
at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1560)
at org.scalatest.FunSuiteLike.$anonfun$run$1(FunSuiteLike.scala:233)
at org.scalatest.SuperEngine.runImpl(Engine.scala:521)
at org.scalatest.FunSuiteLike.run(FunSuiteLike.scala:233)
at org.scalatest.FunSuiteLike.run$(FunSuiteLike.scala:232)
at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:56)
at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:56)
at org.scalatest.Suite.callExecuteOnSuite$1(Suite.scala:1210)
at org.scalatest.Suite.$anonfun$runNestedSuites$1(Suite.scala:1257)
at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
at org.scalatest.Suite.runNestedSuites(Suite.scala:1255)
at org.scalatest.Suite.runNestedSuites$(Suite.scala:1189)
at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.sc
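
For reference, the plan in the cause above is the shape Spark produces when one GROUP BY combines several DISTINCT aggregates over different column sets with plain aggregates: the Expand node emits one projection per distinct group (gid 0-6) and the two HashAggregate levels fold them back together. The Scala sketch below is only an assumed reconstruction of that query shape over default.agg2 (schema key:int, value1:int, value2:int, per the FileScan above); it is not the actual suite code, and it omits the custom longProductSum UDAF that also appears in the plan.

// Assumed reconstruction of the query shape behind the plan above; not the real test code.
import org.apache.spark.sql.SparkSession

object DistinctAggregateRepro {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("distinct-aggregate-repro")
      .enableHiveSupport() // the failing suite reads the Hive table default.agg2
      .getOrCreate()

    // Several DISTINCT column sets plus plain aggregates in one GROUP BY; this is
    // the pattern that triggers the Expand + double HashAggregate rewrite seen above.
    val df = spark.sql(
      """SELECT key,
        |       count(DISTINCT value1), sum(DISTINCT value1),
        |       count(DISTINCT value2), sum(DISTINCT value2),
        |       count(DISTINCT value1, value2),
        |       count(value1), sum(value1),
        |       count(value2), sum(value2),
        |       count(1)
        |FROM default.agg2
        |GROUP BY key""".stripMargin)

    df.explain() // prints a HashAggregate/Exchange/Expand tree of the same shape
    spark.stop()
  }
}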