sbt.ForkMain$ForkError: org.scalatest.exceptions.TestFailedException: 
Error adding data: Could not find index of the source to which data was added
org.apache.spark.sql.streaming.StreamTest$$anonfun$14.apply(StreamTest.scala:708)
	org.apache.spark.sql.streaming.StreamTest$$anonfun$14.apply(StreamTest.scala:708)
	scala.Option.getOrElse(Option.scala:121)
	org.apache.spark.sql.streaming.StreamTest$class.executeAction$1(StreamTest.scala:707)
	org.apache.spark.sql.streaming.StreamTest$$anonfun$liftedTree1$1$1.apply(StreamTest.scala:770)
	org.apache.spark.sql.streaming.StreamTest$$anonfun$liftedTree1$1$1.apply(StreamTest.scala:757)
	scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
	scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
	org.apache.spark.sql.streaming.StreamTest$class.liftedTree1$1(StreamTest.scala:757)
	org.apache.spark.sql.streaming.StreamTest$class.testStream(StreamTest.scala:756)


== Progress ==
   AssertOnQuery(<condition>, )
   AddKafkaData(topics = Set(topic-0-seems), data = WrappedArray(1, 2, 3), message = )
   CheckAnswer: [2],[3],[4]
   AssertOnQuery(<condition>, name)
=> AddKafkaData(topics = Set(topic-0-bad), data = WrappedArray(4, 5, 6), message = )
   CheckAnswer: [2],[3],[4],[5],[6],[7]
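
(For reference, the progress trace above corresponds to a StreamTest DSL scenario roughly like the sketch below. This is a minimal reconstruction inferred from the trace and the plans, not the suite's exact source: the query, the +1 mapping, and the AssertOnQuery bodies are assumptions; only the action sequence and topic names come from the log. The real test is in KafkaContinuousSourceTopicDeletionSuite, per the stack trace, and runs under ContinuousExecution.)

    // Hedged reconstruction of the failing scenario. Topic names and the
    // action order are taken from the == Progress == trace; everything
    // else (the mapping, conditions, fixture names) is assumed.
    import testImplicits._

    val df = spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", bootstrapServers) // assumed fixture value
      .option("subscribePattern", "topic-0-.*")            // matches the stream state above
      .load()
      .selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
      .as[(String, String)]
      .map(kv => kv._2.toInt + 1) // consistent with data 1,2,3 => CheckAnswer [2],[3],[4]

    testStream(df)(
      AssertOnQuery(_ => true),                  // "AssertOnQuery(<condition>, )" in the trace
      AddKafkaData(Set("topic-0-seems"), 1, 2, 3),
      CheckAnswer(2, 3, 4),
      AssertOnQuery(_ => true, "name"),
      AddKafkaData(Set("topic-0-bad"), 4, 5, 6), // the step marked "=>" is where the failure occurs
      CheckAnswer(2, 3, 4, 5, 6, 7)
    )

The failure fires at the second AddKafkaData: the harness could not locate the KafkaSource among the running query's sources when injecting data (StreamTest.scala:708).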

== Stream ==
Output Mode: Append
Stream state: {KafkaSource[SubscribePattern[topic-0-.*]]: {"topic-0-seems":{"2":1,"4":0,"1":1,"3":1,"0":1}}}
Thread state: alive
Thread stack trace: scala.runtime.ScalaRunTime$.hash(ScalaRunTime.scala:206)
scala.collection.immutable.HashMap.elemHashCode(HashMap.scala:80)
scala.collection.immutable.HashMap.computeHash(HashMap.scala:89)
scala.collection.immutable.HashMap.$plus(HashMap.scala:60)
scala.collection.immutable.HashMap.$plus(HashMap.scala:37)
scala.collection.mutable.MapBuilder.$plus$eq(MapBuilder.scala:29)
scala.collection.mutable.MapBuilder.$plus$eq(MapBuilder.scala:25)
scala.collection.generic.Growable$$anonfun$$plus$plus$eq$1.apply(Growable.scala:59)
scala.collection.generic.Growable$$anonfun$$plus$plus$eq$1.apply(Growable.scala:59)
scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:59)
scala.collection.mutable.MapBuilder.$plus$plus$eq(MapBuilder.scala:25)
scala.collection.generic.GenMapFactory.apply(GenMapFactory.scala:48)
scala.sys.package$.env(package.scala:61)
org.apache.spark.util.Utils$.isTesting(Utils.scala:1883)
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.assertNotAnalysisRule(AnalysisHelper.scala:134)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.assertNotAnalysisRule(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.transformUp(AnalysisHelper.scala:157)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:275)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:275)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:326)
org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:324)
org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:275)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.transformUp(AnalysisHelper.scala:158)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:275)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:275)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:326)
org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:324)
org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:275)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.transformUp(AnalysisHelper.scala:158)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:275)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:275)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:326)
org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:324)
org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:275)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.transformUp(AnalysisHelper.scala:158)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:275)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:275)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:326)
org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:324)
org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:275)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.transformUp(AnalysisHelper.scala:158)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:275)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:275)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:326)
org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:324)
org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:275)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.transformUp(AnalysisHelper.scala:158)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.optimizer.UpdateNullabilityInAttributeReferences$.apply(Optimizer.scala:1613)
org.apache.spark.sql.catalyst.optimizer.UpdateNullabilityInAttributeReferences$.apply(Optimizer.scala:1612)
org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:87)
org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:84)
scala.collection.IndexedSeqOptimized$class.foldl(IndexedSeqOptimized.scala:57)
scala.collection.IndexedSeqOptimized$class.foldLeft(IndexedSeqOptimized.scala:66)
scala.collection.mutable.WrappedArray.foldLeft(WrappedArray.scala:35)
org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:84)
org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:76)
scala.collection.immutable.List.foreach(List.scala:392)
org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:76)
org.apache.spark.sql.execution.streaming.IncrementalExecution.optimizedPlan$lzycompute(IncrementalExecution.scala:77)
org.apache.spark.sql.execution.streaming.IncrementalExecution.optimizedPlan(IncrementalExecution.scala:77)
org.apache.spark.sql.execution.QueryExecution.sparkPlan$lzycompute(QueryExecution.scala:72)
org.apache.spark.sql.execution.QueryExecution.sparkPlan(QueryExecution.scala:68)
org.apache.spark.sql.execution.QueryExecution.executedPlan$lzycompute(QueryExecution.scala:77)
org.apache.spark.sql.execution.QueryExecution.executedPlan(QueryExecution.scala:77)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution$$anonfun$runContinuous$2.apply(ContinuousExecution.scala:204)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution$$anonfun$runContinuous$2.apply(ContinuousExecution.scala:195)
org.apache.spark.sql.execution.streaming.ProgressReporter$class.reportTimeTaken(ProgressReporter.scala:401)
org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:58)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution.runContinuous(ContinuousExecution.scala:195)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution.runActivatedStream(ContinuousExecution.scala:90)
org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runStream(StreamExecution.scala:279)
org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:189)


== Sink ==
0: 
1: [2] [4] [3]
2: 


== Plan ==
== Parsed Logical Plan ==
WriteToContinuousDataSource org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWriteSupport@6cac2f30
+- SerializeFromObject [input[0, int, false] AS value#14884]
   +- MapElements <function1>, class scala.Tuple2, [StructField(_1,StringType,true), StructField(_2,StringType,true)], obj#14883: int
      +- DeserializeToObject newInstance(class scala.Tuple2), obj#14882: scala.Tuple2
         +- Project [cast(key#14962 as string) AS key#14874, cast(value#14963 as string) AS value#14875]
            +- Streaming RelationV2 kafka[key#14962, value#14963, topic#14964, partition#14965, offset#14966L, timestamp#14967, timestampType#14968] (Options: [kafka.metadata.max.age.ms=1,failOnDataLoss=false,kafka.bootstrap.servers=127.0.0.1:37266,kafka.d...)

== Analyzed Logical Plan ==
WriteToContinuousDataSource org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWriteSupport@6cac2f30
+- SerializeFromObject [input[0, int, false] AS value#14884]
   +- MapElements <function1>, class scala.Tuple2, [StructField(_1,StringType,true), StructField(_2,StringType,true)], obj#14883: int
      +- DeserializeToObject newInstance(class scala.Tuple2), obj#14882: scala.Tuple2
         +- Project [cast(key#14962 as string) AS key#14874, cast(value#14963 as string) AS value#14875]
            +- Streaming RelationV2 kafka[key#14962, value#14963, topic#14964, partition#14965, offset#14966L, timestamp#14967, timestampType#14968] (Options: [kafka.metadata.max.age.ms=1,failOnDataLoss=false,kafka.bootstrap.servers=127.0.0.1:37266,kafka.d...)

== Optimized Logical Plan ==
WriteToContinuousDataSource org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWriteSupport@6cac2f30
+- SerializeFromObject [input[0, int, false] AS value#14884]
   +- MapElements <function1>, class scala.Tuple2, [StructField(_1,StringType,true), StructField(_2,StringType,true)], obj#14883: int
      +- DeserializeToObject newInstance(class scala.Tuple2), obj#14882: scala.Tuple2
         +- Project [cast(key#14962 as string) AS key#14874, cast(value#14963 as string) AS value#14875]
            +- Streaming RelationV2 kafka[key#14962, value#14963, topic#14964, partition#14965, offset#14966L, timestamp#14967, timestampType#14968] (Options: [kafka.metadata.max.age.ms=1,failOnDataLoss=false,kafka.bootstrap.servers=127.0.0.1:37266,kafka.d...)

== Physical Plan ==
WriteToContinuousDataSource org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWriteSupport@6cac2f30
+- *(1) SerializeFromObject [input[0, int, false] AS value#14884]
   +- *(1) MapElements <function1>, obj#14883: int
      +- *(1) DeserializeToObject newInstance(class scala.Tuple2), obj#14882: scala.Tuple2
         +- *(1) Project [cast(key#14962 as string) AS key#14874, cast(value#14963 as string) AS value#14875]
            +- *(1) Project [key#14962, value#14963, topic#14964, partition#14965, offset#14966L, timestamp#14967, timestampType#14968]
               +- *(1) ScanV2 kafka[key#14962, value#14963, topic#14964, partition#14965, offset#14966L, timestamp#14967, timestampType#14968] (Options: [kafka.metadata.max.age.ms=1,failOnDataLoss=false,kafka.bootstrap.servers=127.0.0.1:37266,kafka.d...)
         
         
	at org.scalatest.Assertions$class.newAssertionFailedException(Assertions.scala:528)
	at org.scalatest.FunSuite.newAssertionFailedException(FunSuite.scala:1560)
	at org.scalatest.Assertions$class.fail(Assertions.scala:1089)
	at org.scalatest.FunSuite.fail(FunSuite.scala:1560)
	at org.apache.spark.sql.streaming.StreamTest$class.failTest$1(StreamTest.scala:450)
	at org.apache.spark.sql.streaming.StreamTest$class.executeAction$1(StreamTest.scala:716)
	at org.apache.spark.sql.streaming.StreamTest$$anonfun$liftedTree1$1$1.apply(StreamTest.scala:770)
	at org.apache.spark.sql.streaming.StreamTest$$anonfun$liftedTree1$1$1.apply(StreamTest.scala:757)
	at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
	at org.apache.spark.sql.streaming.StreamTest$class.liftedTree1$1(StreamTest.scala:757)
	at org.apache.spark.sql.streaming.StreamTest$class.testStream(StreamTest.scala:756)
	at org.apache.spark.sql.kafka010.KafkaSourceTest.testStream(KafkaMicroBatchSourceSuite.scala:49)
	at org.apache.spark.sql.kafka010.KafkaContinuousSourceTopicDeletionSuite$$anonfun$1.apply$mcV$sp(KafkaContinuousSourceSuite.scala:54)
	at org.apache.spark.sql.kafka010.KafkaContinuousSourceTopicDeletionSuite$$anonfun$1.apply(KafkaContinuousSourceSuite.scala:32)
	at org.apache.spark.sql.kafka010.KafkaContinuousSourceTopicDeletionSuite$$anonfun$1.apply(KafkaContinuousSourceSuite.scala:32)
	at org.scalatest.OutcomeOf$class.outcomeOf(OutcomeOf.scala:85)
	at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
	at org.scalatest.Transformer.apply(Transformer.scala:22)
	at org.scalatest.Transformer.apply(Transformer.scala:20)
	at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:186)
	at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:103)
	at org.scalatest.FunSuiteLike$class.invokeWithFixture$1(FunSuiteLike.scala:183)
	at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:196)
	at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:196)
	at org.scalatest.SuperEngine.runTestImpl(Engine.scala:289)
	at org.scalatest.FunSuiteLike$class.runTest(FunSuiteLike.scala:196)
	at org.apache.spark.sql.kafka010.KafkaSourceTest.org$scalatest$BeforeAndAfterEach$$super$runTest(KafkaMicroBatchSourceSuite.scala:49)
	at org.scalatest.BeforeAndAfterEach$class.runTest(BeforeAndAfterEach.scala:221)
	at org.apache.spark.sql.kafka010.KafkaSourceTest.runTest(KafkaMicroBatchSourceSuite.scala:49)
	at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:229)
	at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:229)
	at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:396)
	at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:384)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:384)
	at org.scalatest.SuperEngine.org$scalatest$SuperEngine$$runTestsInBranch(Engine.scala:379)
	at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:461)
	at org.scalatest.FunSuiteLike$class.runTests(FunSuiteLike.scala:229)
	at org.scalatest.FunSuite.runTests(FunSuite.scala:1560)
	at org.scalatest.Suite$class.run(Suite.scala:1147)
	at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1560)
	at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:233)
	at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:233)
	at org.scalatest.SuperEngine.runImpl(Engine.scala:521)
	at org.scalatest.FunSuiteLike$class.run(FunSuiteLike.scala:233)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:52)
	at org.scalatest.BeforeAndAfterAll$class.liftedTree1$1(BeforeAndAfterAll.scala:213)
	at org.scalatest.BeforeAndAfterAll$class.run(BeforeAndAfterAll.scala:210)
	at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:52)
	at org.scalatest.tools.Framework.org$scalatest$tools$Framework$$runSuite(Framework.scala:314)
	at org.scalatest.tools.Framework$ScalaTestTask.execute(Framework.scala:480)
	at sbt.ForkMain$Run$2.call(ForkMain.java:296)
	at sbt.ForkMain$Run$2.call(ForkMain.java:286)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)