Error adding data: Could not find index of the source to which data was added
org.apache.spark.sql.streaming.StreamTest$$anonfun$14.apply(StreamTest.scala:708)
 org.apache.spark.sql.streaming.StreamTest$$anonfun$14.apply(StreamTest.scala:708)
 scala.Option.getOrElse(Option.scala:121)
 org.apache.spark.sql.streaming.StreamTest$class.executeAction$1(StreamTest.scala:707)
 org.apache.spark.sql.streaming.StreamTest$$anonfun$liftedTree1$1$1.apply(StreamTest.scala:767)
 org.apache.spark.sql.streaming.StreamTest$$anonfun$liftedTree1$1$1.apply(StreamTest.scala:754)
 scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
 scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
 org.apache.spark.sql.streaming.StreamTest$class.liftedTree1$1(StreamTest.scala:754)
 org.apache.spark.sql.streaming.StreamTest$class.testStream(StreamTest.scala:753)


== Progress ==
 AssertOnQuery(<condition>, )
 AddKafkaData(topics = Set(topic-0-seems), data = WrappedArray(1, 2, 3), message = )
 CheckAnswer: [2],[3],[4]
 AssertOnQuery(<condition>, name)
=> AddKafkaData(topics = Set(topic-0-bad), data = WrappedArray(4, 5, 6), message = )
 CheckAnswer: [2],[3],[4],[5],[6],[7]

== Stream ==
Output Mode: Append
Stream state: {KafkaSource[SubscribePattern[topic-0-.*]]: {"topic-0-seems":{"2":1,"4":1,"1":1,"3":1,"0":0}}}
Thread state: alive
Thread stack trace: scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)
scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)
scala.collection.immutable.List.foreach(List.scala:392)
scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)
scala.collection.immutable.List.flatMap(List.scala:355)
org.apache.spark.sql.catalyst.optimizer.UpdateNullabilityInAttributeReferences$$anonfun$apply$33.applyOrElse(Optimizer.scala:1615)
org.apache.spark.sql.catalyst.optimizer.UpdateNullabilityInAttributeReferences$$anonfun$apply$33.applyOrElse(Optimizer.scala:1613)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformUp$1.apply(TreeNode.scala:278)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformUp$1.apply(TreeNode.scala:278)
org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:70)
org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:277)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.transformUp(AnalysisHelper.scala:158)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:275)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:275)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:326)
org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:324)
org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:275)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.transformUp(AnalysisHelper.scala:158)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:275)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:275)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:326)
org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:324)
org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:275)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.transformUp(AnalysisHelper.scala:158)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:275)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:275)
org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:326)
org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:324)
org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:275)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$class.transformUp(AnalysisHelper.scala:158)
org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformUp(LogicalPlan.scala:29)
org.apache.spark.sql.catalyst.optimizer.UpdateNullabilityInAttributeReferences$.apply(Optimizer.scala:1613)
org.apache.spark.sql.catalyst.optimizer.UpdateNullabilityInAttributeReferences$.apply(Optimizer.scala:1612)
org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:87)
org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:84)
scala.collection.IndexedSeqOptimized$class.foldl(IndexedSeqOptimized.scala:57)
scala.collection.IndexedSeqOptimized$class.foldLeft(IndexedSeqOptimized.scala:66)
scala.collection.mutable.WrappedArray.foldLeft(WrappedArray.scala:35)
org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:84)
org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:76)
scala.collection.immutable.List.foreach(List.scala:392)
org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:76)
org.apache.spark.sql.execution.streaming.IncrementalExecution.optimizedPlan$lzycompute(IncrementalExecution.scala:77)
org.apache.spark.sql.execution.streaming.IncrementalExecution.optimizedPlan(IncrementalExecution.scala:77)
org.apache.spark.sql.execution.QueryExecution.sparkPlan$lzycompute(QueryExecution.scala:72)
org.apache.spark.sql.execution.QueryExecution.sparkPlan(QueryExecution.scala:68)
org.apache.spark.sql.execution.QueryExecution.executedPlan$lzycompute(QueryExecution.scala:77)
org.apache.spark.sql.execution.QueryExecution.executedPlan(QueryExecution.scala:77)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution$$anonfun$runContinuous$2.apply(ContinuousExecution.scala:204)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution$$anonfun$runContinuous$2.apply(ContinuousExecution.scala:195)
org.apache.spark.sql.execution.streaming.ProgressReporter$class.reportTimeTaken(ProgressReporter.scala:401)
org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:58)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution.runContinuous(ContinuousExecution.scala:195)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution.runActivatedStream(ContinuousExecution.scala:90)
org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runStream(StreamExecution.scala:279)
org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:189)


== Sink ==
0: 
1: [4] [3] [2]
2: 


== Plan ==
== Parsed Logical Plan ==
WriteToContinuousDataSource org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWriteSupport@13544b7c
+- SerializeFromObject [input[0, int, false] AS value#15781]
   +- MapElements <function1>, class scala.Tuple2, [StructField(_1,StringType,true), StructField(_2,StringType,true)], obj#15780: int
      +- DeserializeToObject newInstance(class scala.Tuple2), obj#15779: scala.Tuple2
         +- Project [cast(key#15859 as string) AS key#15771, cast(value#15860 as string) AS value#15772]
            +- Streaming RelationV2 kafka[key#15859, value#15860, topic#15861, partition#15862, offset#15863L, timestamp#15864, timestampType#15865] (Options: [kafka.metadata.max.age.ms=1,failOnDataLoss=false,kafka.bootstrap.servers=127.0.0.1:43469,kafka.d...)

== Analyzed Logical Plan ==
WriteToContinuousDataSource org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWriteSupport@13544b7c
+- SerializeFromObject [input[0, int, false] AS value#15781]
   +- MapElements <function1>, class scala.Tuple2, [StructField(_1,StringType,true), StructField(_2,StringType,true)], obj#15780: int
      +- DeserializeToObject newInstance(class scala.Tuple2), obj#15779: scala.Tuple2
         +- Project [cast(key#15859 as string) AS key#15771, cast(value#15860 as string) AS value#15772]
            +- Streaming RelationV2 kafka[key#15859, value#15860, topic#15861, partition#15862, offset#15863L, timestamp#15864, timestampType#15865] (Options: [kafka.metadata.max.age.ms=1,failOnDataLoss=false,kafka.bootstrap.servers=127.0.0.1:43469,kafka.d...)

== Optimized Logical Plan ==
WriteToContinuousDataSource org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWriteSupport@13544b7c
+- SerializeFromObject [input[0, int, false] AS value#15781]
   +- MapElements <function1>, class scala.Tuple2, [StructField(_1,StringType,true), StructField(_2,StringType,true)], obj#15780: int
      +- DeserializeToObject newInstance(class scala.Tuple2), obj#15779: scala.Tuple2
         +- Project [cast(key#15859 as string) AS key#15771, cast(value#15860 as string) AS value#15772]
            +- Streaming RelationV2 kafka[key#15859, value#15860, topic#15861, partition#15862, offset#15863L, timestamp#15864, timestampType#15865] (Options: [kafka.metadata.max.age.ms=1,failOnDataLoss=false,kafka.bootstrap.servers=127.0.0.1:43469,kafka.d...)

== Physical Plan ==
WriteToContinuousDataSource org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWriteSupport@13544b7c
+- *(1) SerializeFromObject [input[0, int, false] AS value#15781]
   +- *(1) MapElements <function1>, obj#15780: int
      +- *(1) DeserializeToObject newInstance(class scala.Tuple2), obj#15779: scala.Tuple2
         +- *(1) Project [cast(key#15859 as string) AS key#15771, cast(value#15860 as string) AS value#15772]
            +- *(1) Project [key#15859, value#15860, topic#15861, partition#15862, offset#15863L, timestamp#15864, timestampType#15865]
               +- *(1) ScanV2 kafka[key#15859, value#15860, topic#15861, partition#15862, offset#15863L, timestamp#15864, timestampType#15865] (Options: [kafka.metadata.max.age.ms=1,failOnDataLoss=false,kafka.bootstrap.servers=127.0.0.1:43469,kafka.d...)
 

org.scalatest.exceptions.TestFailedException:
Error adding data: Could not find index of the source to which data was added
at org.scalatest.Assertions$class.newAssertionFailedException(Assertions.scala:528)
at org.scalatest.FunSuite.newAssertionFailedException(FunSuite.scala:1560)
at org.scalatest.Assertions$class.fail(Assertions.scala:1089)
at org.scalatest.FunSuite.fail(FunSuite.scala:1560)
at org.apache.spark.sql.streaming.StreamTest$class.failTest$1(StreamTest.scala:450)
at org.apache.spark.sql.streaming.StreamTest$class.executeAction$1(StreamTest.scala:716)
at org.apache.spark.sql.streaming.StreamTest$$anonfun$liftedTree1$1$1.apply(StreamTest.scala:767)
at org.apache.spark.sql.streaming.StreamTest$$anonfun$liftedTree1$1$1.apply(StreamTest.scala:754)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at org.apache.spark.sql.streaming.StreamTest$class.liftedTree1$1(StreamTest.scala:754)
at org.apache.spark.sql.streaming.StreamTest$class.testStream(StreamTest.scala:753)
at org.apache.spark.sql.kafka010.KafkaSourceTest.testStream(KafkaMicroBatchSourceSuite.scala:51)
at org.apache.spark.sql.kafka010.KafkaContinuousSourceTopicDeletionSuite$$anonfun$1.apply$mcV$sp(KafkaContinuousSourceSuite.scala:54)
at org.apache.spark.sql.kafka010.KafkaContinuousSourceTopicDeletionSuite$$anonfun$1.apply(KafkaContinuousSourceSuite.scala:32)
at org.apache.spark.sql.kafka010.KafkaContinuousSourceTopicDeletionSuite$$anonfun$1.apply(KafkaContinuousSourceSuite.scala:32)
at org.scalatest.OutcomeOf$class.outcomeOf(OutcomeOf.scala:85)
at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
at org.scalatest.Transformer.apply(Transformer.scala:22)
at org.scalatest.Transformer.apply(Transformer.scala:20)
at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:186)
at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:103)
at org.scalatest.FunSuiteLike$class.invokeWithFixture$1(FunSuiteLike.scala:183)
at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:196)
at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:196)
at org.scalatest.SuperEngine.runTestImpl(Engine.scala:289)
at org.scalatest.FunSuiteLike$class.runTest(FunSuiteLike.scala:196)
at org.apache.spark.sql.kafka010.KafkaSourceTest.org$scalatest$BeforeAndAfterEach$$super$runTest(KafkaMicroBatchSourceSuite.scala:51)
at org.scalatest.BeforeAndAfterEach$class.runTest(BeforeAndAfterEach.scala:221)
at org.apache.spark.sql.kafka010.KafkaSourceTest.runTest(KafkaMicroBatchSourceSuite.scala:51)
at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:229)
at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:229)
at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:396)
at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:384)
at scala.collection.immutable.List.foreach(List.scala:392)
at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:384)
at org.scalatest.SuperEngine.org$scalatest$SuperEngine$$runTestsInBranch(Engine.scala:379)
at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:461)
at org.scalatest.FunSuiteLike$class.runTests(FunSuiteLike.scala:229)
at org.scalatest.FunSuite.runTests(FunSuite.scala:1560)
at org.scalatest.Suite$class.run(Suite.scala:1147)
at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1560)
at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:233)
at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:233)
at org.scalatest.SuperEngine.runImpl(Engine.scala:521)
at org.scalatest.FunSuiteLike$class.run(FunSuiteLike.scala:233)
at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:52)
at org.scalatest.BeforeAndAfterAll$class.liftedTree1$1(BeforeAndAfterAll.scala:213)
at org.scalatest.BeforeAndAfterAll$class.run(BeforeAndAfterAll.scala:210)
at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:52)
at org.scalatest.Suite$class.callExecuteOnSuite$1(Suite.scala:1210)
at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1257)
at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1255)
at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
at org.scalatest.Suite$class.runNestedSuites(Suite.scala:1255)
at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:30)
at org.scalatest.Suite$class.run(Suite.scala:1144)
at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:30)
at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:45)
at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$1.apply(Runner.scala:1340)
at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$1.apply(Runner.scala:1334)
at scala.collection.immutable.List.foreach(List.scala:392)
at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:1334)
at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1011)
at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1010)
at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:1500)
at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1010)
at org.scalatest.tools.Runner$.main(Runner.scala:827)
at org.scalatest.tools.Runner.main(Runner.scala)
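
== Query shape (illustrative sketch) ==
The plans above come from a continuous-mode query that subscribes to Kafka by the pattern topic-0-.*, casts key and value to strings, maps each value to an Int (the CheckAnswer rows are each input value plus one), and writes to an in-memory sink. A minimal, self-contained sketch of a query with that shape follows; the broker address, local master, and object/query names are placeholders chosen here for illustration, and the real suite drives this through Spark's internal StreamTest/AddKafkaData harness rather than a main method.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.Trigger

object TopicPatternQuerySketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")                                    // assumption: plain local run, not the test harness
      .appName("topic-pattern-continuous-sketch")
      .getOrCreate()
    import spark.implicits._

    // Subscribe by pattern so the query also matches topics created later
    // (the test deletes topic-0-seems and then creates topic-0-bad).
    val kafka = spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "127.0.0.1:9092")   // placeholder broker address
      .option("subscribePattern", "topic-0-.*")
      .option("kafka.metadata.max.age.ms", "1")
      .option("failOnDataLoss", "false")
      .load()
      .selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
      .as[(String, String)]

    // Matches the MapElements step in the plan: parse the value and add one,
    // so inputs 1, 2, 3 produce the expected answers 2, 3, 4.
    val mapped = kafka.map(kv => kv._2.toInt + 1)

    // Continuous trigger plus an in-memory sink, mirroring ContinuousExecution
    // and MemoryStreamingWriteSupport in the failure output.
    val query = mapped.writeStream
      .format("memory")
      .queryName("topicPatternSketch")
      .outputMode("append")
      .trigger(Trigger.Continuous("1 second"))
      .start()

    query.awaitTermination()
  }
}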