      org.scalatest.exceptions.TestFailedException: 
Assert on query failed: name: The code passed to eventually never returned normally. Attempted 1979 times over 30.015032419 seconds. Last failure message: query.lastExecution.executedPlan.collectFirst[org.apache.spark.sql.kafka010.KafkaContinuousStream](({
  @SerialVersionUID(value = 0) final <synthetic> class $anonfun extends scala.runtime.AbstractPartialFunction[org.apache.spark.sql.execution.SparkPlan,org.apache.spark.sql.kafka010.KafkaContinuousStream] with Serializable {
    def <init>(): <$anon: org.apache.spark.sql.execution.SparkPlan => org.apache.spark.sql.kafka010.KafkaContinuousStream> = {
      $anonfun.super.<init>();
      ()
    };
    final override def applyOrElse[A1 <: org.apache.spark.sql.execution.SparkPlan, B1 >: org.apache.spark.sql.kafka010.KafkaContinuousStream](x1: A1, default: A1 => B1): B1 = ((x1.asInstanceOf[org.apache.spark.sql.execution.SparkPlan]: org.apache.spark.sql.execution.SparkPlan): org.apache.spark.sql.execution.SparkPlan @unchecked) match {
      case (scan @ (_: org.apache.spark.sql.execution.datasources.v2.ContinuousScanExec)) if scan.stream.isInstanceOf[org.apache.spark.sql.kafka010.KafkaContinuousStream] => scan.stream.asInstanceOf[org.apache.spark.sql.kafka010.KafkaContinuousStream]
      case (defaultCase$ @ _) => default.apply(x1)
    };
    final def isDefinedAt(x1: org.apache.spark.sql.execution.SparkPlan): Boolean = ((x1.asInstanceOf[org.apache.spark.sql.execution.SparkPlan]: org.apache.spark.sql.execution.SparkPlan): org.apache.spark.sql.execution.SparkPlan @unchecked) match {
      case (scan @ (_: org.apache.spark.sql.execution.datasources.v2.ContinuousScanExec)) if scan.stream.isInstanceOf[org.apache.spark.sql.kafka010.KafkaContinuousStream] => true
      case (defaultCase$ @ _) => false
    }
  };
  new $anonfun()
}: PartialFunction[org.apache.spark.sql.execution.SparkPlan,org.apache.spark.sql.kafka010.KafkaContinuousStream])).exists(((stream: org.apache.spark.sql.kafka010.KafkaContinuousStream) => stream.knownPartitions.exists(((x$1: org.apache.kafka.common.TopicPartition) => x$1.topic().==(topic2))))) was false query never reconfigured to new topic topic-0-bad.
org.scalatest.concurrent.Eventually.tryTryAgain$1(Eventually.scala:432)
	org.scalatest.concurrent.Eventually.eventually(Eventually.scala:439)
	org.scalatest.concurrent.Eventually.eventually$(Eventually.scala:391)
	org.apache.spark.sql.kafka010.KafkaSourceTest.eventually(KafkaMicroBatchSourceSuite.scala:49)
	org.scalatest.concurrent.Eventually.eventually(Eventually.scala:337)
	org.scalatest.concurrent.Eventually.eventually$(Eventually.scala:336)
	org.apache.spark.sql.kafka010.KafkaSourceTest.eventually(KafkaMicroBatchSourceSuite.scala:49)
	org.apache.spark.sql.kafka010.KafkaContinuousSourceTopicDeletionSuite.$anonfun$new$32(KafkaContinuousSourceSuite.scala:208)
	org.apache.spark.sql.streaming.StreamTest$Execute$.$anonfun$apply$6(StreamTest.scala:297)
	org.apache.spark.sql.streaming.StreamTest$Execute$.$anonfun$apply$6$adapted(StreamTest.scala:297)

	Caused by: 	query.lastExecution.executedPlan.collectFirst[org.apache.spark.sql.kafka010.KafkaContinuousStream](({
  @SerialVersionUID(value = 0) final <synthetic> class $anonfun extends scala.runtime.AbstractPartialFunction[org.apache.spark.sql.execution.SparkPlan,org.apache.spark.sql.kafka010.KafkaContinuousStream] with Serializable {
    def <init>(): <$anon: org.apache.spark.sql.execution.SparkPlan => org.apache.spark.sql.kafka010.KafkaContinuousStream> = {
      $anonfun.super.<init>();
      ()
    };
    final override def applyOrElse[A1 <: org.apache.spark.sql.execution.SparkPlan, B1 >: org.apache.spark.sql.kafka010.KafkaContinuousStream](x1: A1, default: A1 => B1): B1 = ((x1.asInstanceOf[org.apache.spark.sql.execution.SparkPlan]: org.apache.spark.sql.execution.SparkPlan): org.apache.spark.sql.execution.SparkPlan @unchecked) match {
      case (scan @ (_: org.apache.spark.sql.execution.datasources.v2.ContinuousScanExec)) if scan.stream.isInstanceOf[org.apache.spark.sql.kafka010.KafkaContinuousStream] => scan.stream.asInstanceOf[org.apache.spark.sql.kafka010.KafkaContinuousStream]
      case (defaultCase$ @ _) => default.apply(x1)
    };
    final def isDefinedAt(x1: org.apache.spark.sql.execution.SparkPlan): Boolean = ((x1.asInstanceOf[org.apache.spark.sql.execution.SparkPlan]: org.apache.spark.sql.execution.SparkPlan): org.apache.spark.sql.execution.SparkPlan @unchecked) match {
      case (scan @ (_: org.apache.spark.sql.execution.datasources.v2.ContinuousScanExec)) if scan.stream.isInstanceOf[org.apache.spark.sql.kafka010.KafkaContinuousStream] => true
      case (defaultCase$ @ _) => false
    }
  };
  new $anonfun()
}: PartialFunction[org.apache.spark.sql.execution.SparkPlan,org.apache.spark.sql.kafka010.KafkaContinuousStream])).exists(((stream: org.apache.spark.sql.kafka010.KafkaContinuousStream) => stream.knownPartitions.exists(((x$1: org.apache.kafka.common.TopicPartition) => x$1.topic().==(topic2))))) was false query never reconfigured to new topic topic-0-bad
	org.scalatest.Assertions.newAssertionFailedException(Assertions.scala:528)
		org.scalatest.Assertions.newAssertionFailedException$(Assertions.scala:527)
		org.scalatest.FunSuite.newAssertionFailedException(FunSuite.scala:1560)
		org.scalatest.Assertions$AssertionsHelper.macroAssert(Assertions.scala:501)
		org.apache.spark.sql.kafka010.KafkaContinuousSourceTopicDeletionSuite.$anonfun$new$33(KafkaContinuousSourceSuite.scala:209)
		org.scalatest.concurrent.Eventually.makeAValiantAttempt$1(Eventually.scala:395)
		org.scalatest.concurrent.Eventually.tryTryAgain$1(Eventually.scala:409)
		org.scalatest.concurrent.Eventually.eventually(Eventually.scala:439)
		org.scalatest.concurrent.Eventually.eventually$(Eventually.scala:391)
		org.apache.spark.sql.kafka010.KafkaSourceTest.eventually(KafkaMicroBatchSourceSuite.scala:49)
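
The $anonfun dump above is the compiler-expanded form of a short pattern-matching assertion. Reconstructed from that expansion, the failing check looks roughly like the following Scala (a sketch, not the verbatim suite source; the eventually/timeout wrapper and the streamingTimeout value are assumptions based on the stack trace):

    import org.apache.spark.sql.execution.datasources.v2.ContinuousScanExec
    import org.apache.spark.sql.kafka010.KafkaContinuousStream

    eventually(timeout(streamingTimeout)) {
      assert(
        query.lastExecution.executedPlan.collectFirst {
          // Locate the Kafka continuous stream behind the DSv2 continuous scan node.
          case scan: ContinuousScanExec if scan.stream.isInstanceOf[KafkaContinuousStream] =>
            scan.stream.asInstanceOf[KafkaContinuousStream]
        }.exists(_.knownPartitions.exists(_.topic == topic2)),
        s"query never reconfigured to new topic $topic2")
    }

The check retried for roughly 30 seconds (1979 attempts), but knownPartitions never contained a partition of topic2 (topic-0-bad): the continuous query did not reconfigure after the topic change.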


== Progress ==
   AssertOnQuery(<condition>, )
   AddKafkaData(topics = Set(topic-0-seems), data = WrappedArray(1, 2, 3), message = )
   CheckAnswer: [2],[3],[4]
=> AssertOnQuery(<condition>, name)
   AddKafkaData(topics = Set(topic-0-bad), data = WrappedArray(4, 5, 6), message = )
   CheckAnswer: [2],[3],[4],[5],[6],[7]
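
For orientation, the Progress list above mirrors a testStream script of roughly this shape (a reconstruction from the action names and data in the log; the mapped variable name and the elided <condition> bodies are assumptions):

    testStream(mapped)(
      AssertOnQuery { query => ... },               // <condition> elided in the log
      AddKafkaData(Set("topic-0-seems"), 1, 2, 3),  // query maps each value to value + 1
      CheckAnswer(2, 3, 4),
      AssertOnQuery({ query => ... }, "name"),      // => the action that failed
      AddKafkaData(Set("topic-0-bad"), 4, 5, 6),
      CheckAnswer(2, 3, 4, 5, 6, 7))

The => marker flags the action that was executing when the test failed; every action above it had already passed.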

== Stream ==
Output Mode: Append
Stream state: {KafkaSource[SubscribePattern[topic-0-.*]]: {"topic-0-seems":{"2":1,"4":0,"1":1,"3":2,"0":0}}}
Thread state: alive
Thread stack trace: sun.misc.Unsafe.park(Native Method)
java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
java.util.concurrent.locks.AbstractQueuedSynchronizer.parkAndCheckInterrupt(AbstractQueuedSynchronizer.java:836)
java.util.concurrent.locks.AbstractQueuedSynchronizer.doAcquireSharedInterruptibly(AbstractQueuedSynchronizer.java:997)
java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1304)
scala.concurrent.impl.Promise$DefaultPromise.tryAwait(Promise.scala:242)
scala.concurrent.impl.Promise$DefaultPromise.ready(Promise.scala:258)
scala.concurrent.impl.Promise$DefaultPromise.ready(Promise.scala:187)
org.apache.spark.util.ThreadUtils$.awaitReady(ThreadUtils.scala:242)
org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:736)
org.apache.spark.SparkContext.runJob(SparkContext.scala:2013)
org.apache.spark.SparkContext.runJob(SparkContext.scala:2034)
org.apache.spark.SparkContext.runJob(SparkContext.scala:2053)
org.apache.spark.SparkContext.runJob(SparkContext.scala:2078)
org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:961)
org.apache.spark.rdd.RDD$$Lambda$2760/76361611.apply(Unknown Source)
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
org.apache.spark.rdd.RDD.withScope(RDD.scala:366)
org.apache.spark.rdd.RDD.collect(RDD.scala:960)
org.apache.spark.sql.execution.streaming.continuous.WriteToContinuousDataSourceExec.doExecute(WriteToContinuousDataSourceExec.scala:54)
org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:131)
org.apache.spark.sql.execution.SparkPlan$$Lambda$2597/1671384568.apply(Unknown Source)
org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:155)
org.apache.spark.sql.execution.SparkPlan$$Lambda$2619/1828252298.apply(Unknown Source)
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution.$anonfun$runContinuous$4(ContinuousExecution.scala:255)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution$$Lambda$5151/85141961.apply(Unknown Source)
org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$4(SQLExecution.scala:100)
org.apache.spark.sql.execution.SQLExecution$$$Lambda$2523/35725340.apply(Unknown Source)
org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:87)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution.$anonfun$runContinuous$3(ContinuousExecution.scala:255)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution$$Lambda$5149/527036813.apply(Unknown Source)
org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken(ProgressReporter.scala:327)
org.apache.spark.sql.execution.streaming.ProgressReporter.reportTimeTaken$(ProgressReporter.scala:325)
org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:67)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution.runContinuous(ContinuousExecution.scala:254)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution.runActivatedStream(ContinuousExecution.scala:108)
org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runStream(StreamExecution.scala:331)
org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:243)


== Sink ==
0: 
1: [4] [3] [2]
2: 


== Plan ==
== Parsed Logical Plan ==
WriteToContinuousDataSource org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWrite@1bf2ee07
+- SerializeFromObject [input[0, int, false] AS value#18093]
   +- MapElements org.apache.spark.sql.kafka010.KafkaContinuousSourceTopicDeletionSuite$$Lambda$6117/231896086@6bf60a08, class scala.Tuple2, [StructField(_1,StringType,true), StructField(_2,StringType,true)], obj#18092: int
      +- DeserializeToObject newInstance(class scala.Tuple2), obj#18091: scala.Tuple2
         +- Project [cast(key#18067 as string) AS key#18081, cast(value#18068 as string) AS value#18082]
            +- StreamingDataSourceV2Relation [key#18067, value#18068, topic#18069, partition#18070, offset#18071L, timestamp#18072, timestampType#18073], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@336d9f22, KafkaSource[SubscribePattern[topic-0-.*]], {"topic-0-seems":{"2":0,"4":0,"1":0,"3":1,"0":0}}

== Analyzed Logical Plan ==

WriteToContinuousDataSource org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWrite@1bf2ee07
+- SerializeFromObject [input[0, int, false] AS value#18093]
   +- MapElements org.apache.spark.sql.kafka010.KafkaContinuousSourceTopicDeletionSuite$$Lambda$6117/231896086@6bf60a08, class scala.Tuple2, [StructField(_1,StringType,true), StructField(_2,StringType,true)], obj#18092: int
      +- DeserializeToObject newInstance(class scala.Tuple2), obj#18091: scala.Tuple2
         +- Project [cast(key#18067 as string) AS key#18081, cast(value#18068 as string) AS value#18082]
            +- StreamingDataSourceV2Relation [key#18067, value#18068, topic#18069, partition#18070, offset#18071L, timestamp#18072, timestampType#18073], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@336d9f22, KafkaSource[SubscribePattern[topic-0-.*]], {"topic-0-seems":{"2":0,"4":0,"1":0,"3":1,"0":0}}

== Optimized Logical Plan ==
WriteToContinuousDataSource org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWrite@1bf2ee07
+- SerializeFromObject [input[0, int, false] AS value#18093]
   +- MapElements org.apache.spark.sql.kafka010.KafkaContinuousSourceTopicDeletionSuite$$Lambda$6117/231896086@6bf60a08, class scala.Tuple2, [StructField(_1,StringType,true), StructField(_2,StringType,true)], obj#18092: int
      +- DeserializeToObject newInstance(class scala.Tuple2), obj#18091: scala.Tuple2
         +- Project [cast(key#18067 as string) AS key#18081, cast(value#18068 as string) AS value#18082]
            +- StreamingDataSourceV2Relation [key#18067, value#18068, topic#18069, partition#18070, offset#18071L, timestamp#18072, timestampType#18073], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@336d9f22, KafkaSource[SubscribePattern[topic-0-.*]], {"topic-0-seems":{"2":0,"4":0,"1":0,"3":1,"0":0}}

== Physical Plan ==
WriteToContinuousDataSource org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWrite@1bf2ee07
+- *(1) SerializeFromObject [input[0, int, false] AS value#18093]
   +- *(1) MapElements org.apache.spark.sql.kafka010.KafkaContinuousSourceTopicDeletionSuite$$Lambda$6117/231896086@6bf60a08, obj#18092: int
      +- *(1) DeserializeToObject newInstance(class scala.Tuple2), obj#18091: scala.Tuple2
         +- *(1) Project [cast(key#18067 as string) AS key#18081, cast(value#18068 as string) AS value#18082]
            +- *(1) Project [key#18067, value#18068, topic#18069, partition#18070, offset#18071L, timestamp#18072, timestampType#18073]
               +- *(1) ContinuousScan[key#18067, value#18068, topic#18069, partition#18070, offset#18071L, timestamp#18072, timestampType#18073] class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan

         
         
      at org.scalatest.Assertions.newAssertionFailedException(Assertions.scala:528)
      at org.scalatest.Assertions.newAssertionFailedException$(Assertions.scala:527)
      at org.scalatest.FunSuite.newAssertionFailedException(FunSuite.scala:1560)
      at org.scalatest.Assertions.fail(Assertions.scala:1089)
      at org.scalatest.Assertions.fail$(Assertions.scala:1085)
      at org.scalatest.FunSuite.fail(FunSuite.scala:1560)
      at org.apache.spark.sql.streaming.StreamTest.failTest$1(StreamTest.scala:449)
      at org.apache.spark.sql.streaming.StreamTest.executeAction$1(StreamTest.scala:651)
      at org.apache.spark.sql.streaming.StreamTest.$anonfun$testStream$56(StreamTest.scala:775)
      at org.apache.spark.sql.streaming.StreamTest.$anonfun$testStream$56$adapted(StreamTest.scala:762)
      at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
      at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
      at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
      at org.apache.spark.sql.streaming.StreamTest.liftedTree1$1(StreamTest.scala:762)
      at org.apache.spark.sql.streaming.StreamTest.testStream(StreamTest.scala:761)
      at org.apache.spark.sql.streaming.StreamTest.testStream$(StreamTest.scala:328)
      at org.apache.spark.sql.kafka010.KafkaSourceTest.testStream(KafkaMicroBatchSourceSuite.scala:49)
      at org.apache.spark.sql.kafka010.KafkaContinuousSourceTopicDeletionSuite.$anonfun$new$30(KafkaContinuousSourceSuite.scala:222)
      at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
      at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
      at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
      at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
      at org.scalatest.Transformer.apply(Transformer.scala:22)
      at org.scalatest.Transformer.apply(Transformer.scala:20)
      at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:186)
      at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:105)
      at org.scalatest.FunSuiteLike.invokeWithFixture$1(FunSuiteLike.scala:184)
      at org.scalatest.FunSuiteLike.$anonfun$runTest$1(FunSuiteLike.scala:196)
      at org.scalatest.SuperEngine.runTestImpl(Engine.scala:289)
      at org.scalatest.FunSuiteLike.runTest(FunSuiteLike.scala:196)
      at org.scalatest.FunSuiteLike.runTest$(FunSuiteLike.scala:178)
      at org.apache.spark.sql.kafka010.KafkaSourceTest.org$scalatest$BeforeAndAfterEach$$super$runTest(KafkaMicroBatchSourceSuite.scala:49)
      at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:221)
      at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:214)
      at org.apache.spark.sql.kafka010.KafkaSourceTest.runTest(KafkaMicroBatchSourceSuite.scala:49)
      at org.scalatest.FunSuiteLike.$anonfun$runTests$1(FunSuiteLike.scala:229)
      at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:396)
      at scala.collection.immutable.List.foreach(List.scala:392)
      at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:384)
      at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:379)
      at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:461)
      at org.scalatest.FunSuiteLike.runTests(FunSuiteLike.scala:229)
      at org.scalatest.FunSuiteLike.runTests$(FunSuiteLike.scala:228)
      at org.scalatest.FunSuite.runTests(FunSuite.scala:1560)
      at org.scalatest.Suite.run(Suite.scala:1147)
      at org.scalatest.Suite.run$(Suite.scala:1129)
      at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1560)
      at org.scalatest.FunSuiteLike.$anonfun$run$1(FunSuiteLike.scala:233)
      at org.scalatest.SuperEngine.runImpl(Engine.scala:521)
      at org.scalatest.FunSuiteLike.run(FunSuiteLike.scala:233)
      at org.scalatest.FunSuiteLike.run$(FunSuiteLike.scala:232)
      at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:54)
      at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
      at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
      at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
      at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:54)
      at org.scalatest.Suite.callExecuteOnSuite$1(Suite.scala:1210)
      at org.scalatest.Suite.$anonfun$runNestedSuites$1(Suite.scala:1257)
      at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
      at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
      at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
      at org.scalatest.Suite.runNestedSuites(Suite.scala:1255)
      at org.scalatest.Suite.runNestedSuites$(Suite.scala:1189)
      at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:30)
      at org.scalatest.Suite.run(Suite.scala:1144)
      at org.scalatest.Suite.run$(Suite.scala:1129)
      at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:30)
      at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:45)
      at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13(Runner.scala:1346)
      at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13$adapted(Runner.scala:1340)
      at scala.collection.immutable.List.foreach(List.scala:392)
      at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:1340)
      at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24(Runner.scala:1031)
      at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24$adapted(Runner.scala:1010)
      at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:1506)
      at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1010)
      at org.scalatest.tools.Runner$.main(Runner.scala:827)
      at org.scalatest.tools.Runner.main(Runner.scala)