sbt.ForkMain$ForkError: org.scalatest.exceptions.TestFailedException:
Error adding data: Could not find index of the source to which data was added
org.apache.spark.sql.streaming.StreamTest$$anonfun$14.apply(StreamTest.scala:708)
org.apache.spark.sql.streaming.StreamTest$$anonfun$14.apply(StreamTest.scala:708)
scala.Option.getOrElse(Option.scala:121)
org.apache.spark.sql.streaming.StreamTest$class.executeAction$1(StreamTest.scala:707)
org.apache.spark.sql.streaming.StreamTest$$anonfun$liftedTree1$1$1.apply(StreamTest.scala:770)
org.apache.spark.sql.streaming.StreamTest$$anonfun$liftedTree1$1$1.apply(StreamTest.scala:757)
scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
org.apache.spark.sql.streaming.StreamTest$class.liftedTree1$1(StreamTest.scala:757)
org.apache.spark.sql.streaming.StreamTest$class.testStream(StreamTest.scala:756)
== Progress ==
AssertOnQuery(<condition>, )
AddKafkaData(topics = Set(topic-0-seems), data = WrappedArray(1, 2, 3), message = )
CheckAnswer: [2],[3],[4]
AssertOnQuery(<condition>, name)
=> AddKafkaData(topics = Set(topic-0-bad), data = WrappedArray(4, 5, 6), message = )
CheckAnswer: [2],[3],[4],[5],[6],[7]
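
For context, this progress script is the topic-deletion test in KafkaContinuousSourceSuite. A sketch of that test, reconstructed from the Spark test sources (exact helper names such as newTopic, testUtils and makeSureGetOffsetCalled come from the spark-sql-kafka-0-10 test harness, so this only compiles inside that module, and details may vary by Spark version):

    // Sketch of the failing test (reconstructed; not a verbatim copy).
    test("subscribing topic by pattern with topic deletions") {
      val topicPrefix = newTopic()                      // e.g. "topic-0"
      val topic = topicPrefix + "-seems"
      val topic2 = topicPrefix + "-bad"
      testUtils.createTopic(topic, partitions = 5)

      val kafka = spark.readStream
        .format("kafka")
        .option("kafka.bootstrap.servers", testUtils.brokerAddress)
        .option("kafka.metadata.max.age.ms", "1")       // force fast metadata refresh
        .option("subscribePattern", s"$topicPrefix-.*") // matches both topics
        .option("failOnDataLoss", "false")
        .load()
        .selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
        .as[(String, String)]
      val mapped = kafka.map(kv => kv._2.toInt + 1)     // the MapElements in the plan below

      testStream(mapped)(
        makeSureGetOffsetCalled,                        // AssertOnQuery(<condition>, )
        AddKafkaData(Set(topic), 1, 2, 3),
        CheckAnswer(2, 3, 4),
        Assert {                                        // AssertOnQuery(<condition>, name)
          testUtils.deleteTopic(topic)                  // delete the matched topic...
          testUtils.createTopic(topic2, partitions = 5) // ...and create a new match
          true
        },
        AddKafkaData(Set(topic2), 4, 5, 6),             // the step marked "=>" above
        CheckAnswer(2, 3, 4, 5, 6, 7)
      )
    }

The step marked "=>" is where the harness fails: per the exception text, AddKafkaData could not locate the Kafka source in the running query's plan after the topic swap, so it could not attribute the new data to a source index.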
== Stream ==
Output Mode: Append
Stream state: {KafkaSource[SubscribePattern[topic-0-.*]]: {"topic-0-seems":{"2":0,"4":0,"1":1,"3":2,"0":1}}}
Thread state: alive
Thread stack trace: java.lang.Class.forName0(Native Method)
java.lang.Class.forName(Class.java:348)
org.apache.kafka.common.config.ConfigDef.parseType(ConfigDef.java:715)
org.apache.kafka.common.config.ConfigDef.parseValue(ConfigDef.java:469)
org.apache.kafka.common.config.ConfigDef.parse(ConfigDef.java:462)
org.apache.kafka.common.config.AbstractConfig.<init>(AbstractConfig.java:62)
org.apache.kafka.clients.consumer.ConsumerConfig.<init>(ConsumerConfig.java:503)
org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:687)
org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:615)
org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:596)
org.apache.spark.sql.kafka010.SubscribePatternStrategy.createConsumer(ConsumerStrategy.scala:76)
org.apache.spark.sql.kafka010.KafkaOffsetReader.consumer(KafkaOffsetReader.scala:85)
org.apache.spark.sql.kafka010.KafkaOffsetReader$$anonfun$fetchEarliestOffsets$1$$anonfun$apply$6.apply(KafkaOffsetReader.scala:180)
org.apache.spark.sql.kafka010.KafkaOffsetReader$$anonfun$fetchEarliestOffsets$1$$anonfun$apply$6.apply(KafkaOffsetReader.scala:178)
org.apache.spark.sql.kafka010.KafkaOffsetReader$$anonfun$org$apache$spark$sql$kafka010$KafkaOffsetReader$$withRetriesWithoutInterrupt$1.apply$mcV$sp(KafkaOffsetReader.scala:288)
org.apache.spark.sql.kafka010.KafkaOffsetReader$$anonfun$org$apache$spark$sql$kafka010$KafkaOffsetReader$$withRetriesWithoutInterrupt$1.apply(KafkaOffsetReader.scala:287)
org.apache.spark.sql.kafka010.KafkaOffsetReader$$anonfun$org$apache$spark$sql$kafka010$KafkaOffsetReader$$withRetriesWithoutInterrupt$1.apply(KafkaOffsetReader.scala:287)
org.apache.spark.util.UninterruptibleThread.runUninterruptibly(UninterruptibleThread.scala:77)
org.apache.spark.sql.kafka010.KafkaOffsetReader.org$apache$spark$sql$kafka010$KafkaOffsetReader$$withRetriesWithoutInterrupt(KafkaOffsetReader.scala:286)
org.apache.spark.sql.kafka010.KafkaOffsetReader$$anonfun$fetchEarliestOffsets$1.apply(KafkaOffsetReader.scala:178)
org.apache.spark.sql.kafka010.KafkaOffsetReader$$anonfun$fetchEarliestOffsets$1.apply(KafkaOffsetReader.scala:178)
org.apache.spark.sql.kafka010.KafkaOffsetReader.runUninterruptibly(KafkaOffsetReader.scala:255)
org.apache.spark.sql.kafka010.KafkaOffsetReader.fetchEarliestOffsets(KafkaOffsetReader.scala:177)
org.apache.spark.sql.kafka010.KafkaContinuousScanConfigBuilder.build(KafkaContinuousReadSupport.scala:164)
org.apache.spark.sql.execution.datasources.v2.DataSourceV2Strategy$.apply(DataSourceV2Strategy.scala:135)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:63)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:63)
scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:435)
scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:441)
scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:93)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2$$anonfun$apply$2.apply(QueryPlanner.scala:78)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2$$anonfun$apply$2.apply(QueryPlanner.scala:75)
scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:157)
scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:157)
scala.collection.Iterator$class.foreach(Iterator.scala:891)
scala.collection.AbstractIterator.foreach(Iterator.scala:1334)
scala.collection.TraversableOnce$class.foldLeft(TraversableOnce.scala:157)
scala.collection.AbstractIterator.foldLeft(Iterator.scala:1334)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2.apply(QueryPlanner.scala:75)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2.apply(QueryPlanner.scala:67)
scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:435)
scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:441)
org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:93)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2$$anonfun$apply$2.apply(QueryPlanner.scala:78)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2$$anonfun$apply$2.apply(QueryPlanner.scala:75)
scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:157)
scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:157)
scala.collection.Iterator$class.foreach(Iterator.scala:891)
scala.collection.AbstractIterator.foreach(Iterator.scala:1334)
scala.collection.TraversableOnce$class.foldLeft(TraversableOnce.scala:157)
scala.collection.AbstractIterator.foldLeft(Iterator.scala:1334)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2.apply(QueryPlanner.scala:75)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2.apply(QueryPlanner.scala:67)
scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:435)
scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:441)
org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:93)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2$$anonfun$apply$2.apply(QueryPlanner.scala:78)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2$$anonfun$apply$2.apply(QueryPlanner.scala:75)
scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:157)
scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:157)
scala.collection.Iterator$class.foreach(Iterator.scala:891)
scala.collection.AbstractIterator.foreach(Iterator.scala:1334)
scala.collection.TraversableOnce$class.foldLeft(TraversableOnce.scala:157)
scala.collection.AbstractIterator.foldLeft(Iterator.scala:1334)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2.apply(QueryPlanner.scala:75)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2.apply(QueryPlanner.scala:67)
scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:435)
scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:441)
org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:93)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2$$anonfun$apply$2.apply(QueryPlanner.scala:78)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2$$anonfun$apply$2.apply(QueryPlanner.scala:75)
scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:157)
scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:157)
scala.collection.Iterator$class.foreach(Iterator.scala:891)
scala.collection.AbstractIterator.foreach(Iterator.scala:1334)
scala.collection.TraversableOnce$class.foldLeft(TraversableOnce.scala:157)
scala.collection.AbstractIterator.foldLeft(Iterator.scala:1334)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2.apply(QueryPlanner.scala:75)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2.apply(QueryPlanner.scala:67)
scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:435)
scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:441)
org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:93)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2$$anonfun$apply$2.apply(QueryPlanner.scala:78)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2$$anonfun$apply$2.apply(QueryPlanner.scala:75)
scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:157)
scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:157)
scala.collection.Iterator$class.foreach(Iterator.scala:891)
scala.collection.AbstractIterator.foreach(Iterator.scala:1334)
scala.collection.TraversableOnce$class.foldLeft(TraversableOnce.scala:157)
scala.collection.AbstractIterator.foldLeft(Iterator.scala:1334)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2.apply(QueryPlanner.scala:75)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2.apply(QueryPlanner.scala:67)
scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:435)
scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:441)
org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:93)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2$$anonfun$apply$2.apply(QueryPlanner.scala:78)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2$$anonfun$apply$2.apply(QueryPlanner.scala:75)
scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:157)
scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:157)
scala.collection.Iterator$class.foreach(Iterator.scala:891)
scala.collection.AbstractIterator.foreach(Iterator.scala:1334)
scala.collection.TraversableOnce$class.foldLeft(TraversableOnce.scala:157)
scala.collection.AbstractIterator.foldLeft(Iterator.scala:1334)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2.apply(QueryPlanner.scala:75)
org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2.apply(QueryPlanner.scala:67)
scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:435)
scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:441)
org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:93)
org.apache.spark.sql.execution.QueryExecution.sparkPlan$lzycompute(QueryExecution.scala:72)
org.apache.spark.sql.execution.QueryExecution.sparkPlan(QueryExecution.scala:68)
org.apache.spark.sql.execution.QueryExecution.executedPlan$lzycompute(QueryExecution.scala:77)
org.apache.spark.sql.execution.QueryExecution.executedPlan(QueryExecution.scala:77)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution$$anonfun$runContinuous$2.apply(ContinuousExecution.scala:204)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution$$anonfun$runContinuous$2.apply(ContinuousExecution.scala:195)
org.apache.spark.sql.execution.streaming.ProgressReporter$class.reportTimeTaken(ProgressReporter.scala:401)
org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:58)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution.runContinuous(ContinuousExecution.scala:195)
org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution.runActivatedStream(ContinuousExecution.scala:90)
org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runStream(StreamExecution.scala:279)
org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:189)
== Sink ==
0:
1: [2] [4] [3]
2:
== Plan ==
== Parsed Logical Plan ==
WriteToContinuousDataSource org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWriteSupport@77eeaba
+- SerializeFromObject [input[0, int, false] AS value#14750]
+- MapElements <function1>, class scala.Tuple2, [StructField(_1,StringType,true), StructField(_2,StringType,true)], obj#14749: int
+- DeserializeToObject newInstance(class scala.Tuple2), obj#14748: scala.Tuple2
+- Project [cast(key#14828 as string) AS key#14740, cast(value#14829 as string) AS value#14741]
+- Streaming RelationV2 kafka[key#14828, value#14829, topic#14830, partition#14831, offset#14832L, timestamp#14833, timestampType#14834] (Options: [kafka.metadata.max.age.ms=1,failOnDataLoss=false,kafka.bootstrap.servers=127.0.0.1:35756,kafka.d...)
== Analyzed Logical Plan ==
WriteToContinuousDataSource org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWriteSupport@77eeaba
+- SerializeFromObject [input[0, int, false] AS value#14750]
+- MapElements <function1>, class scala.Tuple2, [StructField(_1,StringType,true), StructField(_2,StringType,true)], obj#14749: int
+- DeserializeToObject newInstance(class scala.Tuple2), obj#14748: scala.Tuple2
+- Project [cast(key#14828 as string) AS key#14740, cast(value#14829 as string) AS value#14741]
+- Streaming RelationV2 kafka[key#14828, value#14829, topic#14830, partition#14831, offset#14832L, timestamp#14833, timestampType#14834] (Options: [kafka.metadata.max.age.ms=1,failOnDataLoss=false,kafka.bootstrap.servers=127.0.0.1:35756,kafka.d...)
== Optimized Logical Plan ==
WriteToContinuousDataSource org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWriteSupport@77eeaba
+- SerializeFromObject [input[0, int, false] AS value#14750]
+- MapElements <function1>, class scala.Tuple2, [StructField(_1,StringType,true), StructField(_2,StringType,true)], obj#14749: int
+- DeserializeToObject newInstance(class scala.Tuple2), obj#14748: scala.Tuple2
+- Project [cast(key#14828 as string) AS key#14740, cast(value#14829 as string) AS value#14741]
+- Streaming RelationV2 kafka[key#14828, value#14829, topic#14830, partition#14831, offset#14832L, timestamp#14833, timestampType#14834] (Options: [kafka.metadata.max.age.ms=1,failOnDataLoss=false,kafka.bootstrap.servers=127.0.0.1:35756,kafka.d...)
== Physical Plan ==
WriteToContinuousDataSource org.apache.spark.sql.execution.streaming.sources.MemoryStreamingWriteSupport@77eeaba
+- *(1) SerializeFromObject [input[0, int, false] AS value#14750]
+- *(1) MapElements <function1>, obj#14749: int
+- *(1) DeserializeToObject newInstance(class scala.Tuple2), obj#14748: scala.Tuple2
+- *(1) Project [cast(key#14828 as string) AS key#14740, cast(value#14829 as string) AS value#14741]
+- *(1) Project [key#14828, value#14829, topic#14830, partition#14831, offset#14832L, timestamp#14833, timestampType#14834]
+- *(1) ScanV2 kafka[key#14828, value#14829, topic#14830, partition#14831, offset#14832L, timestamp#14833, timestampType#14834] (Options: [kafka.metadata.max.age.ms=1,failOnDataLoss=false,kafka.bootstrap.servers=127.0.0.1:35756,kafka.d...)
at org.scalatest.Assertions$class.newAssertionFailedException(Assertions.scala:528)
at org.scalatest.FunSuite.newAssertionFailedException(FunSuite.scala:1560)
at org.scalatest.Assertions$class.fail(Assertions.scala:1089)
at org.scalatest.FunSuite.fail(FunSuite.scala:1560)
at org.apache.spark.sql.streaming.StreamTest$class.failTest$1(StreamTest.scala:450)
at org.apache.spark.sql.streaming.StreamTest$class.executeAction$1(StreamTest.scala:716)
at org.apache.spark.sql.streaming.StreamTest$$anonfun$liftedTree1$1$1.apply(StreamTest.scala:770)
at org.apache.spark.sql.streaming.StreamTest$$anonfun$liftedTree1$1$1.apply(StreamTest.scala:757)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at org.apache.spark.sql.streaming.StreamTest$class.liftedTree1$1(StreamTest.scala:757)
at org.apache.spark.sql.streaming.StreamTest$class.testStream(StreamTest.scala:756)
at org.apache.spark.sql.kafka010.KafkaSourceTest.testStream(KafkaMicroBatchSourceSuite.scala:49)
at org.apache.spark.sql.kafka010.KafkaContinuousSourceTopicDeletionSuite$$anonfun$1.apply$mcV$sp(KafkaContinuousSourceSuite.scala:54)
at org.apache.spark.sql.kafka010.KafkaContinuousSourceTopicDeletionSuite$$anonfun$1.apply(KafkaContinuousSourceSuite.scala:32)
at org.apache.spark.sql.kafka010.KafkaContinuousSourceTopicDeletionSuite$$anonfun$1.apply(KafkaContinuousSourceSuite.scala:32)
at org.scalatest.OutcomeOf$class.outcomeOf(OutcomeOf.scala:85)
at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
at org.scalatest.Transformer.apply(Transformer.scala:22)
at org.scalatest.Transformer.apply(Transformer.scala:20)
at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:186)
at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:103)
at org.scalatest.FunSuiteLike$class.invokeWithFixture$1(FunSuiteLike.scala:183)
at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:196)
at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:196)
at org.scalatest.SuperEngine.runTestImpl(Engine.scala:289)
at org.scalatest.FunSuiteLike$class.runTest(FunSuiteLike.scala:196)
at org.apache.spark.sql.kafka010.KafkaSourceTest.org$scalatest$BeforeAndAfterEach$$super$runTest(KafkaMicroBatchSourceSuite.scala:49)
at org.scalatest.BeforeAndAfterEach$class.runTest(BeforeAndAfterEach.scala:221)
at org.apache.spark.sql.kafka010.KafkaSourceTest.runTest(KafkaMicroBatchSourceSuite.scala:49)
at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:229)
at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:229)
at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:396)
at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:384)
at scala.collection.immutable.List.foreach(List.scala:392)
at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:384)
at org.scalatest.SuperEngine.org$scalatest$SuperEngine$$runTestsInBranch(Engine.scala:379)
at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:461)
at org.scalatest.FunSuiteLike$class.runTests(FunSuiteLike.scala:229)
at org.scalatest.FunSuite.runTests(FunSuite.scala:1560)
at org.scalatest.Suite$class.run(Suite.scala:1147)
at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1560)
at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:233)
at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:233)
at org.scalatest.SuperEngine.runImpl(Engine.scala:521)
at org.scalatest.FunSuiteLike$class.run(FunSuiteLike.scala:233)
at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:52)
at org.scalatest.BeforeAndAfterAll$class.liftedTree1$1(BeforeAndAfterAll.scala:213)
at org.scalatest.BeforeAndAfterAll$class.run(BeforeAndAfterAll.scala:210)
at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:52)
at org.scalatest.tools.Framework.org$scalatest$tools$Framework$$runSuite(Framework.scala:314)
at org.scalatest.tools.Framework$ScalaTestTask.execute(Framework.scala:480)
at sbt.ForkMain$Run$2.call(ForkMain.java:296)
at sbt.ForkMain$Run$2.call(ForkMain.java:286)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)