@@ -84,7 +84,7 @@ import org.apache.kafka.coordinator.group.streams.StreamsGroupHeartbeatResult
 import org.apache.kafka.coordinator.share.{ShareCoordinator, ShareCoordinatorTestConfig}
 import org.apache.kafka.coordinator.transaction.TransactionLogConfig
 import org.apache.kafka.image.{MetadataDelta, MetadataImage, MetadataProvenance}
-import org.apache.kafka.metadata.{ConfigRepository, KRaftMetadataCache, MetadataCache, MockConfigRepository}
+import org.apache.kafka.metadata.{ConfigRepository, KRaftMetadataCache, MetadataCache, MetadataCacheTest, MockConfigRepository}
 import org.apache.kafka.network.Session
 import org.apache.kafka.network.metrics.{RequestChannelMetrics, RequestMetrics}
 import org.apache.kafka.raft.{KRaftConfigs, QuorumConfig}
@@ -4393,7 +4393,7 @@ class KafkaApisTest extends Logging {
         .setName(plaintextListener.value)
     )
     MetadataCacheTest.updateCache(metadataCache,
-      Seq(new RegisterBrokerRecord().setBrokerId(0).setRack("rack").setFenced(false).setEndPoints(endpoints))
+      util.List.of(new RegisterBrokerRecord().setBrokerId(0).setRack("rack").setFenced(false).setEndPoints(endpoints))
     )

     // 2. Set up authorizer
@@ -4433,7 +4433,7 @@ class KafkaApisTest extends Logging {
     }

     val partitionRecords = Seq(authorizedTopicId, unauthorizedTopicId).map(createDummyPartitionRecord)
-    MetadataCacheTest.updateCache(metadataCache, partitionRecords)
+    MetadataCacheTest.updateCache(metadataCache, (partitionRecords: Seq[ApiMessage]).asJava)

     // 4. Send TopicMetadataReq using topicId
     val metadataReqByTopicId = MetadataRequest.Builder.forTopicIds(util.Set.of(authorizedTopicId, unauthorizedTopicId)).build()
@@ -10101,7 +10101,7 @@ class KafkaApisTest extends Logging {
     )

     MetadataCacheTest.updateCache(metadataCache,
-      Seq(new RegisterBrokerRecord()
+      util.List.of(new RegisterBrokerRecord()
         .setBrokerId(brokerId)
         .setRack("rack")
         .setFenced(false)
@@ -10155,7 +10155,7 @@ class KafkaApisTest extends Logging {
     )

     MetadataCacheTest.updateCache(metadataCache,
-      Seq(new RegisterBrokerRecord().setBrokerId(0).setRack("rack").setFenced(false).setEndPoints(endpoints0),
+      util.List.of(new RegisterBrokerRecord().setBrokerId(0).setRack("rack").setFenced(false).setEndPoints(endpoints0),
         new RegisterBrokerRecord().setBrokerId(1).setRack("rack").setFenced(false).setEndPoints(endpoints1))
     )

@@ -10391,12 +10391,12 @@ class KafkaApisTest extends Logging {

   private def setupBasicMetadataCache(topic: String, numPartitions: Int, numBrokers: Int, topicId: Uuid): Unit = {
     val updateMetadata = createBasicMetadata(topic, numPartitions, 0, numBrokers, topicId)
-    MetadataCacheTest.updateCache(metadataCache, updateMetadata)
+    MetadataCacheTest.updateCache(metadataCache, updateMetadata.asJava)
   }

   private def addTopicToMetadataCache(topic: String, numPartitions: Int, numBrokers: Int = 1, topicId: Uuid = Uuid.ZERO_UUID): Unit = {
     val updateMetadata = createBasicMetadata(topic, numPartitions, 0, numBrokers, topicId)
-    MetadataCacheTest.updateCache(metadataCache, updateMetadata)
+    MetadataCacheTest.updateCache(metadataCache, updateMetadata.asJava)
   }

   private def createMetadataBroker(brokerId: Int,
0 commit comments