@Internal public class KafkaSourceReader<T> extends org.apache.flink.connector.base.source.reader.SingleThreadMultiplexSourceReaderBase<org.apache.kafka.clients.consumer.ConsumerRecord<byte[],byte[]>,T,KafkaPartitionSplit,KafkaPartitionSplitState>
| 构造器和说明 |
|---|
KafkaSourceReader(org.apache.flink.connector.base.source.reader.synchronization.FutureCompletingBlockingQueue<org.apache.flink.connector.base.source.reader.RecordsWithSplitIds<org.apache.kafka.clients.consumer.ConsumerRecord<byte[],byte[]>>> elementsQueue,
KafkaSourceFetcherManager kafkaSourceFetcherManager,
org.apache.flink.connector.base.source.reader.RecordEmitter<org.apache.kafka.clients.consumer.ConsumerRecord<byte[],byte[]>,T,KafkaPartitionSplitState> recordEmitter,
org.apache.flink.configuration.Configuration config,
org.apache.flink.api.connector.source.SourceReaderContext context,
KafkaSourceReaderMetrics kafkaSourceReaderMetrics) |
| 限定符和类型 | 方法和说明 |
|---|---|
protected KafkaPartitionSplitState |
initializedState(KafkaPartitionSplit split) |
void |
notifyCheckpointComplete(long checkpointId) |
protected void |
onSplitFinished(Map<String,KafkaPartitionSplitState> finishedSplitIds) |
void |
pauseOrResumeSplits(Collection<String> splitsToPause,
Collection<String> splitsToResume) |
List<KafkaPartitionSplit> |
snapshotState(long checkpointId) |
protected KafkaPartitionSplit |
toSplitType(String splitId,
KafkaPartitionSplitState splitState) |
addSplits, close, getNumberOfCurrentlyAssignedSplits, handleSourceEvents, isAvailable, notifyNoMoreSplits, pollNext, start

public KafkaSourceReader(org.apache.flink.connector.base.source.reader.synchronization.FutureCompletingBlockingQueue<org.apache.flink.connector.base.source.reader.RecordsWithSplitIds<org.apache.kafka.clients.consumer.ConsumerRecord<byte[],byte[]>>> elementsQueue,
KafkaSourceFetcherManager kafkaSourceFetcherManager,
org.apache.flink.connector.base.source.reader.RecordEmitter<org.apache.kafka.clients.consumer.ConsumerRecord<byte[],byte[]>,T,KafkaPartitionSplitState> recordEmitter,
org.apache.flink.configuration.Configuration config,
org.apache.flink.api.connector.source.SourceReaderContext context,
KafkaSourceReaderMetrics kafkaSourceReaderMetrics)
protected void onSplitFinished(Map<String,KafkaPartitionSplitState> finishedSplitIds)
onSplitFinished 在类中 org.apache.flink.connector.base.source.reader.SourceReaderBase<org.apache.kafka.clients.consumer.ConsumerRecord<byte[],byte[]>,T,KafkaPartitionSplit,KafkaPartitionSplitState>

public List<KafkaPartitionSplit> snapshotState(long checkpointId)
snapshotState 在接口中 org.apache.flink.api.connector.source.SourceReader<T,KafkaPartitionSplit>
snapshotState 在类中 org.apache.flink.connector.base.source.reader.SourceReaderBase<org.apache.kafka.clients.consumer.ConsumerRecord<byte[],byte[]>,T,KafkaPartitionSplit,KafkaPartitionSplitState>

public void notifyCheckpointComplete(long checkpointId)
throws Exception
Exception

protected KafkaPartitionSplitState initializedState(KafkaPartitionSplit split)
initializedState 在类中 org.apache.flink.connector.base.source.reader.SourceReaderBase<org.apache.kafka.clients.consumer.ConsumerRecord<byte[],byte[]>,T,KafkaPartitionSplit,KafkaPartitionSplitState>

protected KafkaPartitionSplit toSplitType(String splitId, KafkaPartitionSplitState splitState)
toSplitType 在类中 org.apache.flink.connector.base.source.reader.SourceReaderBase<org.apache.kafka.clients.consumer.ConsumerRecord<byte[],byte[]>,T,KafkaPartitionSplit,KafkaPartitionSplitState>

public void pauseOrResumeSplits(Collection<String> splitsToPause, Collection<String> splitsToResume)
pauseOrResumeSplits 在接口中 org.apache.flink.api.connector.source.SourceReader<T,KafkaPartitionSplit>
pauseOrResumeSplits 在类中 org.apache.flink.connector.base.source.reader.SourceReaderBase<org.apache.kafka.clients.consumer.ConsumerRecord<byte[],byte[]>,T,KafkaPartitionSplit,KafkaPartitionSplitState>

Copyright © 2014–2023 The Apache Software Foundation. All rights reserved.