@Internal public class KafkaDynamicSource extends Object implements org.apache.flink.table.connector.source.ScanTableSource, org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata, org.apache.flink.table.connector.source.abilities.SupportsWatermarkPushDown
从接口继承的嵌套类: org.apache.flink.table.connector.source.ScanTableSource

| 限定符和类型 | 字段和说明 |
|---|---|
protected BoundedMode |
boundedMode
The bounded mode for the contained consumer (default is an unbounded data stream).
|
protected long |
boundedTimestampMillis
The bounded timestamp to locate partition offsets; only relevant when bounded mode is
BoundedMode.TIMESTAMP. |
protected org.apache.flink.table.connector.format.DecodingFormat<org.apache.flink.api.common.serialization.DeserializationSchema<org.apache.flink.table.data.RowData>> |
keyDecodingFormat
Optional format for decoding keys from Kafka.
|
protected String |
keyPrefix
Prefix that needs to be removed from fields when constructing the physical data type.
|
protected int[] |
keyProjection
Indices that determine the key fields and the target position in the produced row.
|
protected List<String> |
metadataKeys
Metadata that is appended at the end of a physical source row.
|
protected org.apache.flink.table.types.DataType |
physicalDataType
Data type to configure the formats.
|
protected org.apache.flink.table.types.DataType |
producedDataType
Data type that describes the final output of the source.
|
protected Properties |
properties
Properties for the Kafka consumer.
|
protected Map<KafkaTopicPartition,Long> |
specificBoundedOffsets
Specific end offsets; only relevant when bounded mode is
BoundedMode.SPECIFIC_OFFSETS. |
protected Map<KafkaTopicPartition,Long> |
specificStartupOffsets
Specific startup offsets; only relevant when startup mode is
StartupMode.SPECIFIC_OFFSETS. |
protected StartupMode |
startupMode
The startup mode for the contained consumer (default is
StartupMode.GROUP_OFFSETS). |
protected long |
startupTimestampMillis
The start timestamp to locate partition offsets; only relevant when startup mode is
StartupMode.TIMESTAMP. |
protected String |
tableIdentifier |
protected Pattern |
topicPattern
The Kafka topic pattern to consume.
|
protected List<String> |
topics
The Kafka topics to consume.
|
protected boolean |
upsertMode
Flag to determine source mode.
|
protected org.apache.flink.table.connector.format.DecodingFormat<org.apache.flink.api.common.serialization.DeserializationSchema<org.apache.flink.table.data.RowData>> |
valueDecodingFormat
Format for decoding values from Kafka.
|
protected int[] |
valueProjection
Indices that determine the value fields and the target position in the produced row.
|
protected org.apache.flink.api.common.eventtime.WatermarkStrategy<org.apache.flink.table.data.RowData> |
watermarkStrategy
Watermark strategy that is used to generate per-partition watermark.
|
| 构造器和说明 |
|---|
KafkaDynamicSource(org.apache.flink.table.types.DataType physicalDataType,
org.apache.flink.table.connector.format.DecodingFormat<org.apache.flink.api.common.serialization.DeserializationSchema<org.apache.flink.table.data.RowData>> keyDecodingFormat,
org.apache.flink.table.connector.format.DecodingFormat<org.apache.flink.api.common.serialization.DeserializationSchema<org.apache.flink.table.data.RowData>> valueDecodingFormat,
int[] keyProjection,
int[] valueProjection,
String keyPrefix,
List<String> topics,
Pattern topicPattern,
Properties properties,
StartupMode startupMode,
Map<KafkaTopicPartition,Long> specificStartupOffsets,
long startupTimestampMillis,
BoundedMode boundedMode,
Map<KafkaTopicPartition,Long> specificBoundedOffsets,
long boundedTimestampMillis,
boolean upsertMode,
String tableIdentifier) |
| 限定符和类型 | 方法和说明 |
|---|---|
void |
applyReadableMetadata(List<String> metadataKeys,
org.apache.flink.table.types.DataType producedDataType) |
void |
applyWatermark(org.apache.flink.api.common.eventtime.WatermarkStrategy<org.apache.flink.table.data.RowData> watermarkStrategy) |
String |
asSummaryString() |
org.apache.flink.table.connector.source.DynamicTableSource |
copy() |
protected KafkaSource<org.apache.flink.table.data.RowData> |
createKafkaSource(org.apache.flink.api.common.serialization.DeserializationSchema<org.apache.flink.table.data.RowData> keyDeserialization,
org.apache.flink.api.common.serialization.DeserializationSchema<org.apache.flink.table.data.RowData> valueDeserialization,
org.apache.flink.api.common.typeinfo.TypeInformation<org.apache.flink.table.data.RowData> producedTypeInfo) |
boolean |
equals(Object o) |
org.apache.flink.table.connector.ChangelogMode |
getChangelogMode() |
org.apache.flink.table.connector.source.ScanTableSource.ScanRuntimeProvider |
getScanRuntimeProvider(org.apache.flink.table.connector.source.ScanTableSource.ScanContext context) |
int |
hashCode() |
Map<String,org.apache.flink.table.types.DataType> |
listReadableMetadata() |
boolean |
supportsMetadataProjection() |
protected org.apache.flink.table.types.DataType producedDataType
protected List<String> metadataKeys
@Nullable protected org.apache.flink.api.common.eventtime.WatermarkStrategy<org.apache.flink.table.data.RowData> watermarkStrategy
protected final org.apache.flink.table.types.DataType physicalDataType
@Nullable protected final org.apache.flink.table.connector.format.DecodingFormat<org.apache.flink.api.common.serialization.DeserializationSchema<org.apache.flink.table.data.RowData>> keyDecodingFormat
protected final org.apache.flink.table.connector.format.DecodingFormat<org.apache.flink.api.common.serialization.DeserializationSchema<org.apache.flink.table.data.RowData>> valueDecodingFormat
protected final int[] keyProjection
protected final int[] valueProjection
@Nullable protected final String keyPrefix
protected final Pattern topicPattern
protected final Properties properties
protected final StartupMode startupMode
The startup mode for the contained consumer (default is StartupMode.GROUP_OFFSETS).

protected final Map<KafkaTopicPartition,Long> specificStartupOffsets
Specific startup offsets; only relevant when startup mode is StartupMode.SPECIFIC_OFFSETS.

protected final long startupTimestampMillis
The start timestamp to locate partition offsets; only relevant when startup mode is StartupMode.TIMESTAMP.

protected final BoundedMode boundedMode
The bounded mode for the contained consumer (default is an unbounded data stream).

protected final Map<KafkaTopicPartition,Long> specificBoundedOffsets
Specific end offsets; only relevant when bounded mode is BoundedMode.SPECIFIC_OFFSETS.

protected final long boundedTimestampMillis
The bounded timestamp to locate partition offsets; only relevant when bounded mode is BoundedMode.TIMESTAMP.

protected final boolean upsertMode
Flag to determine source mode.
protected final String tableIdentifier
public KafkaDynamicSource(org.apache.flink.table.types.DataType physicalDataType,
@Nullable
org.apache.flink.table.connector.format.DecodingFormat<org.apache.flink.api.common.serialization.DeserializationSchema<org.apache.flink.table.data.RowData>> keyDecodingFormat,
org.apache.flink.table.connector.format.DecodingFormat<org.apache.flink.api.common.serialization.DeserializationSchema<org.apache.flink.table.data.RowData>> valueDecodingFormat,
int[] keyProjection,
int[] valueProjection,
@Nullable
String keyPrefix,
@Nullable
List<String> topics,
@Nullable
Pattern topicPattern,
Properties properties,
StartupMode startupMode,
Map<KafkaTopicPartition,Long> specificStartupOffsets,
long startupTimestampMillis,
BoundedMode boundedMode,
Map<KafkaTopicPartition,Long> specificBoundedOffsets,
long boundedTimestampMillis,
boolean upsertMode,
String tableIdentifier)
public org.apache.flink.table.connector.ChangelogMode getChangelogMode()
getChangelogMode 在接口中 org.apache.flink.table.connector.source.ScanTableSource

public org.apache.flink.table.connector.source.ScanTableSource.ScanRuntimeProvider getScanRuntimeProvider(org.apache.flink.table.connector.source.ScanTableSource.ScanContext context)

getScanRuntimeProvider 在接口中 org.apache.flink.table.connector.source.ScanTableSource

public Map<String,org.apache.flink.table.types.DataType> listReadableMetadata()

listReadableMetadata 在接口中 org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata

public void applyReadableMetadata(List<String> metadataKeys, org.apache.flink.table.types.DataType producedDataType)

applyReadableMetadata 在接口中 org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata

public boolean supportsMetadataProjection()

supportsMetadataProjection 在接口中 org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata

public void applyWatermark(org.apache.flink.api.common.eventtime.WatermarkStrategy<org.apache.flink.table.data.RowData> watermarkStrategy)

applyWatermark 在接口中 org.apache.flink.table.connector.source.abilities.SupportsWatermarkPushDown

public org.apache.flink.table.connector.source.DynamicTableSource copy()

copy 在接口中 org.apache.flink.table.connector.source.DynamicTableSource

public String asSummaryString()

asSummaryString 在接口中 org.apache.flink.table.connector.source.DynamicTableSource

protected KafkaSource<org.apache.flink.table.data.RowData> createKafkaSource(org.apache.flink.api.common.serialization.DeserializationSchema<org.apache.flink.table.data.RowData> keyDeserialization, org.apache.flink.api.common.serialization.DeserializationSchema<org.apache.flink.table.data.RowData> valueDeserialization, org.apache.flink.api.common.typeinfo.TypeInformation<org.apache.flink.table.data.RowData> producedTypeInfo)
Copyright © 2014–2023 The Apache Software Foundation. All rights reserved.