[common] Bump debezium version to 1.6.4.Final

pull/1089/head
Leonard Xu authored 3 years ago, committed by Leonard Xu
parent 1b27ee6b29
commit 59b310a24c

@@ -10,7 +10,7 @@ This README is meant as a brief walkthrough on the core features of CDC Connecto
| Connector | Database | Driver |
|-----------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------|
| [mongodb-cdc](docs/content/connectors/mongodb-cdc.md) | <li> [MongoDB](https://www.mongodb.com): 3.6, 4.x, 5.0 | MongoDB Driver: 4.3.1 |
| [mysql-cdc](docs/content/connectors/mysql-cdc.md) | <li> [MySQL](https://dev.mysql.com/doc): 5.6, 5.7, 8.0.x <li> [RDS MySQL](https://www.aliyun.com/product/rds/mysql): 5.6, 5.7, 8.0.x <li> [PolarDB MySQL](https://www.aliyun.com/product/polardb): 5.6, 5.7, 8.0.x <li> [Aurora MySQL](https://aws.amazon.com/cn/rds/aurora): 5.6, 5.7, 8.0.x <li> [MariaDB](https://mariadb.org): 10.x <li> [PolarDB X](https://github.com/ApsaraDB/galaxysql): 2.0.1 | JDBC Driver: 8.0.21 |
| [mysql-cdc](docs/content/connectors/mysql-cdc.md) | <li> [MySQL](https://dev.mysql.com/doc): 5.6, 5.7, 8.0.x <li> [RDS MySQL](https://www.aliyun.com/product/rds/mysql): 5.6, 5.7, 8.0.x <li> [PolarDB MySQL](https://www.aliyun.com/product/polardb): 5.6, 5.7, 8.0.x <li> [Aurora MySQL](https://aws.amazon.com/cn/rds/aurora): 5.6, 5.7, 8.0.x <li> [MariaDB](https://mariadb.org): 10.x <li> [PolarDB X](https://github.com/ApsaraDB/galaxysql): 2.0.1 | JDBC Driver: 8.0.27 |
| [oceanbase-cdc](/docs/content/connectors/oceanbase-cdc.md) | <li> [OceanBase CE](https://open.oceanbase.com): 3.1.x | JDBC Driver: 5.7.4x |
| [oracle-cdc](docs/content/connectors/oracle-cdc.md) | <li> [Oracle](https://www.oracle.com/index.html): 11, 12, 19 | Oracle Driver: 19.3.0.0 |
| [postgres-cdc](docs/content/connectors/postgres-cdc.md) | <li> [PostgreSQL](https://www.postgresql.org): 9.6, 10, 11, 12 | JDBC Driver: 42.2.12 |

@@ -10,7 +10,7 @@ The CDC Connectors for Apache Flink<sup>®</sup> integrate Debezium as the engin
| Connector | Database | Driver |
|-----------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------|
| [mongodb-cdc](connectors/mongodb-cdc.md) | <li> [MongoDB](https://www.mongodb.com): 3.6, 4.x, 5.0 | MongoDB Driver: 4.3.1 |
| [mysql-cdc](connectors/mysql-cdc.md) | <li> [MySQL](https://dev.mysql.com/doc): 5.6, 5.7, 8.0.x <li> [RDS MySQL](https://www.aliyun.com/product/rds/mysql): 5.6, 5.7, 8.0.x <li> [PolarDB MySQL](https://www.aliyun.com/product/polardb): 5.6, 5.7, 8.0.x <li> [Aurora MySQL](https://aws.amazon.com/cn/rds/aurora): 5.6, 5.7, 8.0.x <li> [MariaDB](https://mariadb.org): 10.x <li> [PolarDB X](https://github.com/ApsaraDB/galaxysql): 2.0.1 | JDBC Driver: 8.0.21 |
| [mysql-cdc](connectors/mysql-cdc.md) | <li> [MySQL](https://dev.mysql.com/doc): 5.6, 5.7, 8.0.x <li> [RDS MySQL](https://www.aliyun.com/product/rds/mysql): 5.6, 5.7, 8.0.x <li> [PolarDB MySQL](https://www.aliyun.com/product/polardb): 5.6, 5.7, 8.0.x <li> [Aurora MySQL](https://aws.amazon.com/cn/rds/aurora): 5.6, 5.7, 8.0.x <li> [MariaDB](https://mariadb.org): 10.x <li> [PolarDB X](https://github.com/ApsaraDB/galaxysql): 2.0.1 | JDBC Driver: 8.0.27 |
| [oceanbase-cdc](connectors/oceanbase-cdc.md) | <li> [OceanBase CE](https://open.oceanbase.com): 3.1.x | JDBC Driver: 5.7.4x |
| [oracle-cdc](connectors/oracle-cdc.md) | <li> [Oracle](https://www.oracle.com/index.html): 11, 12, 19 | Oracle Driver: 19.3.0.0 |
| [postgres-cdc](connectors/postgres-cdc.md) | <li> [PostgreSQL](https://www.postgresql.org): 9.6, 10, 11, 12 | JDBC Driver: 42.2.12 |

@@ -174,7 +174,7 @@ public class FlinkDatabaseSchemaHistory implements DatabaseHistory {
}
@Override
public boolean storeOnlyMonitoredTables() {
public boolean storeOnlyCapturedTables() {
return storeOnlyMonitoredTablesDdl;
}

@@ -130,7 +130,7 @@ public class EmbeddedFlinkDatabaseHistory implements DatabaseHistory {
}
@Override
public boolean storeOnlyMonitoredTables() {
public boolean storeOnlyCapturedTables() {
return storeOnlyMonitoredTablesDdl;
}

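Note: Debezium 1.6 renames the `DatabaseHistory` hook `storeOnlyMonitoredTables()` to `storeOnlyCapturedTables()`, which is why both Flink history implementations above change only the override name while keeping the existing `storeOnlyMonitoredTablesDdl` flag. A minimal sketch of the adapted override (fragment only; the rest of each class is unchanged and omitted):

```java
// Debezium 1.6 SPI name; the backing flag is the same one used before the rename.
@Override
public boolean storeOnlyCapturedTables() {
    return storeOnlyMonitoredTablesDdl;
}
```
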
@@ -132,7 +132,7 @@ public class EventDispatcherImpl<T extends DataCollectionId> extends EventDispat
T dataCollectionId, SchemaChangeEventEmitter schemaChangeEventEmitter)
throws InterruptedException {
if (dataCollectionId != null && !filter.isIncluded(dataCollectionId)) {
if (historizedSchema == null || historizedSchema.storeOnlyMonitoredTables()) {
if (historizedSchema == null || historizedSchema.storeOnlyCapturedTables()) {
LOG.trace("Filtering schema change event for {}", dataCollectionId);
return;
}
@@ -156,7 +156,7 @@ public class EventDispatcherImpl<T extends DataCollectionId> extends EventDispat
}
}
if (!anyNonfilteredEvent) {
if (historizedSchema == null || historizedSchema.storeOnlyMonitoredTables()) {
if (historizedSchema == null || historizedSchema.storeOnlyCapturedTables()) {
LOG.trace("Filtering schema change event for {}", dataCollectionIds);
return;
}

@@ -32,7 +32,6 @@ import com.ververica.cdc.connectors.mysql.source.split.MySqlSplit;
import com.ververica.cdc.connectors.mysql.source.utils.ChunkUtils;
import com.ververica.cdc.connectors.mysql.source.utils.RecordUtils;
import io.debezium.connector.base.ChangeEventQueue;
import io.debezium.connector.mysql.MySqlOffsetContext;
import io.debezium.connector.mysql.MySqlStreamingChangeEventSourceMetrics;
import io.debezium.pipeline.DataChangeEvent;
import io.debezium.pipeline.source.spi.ChangeEventSource;
@@ -94,25 +93,25 @@ public class BinlogSplitReader implements DebeziumReader<SourceRecord, MySqlSpli
this.capturedTableFilter =
statefulTaskContext.getConnectorConfig().getTableFilters().dataCollectionFilter();
this.queue = statefulTaskContext.getQueue();
final MySqlOffsetContext mySqlOffsetContext = statefulTaskContext.getOffsetContext();
this.binlogSplitReadTask =
new MySqlBinlogSplitReadTask(
statefulTaskContext.getConnectorConfig(),
mySqlOffsetContext,
statefulTaskContext.getConnection(),
statefulTaskContext.getDispatcher(),
statefulTaskContext.getSignalEventDispatcher(),
statefulTaskContext.getErrorHandler(),
StatefulTaskContext.getClock(),
statefulTaskContext.getTaskContext(),
(MySqlStreamingChangeEventSourceMetrics)
statefulTaskContext.getStreamingChangeEventSourceMetrics(),
statefulTaskContext.getTopicSelector().getPrimaryTopic(),
currentBinlogSplit);
executor.submit(
() -> {
try {
binlogSplitReadTask.execute(new BinlogSplitChangeEventSourceContextImpl());
binlogSplitReadTask.execute(
new BinlogSplitChangeEventSourceContextImpl(),
statefulTaskContext.getOffsetContext());
} catch (Exception e) {
currentTaskRunning = false;
LOG.error(

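Note: the `BinlogSplitReader` changes above follow the Debezium 1.6 streaming-source contract, in which the offset context is no longer a constructor argument of the read task but is supplied on each run. A minimal sketch of the calling pattern, reusing the names from the hunk (surrounding fields and error handling omitted):

```java
// Debezium 1.6 style: the task is built without a MySqlOffsetContext, and the
// current offset is handed to execute(...) for this particular run.
binlogSplitReadTask.execute(
        new BinlogSplitChangeEventSourceContextImpl(),
        statefulTaskContext.getOffsetContext());
```
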
@@ -98,7 +98,6 @@ public class SnapshotSplitReader implements DebeziumReader<SourceRecord, MySqlSp
this.splitSnapshotReadTask =
new MySqlSnapshotSplitReadTask(
statefulTaskContext.getConnectorConfig(),
statefulTaskContext.getOffsetContext(),
statefulTaskContext.getSnapshotChangeEventSourceMetrics(),
statefulTaskContext.getDatabaseSchema(),
statefulTaskContext.getConnection(),
@@ -114,7 +113,8 @@ public class SnapshotSplitReader implements DebeziumReader<SourceRecord, MySqlSp
final SnapshotSplitChangeEventSourceContextImpl sourceContext =
new SnapshotSplitChangeEventSourceContextImpl();
SnapshotResult snapshotResult =
splitSnapshotReadTask.execute(sourceContext);
splitSnapshotReadTask.execute(
sourceContext, statefulTaskContext.getOffsetContext());
final MySqlBinlogSplit backfillBinlogSplit =
createBackfillBinlogSplit(sourceContext);
@@ -134,8 +134,16 @@ public class SnapshotSplitReader implements DebeziumReader<SourceRecord, MySqlSp
if (snapshotResult.isCompletedOrSkipped()) {
final MySqlBinlogSplitReadTask backfillBinlogReadTask =
createBackfillBinlogReadTask(backfillBinlogSplit);
final MySqlOffsetContext.Loader loader =
new MySqlOffsetContext.Loader(
statefulTaskContext.getConnectorConfig());
final MySqlOffsetContext mySqlOffsetContext =
loader.load(
backfillBinlogSplit.getStartingOffset().getOffset());
backfillBinlogReadTask.execute(
new SnapshotBinlogSplitChangeEventSourceContextImpl());
new SnapshotBinlogSplitChangeEventSourceContextImpl(),
mySqlOffsetContext);
} else {
readException =
new IllegalStateException(
@@ -168,11 +176,6 @@ public class SnapshotSplitReader implements DebeziumReader<SourceRecord, MySqlSp
private MySqlBinlogSplitReadTask createBackfillBinlogReadTask(
MySqlBinlogSplit backfillBinlogSplit) {
final MySqlOffsetContext.Loader loader =
new MySqlOffsetContext.Loader(statefulTaskContext.getConnectorConfig());
final MySqlOffsetContext mySqlOffsetContext =
(MySqlOffsetContext)
loader.load(backfillBinlogSplit.getStartingOffset().getOffset());
// we should only capture events for the current table,
// otherwise, we may not be able to find the corresponding schema
Configuration dezConf =
@@ -187,15 +190,14 @@ public class SnapshotSplitReader implements DebeziumReader<SourceRecord, MySqlSp
// task to read binlog and backfill for current split
return new MySqlBinlogSplitReadTask(
new MySqlConnectorConfig(dezConf),
mySqlOffsetContext,
statefulTaskContext.getConnection(),
statefulTaskContext.getDispatcher(),
statefulTaskContext.getSignalEventDispatcher(),
statefulTaskContext.getErrorHandler(),
StatefulTaskContext.getClock(),
statefulTaskContext.getTaskContext(),
(MySqlStreamingChangeEventSourceMetrics)
statefulTaskContext.getStreamingChangeEventSourceMetrics(),
statefulTaskContext.getTopicSelector().getPrimaryTopic(),
backfillBinlogSplit);
}

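Note: because the backfill task no longer captures an offset at construction time, `SnapshotSplitReader` now rebuilds the `MySqlOffsetContext` from the split's starting offset right before the backfill run; in Debezium 1.6 the `Loader` returns the concrete `MySqlOffsetContext`, so the old cast disappears as well. A minimal sketch of that step, condensed from the hunk above:

```java
// Reload the binlog position for the backfill run and pass it to execute(...)
// instead of storing it inside the read task.
final MySqlOffsetContext.Loader loader =
        new MySqlOffsetContext.Loader(statefulTaskContext.getConnectorConfig());
final MySqlOffsetContext mySqlOffsetContext =
        loader.load(backfillBinlogSplit.getStartingOffset().getOffset());
backfillBinlogReadTask.execute(
        new SnapshotBinlogSplitChangeEventSourceContextImpl(), mySqlOffsetContext);
```
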
@@ -48,7 +48,6 @@ public class MySqlBinlogSplitReadTask extends MySqlStreamingChangeEventSource {
private static final Logger LOG = LoggerFactory.getLogger(MySqlBinlogSplitReadTask.class);
private final MySqlBinlogSplit binlogSplit;
private final MySqlOffsetContext offsetContext;
private final EventDispatcherImpl<TableId> eventDispatcher;
private final SignalEventDispatcher signalEventDispatcher;
private final ErrorHandler errorHandler;
@@ -56,42 +55,31 @@ public class MySqlBinlogSplitReadTask extends MySqlStreamingChangeEventSource {
public MySqlBinlogSplitReadTask(
MySqlConnectorConfig connectorConfig,
MySqlOffsetContext offsetContext,
MySqlConnection connection,
EventDispatcherImpl<TableId> dispatcher,
SignalEventDispatcher signalEventDispatcher,
ErrorHandler errorHandler,
Clock clock,
MySqlTaskContext taskContext,
MySqlStreamingChangeEventSourceMetrics metrics,
String topic,
MySqlBinlogSplit binlogSplit) {
super(
connectorConfig,
offsetContext,
connection,
dispatcher,
errorHandler,
clock,
taskContext,
metrics);
super(connectorConfig, connection, dispatcher, errorHandler, clock, taskContext, metrics);
this.binlogSplit = binlogSplit;
this.eventDispatcher = dispatcher;
this.offsetContext = offsetContext;
this.errorHandler = errorHandler;
this.signalEventDispatcher =
new SignalEventDispatcher(
offsetContext.getPartition(), topic, eventDispatcher.getQueue());
this.signalEventDispatcher = signalEventDispatcher;
}
@Override
public void execute(ChangeEventSourceContext context) throws InterruptedException {
public void execute(ChangeEventSourceContext context, MySqlOffsetContext offsetContext)
throws InterruptedException {
this.context = context;
super.execute(context);
super.execute(context, offsetContext);
}
@Override
protected void handleEvent(Event event) {
super.handleEvent(event);
protected void handleEvent(MySqlOffsetContext offsetContext, Event event) {
super.handleEvent(offsetContext, event);
// check whether we need to stop reading the binlog for this snapshot split.
if (isBoundedRead()) {
final BinlogOffset currentBinlogOffset = getBinlogPosition(offsetContext.getOffset());

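Note: on the subclass side the same migration shows up as new override signatures in `MySqlBinlogSplitReadTask`: `execute` and `handleEvent` now receive the `MySqlOffsetContext` as a parameter instead of reading a stored field, and the `SignalEventDispatcher` is injected instead of being built from that field. A compact restatement of the two overrides from the hunk above (other members omitted):

```java
@Override
public void execute(ChangeEventSourceContext context, MySqlOffsetContext offsetContext)
        throws InterruptedException {
    this.context = context;
    super.execute(context, offsetContext);
}

@Override
protected void handleEvent(MySqlOffsetContext offsetContext, Event event) {
    super.handleEvent(offsetContext, event);
    // The bounded-read check reads the binlog position from the offset passed in,
    // not from a field on the task.
}
```
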
@@ -31,10 +31,9 @@ import io.debezium.connector.mysql.MySqlDatabaseSchema;
import io.debezium.connector.mysql.MySqlOffsetContext;
import io.debezium.connector.mysql.MySqlValueConverters;
import io.debezium.pipeline.EventDispatcher;
import io.debezium.pipeline.metrics.SnapshotChangeEventSourceMetrics;
import io.debezium.pipeline.source.AbstractSnapshotChangeEventSource;
import io.debezium.pipeline.source.spi.SnapshotProgressListener;
import io.debezium.pipeline.spi.ChangeRecordEmitter;
import io.debezium.pipeline.spi.OffsetContext;
import io.debezium.pipeline.spi.SnapshotResult;
import io.debezium.relational.Column;
import io.debezium.relational.RelationalSnapshotChangeEventSource;
@@ -62,7 +61,8 @@ import java.util.Calendar;
import static com.ververica.cdc.connectors.mysql.debezium.DebeziumUtils.currentBinlogOffset;
/** Task to read snapshot split of table. */
public class MySqlSnapshotSplitReadTask extends AbstractSnapshotChangeEventSource {
public class MySqlSnapshotSplitReadTask
extends AbstractSnapshotChangeEventSource<MySqlOffsetContext> {
private static final Logger LOG = LoggerFactory.getLogger(MySqlSnapshotSplitReadTask.class);
@@ -75,22 +75,19 @@ public class MySqlSnapshotSplitReadTask extends AbstractSnapshotChangeEventSourc
private final EventDispatcherImpl<TableId> dispatcher;
private final Clock clock;
private final MySqlSnapshotSplit snapshotSplit;
private final MySqlOffsetContext offsetContext;
private final TopicSelector<TableId> topicSelector;
private final SnapshotProgressListener snapshotProgressListener;
private final SnapshotChangeEventSourceMetrics snapshotChangeEventSourceMetrics;
public MySqlSnapshotSplitReadTask(
MySqlConnectorConfig connectorConfig,
MySqlOffsetContext previousOffset,
SnapshotProgressListener snapshotProgressListener,
SnapshotChangeEventSourceMetrics snapshotChangeEventSourceMetrics,
MySqlDatabaseSchema databaseSchema,
MySqlConnection jdbcConnection,
EventDispatcherImpl<TableId> dispatcher,
TopicSelector<TableId> topicSelector,
Clock clock,
MySqlSnapshotSplit snapshotSplit) {
super(connectorConfig, previousOffset, snapshotProgressListener);
this.offsetContext = previousOffset;
super(connectorConfig, snapshotChangeEventSourceMetrics);
this.connectorConfig = connectorConfig;
this.databaseSchema = databaseSchema;
this.jdbcConnection = jdbcConnection;
@@ -98,13 +95,15 @@ public class MySqlSnapshotSplitReadTask extends AbstractSnapshotChangeEventSourc
this.clock = clock;
this.snapshotSplit = snapshotSplit;
this.topicSelector = topicSelector;
this.snapshotProgressListener = snapshotProgressListener;
this.snapshotChangeEventSourceMetrics = snapshotChangeEventSourceMetrics;
}
@Override
public SnapshotResult execute(ChangeEventSourceContext context) throws InterruptedException {
public SnapshotResult<MySqlOffsetContext> execute(
ChangeEventSourceContext context, MySqlOffsetContext previousOffset)
throws InterruptedException {
SnapshottingTask snapshottingTask = getSnapshottingTask(previousOffset);
final SnapshotContext ctx;
final SnapshotContext<MySqlOffsetContext> ctx;
try {
ctx = prepare(context);
} catch (Exception e) {
@@ -112,7 +111,7 @@ public class MySqlSnapshotSplitReadTask extends AbstractSnapshotChangeEventSourc
throw new RuntimeException(e);
}
try {
return doExecute(context, ctx, snapshottingTask);
return doExecute(context, previousOffset, ctx, snapshottingTask);
} catch (InterruptedException e) {
LOG.warn("Snapshot was interrupted before completion");
throw e;
@@ -122,17 +121,21 @@ public class MySqlSnapshotSplitReadTask extends AbstractSnapshotChangeEventSourc
}
@Override
protected SnapshotResult doExecute(
protected SnapshotResult<MySqlOffsetContext> doExecute(
ChangeEventSourceContext context,
SnapshotContext snapshotContext,
MySqlOffsetContext previousOffset,
SnapshotContext<MySqlOffsetContext> snapshotContext,
SnapshottingTask snapshottingTask)
throws Exception {
final RelationalSnapshotChangeEventSource.RelationalSnapshotContext ctx =
(RelationalSnapshotChangeEventSource.RelationalSnapshotContext) snapshotContext;
ctx.offset = offsetContext;
final RelationalSnapshotChangeEventSource.RelationalSnapshotContext<MySqlOffsetContext>
ctx =
(RelationalSnapshotChangeEventSource.RelationalSnapshotContext<
MySqlOffsetContext>)
snapshotContext;
ctx.offset = previousOffset;
final SignalEventDispatcher signalEventDispatcher =
new SignalEventDispatcher(
offsetContext.getPartition(),
previousOffset.getPartition(),
topicSelector.topicNameFor(snapshotSplit.getTableId()),
dispatcher.getQueue());
@@ -163,7 +166,7 @@ public class MySqlSnapshotSplitReadTask extends AbstractSnapshotChangeEventSourc
}
@Override
protected SnapshottingTask getSnapshottingTask(OffsetContext previousOffset) {
protected SnapshottingTask getSnapshottingTask(MySqlOffsetContext offsetContext) {
return new SnapshottingTask(false, true);
}
@@ -246,7 +249,7 @@ public class MySqlSnapshotSplitReadTask extends AbstractSnapshotChangeEventSourc
rows,
snapshotSplit.splitId(),
Strings.duration(stop - exportStart));
snapshotProgressListener.rowsScanned(table.id(), rows);
snapshotChangeEventSourceMetrics.rowsScanned(table.id(), rows);
logTimer = getTableScanLogTimer();
}
dispatcher.dispatchSnapshotEvent(

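Note: the snapshot task mirrors the same migration on the snapshot side of the Debezium 1.6 API: `AbstractSnapshotChangeEventSource` is generic in the offset type, the previous offset travels through `execute(...)` and `doExecute(...)` instead of an `offsetContext` field, and row counts are reported through `SnapshotChangeEventSourceMetrics`, replacing the separate `SnapshotProgressListener` field. The key lines, condensed from the hunks above (fragments, not a complete class):

```java
// Signature after the migration: the offset type is a generic parameter and the
// previous offset arrives as an argument, e.g.
//   SnapshotResult<MySqlOffsetContext> execute(ChangeEventSourceContext context,
//                                              MySqlOffsetContext previousOffset)

// Inside doExecute(...): install the passed-in offset on the snapshot context.
ctx.offset = previousOffset;

// Row-count reporting goes through the metrics object rather than a listener field.
snapshotChangeEventSourceMetrics.rowsScanned(table.id(), rows);
```
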
@@ -22,6 +22,7 @@ import com.github.shyiko.mysql.binlog.BinaryLogClient;
import com.ververica.cdc.connectors.mysql.debezium.DebeziumUtils;
import com.ververica.cdc.connectors.mysql.debezium.EmbeddedFlinkDatabaseHistory;
import com.ververica.cdc.connectors.mysql.debezium.dispatcher.EventDispatcherImpl;
import com.ververica.cdc.connectors.mysql.debezium.dispatcher.SignalEventDispatcher;
import com.ververica.cdc.connectors.mysql.source.config.MySqlSourceConfig;
import com.ververica.cdc.connectors.mysql.source.offset.BinlogOffset;
import com.ververica.cdc.connectors.mysql.source.split.MySqlSplit;
@@ -82,6 +83,7 @@ public class StatefulTaskContext {
private SnapshotChangeEventSourceMetrics snapshotChangeEventSourceMetrics;
private StreamingChangeEventSourceMetrics streamingChangeEventSourceMetrics;
private EventDispatcherImpl<TableId> dispatcher;
private SignalEventDispatcher signalEventDispatcher;
private ChangeEventQueue<DataChangeEvent> queue;
private ErrorHandler errorHandler;
@@ -114,6 +116,7 @@ public class StatefulTaskContext {
this.taskContext =
new MySqlTaskContextImpl(connectorConfig, databaseSchema, binaryLogClient);
final int queueSize =
mySqlSplit.isSnapshotSplit()
? Integer.MAX_VALUE
@@ -142,6 +145,10 @@
metadataProvider,
schemaNameAdjuster);
this.signalEventDispatcher =
new SignalEventDispatcher(
offsetContext.getPartition(), topicSelector.getPrimaryTopic(), queue);
final MySqlChangeEventSourceMetricsFactory changeEventSourceMetricsFactory =
new MySqlChangeEventSourceMetricsFactory(
new MySqlStreamingChangeEventSourceMetrics(
@@ -291,6 +298,10 @@ public class StatefulTaskContext {
return dispatcher;
}
public SignalEventDispatcher getSignalEventDispatcher() {
return signalEventDispatcher;
}
public ChangeEventQueue<DataChangeEvent> getQueue() {
return queue;
}

@@ -74,7 +74,7 @@ under the License.
<properties>
<flink.version>1.13.5</flink.version>
<debezium.version>1.5.4.Final</debezium.version>
<debezium.version>1.6.4.Final</debezium.version>
<tikv.version>3.2.0</tikv.version>
<geometry.version>2.2.0</geometry.version>
<!-- OracleE2eITCase will report "container cannot be accessed" error when running in Azure Pipeline with 1.16.1 testcontainers.
