[common] Fix schema history running out of memory

Fix schema history running out of memory. This closes #207

Co-authored-by: Leonard Xu <xbjtdcq@gmail.com>
Shengkai authored 4 years ago, committed via GitHub
parent 5195647576
commit fcea3d14f7

@ -44,15 +44,17 @@ import com.alibaba.ververica.cdc.debezium.internal.DebeziumChangeFetcher;
import com.alibaba.ververica.cdc.debezium.internal.DebeziumOffset;
import com.alibaba.ververica.cdc.debezium.internal.DebeziumOffsetSerializer;
import com.alibaba.ververica.cdc.debezium.internal.FlinkDatabaseHistory;
import com.alibaba.ververica.cdc.debezium.internal.FlinkDatabaseSchemaHistory;
import com.alibaba.ververica.cdc.debezium.internal.FlinkOffsetBackingStore;
import com.alibaba.ververica.cdc.debezium.internal.Handover;
import com.alibaba.ververica.cdc.debezium.internal.SchemaRecord;
import io.debezium.document.DocumentReader;
import io.debezium.document.DocumentWriter;
import io.debezium.embedded.Connect;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.spi.OffsetCommitPolicy;
import io.debezium.heartbeat.Heartbeat;
import io.debezium.relational.history.HistoryRecord;
import io.debezium.relational.history.DatabaseHistory;
import org.apache.commons.collections.map.LinkedMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -61,9 +63,14 @@ import javax.annotation.Nullable;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
@ -114,6 +121,15 @@ public class DebeziumSourceFunction<T> extends RichSourceFunction<T>
/** The maximum number of pending non-committed checkpoints to track, to avoid memory leaks. */
public static final int MAX_NUM_PENDING_CHECKPOINTS = 100;
/**
* The configuration key that specifies whether the Debezium MySQL connector uses the legacy
* implementation.
*/
public static final String LEGACY_IMPLEMENTATION_KEY = "internal.implementation";
/** The configuration value represents legacy implementation. */
public static final String LEGACY_IMPLEMENTATION_VALUE = "legacy";
// ---------------------------------------------------------------------------------------
// Properties
// ---------------------------------------------------------------------------------------
@ -137,6 +153,13 @@ public class DebeziumSourceFunction<T> extends RichSourceFunction<T>
// State
// ---------------------------------------------------------------------------------------
/**
* Structure to maintain the current schema history. The exact content of a {@link SchemaRecord}
* depends on the implementation of the {@link DatabaseHistory} in use.
*/
private static final ConcurrentMap<String, Collection<SchemaRecord>> HISTORY =
new ConcurrentHashMap<>();
/**
* The offsets to restore to, if the consumer restores state from a checkpoint.
*
@ -154,8 +177,9 @@ public class DebeziumSourceFunction<T> extends RichSourceFunction<T>
* State to store the history records, i.e. schema changes.
*
* @see FlinkDatabaseHistory
* @see FlinkDatabaseSchemaHistory
*/
private transient ListState<String> historyRecordsState;
private transient ListState<String> schemaRecordsState;
// ---------------------------------------------------------------------------------------
// Worker
@ -211,7 +235,7 @@ public class DebeziumSourceFunction<T> extends RichSourceFunction<T>
new ListStateDescriptor<>(
OFFSETS_STATE_NAME,
PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO));
this.historyRecordsState =
this.schemaRecordsState =
stateStore.getUnionListState(
new ListStateDescriptor<>(
HISTORY_RECORDS_STATE_NAME, BasicTypeInfo.STRING_TYPE_INFO));
@ -254,21 +278,23 @@ public class DebeziumSourceFunction<T> extends RichSourceFunction<T>
private void restoreHistoryRecordsState() throws Exception {
DocumentReader reader = DocumentReader.defaultReader();
ConcurrentLinkedQueue<HistoryRecord> historyRecords = new ConcurrentLinkedQueue<>();
List<SchemaRecord> historyRecords = new ArrayList<>();
int recordsCount = 0;
boolean firstEntry = true;
for (String record : historyRecordsState.get()) {
for (String record : schemaRecordsState.get()) {
if (firstEntry) {
// we store the engine instance name in the first element
this.engineInstanceName = record;
firstEntry = false;
} else {
historyRecords.add(new HistoryRecord(reader.read(record)));
// Collect the records from the state. The database history implementation will read,
// reorganize and re-register them as its own state.
historyRecords.add(new SchemaRecord(reader.read(record)));
recordsCount++;
}
}
if (engineInstanceName != null) {
FlinkDatabaseHistory.registerHistoryRecords(engineInstanceName, historyRecords);
StateUtils.registerHistory(engineInstanceName, historyRecords);
}
LOG.info(
"Consumer subtask {} restored history records state: {} with {} records.",
@ -324,17 +350,15 @@ public class DebeziumSourceFunction<T> extends RichSourceFunction<T>
}
private void snapshotHistoryRecordsState() throws Exception {
historyRecordsState.clear();
schemaRecordsState.clear();
if (engineInstanceName != null) {
historyRecordsState.add(engineInstanceName);
ConcurrentLinkedQueue<HistoryRecord> historyRecords =
FlinkDatabaseHistory.getRegisteredHistoryRecord(engineInstanceName);
if (historyRecords != null) {
schemaRecordsState.add(engineInstanceName);
Collection<SchemaRecord> records = StateUtils.retrieveHistory(engineInstanceName);
DocumentWriter writer = DocumentWriter.defaultWriter();
for (HistoryRecord record : historyRecords) {
historyRecordsState.add(writer.write(record.document()));
}
for (SchemaRecord record : records) {
schemaRecordsState.add(writer.write(record.toDocument()));
}
}
}
@ -356,20 +380,19 @@ public class DebeziumSourceFunction<T> extends RichSourceFunction<T>
properties.setProperty("offset.flush.interval.ms", String.valueOf(Long.MAX_VALUE));
// disable tombstones
properties.setProperty("tombstones.on.delete", "false");
// we have to use a persisted DatabaseHistory implementation, otherwise, recovery can't
// continue to read binlog
// see
// https://stackoverflow.com/questions/57147584/debezium-error-schema-isnt-know-to-this-connector
// and https://debezium.io/blog/2018/03/16/note-on-database-history-topic-configuration/
properties.setProperty("database.history", FlinkDatabaseHistory.class.getCanonicalName());
if (engineInstanceName == null) {
// not restored from a checkpoint
engineInstanceName = UUID.randomUUID().toString();
FlinkDatabaseHistory.registerEmptyHistoryRecord(engineInstanceName);
}
// history instance name to initialize FlinkDatabaseHistory
properties.setProperty(
FlinkDatabaseHistory.DATABASE_HISTORY_INSTANCE_NAME, engineInstanceName);
// we have to use a persisted DatabaseHistory implementation, otherwise, recovery can't
// continue to read binlog
// see
// https://stackoverflow.com/questions/57147584/debezium-error-schema-isnt-know-to-this-connector
// and https://debezium.io/blog/2018/03/16/note-on-database-history-topic-configuration/
properties.setProperty("database.history", determineDatabase().getCanonicalName());
// we have to filter out the heartbeat events, otherwise the deserializer will fail
String dbzHeartbeatPrefix =
@ -519,4 +542,47 @@ public class DebeziumSourceFunction<T> extends RichSourceFunction<T>
public boolean getDebeziumStarted() {
return debeziumStarted;
}
private Class<?> determineDatabase() {
boolean isCompatibleWithLegacy =
FlinkDatabaseHistory.isCompatible(StateUtils.retrieveHistory(engineInstanceName));
if (LEGACY_IMPLEMENTATION_VALUE.equals(properties.get(LEGACY_IMPLEMENTATION_KEY))) {
// specifies the legacy implementation but the state may be incompatible
if (isCompatibleWithLegacy) {
return FlinkDatabaseHistory.class;
} else {
throw new IllegalStateException(
"The configured option 'debezium.internal.implementation' is 'legacy', but the state of source is incompatible with this implementation, you should remove the the option.");
}
} else if (FlinkDatabaseSchemaHistory.isCompatible(
StateUtils.retrieveHistory(engineInstanceName))) {
// tries the non-legacy first
return FlinkDatabaseSchemaHistory.class;
} else if (isCompatibleWithLegacy) {
// fallback to legacy if possible
return FlinkDatabaseHistory.class;
} else {
// impossible
throw new IllegalStateException("Can't determine which DatabaseHistory to use.");
}
}
// ---------------------------------------------------------------------------------------
/** Utils to get/put/remove the history of schema. */
public static final class StateUtils {
public static void registerHistory(
String engineName, Collection<SchemaRecord> engineHistory) {
HISTORY.put(engineName, engineHistory);
}
public static Collection<SchemaRecord> retrieveHistory(String engineName) {
return HISTORY.getOrDefault(engineName, Collections.emptyList());
}
public static void removeHistory(String engineName) {
HISTORY.remove(engineName);
}
}
}
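
To make the state hand-off easier to follow: the source function and the Debezium-instantiated history implementation never hold references to each other, they only meet in the static HISTORY registry above. A minimal round-trip sketch using only names from this diff (the engine name is hypothetical):

    // restore: the source function parses each checkpointed JSON entry and
    // publishes the collection under the engine instance name
    List<SchemaRecord> restored = new ArrayList<>();
    StateUtils.registerHistory("engine-42", restored);

    // the DatabaseHistory implementation picks the collection up in configure()
    Collection<SchemaRecord> records = StateUtils.retrieveHistory("engine-42");

    // snapshot: the source function serializes whatever the registry holds now
    DocumentWriter writer = DocumentWriter.defaultWriter();
    for (SchemaRecord record : records) {
        schemaRecordsState.add(writer.write(record.toDocument()));
    }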

@ -18,18 +18,15 @@
package com.alibaba.ververica.cdc.debezium.internal;
import com.alibaba.ververica.cdc.debezium.DebeziumSourceFunction.StateUtils;
import io.debezium.config.Configuration;
import io.debezium.relational.history.AbstractDatabaseHistory;
import io.debezium.relational.history.DatabaseHistory;
import io.debezium.relational.history.DatabaseHistoryException;
import io.debezium.relational.history.DatabaseHistoryListener;
import io.debezium.relational.history.FileDatabaseHistory;
import io.debezium.relational.history.HistoryRecord;
import io.debezium.relational.history.HistoryRecordComparator;
import io.debezium.relational.history.KafkaDatabaseHistory;
import java.util.HashMap;
import java.util.Map;
import java.util.Collection;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.function.Consumer;
@ -45,53 +42,13 @@ public class FlinkDatabaseHistory extends AbstractDatabaseHistory {
public static final String DATABASE_HISTORY_INSTANCE_NAME = "database.history.instance.name";
/**
* We will synchronize the records into Flink's state during snapshot. We have to use a global
* variable to communicate with Flink's source function, because Debezium will construct the
* instance of {@link DatabaseHistory} itself. Maybe we can improve this in the future.
*
* <p>NOTE: we just use Flink's state as a durable persistent storage as a replacement of {@link
* FileDatabaseHistory} and {@link KafkaDatabaseHistory}. It doesn't need to guarantee the
* exactly-once semantic for the history records. The history records shouldn't be super large,
* because we only monitor the schema changes for one single table.
*
* @see
* com.alibaba.ververica.cdc.debezium.DebeziumSourceFunction#snapshotState(org.apache.flink.runtime.state.FunctionSnapshotContext)
*/
public static final Map<String, ConcurrentLinkedQueue<HistoryRecord>> ALL_RECORDS =
new HashMap<>();
private ConcurrentLinkedQueue<HistoryRecord> records;
private ConcurrentLinkedQueue<SchemaRecord> schemaRecords;
private String instanceName;
/**
* Registers the given HistoryRecords into global variable under the given instance name, in
* order to be accessed by instance of {@link FlinkDatabaseHistory}.
*/
public static void registerHistoryRecords(
String instanceName, ConcurrentLinkedQueue<HistoryRecord> historyRecords) {
synchronized (FlinkDatabaseHistory.ALL_RECORDS) {
FlinkDatabaseHistory.ALL_RECORDS.put(instanceName, historyRecords);
}
}
/**
* Registers an empty HistoryRecords into global variable under the given instance name, in
* order to be accessed by instance of {@link FlinkDatabaseHistory}.
*/
public static void registerEmptyHistoryRecord(String instanceName) {
registerHistoryRecords(instanceName, new ConcurrentLinkedQueue<>());
}
/** Gets the registered HistoryRecords under the given instance name. */
public static ConcurrentLinkedQueue<HistoryRecord> getRegisteredHistoryRecord(
String instanceName) {
synchronized (ALL_RECORDS) {
if (ALL_RECORDS.containsKey(instanceName)) {
return ALL_RECORDS.get(instanceName);
}
}
return null;
private ConcurrentLinkedQueue<SchemaRecord> getRegisteredHistoryRecord(String instanceName) {
Collection<SchemaRecord> historyRecords = StateUtils.retrieveHistory(instanceName);
return new ConcurrentLinkedQueue<>(historyRecords);
}
@Override
@ -102,39 +59,32 @@ public class FlinkDatabaseHistory extends AbstractDatabaseHistory {
boolean useCatalogBeforeSchema) {
super.configure(config, comparator, listener, useCatalogBeforeSchema);
this.instanceName = config.getString(DATABASE_HISTORY_INSTANCE_NAME);
this.records = getRegisteredHistoryRecord(instanceName);
if (records == null) {
throw new IllegalStateException(
String.format(
"Couldn't find engine instance %s in the global records.",
instanceName));
}
this.schemaRecords = getRegisteredHistoryRecord(instanceName);
// re-register the queue under the instance name so that every schema change
// stored later is visible to the source function's snapshot
StateUtils.registerHistory(instanceName, schemaRecords);
}
@Override
public void stop() {
super.stop();
if (instanceName != null) {
synchronized (ALL_RECORDS) {
// clear memory
ALL_RECORDS.remove(instanceName);
}
}
StateUtils.removeHistory(instanceName);
}
@Override
protected void storeRecord(HistoryRecord record) throws DatabaseHistoryException {
this.records.add(record);
this.schemaRecords.add(new SchemaRecord(record));
}
@Override
protected void recoverRecords(Consumer<HistoryRecord> records) {
this.records.forEach(records);
this.schemaRecords.stream().map(SchemaRecord::getHistoryRecord).forEach(records);
}
@Override
public boolean exists() {
return !records.isEmpty();
return !schemaRecords.isEmpty();
}
@Override
@ -146,4 +96,19 @@ public class FlinkDatabaseHistory extends AbstractDatabaseHistory {
public String toString() {
return "Flink Database History";
}
/**
* Determine whether the {@link FlinkDatabaseHistory} is compatible with the specified state.
*/
public static boolean isCompatible(Collection<SchemaRecord> records) {
for (SchemaRecord record : records) {
// a history record carries source/position/ddl; checking the first entry is enough,
// because a given state was written entirely by one implementation
if (!record.isHistoryRecord()) {
return false;
} else {
break;
}
}
return true;
}
}
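
One subtlety worth spelling out: configure() copies the restored collection into a fresh ConcurrentLinkedQueue and re-registers that queue, so later storeRecord() calls land in the exact instance the source function snapshots. A sketch (engine name and someHistoryRecord are hypothetical):

    // copy the restored records into a mutable, thread-safe queue
    ConcurrentLinkedQueue<SchemaRecord> queue =
            new ConcurrentLinkedQueue<>(StateUtils.retrieveHistory("engine-42"));
    // re-register so the source function and this history share one instance
    StateUtils.registerHistory("engine-42", queue);
    // every DDL Debezium records from now on is appended to the same queue, and
    // the next checkpoint serializes it via retrieveHistory("engine-42")
    queue.add(new SchemaRecord(someHistoryRecord)); // what storeRecord() does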

@ -0,0 +1,199 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.ververica.cdc.debezium.internal;
import com.alibaba.ververica.cdc.debezium.DebeziumSourceFunction;
import com.alibaba.ververica.cdc.debezium.DebeziumSourceFunction.StateUtils;
import io.debezium.config.Configuration;
import io.debezium.relational.TableId;
import io.debezium.relational.Tables;
import io.debezium.relational.ddl.DdlParser;
import io.debezium.relational.history.DatabaseHistory;
import io.debezium.relational.history.DatabaseHistoryException;
import io.debezium.relational.history.DatabaseHistoryListener;
import io.debezium.relational.history.HistoryRecord;
import io.debezium.relational.history.HistoryRecordComparator;
import io.debezium.relational.history.JsonTableChangeSerializer;
import io.debezium.relational.history.TableChanges;
import io.debezium.schema.DatabaseSchema;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import static io.debezium.relational.history.TableChanges.TableChange;
/**
* The {@link FlinkDatabaseSchemaHistory} only stores the latest schema of the monitored tables.
* When recovering from a checkpoint, it applies the stored tables to the {@link DatabaseSchema}
* directly, so it no longer needs to replay the history.
*
* <p>The data structure maintained in the {@link FlinkDatabaseSchemaHistory} is quite different
* from that of the {@link FlinkDatabaseHistory}, so the two are not compatible with each other.
* Because it only maintains the latest schema of each table rather than all historical DDLs, it
* prevents OOM when a massive number of history DDLs has accumulated.
*/
public class FlinkDatabaseSchemaHistory implements DatabaseHistory {
public static final String DATABASE_HISTORY_INSTANCE_NAME = "database.history.instance.name";
private final JsonTableChangeSerializer tableChangesSerializer =
new JsonTableChangeSerializer();
private ConcurrentMap<TableId, SchemaRecord> latestTables;
private String instanceName;
private DatabaseHistoryListener listener;
private boolean storeOnlyMonitoredTablesDdl;
private boolean skipUnparseableDDL;
private boolean useCatalogBeforeSchema;
@Override
public void configure(
Configuration config,
HistoryRecordComparator comparator,
DatabaseHistoryListener listener,
boolean useCatalogBeforeSchema) {
this.instanceName = config.getString(DATABASE_HISTORY_INSTANCE_NAME);
this.listener = listener;
this.storeOnlyMonitoredTablesDdl = config.getBoolean(STORE_ONLY_MONITORED_TABLES_DDL);
this.skipUnparseableDDL = config.getBoolean(SKIP_UNPARSEABLE_DDL_STATEMENTS);
this.useCatalogBeforeSchema = useCatalogBeforeSchema;
// recover
this.latestTables = new ConcurrentHashMap<>();
for (SchemaRecord schemaRecord : StateUtils.retrieveHistory(instanceName)) {
// validate here
TableChange tableChange =
JsonTableChangeSerializer.fromDocument(
schemaRecord.toDocument(), useCatalogBeforeSchema);
latestTables.put(tableChange.getId(), schemaRecord);
}
// register
StateUtils.registerHistory(instanceName, latestTables.values());
}
@Override
public void start() {
listener.started();
}
@Override
public void record(
Map<String, ?> source, Map<String, ?> position, String databaseName, String ddl)
throws DatabaseHistoryException {
throw new UnsupportedOperationException(
String.format(
"The %s cannot work with 'debezium.internal.implementation' = 'legacy',"
+ "please use %s",
FlinkDatabaseSchemaHistory.class.getCanonicalName(),
FlinkDatabaseHistory.class.getCanonicalName()));
}
@Override
public void record(
Map<String, ?> source,
Map<String, ?> position,
String databaseName,
String schemaName,
String ddl,
TableChanges changes)
throws DatabaseHistoryException {
for (TableChanges.TableChange change : changes) {
switch (change.getType()) {
case CREATE:
case ALTER:
latestTables.put(
change.getId(),
new SchemaRecord(tableChangesSerializer.toDocument(change)));
break;
case DROP:
latestTables.remove(change.getId());
break;
default:
// impossible
throw new RuntimeException(
String.format("Unknown change type: %s.", change.getType()));
}
}
listener.onChangeApplied(
new HistoryRecord(source, position, databaseName, schemaName, ddl, changes));
}
@Override
public void recover(
Map<String, ?> source, Map<String, ?> position, Tables schema, DdlParser ddlParser) {
listener.recoveryStarted();
for (SchemaRecord record : latestTables.values()) {
TableChange tableChange =
JsonTableChangeSerializer.fromDocument(
record.getTableChangeDoc(), useCatalogBeforeSchema);
schema.overwriteTable(tableChange.getTable());
}
listener.recoveryStopped();
}
@Override
public void stop() {
if (instanceName != null) {
DebeziumSourceFunction.StateUtils.removeHistory(instanceName);
}
listener.stopped();
}
@Override
public boolean exists() {
return latestTables != null && !latestTables.isEmpty();
}
@Override
public boolean storageExists() {
return true;
}
@Override
public void initializeStorage() {
// do nothing
}
@Override
public boolean storeOnlyMonitoredTables() {
return storeOnlyMonitoredTablesDdl;
}
@Override
public boolean skipUnparseableDdlStatements() {
return skipUnparseableDDL;
}
/**
* Determine whether the {@link FlinkDatabaseSchemaHistory} is compatible with the specified
* state.
*/
public static boolean isCompatible(Collection<SchemaRecord> records) {
for (SchemaRecord record : records) {
if (!record.isTableChangeRecord()) {
return false;
} else {
break;
}
}
return true;
}
}
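
The memory bound that motivates this class, in a sketch (the table id, loop and recordFor() factory are hypothetical): the per-table map holds at most one entry per table regardless of how many DDLs arrive, while the legacy queue grows by one entry per DDL.

    ConcurrentMap<TableId, SchemaRecord> latestTables = new ConcurrentHashMap<>();
    TableId id = TableId.parse("inventory.products");
    for (int i = 0; i < 1_000_000; i++) {
        // every CREATE/ALTER overwrites the previous entry for the same table
        latestTables.put(id, recordFor(i)); // recordFor: hypothetical factory
    }
    // latestTables.size() == 1 here; the legacy ConcurrentLinkedQueue in
    // FlinkDatabaseHistory would hold 1_000_000 records for the same workload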

@ -0,0 +1,85 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.ververica.cdc.debezium.internal;
import io.debezium.document.Document;
import io.debezium.relational.history.HistoryRecord;
import io.debezium.relational.history.TableChanges.TableChange;
import javax.annotation.Nullable;
/**
* The Record represents a schema change event; it contains either one {@link HistoryRecord} or
* one {@link TableChange}.
*
* <p>The {@link HistoryRecord} is used by {@link FlinkDatabaseHistory}, which keeps the full
* history of table change events for all tables; the {@link TableChange} is used by {@link
* FlinkDatabaseSchemaHistory}, which keeps only the latest table change for each table.
*/
public class SchemaRecord {
@Nullable private final HistoryRecord historyRecord;
@Nullable private final Document tableChangeDoc;
public SchemaRecord(HistoryRecord historyRecord) {
this.historyRecord = historyRecord;
this.tableChangeDoc = null;
}
public SchemaRecord(Document document) {
if (isHistoryRecordDocument(document)) {
this.historyRecord = new HistoryRecord(document);
this.tableChangeDoc = null;
} else {
this.tableChangeDoc = document;
this.historyRecord = null;
}
}
@Nullable
public HistoryRecord getHistoryRecord() {
return historyRecord;
}
@Nullable
public Document getTableChangeDoc() {
return tableChangeDoc;
}
public boolean isHistoryRecord() {
return historyRecord != null;
}
public boolean isTableChangeRecord() {
return tableChangeDoc != null;
}
public Document toDocument() {
if (historyRecord != null) {
return historyRecord.document();
} else {
return tableChangeDoc;
}
}
private boolean isHistoryRecordDocument(Document document) {
return new HistoryRecord(document).isValid();
}
}
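
Since the checkpointed state is just a list of JSON strings, the Document-based constructor above is what lets a restored entry classify itself. A sketch (the json variable and the DocumentReader import are assumptions):

    Document doc = DocumentReader.defaultReader().read(json);
    SchemaRecord record = new SchemaRecord(doc); // sniffs the format via HistoryRecord.isValid()
    if (record.isHistoryRecord()) {
        // legacy entry: a full Debezium HistoryRecord (source/position/ddl)
    } else {
        // new entry: a serialized TableChange, i.e. the latest schema of one table
    }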

@ -28,6 +28,8 @@ import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import static com.alibaba.ververica.cdc.debezium.DebeziumSourceFunction.LEGACY_IMPLEMENTATION_KEY;
import static com.alibaba.ververica.cdc.debezium.DebeziumSourceFunction.LEGACY_IMPLEMENTATION_VALUE;
import static org.apache.flink.util.Preconditions.checkNotNull;
/** A builder to build a SourceFunction which can read snapshot and continue to consume binlog. */
@ -215,6 +217,15 @@ public class MySQLSource {
if (dbzProperties != null) {
dbzProperties.forEach(props::put);
// Add default configurations for compatibility when the legacy mysql connector
// implementation is set
if (LEGACY_IMPLEMENTATION_VALUE.equals(
dbzProperties.get(LEGACY_IMPLEMENTATION_KEY))) {
props.put("transforms", "snapshotasinsert");
props.put(
"transforms.snapshotasinsert.type",
"io.debezium.connector.mysql.transforms.ReadToInsertEvent");
}
}
return new DebeziumSourceFunction<>(deserializer, props, specificOffset);
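
For reference, this branch is driven entirely by user-supplied Debezium properties. A hedged DataStream-API sketch (connection details are hypothetical; StringDebeziumDeserializationSchema is assumed to come from the same debezium package):

    Properties dbzProps = new Properties();
    dbzProps.setProperty("internal.implementation", "legacy");

    SourceFunction<String> source =
            MySQLSource.<String>builder()
                    .hostname("localhost")
                    .port(3306)
                    .databaseList("inventory")
                    .username("flinkuser")
                    .password("flinkpw")
                    .deserializer(new StringDebeziumDeserializationSchema())
                    .debeziumProperties(dbzProps)
                    .build();
    // the builder then injects the snapshotasinsert transform shown above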

@ -40,7 +40,17 @@ import com.alibaba.ververica.cdc.connectors.mysql.utils.UniqueDatabase;
import com.alibaba.ververica.cdc.connectors.utils.TestSourceContext;
import com.alibaba.ververica.cdc.debezium.DebeziumDeserializationSchema;
import com.alibaba.ververica.cdc.debezium.DebeziumSourceFunction;
import com.fasterxml.jackson.core.JsonParseException;
import com.jayway.jsonpath.JsonPath;
import io.debezium.document.Document;
import io.debezium.document.DocumentWriter;
import io.debezium.relational.Column;
import io.debezium.relational.Table;
import io.debezium.relational.TableEditor;
import io.debezium.relational.TableId;
import io.debezium.relational.history.HistoryRecord;
import io.debezium.relational.history.JsonTableChangeSerializer;
import io.debezium.relational.history.TableChanges;
import org.apache.kafka.connect.source.SourceRecord;
import org.junit.Before;
import org.junit.Test;
@ -54,7 +64,9 @@ import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutorService;
@ -70,6 +82,7 @@ import static com.alibaba.ververica.cdc.connectors.utils.AssertUtils.assertUpdat
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/** Tests for {@link MySQLSource} which also heavily tests {@link DebeziumSourceFunction}. */
@RunWith(Parameterized.class)
@ -401,6 +414,89 @@ public class MySQLSourceTest extends MySQLTestBase {
source4.close();
runThread4.sync();
}
{
// ---------------------------------------------------------------------------
// Step-9: insert partial and alter table
// ---------------------------------------------------------------------------
final DebeziumSourceFunction<SourceRecord> source5 = createMySqlBinlogSource();
final TestSourceContext<SourceRecord> sourceContext5 = new TestSourceContext<>();
setupSource(source5, true, offsetState, historyState, true, 0, 1);
// restart the source
final CheckedThread runThread5 =
new CheckedThread() {
@Override
public void go() throws Exception {
source5.run(sourceContext5);
}
};
runThread5.start();
try (Connection connection = database.getJdbcConnection();
Statement statement = connection.createStatement()) {
statement.execute(
"INSERT INTO products(id, description, weight) VALUES (default, 'Go go go', 111.1)");
statement.execute(
"ALTER TABLE products ADD comment_col VARCHAR(100) DEFAULT 'cdc'");
List<SourceRecord> records = drain(sourceContext5, 1);
assertInsert(records.get(0), "id", 1002);
}
// ---------------------------------------------------------------------------
// Step-10: trigger checkpoint-4
// ---------------------------------------------------------------------------
synchronized (sourceContext5.getCheckpointLock()) {
// trigger checkpoint-4
source5.snapshotState(new StateSnapshotContextSynchronousImpl(300, 300));
}
assertHistoryState(historyState); // assert the DDL is stored in the history state
assertEquals(1, offsetState.list.size());
String state = new String(offsetState.list.get(0), StandardCharsets.UTF_8);
assertEquals("mysql_binlog_source", JsonPath.read(state, "$.sourcePartition.server"));
assertEquals("mysql-bin.000003", JsonPath.read(state, "$.sourceOffset.file"));
assertEquals("1", JsonPath.read(state, "$.sourceOffset.row").toString());
assertEquals("223344", JsonPath.read(state, "$.sourceOffset.server_id").toString());
assertEquals("2", JsonPath.read(state, "$.sourceOffset.event").toString());
int pos = JsonPath.read(state, "$.sourceOffset.pos");
assertTrue(pos > prevPos);
source5.cancel();
source5.close();
runThread5.sync();
}
{
// ---------------------------------------------------------------------------
// Step-11: restore from the checkpoint-4 and insert the partial value
// ---------------------------------------------------------------------------
final DebeziumSourceFunction<SourceRecord> source6 = createMySqlBinlogSource();
final TestSourceContext<SourceRecord> sourceContext6 = new TestSourceContext<>();
setupSource(source6, true, offsetState, historyState, true, 0, 1);
// restart the source
final CheckedThread runThread6 =
new CheckedThread() {
@Override
public void go() throws Exception {
source6.run(sourceContext6);
}
};
runThread6.start();
try (Connection connection = database.getJdbcConnection();
Statement statement = connection.createStatement()) {
statement.execute(
"INSERT INTO products(id, description, weight) VALUES (default, 'Run!', 22.2)");
List<SourceRecord> records = drain(sourceContext6, 1);
assertInsert(records.get(0), "id", 1003);
}
source6.cancel();
source6.close();
runThread6.sync();
}
}
@Test
@ -505,7 +601,8 @@ public class MySQLSourceTest extends MySQLTestBase {
"INSERT INTO products VALUES (default,'robot','Toy robot',1.304)"); // 110
}
Tuple2<String, Integer> offset = currentMySQLLatestOffset(database, "products", 10);
Tuple2<String, Integer> offset =
currentMySQLLatestOffset(database, "products", 10, useLegacyImplementation);
final String offsetFile = offset.f0;
final int offsetPos = offset.f1;
final TestingListState<byte[]> offsetState = new TestingListState<>();
@ -669,23 +766,86 @@ public class MySQLSourceTest extends MySQLTestBase {
}
}
private void assertHistoryState(TestingListState<String> historyState) {
// assert the DDL is stored in the history state
assertTrue(historyState.list.size() > 0);
boolean hasDDL =
historyState.list.stream()
.skip(1)
.anyMatch(
history ->
JsonPath.read(history, "$.source.server")
.equals("mysql_binlog_source")
&& JsonPath.read(history, "$.position.snapshot")
.toString()
.equals("true")
&& JsonPath.read(history, "$.ddl")
.toString()
.startsWith("CREATE TABLE `products`"));
assertTrue(hasDDL);
@Test
public void testChooseDatabase() throws Exception {
final TestingListState<byte[]> offsetState = new TestingListState<>();
final TestingListState<String> historyState = new TestingListState<>();
historyState.add("engine-name");
DocumentWriter writer = DocumentWriter.defaultWriter();
if (useLegacyImplementation) {
// build a non-legacy state
JsonTableChangeSerializer tableChangesSerializer = new JsonTableChangeSerializer();
historyState.add(
writer.write(
tableChangesSerializer.toDocument(
new TableChanges.TableChange(
TableChanges.TableChangeType.CREATE,
MockedTable.INSTANCE))));
} else {
// build a legacy state
Document document =
new HistoryRecord(
Collections.emptyMap(),
Collections.emptyMap(),
"test",
"test",
"CREATE TABLE test(a int)",
null)
.document();
historyState.add(writer.write(document));
}
final DebeziumSourceFunction<SourceRecord> source = createMySqlBinlogSource();
setupSource(source, true, offsetState, historyState, true, 0, 1);
TestSourceContext<SourceRecord> sourceContext = new TestSourceContext<>();
final CheckedThread runThread =
new CheckedThread() {
@Override
public void go() throws Exception {
source.run(sourceContext);
}
};
runThread.start();
if (useLegacyImplementation) {
// should fail because user specifies to use the legacy implementation
try {
runThread.sync();
fail("Should fail.");
} catch (Exception e) {
assertTrue(e instanceof IllegalStateException);
assertEquals(
"The configured option 'debezium.internal.implementation' is 'legacy', but the state of source is incompatible with this implementation, you should remove the the option.",
e.getMessage());
}
} else {
// check the debezium status to verify
waitDebeziumStartWithTimeout(source, 5_000L);
source.cancel();
source.close();
runThread.sync();
}
}
@Test
public void testLoadIllegalState() throws Exception {
final TestingListState<byte[]> offsetState = new TestingListState<>();
final TestingListState<String> historyState = new TestingListState<>();
historyState.add("engine-name");
historyState.add("IllegalState");
final DebeziumSourceFunction<SourceRecord> source = createMySqlBinlogSource();
try {
setupSource(source, true, offsetState, historyState, true, 0, 1);
fail("Should fail.");
} catch (Exception e) {
assertTrue(e instanceof JsonParseException);
assertTrue(e.getMessage().contains("Unrecognized token 'IllegalState'"));
}
}
// ------------------------------------------------------------------------------------------
@ -694,7 +854,11 @@ public class MySQLSourceTest extends MySQLTestBase {
/** Gets the latest offset of current MySQL server. */
public static Tuple2<String, Integer> currentMySQLLatestOffset(
UniqueDatabase database, String table, int expectedRecordCount) throws Exception {
UniqueDatabase database,
String table,
int expectedRecordCount,
boolean useLegacyImplementation)
throws Exception {
DebeziumSourceFunction<SourceRecord> source =
MySQLSource.<SourceRecord>builder()
.hostname(MYSQL_CONTAINER.getHost())
@ -704,6 +868,7 @@ public class MySQLSourceTest extends MySQLTestBase {
.username(MYSQL_CONTAINER.getUsername())
.password(MYSQL_CONTAINER.getPassword())
.deserializer(new MySQLSourceTest.ForwardDeserializeSchema())
.debeziumProperties(createDebeziumProperties(useLegacyImplementation))
.build();
final TestingListState<byte[]> offsetState = new TestingListState<>();
final TestingListState<String> historyState = new TestingListState<>();
@ -744,10 +909,72 @@ public class MySQLSourceTest extends MySQLTestBase {
return Tuple2.of(offsetFile, offsetPos);
}
private static Properties createDebeziumProperties(boolean useLegacyImplementation) {
Properties debeziumProps = new Properties();
if (useLegacyImplementation) {
debeziumProps.put("internal.implementation", "legacy");
// convert legacy snapshot READ records to INSERTs so record types match the new implementation
debeziumProps.put("transforms", "snapshotasinsert");
debeziumProps.put(
"transforms.snapshotasinsert.type",
"io.debezium.connector.mysql.transforms.ReadToInsertEvent");
}
return debeziumProps;
}
// ------------------------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------------------------
private void waitDebeziumStartWithTimeout(
DebeziumSourceFunction<SourceRecord> source, Long timeout) throws Exception {
long start = System.currentTimeMillis();
long end = start + timeout;
while (!source.getDebeziumStarted()) {
Thread.sleep(100);
long now = System.currentTimeMillis();
if (now > end) {
fail("Should fail.");
}
}
}
private void assertHistoryState(TestingListState<String> historyState) {
assertTrue(historyState.list.size() > 0);
// assert the DDL is stored in the history state
if (!useLegacyImplementation) {
boolean hasTable =
historyState.list.stream()
.skip(1)
.anyMatch(
history ->
!((Map<?, ?>) JsonPath.read(history, "$.table"))
.isEmpty()
&& (JsonPath.read(history, "$.type")
.toString()
.equals("CREATE")
|| JsonPath.read(history, "$.type")
.toString()
.equals("ALTER")));
assertTrue(hasTable);
} else {
boolean hasDDL =
historyState.list.stream()
.skip(1)
.anyMatch(
history ->
JsonPath.read(history, "$.source.server")
.equals("mysql_binlog_source")
&& JsonPath.read(history, "$.position.snapshot")
.toString()
.equals("true")
&& JsonPath.read(history, "$.ddl")
.toString()
.startsWith("CREATE TABLE `products`"));
assertTrue(hasDDL);
}
}
private DebeziumSourceFunction<SourceRecord> createMySqlBinlogSource(
String offsetFile, int offsetPos) {
return basicSourceBuilder()
@ -760,16 +987,7 @@ public class MySQLSourceTest extends MySQLTestBase {
}
private MySQLSource.Builder<SourceRecord> basicSourceBuilder() {
Properties debeziumProps = new Properties();
if (useLegacyImplementation) {
debeziumProps.put("internal.implementation", "legacy");
// check legacy mysql record type
debeziumProps.put("transforms", "snapshotasinsert");
debeziumProps.put(
"transforms.snapshotasinsert.type",
"io.debezium.connector.mysql.transforms.ReadToInsertEvent");
}
Properties debeziumProps = createDebeziumProperties(useLegacyImplementation);
return MySQLSource.<SourceRecord>builder()
.hostname(MYSQL_CONTAINER.getHost())
.port(MYSQL_CONTAINER.getDatabasePort())
@ -1031,4 +1249,46 @@ public class MySQLSourceTest extends MySQLTestBase {
}
}
}
private static class MockedTable implements Table {
private static final Table INSTANCE = new MockedTable();
private MockedTable() {}
@Override
public TableId id() {
return TableId.parse("Test");
}
@Override
public List<String> primaryKeyColumnNames() {
return Collections.emptyList();
}
@Override
public List<String> retrieveColumnNames() {
return Collections.emptyList();
}
@Override
public List<Column> columns() {
return Collections.emptyList();
}
@Override
public Column columnWithName(String name) {
throw new UnsupportedOperationException("Not implemented.");
}
@Override
public String defaultCharsetName() {
return "UTF-8";
}
@Override
public TableEditor edit() {
throw new UnsupportedOperationException("Not implemented.");
}
}
}

@ -63,17 +63,18 @@ public class MySQLConnectorITCase extends MySQLTestBase {
StreamTableEnvironment.create(
env,
EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build());
private final String implementation;
// whether to use the legacy debezium mysql connector implementation
private final boolean useLegacyImplementation;
@ClassRule public static LegacyRowResource usesLegacyRows = LegacyRowResource.INSTANCE;
public MySQLConnectorITCase(String implementation) {
this.implementation = implementation;
public MySQLConnectorITCase(boolean useLegacyImplementation) {
this.useLegacyImplementation = useLegacyImplementation;
}
@Parameterized.Parameters(name = "implementation: {0}")
public static Collection<String> parameters() {
return Arrays.asList("non-legacy", "legacy");
@Parameterized.Parameters(name = "useLegacyImplementation: {0}")
public static Collection<Boolean> parameters() {
return Arrays.asList(false, true);
}
@Before
@ -109,7 +110,7 @@ public class MySQLConnectorITCase extends MySQLTestBase {
inventoryDatabase.getPassword(),
inventoryDatabase.getDatabaseName(),
"products",
implementation);
getImplementation());
String sinkDDL =
"CREATE TABLE sink ("
+ " name STRING,"
@ -148,34 +149,27 @@ public class MySQLConnectorITCase extends MySQLTestBase {
waitForSinkSize("sink", 20);
// The final database table looks like this:
//
// > SELECT * FROM products;
// +-----+--------------------+---------------------------------------------------------+--------+
// | id | name | description |
// weight |
// +-----+--------------------+---------------------------------------------------------+--------+
// | 101 | scooter | Small 2-wheel scooter |
// 3.14 |
// | 102 | car battery | 12V car battery |
// 8.1 |
// | 103 | 12-pack drill bits | 12-pack of drill bits with sizes ranging from #40 to #3 |
// 0.8 |
// | 104 | hammer | 12oz carpenter's hammer |
// 0.75 |
// | 105 | hammer | 14oz carpenter's hammer |
// 0.875 |
// | 106 | hammer | 18oz carpenter hammer |
// 1 |
// | 107 | rocks | box of assorted rocks |
// 5.1 |
// | 108 | jacket | water resistent black wind breaker |
// 0.1 |
// | 109 | spare tire | 24 inch spare tire |
// 22.2 |
// | 110 | jacket | new water resistent white wind breaker |
// 0.5 |
// +-----+--------------------+---------------------------------------------------------+--------+
/*
* <pre>
* The final database table looks like this:
*
* > SELECT * FROM products;
* +-----+--------------------+---------------------------------------------------------+--------+
* | id | name | description | weight |
* +-----+--------------------+---------------------------------------------------------+--------+
* | 101 | scooter | Small 2-wheel scooter | 3.14 |
* | 102 | car battery | 12V car battery | 8.1 |
* | 103 | 12-pack drill bits | 12-pack of drill bits with sizes ranging from #40 to #3 | 0.8 |
* | 104 | hammer | 12oz carpenter's hammer | 0.75 |
* | 105 | hammer | 14oz carpenter's hammer | 0.875 |
* | 106 | hammer | 18oz carpenter hammer | 1 |
* | 107 | rocks | box of assorted rocks | 5.1 |
* | 108 | jacket | water resistent black wind breaker | 0.1 |
* | 109 | spare tire | 24 inch spare tire | 22.2 |
* | 110 | jacket | new water resistent white wind breaker | 0.5 |
* +-----+--------------------+---------------------------------------------------------+--------+
* </pre>
*/
String[] expected =
new String[] {
@ -237,7 +231,7 @@ public class MySQLConnectorITCase extends MySQLTestBase {
fullTypesDatabase.getPassword(),
fullTypesDatabase.getDatabaseName(),
"full_types",
implementation);
getImplementation());
String sinkDDL =
"CREATE TABLE sink (\n"
+ " id INT NOT NULL,\n"
@ -331,7 +325,8 @@ public class MySQLConnectorITCase extends MySQLTestBase {
"UPDATE products SET description='18oz carpenter hammer' WHERE id=106;");
statement.execute("UPDATE products SET weight='5.1' WHERE id=107;");
}
Tuple2<String, Integer> offset = currentMySQLLatestOffset(inventoryDatabase, "products", 9);
Tuple2<String, Integer> offset =
currentMySQLLatestOffset(inventoryDatabase, "products", 9, useLegacyImplementation);
String sourceDDL =
String.format(
@ -361,7 +356,7 @@ public class MySQLConnectorITCase extends MySQLTestBase {
"products",
offset.f0,
offset.f1,
implementation);
getImplementation());
String sinkDDL =
"CREATE TABLE sink "
+ " WITH ("
@ -430,7 +425,7 @@ public class MySQLConnectorITCase extends MySQLTestBase {
inventoryDatabase.getPassword(),
inventoryDatabase.getDatabaseName(),
"products",
implementation);
getImplementation());
String sinkDDL =
"CREATE TABLE sink "
+ " WITH ("
@ -508,7 +503,7 @@ public class MySQLConnectorITCase extends MySQLTestBase {
inventoryDatabase.getPassword(),
inventoryDatabase.getDatabaseName(),
"products",
implementation);
getImplementation());
String sinkDDL =
"CREATE TABLE sink "
+ " WITH ("
@ -576,7 +571,7 @@ public class MySQLConnectorITCase extends MySQLTestBase {
inventoryDatabase.getDatabaseName(),
"products",
System.currentTimeMillis(),
implementation);
getImplementation());
String sinkDDL =
"CREATE TABLE sink "
+ " WITH ("
@ -617,6 +612,10 @@ public class MySQLConnectorITCase extends MySQLTestBase {
// ------------------------------------------------------------------------------------
private String getImplementation() {
return useLegacyImplementation ? "legacy" : "";
}
private static void waitForSnapshotStarted(String sinkName) throws InterruptedException {
while (sinkSize(sinkName) == 0) {
Thread.sleep(100);

@ -20,7 +20,7 @@
-- Create and populate our products using a single insert with many rows
CREATE TABLE products (
id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY,
name VARCHAR(255) NOT NULL,
name VARCHAR(255) NOT NULL DEFAULT 'flink',
description VARCHAR(512),
weight FLOAT
);

@ -357,7 +357,7 @@ public class PostgreSQLSourceTest extends PostgresTestBase {
assertFalse(waitForAvailableRecords(Duration.ofSeconds(5), sourceContext4));
// ---------------------------------------------------------------------------
// Step-6: trigger checkpoint-2 to make sure we can continue to do further checkpoints
// Step-8: trigger checkpoint-2 to make sure we can continue to do further checkpoints
// ---------------------------------------------------------------------------
synchronized (sourceContext4.getCheckpointLock()) {
// checkpoint 3
@ -371,11 +371,95 @@ public class PostgreSQLSourceTest extends PostgresTestBase {
assertFalse(state.contains("snapshot"));
int lsn = JsonPath.read(state, "$.sourceOffset.lsn");
assertTrue(lsn > prevLsn);
prevLsn = lsn;
source4.cancel();
source4.close();
runThread4.sync();
}
{
// ---------------------------------------------------------------------------
// Step-9: insert partial and alter table
// ---------------------------------------------------------------------------
final DebeziumSourceFunction<SourceRecord> source5 =
createPostgreSqlSourceWithHeartbeatDisabled();
final TestSourceContext<SourceRecord> sourceContext5 = new TestSourceContext<>();
setupSource(source5, true, offsetState, historyState, true, 0, 1);
// restart the source
final CheckedThread runThread5 =
new CheckedThread() {
@Override
public void go() throws Exception {
source5.run(sourceContext5);
}
};
runThread5.start();
try (Connection connection = getJdbcConnection();
Statement statement = connection.createStatement()) {
statement.execute(
"INSERT INTO inventory.products(id, description, weight) VALUES (default, 'Go go go', 111.1)");
statement.execute(
"ALTER TABLE inventory.products ADD comment_col VARCHAR(100) DEFAULT 'cdc'");
List<SourceRecord> records = drain(sourceContext5, 1);
assertInsert(records.get(0), "id", 111);
}
// ---------------------------------------------------------------------------
// Step-10: trigger checkpoint-4
// ---------------------------------------------------------------------------
synchronized (sourceContext5.getCheckpointLock()) {
// trigger checkpoint-4
source5.snapshotState(new StateSnapshotContextSynchronousImpl(300, 300));
}
assertEquals(1, offsetState.list.size());
String state = new String(offsetState.list.get(0), StandardCharsets.UTF_8);
assertEquals("postgres_cdc_source", JsonPath.read(state, "$.sourcePartition.server"));
assertEquals("562", JsonPath.read(state, "$.sourceOffset.txId").toString());
assertTrue(state.contains("ts_usec"));
assertFalse(state.contains("snapshot"));
int pos = JsonPath.read(state, "$.sourceOffset.lsn");
assertTrue(pos > prevLsn);
source5.cancel();
source5.close();
runThread5.sync();
}
{
// ---------------------------------------------------------------------------
// Step-11: restore from the checkpoint-4 and insert the partial value
// ---------------------------------------------------------------------------
final DebeziumSourceFunction<SourceRecord> source6 =
createPostgreSqlSourceWithHeartbeatDisabled();
final TestSourceContext<SourceRecord> sourceContext6 = new TestSourceContext<>();
setupSource(source6, true, offsetState, historyState, true, 0, 1);
// restart the source
final CheckedThread runThread6 =
new CheckedThread() {
@Override
public void go() throws Exception {
source6.run(sourceContext6);
}
};
runThread6.start();
try (Connection connection = getJdbcConnection();
Statement statement = connection.createStatement()) {
statement.execute(
"INSERT INTO inventory.products(id, description, weight) VALUES (default, 'Run!', 22.2)");
List<SourceRecord> records = drain(sourceContext6, 1);
assertInsert(records.get(0), "id", 112);
}
source6.cancel();
source6.close();
runThread6.sync();
}
}
@Test

@ -128,34 +128,27 @@ public class PostgreSQLConnectorITCase extends PostgresTestBase {
waitForSinkSize("sink", 20);
// The final database table looks like this:
//
// > SELECT * FROM inventory.products;
// +-----+--------------------+---------------------------------------------------------+--------+
// | id | name | description |
// weight |
// +-----+--------------------+---------------------------------------------------------+--------+
// | 101 | scooter | Small 2-wheel scooter |
// 3.14 |
// | 102 | car battery | 12V car battery |
// 8.1 |
// | 103 | 12-pack drill bits | 12-pack of drill bits with sizes ranging from #40 to #3 |
// 0.8 |
// | 104 | hammer | 12oz carpenter's hammer |
// 0.75 |
// | 105 | hammer | 14oz carpenter's hammer |
// 0.875 |
// | 106 | hammer | 18oz carpenter hammer |
// 1 |
// | 107 | rocks | box of assorted rocks |
// 5.1 |
// | 108 | jacket | water resistent black wind breaker |
// 0.1 |
// | 109 | spare tire | 24 inch spare tire |
// 22.2 |
// | 110 | jacket | new water resistent white wind breaker |
// 0.5 |
// +-----+--------------------+---------------------------------------------------------+--------+
/*
* <pre>
* The final database table looks like this:
*
* > SELECT * FROM inventory.products;
* +-----+--------------------+---------------------------------------------------------+--------+
* | id | name | description | weight |
* +-----+--------------------+---------------------------------------------------------+--------+
* | 101 | scooter | Small 2-wheel scooter | 3.14 |
* | 102 | car battery | 12V car battery | 8.1 |
* | 103 | 12-pack drill bits | 12-pack of drill bits with sizes ranging from #40 to #3 | 0.8 |
* | 104 | hammer | 12oz carpenter's hammer | 0.75 |
* | 105 | hammer | 14oz carpenter's hammer | 0.875 |
* | 106 | hammer | 18oz carpenter hammer | 1 |
* | 107 | rocks | box of assorted rocks | 5.1 |
* | 108 | jacket | water resistent black wind breaker | 0.1 |
* | 109 | spare tire | 24 inch spare tire | 22.2 |
* | 110 | jacket | new water resistent white wind breaker | 0.5 |
* +-----+--------------------+---------------------------------------------------------+--------+
* </pre>
*/
String[] expected =
new String[] {

@ -21,7 +21,7 @@ SET search_path TO inventory;
-- Create and populate our products using a single insert with many rows
CREATE TABLE products (
id SERIAL NOT NULL PRIMARY KEY,
name VARCHAR(255) NOT NULL,
name VARCHAR(255) NOT NULL DEFAULT 'flink',
description VARCHAR(512),
weight FLOAT
);
