--source connect.inc
--source include/have_debug.inc
--source suite/ndb/include/backup_restore_setup.inc

#
# Schema detection + synchronization test
# This test contains all the cases that require debug options to be
# set in order to cause mismatches between NDB Dictionary and DD.
#

#
# Case 1:
# - Logfile group exists in DD but not NDB
# - Tablespace exists in DD but not NDB
#

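# The debug flags below (as their names suggest) force the logfile group
# commit in the DD, fail the NDB schema transaction commit and skip the
# tablespace creation in NDB, leaving both objects in the DD but not in NDB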
SET GLOBAL debug = '+d,ndb_dd_client_lfg_force_commit,ndb_schema_trans_commit_fail,ndb_skip_create_tablespace_in_NDB';

SET @old_ndb_metadata_check = @@global.ndb_metadata_check;
SET @old_ndb_metadata_check_interval = @@global.ndb_metadata_check_interval;

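# Store initial counts of both detected and synchronized objects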
--let $initial_detected_count = query_get_value(SHOW STATUS LIKE 'Ndb_metadata_detected_count', Value, 1)
--let $initial_synced_count = query_get_value(SHOW STATUS LIKE 'Ndb_metadata_synced_count', Value, 1)

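# Create a logfile group and a tablespace. With the debug flags set above,
# both should end up in the DD but not in NDB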
CREATE LOGFILE GROUP lg1
  ADD UNDOFILE 'lg1_undofile.dat'
  INITIAL_SIZE 1M
  UNDO_BUFFER_SIZE = 1M
  ENGINE NDB;

CREATE TABLESPACE ts1
  ADD DATAFILE 'ts1_datafile.dat'
  USE LOGFILE GROUP lg1
  INITIAL_SIZE 2M
  ENGINE NDB;

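# Clear the debug flags now that the mismatched objects have been created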
SET GLOBAL debug = '-d,ndb_dd_client_lfg_force_commit,ndb_schema_trans_commit_fail,ndb_skip_create_tablespace_in_NDB';

# Enable metadata check with no interval so changes are detected quickly
SET GLOBAL ndb_metadata_check_interval = 0;
SET GLOBAL ndb_metadata_check = true;

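# wait_metadata_changes_detected.inc is expected to poll
# Ndb_metadata_detected_count until it has increased by $expected_changes
# over $initial_detected_count, giving up after $max_wait seconds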
# Wait until 2 mismatches (lg1 and ts1) are detected
--let $expected_changes = 2
--let $max_wait = 30
--source wait_metadata_changes_detected.inc

# Reset values
SET GLOBAL ndb_metadata_check = @old_ndb_metadata_check;
SET GLOBAL ndb_metadata_check_interval = @old_ndb_metadata_check_interval;

# Wait until the changes detected have been synced
--let $max_wait = 30
--source wait_metadata_synced.inc

# Check that the tablespace and logfile group have been removed from the DD:
# recreating them with the same names should now succeed
CREATE LOGFILE GROUP lg1
  ADD UNDOFILE 'lg1_undofile.dat'
  INITIAL_SIZE 1M
  UNDO_BUFFER_SIZE = 1M
  ENGINE NDB;

CREATE TABLESPACE ts1
  ADD DATAFILE 'ts1_datafile.dat'
  USE LOGFILE GROUP lg1
  INITIAL_SIZE 2M
  ENGINE NDB;
ALTER TABLESPACE ts1
  ADD DATAFILE 'ts1_datafile2.dat';
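# Create a disk data table in the recreated tablespace to verify that it
# is usable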
CREATE TABLE t1 (
  a INT PRIMARY KEY,
  b INT
) ENGINE NDB
  TABLESPACE ts1
  STORAGE DISK;
INSERT INTO t1 VALUES(1,1);
SELECT * FROM t1;

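# Create a second tablespace that uses the recreated logfile group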
CREATE TABLESPACE ts2
  ADD DATAFILE 'ts2_datafile.dat'
  USE LOGFILE GROUP lg1
  INITIAL_SIZE 2M
  ENGINE NDB;

# Cleanup
DROP TABLE t1;
ALTER TABLESPACE ts1
  DROP DATAFILE 'ts1_datafile.dat';
ALTER TABLESPACE ts1
  DROP DATAFILE 'ts1_datafile2.dat';
DROP TABLESPACE ts1;
ALTER TABLESPACE ts2
  DROP DATAFILE 'ts2_datafile.dat';
DROP TABLESPACE ts2;
DROP LOGFILE GROUP lg1
  ENGINE NDB;

#
# Case 2:
# Check that a failure to sync objects is handled as expected. The failing
# objects are added to a blacklist and, while they remain blacklisted, they
# are not detected again.
#

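# Create and populate NDB tables that will be backed up and then restored
# directly into NDB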
CREATE TABLE t1 (
  a INT PRIMARY KEY,
  b VARCHAR(255)
) ENGINE NDB;

INSERT INTO t1 VALUES
  (1, 'Rino'),
  (2, 'Rino'),
  (3, 'Rino Anto');

CREATE TABLE t2 (
  a INT PRIMARY KEY,
  b VARCHAR(255)
) ENGINE NDB;

INSERT INTO t2 VALUES
  (4, 'Rino'),
  (5, 'Rino'),
  (6, 'Rino Anto');

CREATE TABLE t3 (
  a INT PRIMARY KEY,
  b VARCHAR(255)
) ENGINE NDB;

INSERT INTO t3 VALUES
  (7, 'Rino'),
  (8, 'Rino'),
  (9, 'Rino Anto');

CREATE TABLE t4 (
  a INT PRIMARY KEY,
  b VARCHAR(255)
) ENGINE NDB;

INSERT INTO t4 VALUES(1, 'Hey Rino Anto');

# Take an NDB backup of the tables
--disable_query_log
--source include/ndb_backup.inc
--enable_query_log

# Drop tables
DROP TABLE t1,t2,t3,t4;

# Restore metadata (-m) and data (-r) into NDB from the backup
--exec $NDB_RESTORE -b $the_backup_id -n 1 -m -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT
--exec $NDB_RESTORE -b $the_backup_id -n 2 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT

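# Create one more table which will then be dropped from NDB only, leaving
# its definition in the DD without a corresponding table in NDB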
CREATE TABLE t5 (
  a INT PRIMARY KEY,
  b VARCHAR(255)
) ENGINE NDB;

# Drop table from NDB only to cause mismatch
--exec $NDB_DROP_TABLE --no-defaults -d test t5 >> $NDB_TOOLS_OUTPUT

# Store initial counts of both detected and synchronized objects
--let $initial_detected_count = query_get_value(SHOW STATUS LIKE 'Ndb_metadata_detected_count', Value, 1)
--let $initial_synced_count = query_get_value(SHOW STATUS LIKE 'Ndb_metadata_synced_count', Value, 1)

# Force sync of the detected objects to fail
SET GLOBAL debug = '+d,ndb_metadata_sync_fail';
# Suppress the expected error in the log
CALL mtr.add_suppression("NDB Binlog: Failed to synchronize table");

# Enable metadata check with no interval so changes are detected quickly
SET GLOBAL ndb_metadata_check_interval = 0;
SET GLOBAL ndb_metadata_check = true;

# Wait until the following 5 object changes are detected:
# Table 'ndb_ddl_test.t1'
# Table 'ndb_ddl_test.t2'
# Table 'ndb_ddl_test.t3'
# Table 'ndb_ddl_test.t4'
# Table 'ndb_ddl_test.t5'
--let $expected_changes = 5
--let $max_wait = 60
--source wait_metadata_changes_detected.inc

# Reset values
SET GLOBAL ndb_metadata_check = @old_ndb_metadata_check;
SET GLOBAL ndb_metadata_check_interval = @old_ndb_metadata_check_interval;

# Wait until the changes detected have been synced
--let $max_wait = 60
--source wait_metadata_synced.inc

# Verify that the objects have been blacklisted
SHOW STATUS LIKE 'Ndb_metadata_blacklist_size';

# Sleep for some time to let a couple of cycles of detection
# occur. Since the objects have been blacklisted, the mismatches
# should not be detected and the number of changes detected
# should remain unchanged
--let $initial_detected_count = query_get_value(SHOW STATUS LIKE 'Ndb_metadata_detected_count', Value, 1)
# Enable metadata check with no interval so changes are detected quickly
SET GLOBAL ndb_metadata_check_interval = 0;
SET GLOBAL ndb_metadata_check = true;
--sleep 5
--let $current_changes_detected = query_get_value(SHOW STATUS LIKE 'Ndb_metadata_detected_count', Value, 1)
if ($current_changes_detected != $initial_detected_count)
{
  --echo $current_changes_detected changes detected when $initial_detected_count changes were expected
  --die Changes detected even when objects were blacklisted
}

# Manually sync the tables through "discovery". This will result in the
# tables being removed from the blacklist during the next detection cycle
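# Accessing the tables causes the ndbcluster plugin to install their
# definitions from NDB into the DD ("discovery")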
--sorted_result
SELECT * FROM t1;
--sorted_result
SELECT * FROM t2;
--sorted_result
SELECT * FROM t3;
--sorted_result
SELECT * FROM t4;
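# t5 no longer exists in NDB; drop it to remove the stale definition from
# the DD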
DROP TABLE IF EXISTS t5;

# Wait until the blacklist is empty
--let $max_wait = 30
--source wait_metadata_sync_blacklist_empty.inc

# Reset values
SET GLOBAL ndb_metadata_check = @old_ndb_metadata_check;
SET GLOBAL ndb_metadata_check_interval = @old_ndb_metadata_check_interval;
SET GLOBAL debug = '-d,ndb_metadata_sync_fail';

# Cleanup
DROP TABLE t1,t2,t3,t4;
--source suite/ndb/include/backup_restore_cleanup.inc
--remove_file $NDB_TOOLS_OUTPUT