# polardbxengine/mysql-test/suite/clone/t/local_file_extend.test

# Test clone when tablespace file size is increasing in different stages.
# The uncommitted DML is later rolled back during recovery of the cloned instance.
--source include/have_debug_sync.inc
--source include/count_sessions.inc
# Compressed tables do not support page sizes larger than 16K
--source include/have_innodb_max_16k.inc
## Install plugin
--let $CLONE_DATADIR = $MYSQL_TMP_DIR/data_new
--replace_result $CLONE_PLUGIN CLONE_PLUGIN
--eval INSTALL PLUGIN clone SONAME '$CLONE_PLUGIN'
## Create test schema
--source ../include/create_schema.inc
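# NOTE: ../include/create_schema.inc is expected to create t1, t2 and the
# execute_dml procedure used throughout this test. Judging from how it is
# called below, the first argument appears to select the operation
# (0 = insert, 1 = update, 3 = delete) and the remaining arguments control
# key range, row count and value size; see the include file for the actual
# definition.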
# Modify the schema: add a compressed table and a table in a general tablespace
DROP TABLE t1;
DROP TABLE t2;
# Create table with compressed row format using default KEY_BLOCK_SIZE.
CREATE TABLE t1(col1 INT PRIMARY KEY, col2 INT, col3 VARCHAR(64), col4 BLOB)
ROW_FORMAT = COMPRESSED;
# Create table in general tablespace
CREATE TABLESPACE tbs1 ADD DATAFILE 'tbs1_data1.ibd';
CREATE TABLE t2(col1 INT PRIMARY KEY, col2 INT, col3 VARCHAR(64), col4 BLOB)
TABLESPACE = tbs1;
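# The two tables presumably exercise different file-extension paths during
# clone: t1 is a file-per-table compressed tablespace, while t2 lives in the
# shared general tablespace tbs1.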
## Execute Clone while concurrent DMLs are in progress
# Insert 20 rows
call execute_dml(0, 0, 20, 20, 10, 0);
# Check base rows
SHOW CREATE TABLE t1;
SELECT count(*) from t1;
SELECT col1, col2, col3, SUBSTRING(col4, 1000, 32) FROM t1 ORDER BY col1 LIMIT 10;
SELECT col1, col2, col3, SUBSTRING(col4, 1000, 32) FROM t1 ORDER BY col1 DESC LIMIT 10;
SHOW CREATE TABLE t2;
SELECT count(*) from t2;
SELECT col1, col2, col3, SUBSTRING(col4, 1000, 32) FROM t2 ORDER BY col1 LIMIT 10;
SELECT col1, col2, col3, SUBSTRING(col4, 1000, 32) FROM t2 ORDER BY col1 DESC LIMIT 10;
## Test-1: Extend tablespace file during file copy
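# Rough flow of this test case (sketch of the expected handshake):
#   1. clone_command_send.inc presumably sends the CLONE LOCAL DATA DIRECTORY
#      statement asynchronously on the default connection (see the include for
#      the exact command).
#   2. The clone thread reaches the clone_file_copy sync point, emits
#      start_insert1 and blocks on resume_clone1.
#   3. con1 inserts uncommitted rows, extending the tablespace files, flushes
#      the dirty pages to disk and then signals resume_clone1 so file copy
#      continues over the grown files.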
--echo # In connection default - Cloning database
SET DEBUG_SYNC = 'clone_file_copy SIGNAL start_insert1 WAIT_FOR resume_clone1';
--source ../include/clone_command_send.inc
--echo # In connection con1 - Insert [20 Rows - No commit]
connect (con1,localhost,root,,);
SET DEBUG_SYNC = 'now WAIT_FOR start_insert1';
START TRANSACTION;
call execute_dml(0, 50, 20, 20, 500, 0);
--echo # Flush all dirty buffers
SET GLOBAL innodb_buf_flush_list_now = 1;
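# innodb_buf_flush_list_now is a debug-only variable: setting it forces InnoDB
# to flush all dirty pages from the flush list, so the extended file size is
# visible on disk before clone resumes.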
SET DEBUG_SYNC = 'now SIGNAL resume_clone1';
connection default;
--echo # In connection default - Cloning database
--reap
--echo # In connection con1
connection con1;
ROLLBACK;
connection default;
--echo # In connection default - Cloning database
disconnect con1;
--echo # Restart cloned database
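# Restarting on the cloned data directory lets InnoDB recovery roll back the
# uncommitted insert from con1, so the checks below should see only the
# committed rows.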
--replace_result $CLONE_DATADIR CLONE_DATADIR
--let restart_parameters="restart: --datadir=$CLONE_DATADIR"
--source include/restart_mysqld.inc
# Check table in cloned database
SHOW CREATE TABLE t1;
SELECT count(*) from t1;
SELECT col1, col3, SUBSTRING(col4, 1000, 32) FROM t1 ORDER BY col1 LIMIT 10;
SELECT col1, col3, SUBSTRING(col4, 1000, 32) FROM t1 ORDER BY col1 DESC LIMIT 10;
SHOW CREATE TABLE t2;
SELECT count(*) from t2;
SELECT col1, col3, SUBSTRING(col4, 1000, 32) FROM t2 ORDER BY col1 LIMIT 10;
SELECT col1, col3, SUBSTRING(col4, 1000, 32) FROM t2 ORDER BY col1 DESC LIMIT 10;
# Execute procedure to delete all rows and insert
call execute_dml(3, 0, 1, 1, 1, 0);
call execute_dml(0, 0, 10, 10, 2, 0);
COMMIT;
SELECT col1, col2, col3, SUBSTRING(col4, 1000, 32) FROM t1 ORDER BY col1 LIMIT 10;
SELECT col1, col2, col3, SUBSTRING(col4, 1000, 32) FROM t1 ORDER BY col1 DESC LIMIT 10;
SELECT col1, col2, col3, SUBSTRING(col4, 1000, 32) FROM t2 ORDER BY col1 LIMIT 10;
SELECT col1, col2, col3, SUBSTRING(col4, 1000, 32) FROM t2 ORDER BY col1 DESC LIMIT 10;
# Cleanup
--let restart_parameters="restart:"
--source include/restart_mysqld.inc
--force-rmdir $CLONE_DATADIR
## Test-2: Extend tablespace file during page copy
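# Rough flow (sketch): the clone thread first stops at clone_file_copy so con1
# can run a committed update, then stops again at clone_page_copy so con1 can
# extend the files with an uncommitted insert; page copy then resumes over the
# grown files.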
--echo # In connection default - Cloning database
SET DEBUG_SYNC = 'clone_file_copy SIGNAL start_dml WAIT_FOR resume_clone2';
SET DEBUG_SYNC = 'clone_page_copy SIGNAL start_insert2 WAIT_FOR resume_clone3';
--source ../include/clone_command_send.inc
--echo # In connection con1 - Insert [20 Rows - No commit]
connect (con1,localhost,root,,);
SET DEBUG_SYNC = 'now WAIT_FOR start_dml';
START TRANSACTION;
call execute_dml(1, 0, 20, 20, 10, 1);
COMMIT;
--echo # Flush all dirty buffers
SET GLOBAL innodb_buf_flush_list_now = 1;
SET DEBUG_SYNC = 'now SIGNAL resume_clone2';
connection con1;
SET DEBUG_SYNC = 'now WAIT_FOR start_insert2';
START TRANSACTION;
call execute_dml(0, 50, 20, 20, 500, 0);
--echo # Flush all dirty buffers
SET GLOBAL innodb_buf_flush_list_now = 1;
SET DEBUG_SYNC = 'now SIGNAL resume_clone3';
connection default;
--echo # In connection default - Cloning database
--replace_result $CLONE_DATADIR CLONE_DATADIR
--reap
--echo # In connection con1
connection con1;
ROLLBACK;
connection default;
--echo # In connection default - Cloning database
disconnect con1;
--echo # Restart cloned database
--replace_result $CLONE_DATADIR CLONE_DATADIR
--let restart_parameters="restart: --datadir=$CLONE_DATADIR"
--source include/restart_mysqld.inc
# Check table in cloned database
SHOW CREATE TABLE t1;
SELECT count(*) from t1;
SELECT col1, col3, SUBSTRING(col4, 1000, 32) FROM t1 ORDER BY col1 LIMIT 10;
SELECT col1, col3, SUBSTRING(col4, 1000, 32) FROM t1 ORDER BY col1 DESC LIMIT 10;
SHOW CREATE TABLE t2;
SELECT count(*) from t2;
SELECT col1, col3, SUBSTRING(col4, 1000, 32) FROM t2 ORDER BY col1 LIMIT 10;
SELECT col1, col3, SUBSTRING(col4, 1000, 32) FROM t2 ORDER BY col1 DESC LIMIT 10;
# Execute procedure to delete all rows and insert
call execute_dml(3, 0, 1, 1, 1, 0);
call execute_dml(0, 0, 10, 10, 2, 0);
COMMIT;
SELECT col1, col2, col3, SUBSTRING(col4, 1000, 32) FROM t1 ORDER BY col1 LIMIT 10;
SELECT col1, col2, col3, SUBSTRING(col4, 1000, 32) FROM t1 ORDER BY col1 DESC LIMIT 10;
SELECT col1, col2, col3, SUBSTRING(col4, 1000, 32) FROM t2 ORDER BY col1 LIMIT 10;
SELECT col1, col2, col3, SUBSTRING(col4, 1000, 32) FROM t2 ORDER BY col1 DESC LIMIT 10;
# Cleanup
--let restart_parameters="restart:"
--source include/restart_mysqld.inc
--source ../include/drop_schema.inc
DROP TABLESPACE tbs1;
UNINSTALL PLUGIN clone;
SET DEBUG_SYNC = 'RESET';
--source include/wait_until_count_sessions.inc
--force-rmdir $CLONE_DATADIR