--source include/have_ndb.inc

# Test is using error insert, check that binaries support it
--source suite/ndb/t/have_ndb_error_insert.inc

# Use small LoadFactors to force sparse hash table
--exec $NDB_MGM --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -e "all error 3003" >> $NDB_TOOLS_OUTPUT

set max_heap_table_size = 286720000;
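# (roughly 273 MiB, comfortably above what the 160000-row MEMORY tables below need)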
create table t1 (a int primary key) engine=memory;
load data local infile 'suite/ndb/data/table_data10000.dat' into table t1 columns terminated by ' ' (a, @col2);
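# table_data10000.dat supplies 10000 rows; its second column is read into @col2 and discarded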
let $i = 4;
let $b = 10000;
while ($i)
{
--eval insert into t1 select a + $b from t1;
let $b = $b * 2;
dec $i;
}
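# each pass doubles t1 without key collisions since $b doubles as well:
# 10000 -> 20000 -> 40000 -> 80000 -> 160000 rows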
select count(*) from t1;
alter table t1 engine=ndbcluster comment='NDB_TABLE=NOLOGGING' partition by key() partitions 1;
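# t1 is now a non-logged in-memory NDB table with a single fragment, so all
# 160000 rows share one hash table (kept sparse by the error insert above)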
--exec $NDB_MGM --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -e "all report memory" >> $NDB_TOOLS_OUTPUT
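# (the memory report is appended to $NDB_TOOLS_OUTPUT for later inspection)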
create table t2 (a int primary key) engine=memory;
insert into t2 select a from t1;
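# t2 keeps a plain MEMORY copy of all 160000 keys to use as the reference set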

--echo the left join below should result in scanning t2 and doing pk lookups in t1
--replace_column 10 # 11 #
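# (masks the optimizer's row estimate columns, which vary between runs)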
explain select if(isnull(t1.a),t2.a,NULL) missed, count(*) row_count from t2 left join t1 on t1.a=t2.a group by if(isnull(t1.a),t2.a,NULL);
--echo the result rows with missed equal to NULL should count all rows (160000)
--echo the other rows are the failed lookups and there should not be any such
select if(isnull(t1.a),t2.a,NULL) missed, count(*) row_count from t2 left join t1 on t1.a=t2.a group by if(isnull(t1.a),t2.a,NULL);

--echo verify that it is not possible to reinsert all rows of t1 into itself
--echo affected rows should be zero

# Each of the INSERT IGNORE statements below generates 10000 duplicate key
# warnings. By default only the first 64 warnings are saved on the
# warning stack (and thus printed to the .result file), and since the order
# of the inserts is slightly random, so are those 64 warnings.
# Fix by setting the number of saved warnings to zero; the test is only
# interested in checking that no records are allowed to be inserted,
# as seen from "affected_rows".
set @save_max_error_count = @@max_error_count;
set @@max_error_count = 0;

--enable_info
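# --enable_info makes mysqltest print the affected rows count after each statement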
insert ignore into t1 select a from t2 limit 0, 10000;
insert ignore into t1 select a from t2 limit 10000, 10000;
insert ignore into t1 select a from t2 limit 20000, 10000;
insert ignore into t1 select a from t2 limit 30000, 10000;
insert ignore into t1 select a from t2 limit 40000, 10000;
insert ignore into t1 select a from t2 limit 50000, 10000;
insert ignore into t1 select a from t2 limit 60000, 10000;
insert ignore into t1 select a from t2 limit 70000, 10000;
insert ignore into t1 select a from t2 limit 80000, 10000;
insert ignore into t1 select a from t2 limit 90000, 10000;
insert ignore into t1 select a from t2 limit 100000, 10000;
insert ignore into t1 select a from t2 limit 110000, 10000;
insert ignore into t1 select a from t2 limit 120000, 10000;
insert ignore into t1 select a from t2 limit 130000, 10000;
insert ignore into t1 select a from t2 limit 140000, 10000;
insert ignore into t1 select a from t2 limit 150000, 10000;
--disable_info

set @@max_error_count = @save_max_error_count;

# Reset error insertion
--exec $NDB_MGM --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -e "all error 0" >> $NDB_TOOLS_OUTPUT

# Verify no data memory leakage on drop table (see bug#86247).

# Avoid problem with too large transactions.
set ndb_use_transactions=0;
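# (assumption: with ndb_use_transactions off the statements below run in smaller
# batches rather than one huge NDB transaction, which could otherwise exhaust
# the data nodes' operation records)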

drop table t1;

create table t1 (a int primary key)
engine=ndbcluster
comment='NDB_TABLE=NOLOGGING'
partition by key() partitions 1;
insert into t1 select * from t2;
drop table t1;

# Keep the value to check against after several create and drop table rounds.
select max(used_pages) into @data_memory_pages
from ndbinfo.memoryusage where memory_type = 'Data memory';
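# (ndbinfo.memoryusage reports one row per data node; max() picks the most loaded node)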

let $lap = 3;
while ($lap)
{
create table t1 (a int primary key)
engine=ndbcluster
comment='NDB_TABLE=NOLOGGING'
partition by key() partitions 1;
insert into t1 select * from t2;
drop table t1;
dec $lap;
}

# Verify page count, allow 15% variation.
if (`select max(used_pages) > 1.15 * @data_memory_pages
from ndbinfo.memoryusage where memory_type = 'Data memory'`)
{
select @data_memory_pages 'Initial data memory pages in use';
select max(used_pages) 'Final data memory pages in use'
from ndbinfo.memoryusage where memory_type = 'Data memory';
die "Data memory leakage?";
}

drop table t2;
--remove_file $NDB_TOOLS_OUTPUT