From f41e66b221358f53ff8fa851b216babfbd28e7af Mon Sep 17 00:00:00 2001 From: Arcadiy Ivanov Date: Thu, 5 Mar 2026 00:00:29 -0500 Subject: [PATCH 1/3] MDEV-38975: HEAP engine BLOB/TEXT/JSON/GEOMETRY column support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow BLOB/TEXT/JSON/GEOMETRY columns in MEMORY (HEAP) engine tables by storing blob data in variable-length continuation record chains within the existing `HP_BLOCK` structure. **Continuation runs**: blob data is split across contiguous sequences of `recbuffer`-sized records. Each run stores a 10-byte header (`next_cont` pointer + `run_rec_count`) in the first record; inner records (rec 1..N-1) have no flags byte — full `recbuffer` payload. Runs are linked via `next_cont` pointers. Individual runs are capped at 65,535 records (`uint16` format limit); larger blobs are automatically split into multiple runs. **Zero-copy reads**: single-run blobs return pointers directly into `HP_BLOCK` records, avoiding `blob_buff` reassembly entirely: - Case A (`run_rec_count == 1`): return `chain + HP_CONT_HEADER_SIZE` - Case B (`HP_ROW_CONT_ZEROCOPY` flag): return `chain + recbuffer` - Case C (multi-run): walk chain, reassemble into `blob_buff` `HP_INFO::has_zerocopy_blobs` tracks zero-copy state; used by `heap_update()` to refresh the caller's record buffer after freeing old chains, preventing dangling pointers. **Free list scavenging**: on insert, the free list is walked read-only (peek) tracking contiguous groups in descending address order (LIFO). Qualifying groups (>= `min_run_records`) are unlinked and used. The first non-qualifying group terminates the scan — remaining data is allocated from the block tail. The free list is never disturbed when no qualifying group is found. **Record counting**: new `HP_SHARE::total_records` tracks all physical records (primary + continuation). 
`HP_SHARE::records` remains logical (primary-only) to preserve linear hash bucket mapping correctness. **Scan/check batch-skip**: `heap_scan()` and `heap_check_heap()` read `run_rec_count` from rec 0 and skip entire continuation runs at once. **Hash functions**: `hp_rec_hashnr()`, `hp_rec_key_cmp()`, `hp_key_cmp()`, `hp_make_key()` updated to handle `HA_BLOB_PART` key segments — reading actual blob data via pointer dereference or chain materialization. **SQL layer**: `choose_engine()` no longer rejects HEAP for blob tables (replaced `blob_fields` check with `reclength > HA_MAX_REC_LENGTH`). `remove_duplicates()` routes HEAP+blob to `remove_dup_with_compare()`. `ha_heap::remember_rnd_pos()` / `restart_rnd_next()` implemented for DISTINCT deduplication support. Fixed undefined behavior in `test_if_cheaper_ordering()` where `select_limit/fanout` could overflow to infinity — capped at `HA_POS_ERROR`. https://jira.mariadb.org/browse/MDEV-38975 --- include/heap.h | 18 +- mysql-test/include/mtr_check.sql | 2 +- mysql-test/main/blob_sj_test.result | 29 + mysql-test/main/blob_sj_test.test | 26 + mysql-test/main/create.result | 9 +- mysql-test/main/create.test | 3 +- mysql-test/main/cte_recursive.test | 2 + mysql-test/main/derived_view.result | 2 +- mysql-test/main/distinct.result | 2 +- mysql-test/main/distinct.test | 3 + mysql-test/main/group_by.result | 4 +- mysql-test/main/group_by.test | 4 +- mysql-test/main/group_min_max_innodb.result | 8 +- mysql-test/main/group_min_max_innodb.test | 1 + mysql-test/main/information_schema.result | 2 +- .../main/information_schema_parameters.result | 2 +- .../main/information_schema_part.result | 2 +- mysql-test/main/information_schema_part.test | 2 +- .../main/information_schema_routines.result | 2 +- mysql-test/main/intersect_all.result | 6 +- mysql-test/main/intersect_all.test | 3 + mysql-test/main/select.result | 22 +- mysql-test/main/select.test | 3 +- mysql-test/main/select_jcl6.result | 22 +- 
mysql-test/main/select_pkeycache.result | 22 +- mysql-test/main/temp_table_symlink.result | 2 - mysql-test/main/temp_table_symlink.test | 5 - mysql-test/suite/funcs_1/r/is_columns.result | 2 +- mysql-test/suite/funcs_1/r/is_events.result | 2 +- mysql-test/suite/funcs_1/r/is_routines.result | 2 +- .../funcs_1/r/is_routines_embedded.result | 8 +- .../suite/funcs_1/r/is_tables_is.result | 104 +- .../funcs_1/r/is_tables_is_embedded.result | 104 +- mysql-test/suite/funcs_1/r/is_triggers.result | 2 +- .../funcs_1/r/is_triggers_embedded.result | 2 +- mysql-test/suite/funcs_1/r/is_views.result | 2 +- .../suite/funcs_1/r/is_views_embedded.result | 2 +- .../funcs_1/r/processlist_priv_no_prot.result | 4 +- .../funcs_1/r/processlist_priv_ps.result | 4 +- .../funcs_1/r/processlist_val_no_prot.result | 2 +- .../suite/funcs_1/r/processlist_val_ps.result | 2 +- mysql-test/suite/heap/blob_dedup.result | 15 + mysql-test/suite/heap/blob_dedup.test | 10 + mysql-test/suite/heap/heap_blob.result | 602 ++++++++++++ mysql-test/suite/heap/heap_blob.test | 439 +++++++++ mysql-test/suite/heap/heap_geometry.result | 75 ++ mysql-test/suite/heap/heap_geometry.test | 65 ++ .../suite/innodb_fts/r/innodb-fts-ddl.result | 2 +- mysql-test/suite/innodb_fts/r/misc.result | 10 +- .../suite/innodb_fts/t/innodb-fts-ddl.test | 2 +- mysql-test/suite/innodb_fts/t/misc.test | 10 +- .../transaction_nested_events_verifier.inc | 2 +- .../r/transaction_nested_events.result | 16 +- .../plugins/r/sql_error_log_withdbinfo.result | 6 +- .../r/tmp_disk_table_size_basic.result | 104 +- .../sys_vars/t/tmp_disk_table_size_basic.test | 2 +- .../r/v_schema_redundant_indexes.result | 2 +- sql/item_func.cc | 45 +- sql/item_sum.cc | 27 +- sql/sql_expression_cache.cc | 17 + sql/sql_select.cc | 72 +- storage/heap/CMakeLists.txt | 2 +- storage/heap/_check.c | 19 +- storage/heap/ha_heap.cc | 87 +- storage/heap/ha_heap.h | 8 +- storage/heap/heapdef.h | 101 +- storage/heap/hp_blob.c | 885 ++++++++++++++++++ storage/heap/hp_clear.c 
| 3 +- storage/heap/hp_close.c | 1 + storage/heap/hp_create.c | 94 +- storage/heap/hp_delete.c | 12 +- storage/heap/hp_extra.c | 6 + storage/heap/hp_hash.c | 181 +++- storage/heap/hp_rfirst.c | 2 + storage/heap/hp_rkey.c | 2 + storage/heap/hp_rlast.c | 2 + storage/heap/hp_rnext.c | 2 + storage/heap/hp_rprev.c | 2 + storage/heap/hp_rrnd.c | 2 + storage/heap/hp_rsame.c | 2 + storage/heap/hp_scan.c | 51 +- storage/heap/hp_static.c | 4 +- storage/heap/hp_update.c | 98 +- storage/heap/hp_write.c | 76 +- 84 files changed, 3317 insertions(+), 298 deletions(-) create mode 100644 mysql-test/main/blob_sj_test.result create mode 100644 mysql-test/main/blob_sj_test.test create mode 100644 mysql-test/suite/heap/blob_dedup.result create mode 100644 mysql-test/suite/heap/blob_dedup.test create mode 100644 mysql-test/suite/heap/heap_blob.result create mode 100644 mysql-test/suite/heap/heap_blob.test create mode 100644 mysql-test/suite/heap/heap_geometry.result create mode 100644 mysql-test/suite/heap/heap_geometry.test create mode 100644 storage/heap/hp_blob.c diff --git a/include/heap.h b/include/heap.h index 3fac752abd028..633a33e53fd0f 100644 --- a/include/heap.h +++ b/include/heap.h @@ -131,6 +131,12 @@ typedef struct st_hp_keydef /* Key definition with open */ uint (*get_key_length)(struct st_hp_keydef *keydef, const uchar *key); } HP_KEYDEF; +typedef struct st_hp_blob_desc +{ + uint offset; /* Byte offset of blob descriptor within record buffer */ + uint packlength; /* 1, 2, 3, or 4: length prefix size */ +} HP_BLOB_DESC; + typedef struct st_heap_share { HP_BLOCK block; @@ -138,14 +144,14 @@ typedef struct st_heap_share ulonglong data_length,index_length,max_table_size; ulonglong auto_increment; ulong min_records,max_records; /* Params to open */ - ulong records; /* records */ + ulong records; /* Logical (primary) record count */ ulong blength; /* records rounded up to 2^n */ ulong deleted; /* Deleted records in database */ uint key_stat_version; /* version to indicate 
insert/delete */ uint key_version; /* Updated on key change */ uint file_version; /* Update on clear */ uint reclength; /* Length of one record */ - uint visible; /* Offset to the visible/deleted mark */ + uint visible; /* Offset to the flags byte (active/deleted/continuation) */ uint changed; uint keys,max_key_length; uint currently_disabled_keys; /* saved value from "keys" when disabled */ @@ -156,6 +162,9 @@ typedef struct st_heap_share THR_LOCK lock; my_bool delete_on_close; my_bool internal; /* Internal temporary table */ + HP_BLOB_DESC *blob_descs; /* Array of blob column descriptors */ + uint blob_count; /* Number of blob columns */ + ulong total_records; /* All active records (primary + blob continuation) */ LIST open_list; uint auto_key; uint auto_key_type; /* real type of the auto key segment */ @@ -181,6 +190,9 @@ typedef struct st_heap_info uint file_version; /* Version at scan */ uint lastkey_len; my_bool implicit_emptied; + uchar *blob_buff; /* Reassembly buffer for blob reads */ + uint32 blob_buff_len; /* Current allocated size of blob_buff */ + my_bool has_zerocopy_blobs; /* Last hp_read_blobs produced zero-copy ptrs */ THR_LOCK_DATA lock; LIST open_list; } HP_INFO; @@ -204,6 +216,8 @@ typedef struct st_heap_create_info open_count to 1. Is only looked at if not internal_table. 
*/ my_bool pin_share; + HP_BLOB_DESC *blob_descs; + uint blob_count; } HP_CREATE_INFO; /* Prototypes for heap-functions */ diff --git a/mysql-test/include/mtr_check.sql b/mysql-test/include/mtr_check.sql index 360f7b40bb864..46b420da4ae34 100644 --- a/mysql-test/include/mtr_check.sql +++ b/mysql-test/include/mtr_check.sql @@ -66,7 +66,7 @@ BEGIN collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' - ORDER BY columns_in_mysql; + ORDER BY columns_in_mysql, ordinal_position; -- Dump all events, there should be none SELECT * FROM INFORMATION_SCHEMA.EVENTS; diff --git a/mysql-test/main/blob_sj_test.result b/mysql-test/main/blob_sj_test.result new file mode 100644 index 0000000000000..78f78b1b9bd5f --- /dev/null +++ b/mysql-test/main/blob_sj_test.result @@ -0,0 +1,29 @@ +set optimizer_switch='materialization=on,in_to_exists=off,semijoin=off'; +set @blob_len = 16; +set @prefix_len = 6; +set @suffix_len = @blob_len - @prefix_len; +create table t1 (a1 blob(16), a2 blob(16)); +create table t2 (b1 blob(16), b2 blob(16)); +insert into t1 values +(concat('1 - 00', repeat('x', @suffix_len)), concat('2 - 00', repeat('x', @suffix_len))); +insert into t1 values +(concat('1 - 01', repeat('x', @suffix_len)), concat('2 - 01', repeat('x', @suffix_len))); +insert into t1 values +(concat('1 - 02', repeat('x', @suffix_len)), concat('2 - 02', repeat('x', @suffix_len))); +insert into t2 values +(concat('1 - 01', repeat('x', @suffix_len)), concat('2 - 01', repeat('x', @suffix_len))); +insert into t2 values +(concat('1 - 02', repeat('x', @suffix_len)), concat('2 - 02', repeat('x', @suffix_len))); +insert into t2 values +(concat('1 - 03', repeat('x', @suffix_len)), concat('2 - 03', repeat('x', @suffix_len))); +explain extended select left(a1,7), left(a2,7) from t1 where a1 in (select b1 from t2 where b1 > '0'); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 3 
100.00 Using where +2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where +Warnings: +Note 1003 /* select#1 */ select left(`test`.`t1`.`a1`,7) AS `left(a1,7)`,left(`test`.`t1`.`a2`,7) AS `left(a2,7)` from `test`.`t1` where <`test`.`t1`.`a1`>((`test`.`t1`.`a1`,(/* select#2 */ select `test`.`t2`.`b1` from `test`.`t2` where `test`.`t2`.`b1` > '0' and (`test`.`t1`.`a1`) = `test`.`t2`.`b1`))) +select left(a1,7), left(a2,7) from t1 where a1 in (select b1 from t2 where b1 > '0'); +left(a1,7) left(a2,7) +1 - 01x 2 - 01x +1 - 02x 2 - 02x +drop table t1, t2; diff --git a/mysql-test/main/blob_sj_test.test b/mysql-test/main/blob_sj_test.test new file mode 100644 index 0000000000000..447d856adce4e --- /dev/null +++ b/mysql-test/main/blob_sj_test.test @@ -0,0 +1,26 @@ +set optimizer_switch='materialization=on,in_to_exists=off,semijoin=off'; +set @blob_len = 16; +set @prefix_len = 6; +set @suffix_len = @blob_len - @prefix_len; + +create table t1 (a1 blob(16), a2 blob(16)); +create table t2 (b1 blob(16), b2 blob(16)); + +insert into t1 values +(concat('1 - 00', repeat('x', @suffix_len)), concat('2 - 00', repeat('x', @suffix_len))); +insert into t1 values +(concat('1 - 01', repeat('x', @suffix_len)), concat('2 - 01', repeat('x', @suffix_len))); +insert into t1 values +(concat('1 - 02', repeat('x', @suffix_len)), concat('2 - 02', repeat('x', @suffix_len))); + +insert into t2 values +(concat('1 - 01', repeat('x', @suffix_len)), concat('2 - 01', repeat('x', @suffix_len))); +insert into t2 values +(concat('1 - 02', repeat('x', @suffix_len)), concat('2 - 02', repeat('x', @suffix_len))); +insert into t2 values +(concat('1 - 03', repeat('x', @suffix_len)), concat('2 - 03', repeat('x', @suffix_len))); + +explain extended select left(a1,7), left(a2,7) from t1 where a1 in (select b1 from t2 where b1 > '0'); +select left(a1,7), left(a2,7) from t1 where a1 in (select b1 from t2 where b1 > '0'); + +drop table t1, t2; diff --git a/mysql-test/main/create.result 
b/mysql-test/main/create.result index 4bae81878103b..ba5836e2999f0 100644 --- a/mysql-test/main/create.result +++ b/mysql-test/main/create.result @@ -30,10 +30,7 @@ Note 1051 Unknown table 'test.t1,test.t2' create table t1 (b char(0) not null, index(b)); ERROR 42000: The storage engine MyISAM can't index column `b` create table t1 (a int not null,b text) engine=heap; -ERROR 42000: Storage engine MEMORY doesn't support BLOB/TEXT columns -drop table if exists t1; -Warnings: -Note 1051 Unknown table 'test.t1' +drop table t1; create table t1 (ordid int(8) not null auto_increment, ord varchar(50) not null, primary key (ord,ordid)) engine=heap; ERROR 42000: Incorrect table definition; there can be only one auto column and it must be defined as a key create table not_existing_database.test (a int); @@ -1089,7 +1086,7 @@ t1 CREATE TABLE `t1` ( `QUERY_ID` bigint(4) NOT NULL, `INFO_BINARY` blob, `TID` bigint(4) NOT NULL -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci drop table t1; create temporary table t1 like information_schema.processlist; show create table t1; @@ -1113,7 +1110,7 @@ t1 CREATE TEMPORARY TABLE `t1` ( `QUERY_ID` bigint(4) NOT NULL, `INFO_BINARY` blob, `TID` bigint(4) NOT NULL -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci drop table t1; create table t1 like information_schema.character_sets; show create table t1; diff --git a/mysql-test/main/create.test b/mysql-test/main/create.test index dada6963fdb2e..80381a60e68c3 100644 --- a/mysql-test/main/create.test +++ b/mysql-test/main/create.test @@ -30,9 +30,8 @@ create table t2 select auto+1 from t1; drop table if exists t1,t2; --error ER_WRONG_KEY_COLUMN create table t1 (b char(0) not null, index(b)); ---error ER_TABLE_CANT_HANDLE_BLOB create table t1 (a int not null,b text) engine=heap; -drop table if exists t1; +drop table t1; --error ER_WRONG_AUTO_KEY create 
table t1 (ordid int(8) not null auto_increment, ord varchar(50) not null, primary key (ord,ordid)) engine=heap; diff --git a/mysql-test/main/cte_recursive.test b/mysql-test/main/cte_recursive.test index d6fb2a47884ed..3ce3c0e9d964a 100644 --- a/mysql-test/main/cte_recursive.test +++ b/mysql-test/main/cte_recursive.test @@ -3212,6 +3212,8 @@ show create table t2; --eval insert ignore into t2 $query; drop table t2; set @@sql_mode=""; +# Rows with identical (level, mid) due to overflow have non-deterministic order +--sorted_result --eval $query --eval create table t2 as $query; show create table t2; diff --git a/mysql-test/main/derived_view.result b/mysql-test/main/derived_view.result index 3f3f68154882c..c673d201329b8 100644 --- a/mysql-test/main/derived_view.result +++ b/mysql-test/main/derived_view.result @@ -2372,7 +2372,7 @@ GROUP BY TABLE_SCHEMA) AS UNIQUES ON ( COLUMNS.TABLE_SCHEMA = UNIQUES.TABLE_SCHEMA); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY COLUMNS ALL NULL NULL NULL NULL NULL Open_frm_only; Scanned all databases -1 PRIMARY ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) +1 PRIMARY ref key0 key0 194 information_schema.COLUMNS.TABLE_SCHEMA 2 2 DERIVED STATISTICS ALL NULL NULL NULL NULL NULL Open_frm_only; Scanned all databases; Using filesort SELECT COUNT(*) > 0 FROM INFORMATION_SCHEMA.COLUMNS diff --git a/mysql-test/main/distinct.result b/mysql-test/main/distinct.result index d8646abfb43cb..2f76fcfbc924a 100644 --- a/mysql-test/main/distinct.result +++ b/mysql-test/main/distinct.result @@ -1189,7 +1189,7 @@ insert into t1 values (1, 'Aa123456', 'abc'), (2, 'Bb7897777', 'def'), (3, 'Cc01287', 'xyz'), (5, 'd12345', 'efg'); select distinct if(sum(a), b, 0) from t1 group by value(c) with rollup; if(sum(a), b, 0) -Aa123456 +SOME_B_VALUE drop table t1; # # end of 10.5 tests diff --git a/mysql-test/main/distinct.test b/mysql-test/main/distinct.test index 48d5f4bb8fae6..db9bfb6b5abe2 100644 --- 
a/mysql-test/main/distinct.test +++ b/mysql-test/main/distinct.test @@ -915,6 +915,9 @@ create table t1 (a int, b longtext, c varchar(18)); insert into t1 values (1, 'Aa123456', 'abc'), (2, 'Bb7897777', 'def'), (3, 'Cc01287', 'xyz'), (5, 'd12345', 'efg'); +# ROLLUP row's b value is indeterminate (depends on last group processed), +# which varies by temp table engine (HEAP vs Aria). Mask the value. +--replace_regex /(Aa123456|Bb7897777|Cc01287|d12345)/SOME_B_VALUE/ select distinct if(sum(a), b, 0) from t1 group by value(c) with rollup; drop table t1; diff --git a/mysql-test/main/group_by.result b/mysql-test/main/group_by.result index 17f42fe36f36b..8dcbd16ccabcd 100644 --- a/mysql-test/main/group_by.result +++ b/mysql-test/main/group_by.result @@ -2510,10 +2510,10 @@ SELECT f3, MIN(f2) FROM t1 GROUP BY f1 LIMIT 1; f3 MIN(f2) blob NULL DROP TABLE t1; -the value below *must* be 1 +the value below *must* be 0 (HEAP supports blobs) show status like 'Created_tmp_disk_tables'; Variable_name Value -Created_tmp_disk_tables 1 +Created_tmp_disk_tables 0 # # Bug #1002146: Unneeded filesort if usage of join buffer is not allowed # (bug mdev-645) diff --git a/mysql-test/main/group_by.test b/mysql-test/main/group_by.test index 19f2e6582ae44..d3aa21d6f397e 100644 --- a/mysql-test/main/group_by.test +++ b/mysql-test/main/group_by.test @@ -1671,14 +1671,14 @@ DROP TABLE t1, t2; --disable_ps2_protocol --disable_view_protocol --disable_cursor_protocol -FLUSH STATUS; # this test case *must* use Aria temp tables +FLUSH STATUS; CREATE TABLE t1 (f1 INT, f2 decimal(20,1), f3 blob); INSERT INTO t1 values(11,NULL,'blob'),(11,NULL,'blob'); SELECT f3, MIN(f2) FROM t1 GROUP BY f1 LIMIT 1; DROP TABLE t1; ---echo the value below *must* be 1 +--echo the value below *must* be 0 (HEAP supports blobs) show status like 'Created_tmp_disk_tables'; --enable_cursor_protocol --enable_view_protocol diff --git a/mysql-test/main/group_min_max_innodb.result b/mysql-test/main/group_min_max_innodb.result index 
27656374aee38..c65bbd5e7e602 100644 --- a/mysql-test/main/group_min_max_innodb.result +++ b/mysql-test/main/group_min_max_innodb.result @@ -303,10 +303,10 @@ CREATE TABLE t2 (`voter_id` int(10) unsigned NOT NULL DEFAULT '0', insert into t2 values (1,repeat("a",1000)),(2,repeat("a",1000)),(3,repeat("b",1000)),(4,repeat("c",1000)),(4,repeat("b",1000)); SELECT GROUP_CONCAT(t1.language_id SEPARATOR ',') AS `translation_resources`, `d`.`serialized_c` FROM t2 AS `d` LEFT JOIN t1 ON `d`.`voter_id` = t1.`voter_id` GROUP BY `d`.`voter_id` ORDER BY 10-d.voter_id+RAND()*0; translation_resources serialized_c -NULL cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc -NULL 
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb -NULL 
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa -NULL 
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +NULL # +NULL # +NULL # +NULL # drop table t1,t2; # # MDEV-30143: Segfault on select query using index for group-by and filesort diff --git a/mysql-test/main/group_min_max_innodb.test b/mysql-test/main/group_min_max_innodb.test index 33a3a8888a5d8..99f8457e163fe 100644 --- a/mysql-test/main/group_min_max_innodb.test +++ b/mysql-test/main/group_min_max_innodb.test @@ -248,6 +248,7 @@ CREATE TABLE t1 (`voter_id` int(11) unsigned NOT NULL, CREATE TABLE t2 (`voter_id` int(10) unsigned NOT NULL DEFAULT '0', `serialized_c` mediumblob) ENGINE=InnoDB DEFAULT CHARSET=utf8; insert into t2 values (1,repeat("a",1000)),(2,repeat("a",1000)),(3,repeat("b",1000)),(4,repeat("c",1000)),(4,repeat("b",1000)); +--replace_column 2 # SELECT GROUP_CONCAT(t1.language_id SEPARATOR ',') AS `translation_resources`, `d`.`serialized_c` FROM t2 AS `d` LEFT JOIN t1 ON `d`.`voter_id` = t1.`voter_id` GROUP BY `d`.`voter_id` ORDER BY 10-d.voter_id+RAND()*0; drop table t1,t2; diff --git 
a/mysql-test/main/information_schema.result b/mysql-test/main/information_schema.result index 5d9f2d7322f12..f7d43fc58e5a6 100644 --- a/mysql-test/main/information_schema.result +++ b/mysql-test/main/information_schema.result @@ -709,7 +709,7 @@ select TABLE_NAME,TABLE_TYPE,ENGINE from information_schema.tables where table_schema='information_schema' limit 2; TABLE_NAME TABLE_TYPE ENGINE -ALL_PLUGINS SYSTEM VIEW Aria +ALL_PLUGINS SYSTEM VIEW MEMORY APPLICABLE_ROLES SYSTEM VIEW MEMORY show tables from information_schema like "T%"; Tables_in_information_schema (T%) diff --git a/mysql-test/main/information_schema_parameters.result b/mysql-test/main/information_schema_parameters.result index 1d00c992e5c8f..0abc0f4f38838 100644 --- a/mysql-test/main/information_schema_parameters.result +++ b/mysql-test/main/information_schema_parameters.result @@ -19,7 +19,7 @@ PARAMETERS CREATE TEMPORARY TABLE `PARAMETERS` ( `COLLATION_NAME` varchar(64), `DTD_IDENTIFIER` longtext NOT NULL, `ROUTINE_TYPE` varchar(9) NOT NULL -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci SELECT * FROM information_schema.columns WHERE table_schema = 'information_schema' AND table_name = 'parameters' diff --git a/mysql-test/main/information_schema_part.result b/mysql-test/main/information_schema_part.result index 1c5b9333550ef..005314612dc68 100644 --- a/mysql-test/main/information_schema_part.result +++ b/mysql-test/main/information_schema_part.result @@ -61,7 +61,7 @@ partition x2 values less than (5) ( subpartition x21 tablespace t1, subpartition x22 tablespace t2) ); -select * from information_schema.partitions where table_schema="test" order by table_name, partition_name; +select * from information_schema.partitions where table_schema="test" order by table_name, partition_name, subpartition_name; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION 
PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME def test t1 x1 x11 1 1 RANGE HASH `a` `a` + `b` 1 0 0 0 # 1024 0 # # NULL NULL default NULL def test t1 x1 x12 1 2 RANGE HASH `a` `a` + `b` 1 0 0 0 # 1024 0 # # NULL NULL default NULL diff --git a/mysql-test/main/information_schema_part.test b/mysql-test/main/information_schema_part.test index 3741de611505a..02af5be6d02f8 100644 --- a/mysql-test/main/information_schema_part.test +++ b/mysql-test/main/information_schema_part.test @@ -63,7 +63,7 @@ subpartition by key (a) subpartition x22 tablespace t2) ); --replace_column 16 # 19 # 20 # -select * from information_schema.partitions where table_schema="test" order by table_name, partition_name; +select * from information_schema.partitions where table_schema="test" order by table_name, partition_name, subpartition_name; drop table t1,t2; create table t1 ( diff --git a/mysql-test/main/information_schema_routines.result b/mysql-test/main/information_schema_routines.result index b5b43db71cec4..4d73258b4941d 100644 --- a/mysql-test/main/information_schema_routines.result +++ b/mysql-test/main/information_schema_routines.result @@ -36,7 +36,7 @@ ROUTINES CREATE TEMPORARY TABLE `ROUTINES` ( `CHARACTER_SET_CLIENT` varchar(32) NOT NULL, `COLLATION_CONNECTION` varchar(64) NOT NULL, `DATABASE_COLLATION` varchar(64) NOT NULL -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci SELECT * FROM information_schema.columns WHERE table_schema = 'information_schema' AND table_name = 'routines' diff --git a/mysql-test/main/intersect_all.result b/mysql-test/main/intersect_all.result index 028a76944b38d..d2c7e5932a88d 100644 --- a/mysql-test/main/intersect_all.result +++ b/mysql-test/main/intersect_all.result 
@@ -718,13 +718,13 @@ t4 CREATE TABLE `t4` ( drop tables t4; (select a,b from t1) intersect all (select c,d from t2) intersect all (select e,f from t3) union all (select 4,4); a b -4 4 2 2 2 2 +4 4 (select a,b from t1) intersect all (select c,d from t2) intersect all (select e,f from t3) union all (select 4,4) except all (select 2,2); a b -4 4 2 2 +4 4 drop tables t1,t2,t3; create table t1 (a int, b int); create table t2 (c int, d int); @@ -779,9 +779,9 @@ insert into t3 values (3,3); e f 3 3 3 3 +4 4 5 5 6 6 -4 4 explain extended (select e,f from t3) intersect all (select c,d from t2) union all (select a,b from t1) union all (select 4,4); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t3 ALL NULL NULL NULL NULL 3 100.00 diff --git a/mysql-test/main/intersect_all.test b/mysql-test/main/intersect_all.test index c3dc4e123f0e7..d28d7e643d50d 100644 --- a/mysql-test/main/intersect_all.test +++ b/mysql-test/main/intersect_all.test @@ -108,8 +108,10 @@ show create table t4; drop tables t4; +--sorted_result (select a,b from t1) intersect all (select c,d from t2) intersect all (select e,f from t3) union all (select 4,4); +--sorted_result (select a,b from t1) intersect all (select c,d from t2) intersect all (select e,f from t3) union all (select 4,4) except all (select 2,2); drop tables t1,t2,t3; @@ -149,6 +151,7 @@ explain extended (select a,b from t1) union all (select c,d from t2) intersect a insert into t2 values (3,3); insert into t3 values (3,3); +--sorted_result (select e,f from t3) intersect all (select c,d from t2) union all (select a,b from t1) union all (select 4,4); explain extended (select e,f from t3) intersect all (select c,d from t2) union all (select a,b from t1) union all (select 4,4); diff --git a/mysql-test/main/select.result b/mysql-test/main/select.result index cfa31f507188a..aeb12170bb82a 100644 --- a/mysql-test/main/select.result +++ b/mysql-test/main/select.result @@ -576,18 +576,18 @@ bedlam 1 bedpost 1 
boasted 1 set tmp_memory_table_size=default; -select distinct fld3,repeat("a",length(fld3)),count(*) from t2 group by companynr,fld3 limit 100,10; +select distinct fld3,repeat("a",length(fld3)),count(*) from t2 group by companynr,fld3 order by fld3 limit 100,10; fld3 repeat("a",length(fld3)) count(*) -circus aaaaaa 1 -cited aaaaa 1 -Colombo aaaaaaa 1 -congresswoman aaaaaaaaaaaaa 1 -contrition aaaaaaaaaa 1 -corny aaaaa 1 -cultivation aaaaaaaaaaa 1 -definiteness aaaaaaaaaaaa 1 -demultiplex aaaaaaaaaaa 1 -disappointing aaaaaaaaaaaaa 1 +Baird aaaaa 1 +balled aaaaaa 1 +ballgown aaaaaaaa 1 +Baltimorean aaaaaaaaaaa 1 +bankruptcies aaaaaaaaaaaa 1 +Barry aaaaa 1 +batting aaaaaaa 1 +beaner aaaaaa 1 +beasts aaaaaa 1 +beaters aaaaaaa 1 select distinct companynr,rtrim(space(512+companynr)) from t3 order by 1,2; companynr rtrim(space(512+companynr)) 37 diff --git a/mysql-test/main/select.test b/mysql-test/main/select.test index ac9753a86bd27..300f08c543454 100644 --- a/mysql-test/main/select.test +++ b/mysql-test/main/select.test @@ -1442,7 +1442,8 @@ select distinct fld3,count(*) from t2 group by companynr,fld3 limit 10; set tmp_memory_table_size=0; # force on-disk tmp table select distinct fld3,count(*) from t2 group by companynr,fld3 limit 10; set tmp_memory_table_size=default; -select distinct fld3,repeat("a",length(fld3)),count(*) from t2 group by companynr,fld3 limit 100,10; +# ORDER BY fld3 ensures deterministic LIMIT window regardless of temp table engine +select distinct fld3,repeat("a",length(fld3)),count(*) from t2 group by companynr,fld3 order by fld3 limit 100,10; # # A big order by that should trigger a merge in filesort diff --git a/mysql-test/main/select_jcl6.result b/mysql-test/main/select_jcl6.result index cf8f4f26ae067..0809f5cadfb82 100644 --- a/mysql-test/main/select_jcl6.result +++ b/mysql-test/main/select_jcl6.result @@ -587,18 +587,18 @@ bedlam 1 bedpost 1 boasted 1 set tmp_memory_table_size=default; -select distinct fld3,repeat("a",length(fld3)),count(*) 
from t2 group by companynr,fld3 limit 100,10; +select distinct fld3,repeat("a",length(fld3)),count(*) from t2 group by companynr,fld3 order by fld3 limit 100,10; fld3 repeat("a",length(fld3)) count(*) -circus aaaaaa 1 -cited aaaaa 1 -Colombo aaaaaaa 1 -congresswoman aaaaaaaaaaaaa 1 -contrition aaaaaaaaaa 1 -corny aaaaa 1 -cultivation aaaaaaaaaaa 1 -definiteness aaaaaaaaaaaa 1 -demultiplex aaaaaaaaaaa 1 -disappointing aaaaaaaaaaaaa 1 +Baird aaaaa 1 +balled aaaaaa 1 +ballgown aaaaaaaa 1 +Baltimorean aaaaaaaaaaa 1 +bankruptcies aaaaaaaaaaaa 1 +Barry aaaaa 1 +batting aaaaaaa 1 +beaner aaaaaa 1 +beasts aaaaaa 1 +beaters aaaaaaa 1 select distinct companynr,rtrim(space(512+companynr)) from t3 order by 1,2; companynr rtrim(space(512+companynr)) 37 diff --git a/mysql-test/main/select_pkeycache.result b/mysql-test/main/select_pkeycache.result index cfa31f507188a..aeb12170bb82a 100644 --- a/mysql-test/main/select_pkeycache.result +++ b/mysql-test/main/select_pkeycache.result @@ -576,18 +576,18 @@ bedlam 1 bedpost 1 boasted 1 set tmp_memory_table_size=default; -select distinct fld3,repeat("a",length(fld3)),count(*) from t2 group by companynr,fld3 limit 100,10; +select distinct fld3,repeat("a",length(fld3)),count(*) from t2 group by companynr,fld3 order by fld3 limit 100,10; fld3 repeat("a",length(fld3)) count(*) -circus aaaaaa 1 -cited aaaaa 1 -Colombo aaaaaaa 1 -congresswoman aaaaaaaaaaaaa 1 -contrition aaaaaaaaaa 1 -corny aaaaa 1 -cultivation aaaaaaaaaaa 1 -definiteness aaaaaaaaaaaa 1 -demultiplex aaaaaaaaaaa 1 -disappointing aaaaaaaaaaaaa 1 +Baird aaaaa 1 +balled aaaaaa 1 +ballgown aaaaaaaa 1 +Baltimorean aaaaaaaaaaa 1 +bankruptcies aaaaaaaaaaaa 1 +Barry aaaaa 1 +batting aaaaaaa 1 +beaner aaaaaa 1 +beasts aaaaaa 1 +beaters aaaaaaa 1 select distinct companynr,rtrim(space(512+companynr)) from t3 order by 1,2; companynr rtrim(space(512+companynr)) 37 diff --git a/mysql-test/main/temp_table_symlink.result b/mysql-test/main/temp_table_symlink.result index 
1c5c68170ff8a..6add9191b0478 100644 --- a/mysql-test/main/temp_table_symlink.result +++ b/mysql-test/main/temp_table_symlink.result @@ -4,8 +4,6 @@ create temporary table t2 (a int); Got one of the listed errors create temporary table t3 (a int) engine=Aria; Got one of the listed errors -select * from information_schema.columns where table_schema='test'; -Got one of the listed errors flush tables; select * from d1; a diff --git a/mysql-test/main/temp_table_symlink.test b/mysql-test/main/temp_table_symlink.test index a0be38d907300..2428d137dd5ed 100644 --- a/mysql-test/main/temp_table_symlink.test +++ b/mysql-test/main/temp_table_symlink.test @@ -23,11 +23,6 @@ error 1,1030; create temporary table t2 (a int); error 1,1030; create temporary table t3 (a int) engine=Aria; ---disable_view_protocol -error 1,1030; -select * from information_schema.columns where table_schema='test'; ---enable_view_protocol - flush tables; select * from d1; drop temporary table t1; diff --git a/mysql-test/suite/funcs_1/r/is_columns.result b/mysql-test/suite/funcs_1/r/is_columns.result index 3d03a1d288aca..75996b16f4dc7 100644 --- a/mysql-test/suite/funcs_1/r/is_columns.result +++ b/mysql-test/suite/funcs_1/r/is_columns.result @@ -75,7 +75,7 @@ COLUMNS CREATE TEMPORARY TABLE `COLUMNS` ( `COLUMN_COMMENT` varchar(1024) NOT NULL, `IS_GENERATED` varchar(6) NOT NULL, `GENERATION_EXPRESSION` longtext -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci SHOW COLUMNS FROM information_schema.COLUMNS; Field Type Null Key Default Extra TABLE_CATALOG varchar(512) NO NULL diff --git a/mysql-test/suite/funcs_1/r/is_events.result b/mysql-test/suite/funcs_1/r/is_events.result index 7df12ee27717e..59afb2d81f2e9 100644 --- a/mysql-test/suite/funcs_1/r/is_events.result +++ b/mysql-test/suite/funcs_1/r/is_events.result @@ -79,7 +79,7 @@ EVENTS CREATE TEMPORARY TABLE `EVENTS` ( `CHARACTER_SET_CLIENT` varchar(32) NOT NULL, `COLLATION_CONNECTION` 
varchar(64) NOT NULL, `DATABASE_COLLATION` varchar(64) NOT NULL -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci SHOW COLUMNS FROM information_schema.EVENTS; Field Type Null Key Default Extra EVENT_CATALOG varchar(64) NO NULL diff --git a/mysql-test/suite/funcs_1/r/is_routines.result b/mysql-test/suite/funcs_1/r/is_routines.result index 51477e441ad07..1660a2caabb8f 100644 --- a/mysql-test/suite/funcs_1/r/is_routines.result +++ b/mysql-test/suite/funcs_1/r/is_routines.result @@ -94,7 +94,7 @@ ROUTINES CREATE TEMPORARY TABLE `ROUTINES` ( `CHARACTER_SET_CLIENT` varchar(32) NOT NULL, `COLLATION_CONNECTION` varchar(64) NOT NULL, `DATABASE_COLLATION` varchar(64) NOT NULL -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci SHOW COLUMNS FROM information_schema.ROUTINES; Field Type Null Key Default Extra SPECIFIC_NAME varchar(64) NO NULL diff --git a/mysql-test/suite/funcs_1/r/is_routines_embedded.result b/mysql-test/suite/funcs_1/r/is_routines_embedded.result index 817817b01d238..b46f520bc489d 100644 --- a/mysql-test/suite/funcs_1/r/is_routines_embedded.result +++ b/mysql-test/suite/funcs_1/r/is_routines_embedded.result @@ -94,7 +94,7 @@ ROUTINES CREATE TEMPORARY TABLE `ROUTINES` ( `CHARACTER_SET_CLIENT` varchar(32) NOT NULL, `COLLATION_CONNECTION` varchar(64) NOT NULL, `DATABASE_COLLATION` varchar(64) NOT NULL -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci SHOW COLUMNS FROM information_schema.ROUTINES; Field Type Null Key Default Extra SPECIFIC_NAME varchar(64) NO NULL @@ -197,7 +197,7 @@ sp_6_408002_2 def db_datadict_2 sp_6_408002_2 PROCEDURE NULL NULL NULL NULL NUL SELECT * FROM db_datadict_2.res_6_408002_2; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost latin1 latin1_swedish_ci latin1_swedish_ci 
add_suppression def mtr add_suppression PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN INSERT INTO test_suppressions (pattern) VALUES (pattern); FLUSH NO_WRITE_TO_BINLOG TABLE test_suppressions; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8mb3 utf8mb3_general_ci latin1_swedish_ci -check_testcase def mtr check_testcase PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE variable_name NOT IN ('timestamp') AND variable_name not like "Last_IO_Err*" AND variable_name != 'INNODB_IBUF_MAX_SIZE' AND variable_name != 'INNODB_LOG_FILE_BUFFERING' AND variable_name != 'INNODB_USE_NATIVE_AIO' AND variable_name != 'INNODB_BUFFER_POOL_LOAD_AT_STARTUP' AND variable_name not like 'GTID%POS' AND variable_name != 'GTID_BINLOG_STATE' AND variable_name != 'THREAD_POOL_SIZE' ORDER BY variable_name; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA ORDER BY BINARY SCHEMA_NAME; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME NOT IN ('mtr_wsrep_notify', 'wsrep_schema') ORDER BY BINARY SCHEMA_NAME; SELECT table_name AS tables_in_test FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='test'; SELECT CONCAT(table_schema, '.', table_name) AS tables_in_mysql FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='mysql' ORDER BY tables_in_mysql; SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql; SELECT * FROM INFORMATION_SCHEMA.EVENTS; SELECT * FROM INFORMATION_SCHEMA.TRIGGERS WHERE TRIGGER_NAME NOT IN ('gs_insert', 'ts_insert') AND TRIGGER_SCHEMA != 'sys'; SELECT * FROM INFORMATION_SCHEMA.ROUTINES WHERE ROUTINE_SCHEMA != 'sys'; 
SHOW STATUS LIKE 'slave_open_temp_tables'; checksum table mysql.columns_priv, mysql.db, mysql.func, mysql.help_category, mysql.help_keyword, mysql.help_relation, mysql.plugin, mysql.procs_priv, mysql.roles_mapping, mysql.tables_priv, mysql.time_zone, mysql.time_zone_leap_second, mysql.time_zone_name, mysql.time_zone_transition, mysql.time_zone_transition_type, mysql.global_priv; SELECT * FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_STATUS != 'INACTIVE'; select * from information_schema.session_variables where variable_name = 'debug_sync'; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8mb3 utf8mb3_general_ci latin1_swedish_ci +check_testcase def mtr check_testcase PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE variable_name NOT IN ('timestamp') AND variable_name not like "Last_IO_Err*" AND variable_name != 'INNODB_IBUF_MAX_SIZE' AND variable_name != 'INNODB_LOG_FILE_BUFFERING' AND variable_name != 'INNODB_USE_NATIVE_AIO' AND variable_name != 'INNODB_BUFFER_POOL_LOAD_AT_STARTUP' AND variable_name not like 'GTID%POS' AND variable_name != 'GTID_BINLOG_STATE' AND variable_name != 'THREAD_POOL_SIZE' ORDER BY variable_name; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA ORDER BY BINARY SCHEMA_NAME; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME NOT IN ('mtr_wsrep_notify', 'wsrep_schema') ORDER BY BINARY SCHEMA_NAME; SELECT table_name AS tables_in_test FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='test'; SELECT CONCAT(table_schema, '.', table_name) AS tables_in_mysql FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='mysql' ORDER BY tables_in_mysql; SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, 
column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql, ordinal_position; SELECT * FROM INFORMATION_SCHEMA.EVENTS; SELECT * FROM INFORMATION_SCHEMA.TRIGGERS WHERE TRIGGER_NAME NOT IN ('gs_insert', 'ts_insert') AND TRIGGER_SCHEMA != 'sys'; SELECT * FROM INFORMATION_SCHEMA.ROUTINES WHERE ROUTINE_SCHEMA != 'sys'; SHOW STATUS LIKE 'slave_open_temp_tables'; checksum table mysql.columns_priv, mysql.db, mysql.func, mysql.help_category, mysql.help_keyword, mysql.help_relation, mysql.plugin, mysql.procs_priv, mysql.roles_mapping, mysql.tables_priv, mysql.time_zone, mysql.time_zone_leap_second, mysql.time_zone_name, mysql.time_zone_transition, mysql.time_zone_transition_type, mysql.global_priv; SELECT * FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_STATUS != 'INACTIVE'; select * from information_schema.session_variables where variable_name = 'debug_sync'; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8mb3 utf8mb3_general_ci latin1_swedish_ci check_warnings def mtr check_warnings PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN DECLARE `pos` bigint unsigned; SET SQL_LOG_BIN=0, SQL_SAFE_UPDATES=0; UPDATE error_log el, global_suppressions gs SET suspicious=0 WHERE el.suspicious=1 AND el.line REGEXP gs.pattern; UPDATE error_log el, test_suppressions ts SET suspicious=0 WHERE el.suspicious=1 AND el.line REGEXP ts.pattern; SELECT COUNT(*) INTO @num_warnings FROM error_log WHERE suspicious=1; IF @num_warnings > 0 THEN SELECT line FROM error_log WHERE suspicious=1; SELECT 2 INTO result; ELSE SELECT 0 INTO RESULT; END IF; TRUNCATE test_suppressions; DROP TABLE error_log; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8mb3 utf8mb3_general_ci latin1_swedish_ci AddGeometryColumn def mysql AddGeometryColumn PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL begin set @qwe= concat('ALTER TABLE ', t_schema, '.', 
t_name, ' ADD ', geometry_column,' GEOMETRY REF_SYSTEM_ID=', t_srid); PREPARE ls from @qwe; execute ls; deallocate prepare ls; end NULL NULL SQL NO CONTAINS SQL NULL INVOKER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss mariadb.sys@localhost latin1 latin1_swedish_ci latin1_swedish_ci @@ -213,7 +213,7 @@ sp_6_408002_2 def db_datadict_2 sp_6_408002_2 PROCEDURE NULL NULL NULL NULL NUL SELECT * FROM db_datadict_2.res_6_408002_2; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost latin1 latin1_swedish_ci latin1_swedish_ci add_suppression def mtr add_suppression PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN INSERT INTO test_suppressions (pattern) VALUES (pattern); FLUSH NO_WRITE_TO_BINLOG TABLE test_suppressions; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8mb3 utf8mb3_general_ci latin1_swedish_ci -check_testcase def mtr check_testcase PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE variable_name NOT IN ('timestamp') AND variable_name not like "Last_IO_Err*" AND variable_name != 'INNODB_IBUF_MAX_SIZE' AND variable_name != 'INNODB_LOG_FILE_BUFFERING' AND variable_name != 'INNODB_USE_NATIVE_AIO' AND variable_name != 'INNODB_BUFFER_POOL_LOAD_AT_STARTUP' AND variable_name not like 'GTID%POS' AND variable_name != 'GTID_BINLOG_STATE' AND variable_name != 'THREAD_POOL_SIZE' ORDER BY variable_name; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA ORDER BY BINARY SCHEMA_NAME; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME NOT IN ('mtr_wsrep_notify', 'wsrep_schema') ORDER BY BINARY SCHEMA_NAME; SELECT table_name AS tables_in_test FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='test'; SELECT CONCAT(table_schema, '.', table_name) AS tables_in_mysql FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='mysql' ORDER BY tables_in_mysql; SELECT CONCAT(table_schema, '.', table_name) AS 
columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql; SELECT * FROM INFORMATION_SCHEMA.EVENTS; SELECT * FROM INFORMATION_SCHEMA.TRIGGERS WHERE TRIGGER_NAME NOT IN ('gs_insert', 'ts_insert') AND TRIGGER_SCHEMA != 'sys'; SELECT * FROM INFORMATION_SCHEMA.ROUTINES WHERE ROUTINE_SCHEMA != 'sys'; SHOW STATUS LIKE 'slave_open_temp_tables'; checksum table mysql.columns_priv, mysql.db, mysql.func, mysql.help_category, mysql.help_keyword, mysql.help_relation, mysql.plugin, mysql.procs_priv, mysql.roles_mapping, mysql.tables_priv, mysql.time_zone, mysql.time_zone_leap_second, mysql.time_zone_name, mysql.time_zone_transition, mysql.time_zone_transition_type, mysql.global_priv; SELECT * FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_STATUS != 'INACTIVE'; select * from information_schema.session_variables where variable_name = 'debug_sync'; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8mb3 utf8mb3_general_ci latin1_swedish_ci +check_testcase def mtr check_testcase PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE variable_name NOT IN ('timestamp') AND variable_name not like "Last_IO_Err*" AND variable_name != 'INNODB_IBUF_MAX_SIZE' AND variable_name != 'INNODB_LOG_FILE_BUFFERING' AND variable_name != 'INNODB_USE_NATIVE_AIO' AND variable_name != 'INNODB_BUFFER_POOL_LOAD_AT_STARTUP' AND variable_name not like 'GTID%POS' AND variable_name != 'GTID_BINLOG_STATE' AND variable_name != 'THREAD_POOL_SIZE' ORDER BY variable_name; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA ORDER BY BINARY SCHEMA_NAME; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME NOT IN 
('mtr_wsrep_notify', 'wsrep_schema') ORDER BY BINARY SCHEMA_NAME; SELECT table_name AS tables_in_test FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='test'; SELECT CONCAT(table_schema, '.', table_name) AS tables_in_mysql FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='mysql' ORDER BY tables_in_mysql; SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql, ordinal_position; SELECT * FROM INFORMATION_SCHEMA.EVENTS; SELECT * FROM INFORMATION_SCHEMA.TRIGGERS WHERE TRIGGER_NAME NOT IN ('gs_insert', 'ts_insert') AND TRIGGER_SCHEMA != 'sys'; SELECT * FROM INFORMATION_SCHEMA.ROUTINES WHERE ROUTINE_SCHEMA != 'sys'; SHOW STATUS LIKE 'slave_open_temp_tables'; checksum table mysql.columns_priv, mysql.db, mysql.func, mysql.help_category, mysql.help_keyword, mysql.help_relation, mysql.plugin, mysql.procs_priv, mysql.roles_mapping, mysql.tables_priv, mysql.time_zone, mysql.time_zone_leap_second, mysql.time_zone_name, mysql.time_zone_transition, mysql.time_zone_transition_type, mysql.global_priv; SELECT * FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_STATUS != 'INACTIVE'; select * from information_schema.session_variables where variable_name = 'debug_sync'; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8mb3 utf8mb3_general_ci latin1_swedish_ci check_warnings def mtr check_warnings PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN DECLARE `pos` bigint unsigned; SET SQL_LOG_BIN=0, SQL_SAFE_UPDATES=0; UPDATE error_log el, global_suppressions gs SET suspicious=0 WHERE el.suspicious=1 AND el.line REGEXP gs.pattern; UPDATE error_log el, test_suppressions ts SET suspicious=0 WHERE 
el.suspicious=1 AND el.line REGEXP ts.pattern; SELECT COUNT(*) INTO @num_warnings FROM error_log WHERE suspicious=1; IF @num_warnings > 0 THEN SELECT line FROM error_log WHERE suspicious=1; SELECT 2 INTO result; ELSE SELECT 0 INTO RESULT; END IF; TRUNCATE test_suppressions; DROP TABLE error_log; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8mb3 utf8mb3_general_ci latin1_swedish_ci AddGeometryColumn def mysql AddGeometryColumn PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL begin set @qwe= concat('ALTER TABLE ', t_schema, '.', t_name, ' ADD ', geometry_column,' GEOMETRY REF_SYSTEM_ID=', t_srid); PREPARE ls from @qwe; execute ls; deallocate prepare ls; end NULL NULL SQL NO CONTAINS SQL NULL INVOKER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss mariadb.sys@localhost latin1 latin1_swedish_ci latin1_swedish_ci @@ -229,7 +229,7 @@ sp_6_408002_2 def db_datadict_2 sp_6_408002_2 PROCEDURE NULL NULL NULL NULL NUL SELECT * FROM db_datadict_2.res_6_408002_2; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost latin1 latin1_swedish_ci latin1_swedish_ci add_suppression def mtr add_suppression PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN INSERT INTO test_suppressions (pattern) VALUES (pattern); FLUSH NO_WRITE_TO_BINLOG TABLE test_suppressions; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8mb3 utf8mb3_general_ci latin1_swedish_ci -check_testcase def mtr check_testcase PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE variable_name NOT IN ('timestamp') AND variable_name not like "Last_IO_Err*" AND variable_name != 'INNODB_IBUF_MAX_SIZE' AND variable_name != 'INNODB_LOG_FILE_BUFFERING' AND variable_name != 'INNODB_USE_NATIVE_AIO' AND variable_name != 'INNODB_BUFFER_POOL_LOAD_AT_STARTUP' AND variable_name not like 'GTID%POS' AND 
variable_name != 'GTID_BINLOG_STATE' AND variable_name != 'THREAD_POOL_SIZE' ORDER BY variable_name; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA ORDER BY BINARY SCHEMA_NAME; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME NOT IN ('mtr_wsrep_notify', 'wsrep_schema') ORDER BY BINARY SCHEMA_NAME; SELECT table_name AS tables_in_test FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='test'; SELECT CONCAT(table_schema, '.', table_name) AS tables_in_mysql FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='mysql' ORDER BY tables_in_mysql; SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql; SELECT * FROM INFORMATION_SCHEMA.EVENTS; SELECT * FROM INFORMATION_SCHEMA.TRIGGERS WHERE TRIGGER_NAME NOT IN ('gs_insert', 'ts_insert') AND TRIGGER_SCHEMA != 'sys'; SELECT * FROM INFORMATION_SCHEMA.ROUTINES WHERE ROUTINE_SCHEMA != 'sys'; SHOW STATUS LIKE 'slave_open_temp_tables'; checksum table mysql.columns_priv, mysql.db, mysql.func, mysql.help_category, mysql.help_keyword, mysql.help_relation, mysql.plugin, mysql.procs_priv, mysql.roles_mapping, mysql.tables_priv, mysql.time_zone, mysql.time_zone_leap_second, mysql.time_zone_name, mysql.time_zone_transition, mysql.time_zone_transition_type, mysql.global_priv; SELECT * FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_STATUS != 'INACTIVE'; select * from information_schema.session_variables where variable_name = 'debug_sync'; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8mb3 utf8mb3_general_ci latin1_swedish_ci +check_testcase def mtr check_testcase PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN SELECT * FROM 
INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE variable_name NOT IN ('timestamp') AND variable_name not like "Last_IO_Err*" AND variable_name != 'INNODB_IBUF_MAX_SIZE' AND variable_name != 'INNODB_LOG_FILE_BUFFERING' AND variable_name != 'INNODB_USE_NATIVE_AIO' AND variable_name != 'INNODB_BUFFER_POOL_LOAD_AT_STARTUP' AND variable_name not like 'GTID%POS' AND variable_name != 'GTID_BINLOG_STATE' AND variable_name != 'THREAD_POOL_SIZE' ORDER BY variable_name; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA ORDER BY BINARY SCHEMA_NAME; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME NOT IN ('mtr_wsrep_notify', 'wsrep_schema') ORDER BY BINARY SCHEMA_NAME; SELECT table_name AS tables_in_test FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='test'; SELECT CONCAT(table_schema, '.', table_name) AS tables_in_mysql FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='mysql' ORDER BY tables_in_mysql; SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql, ordinal_position; SELECT * FROM INFORMATION_SCHEMA.EVENTS; SELECT * FROM INFORMATION_SCHEMA.TRIGGERS WHERE TRIGGER_NAME NOT IN ('gs_insert', 'ts_insert') AND TRIGGER_SCHEMA != 'sys'; SELECT * FROM INFORMATION_SCHEMA.ROUTINES WHERE ROUTINE_SCHEMA != 'sys'; SHOW STATUS LIKE 'slave_open_temp_tables'; checksum table mysql.columns_priv, mysql.db, mysql.func, mysql.help_category, mysql.help_keyword, mysql.help_relation, mysql.plugin, mysql.procs_priv, mysql.roles_mapping, mysql.tables_priv, mysql.time_zone, mysql.time_zone_leap_second, mysql.time_zone_name, mysql.time_zone_transition, mysql.time_zone_transition_type, mysql.global_priv; SELECT * FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_STATUS != 
'INACTIVE'; select * from information_schema.session_variables where variable_name = 'debug_sync'; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8mb3 utf8mb3_general_ci latin1_swedish_ci check_warnings def mtr check_warnings PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN DECLARE `pos` bigint unsigned; SET SQL_LOG_BIN=0, SQL_SAFE_UPDATES=0; UPDATE error_log el, global_suppressions gs SET suspicious=0 WHERE el.suspicious=1 AND el.line REGEXP gs.pattern; UPDATE error_log el, test_suppressions ts SET suspicious=0 WHERE el.suspicious=1 AND el.line REGEXP ts.pattern; SELECT COUNT(*) INTO @num_warnings FROM error_log WHERE suspicious=1; IF @num_warnings > 0 THEN SELECT line FROM error_log WHERE suspicious=1; SELECT 2 INTO result; ELSE SELECT 0 INTO RESULT; END IF; TRUNCATE test_suppressions; DROP TABLE error_log; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8mb3 utf8mb3_general_ci latin1_swedish_ci AddGeometryColumn def mysql AddGeometryColumn PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL begin set @qwe= concat('ALTER TABLE ', t_schema, '.', t_name, ' ADD ', geometry_column,' GEOMETRY REF_SYSTEM_ID=', t_srid); PREPARE ls from @qwe; execute ls; deallocate prepare ls; end NULL NULL SQL NO CONTAINS SQL NULL INVOKER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss mariadb.sys@localhost latin1 latin1_swedish_ci latin1_swedish_ci diff --git a/mysql-test/suite/funcs_1/r/is_tables_is.result b/mysql-test/suite/funcs_1/r/is_tables_is.result index c18f733c86f06..5758a4fe5b40c 100644 --- a/mysql-test/suite/funcs_1/r/is_tables_is.result +++ b/mysql-test/suite/funcs_1/r/is_tables_is.result @@ -16,9 +16,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME ALL_PLUGINS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ 
-91,9 +91,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME CHECK_CONSTRAINTS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -191,9 +191,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME COLUMNS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -291,9 +291,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME EVENTS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -516,9 +516,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME OPTIMIZER_TRACE TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -541,9 +541,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME PARAMETERS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -566,9 +566,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME PARTITIONS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -591,9 +591,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME PLUGINS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -616,9 +616,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME PROCESSLIST TABLE_TYPE SYSTEM VIEW -ENGINE 
MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -666,9 +666,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME ROUTINES TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -866,9 +866,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME SYSTEM_VARIABLES TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1016,9 +1016,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME TRIGGERS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1091,9 +1091,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME VIEWS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1132,9 +1132,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME ALL_PLUGINS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1207,9 +1207,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME CHECK_CONSTRAINTS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1307,9 +1307,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME COLUMNS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# 
AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1407,9 +1407,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME EVENTS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1632,9 +1632,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME OPTIMIZER_TRACE TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1657,9 +1657,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME PARAMETERS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1682,9 +1682,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME PARTITIONS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1707,9 +1707,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME PLUGINS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1732,9 +1732,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME PROCESSLIST TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1782,9 +1782,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME ROUTINES TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1982,9 +1982,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema 
TABLE_NAME SYSTEM_VARIABLES TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -2132,9 +2132,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME TRIGGERS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -2207,9 +2207,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME VIEWS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# diff --git a/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result b/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result index c18f733c86f06..5758a4fe5b40c 100644 --- a/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result +++ b/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result @@ -16,9 +16,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME ALL_PLUGINS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -91,9 +91,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME CHECK_CONSTRAINTS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -191,9 +191,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME COLUMNS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -291,9 +291,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME EVENTS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 
-ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -516,9 +516,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME OPTIMIZER_TRACE TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -541,9 +541,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME PARAMETERS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -566,9 +566,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME PARTITIONS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -591,9 +591,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME PLUGINS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -616,9 +616,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME PROCESSLIST TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -666,9 +666,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME ROUTINES TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -866,9 +866,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME SYSTEM_VARIABLES TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1016,9 +1016,9 @@ 
TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME TRIGGERS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1091,9 +1091,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME VIEWS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1132,9 +1132,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME ALL_PLUGINS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1207,9 +1207,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME CHECK_CONSTRAINTS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1307,9 +1307,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME COLUMNS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1407,9 +1407,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME EVENTS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1632,9 +1632,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME OPTIMIZER_TRACE TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1657,9 +1657,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME PARAMETERS TABLE_TYPE SYSTEM VIEW -ENGINE 
MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1682,9 +1682,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME PARTITIONS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1707,9 +1707,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME PLUGINS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1732,9 +1732,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME PROCESSLIST TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1782,9 +1782,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME ROUTINES TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -1982,9 +1982,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME SYSTEM_VARIABLES TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -2132,9 +2132,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME TRIGGERS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -2207,9 +2207,9 @@ TABLE_CATALOG def TABLE_SCHEMA information_schema TABLE_NAME VIEWS TABLE_TYPE SYSTEM VIEW -ENGINE MYISAM_OR_MARIA +ENGINE MEMORY VERSION 11 -ROW_FORMAT DYNAMIC_OR_PAGE +ROW_FORMAT Fixed TABLE_ROWS #TBLR# AVG_ROW_LENGTH 
#ARL# DATA_LENGTH #DL# diff --git a/mysql-test/suite/funcs_1/r/is_triggers.result b/mysql-test/suite/funcs_1/r/is_triggers.result index 7c0a27b85c2fc..d2bb50a9e9b77 100644 --- a/mysql-test/suite/funcs_1/r/is_triggers.result +++ b/mysql-test/suite/funcs_1/r/is_triggers.result @@ -77,7 +77,7 @@ TRIGGERS CREATE TEMPORARY TABLE `TRIGGERS` ( `CHARACTER_SET_CLIENT` varchar(32) NOT NULL, `COLLATION_CONNECTION` varchar(64) NOT NULL, `DATABASE_COLLATION` varchar(64) NOT NULL -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci SHOW COLUMNS FROM information_schema.TRIGGERS; Field Type Null Key Default Extra TRIGGER_CATALOG varchar(512) NO NULL diff --git a/mysql-test/suite/funcs_1/r/is_triggers_embedded.result b/mysql-test/suite/funcs_1/r/is_triggers_embedded.result index 6b0406a3a3a23..9d988417248c9 100644 --- a/mysql-test/suite/funcs_1/r/is_triggers_embedded.result +++ b/mysql-test/suite/funcs_1/r/is_triggers_embedded.result @@ -77,7 +77,7 @@ TRIGGERS CREATE TEMPORARY TABLE `TRIGGERS` ( `CHARACTER_SET_CLIENT` varchar(32) NOT NULL, `COLLATION_CONNECTION` varchar(64) NOT NULL, `DATABASE_COLLATION` varchar(64) NOT NULL -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci SHOW COLUMNS FROM information_schema.TRIGGERS; Field Type Null Key Default Extra TRIGGER_CATALOG varchar(512) NO NULL diff --git a/mysql-test/suite/funcs_1/r/is_views.result b/mysql-test/suite/funcs_1/r/is_views.result index 6a86e7464a0fb..c67b372937f9b 100644 --- a/mysql-test/suite/funcs_1/r/is_views.result +++ b/mysql-test/suite/funcs_1/r/is_views.result @@ -53,7 +53,7 @@ VIEWS CREATE TEMPORARY TABLE `VIEWS` ( `CHARACTER_SET_CLIENT` varchar(32) NOT NULL, `COLLATION_CONNECTION` varchar(64) NOT NULL, `ALGORITHM` varchar(10) NOT NULL -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci SHOW COLUMNS FROM 
information_schema.VIEWS; Field Type Null Key Default Extra TABLE_CATALOG varchar(512) NO NULL diff --git a/mysql-test/suite/funcs_1/r/is_views_embedded.result b/mysql-test/suite/funcs_1/r/is_views_embedded.result index f64562aadd164..67faf6b30ccfa 100644 --- a/mysql-test/suite/funcs_1/r/is_views_embedded.result +++ b/mysql-test/suite/funcs_1/r/is_views_embedded.result @@ -53,7 +53,7 @@ VIEWS CREATE TEMPORARY TABLE `VIEWS` ( `CHARACTER_SET_CLIENT` varchar(32) NOT NULL, `COLLATION_CONNECTION` varchar(64) NOT NULL, `ALGORITHM` varchar(10) NOT NULL -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci SHOW COLUMNS FROM information_schema.VIEWS; Field Type Null Key Default Extra TABLE_CATALOG varchar(512) NO NULL diff --git a/mysql-test/suite/funcs_1/r/processlist_priv_no_prot.result b/mysql-test/suite/funcs_1/r/processlist_priv_no_prot.result index 2bba1c0616276..dfa0d7e4fc5d6 100644 --- a/mysql-test/suite/funcs_1/r/processlist_priv_no_prot.result +++ b/mysql-test/suite/funcs_1/r/processlist_priv_no_prot.result @@ -44,7 +44,7 @@ PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` ( `QUERY_ID` bigint(4) NOT NULL, `INFO_BINARY` blob, `TID` bigint(4) NOT NULL -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci SHOW processlist; Id User Host db Command Time State Info Progress ID root HOST_NAME information_schema Query TIME starting SHOW processlist TIME_MS @@ -124,7 +124,7 @@ PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` ( `QUERY_ID` bigint(4) NOT NULL, `INFO_BINARY` blob, `TID` bigint(4) NOT NULL -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci SHOW processlist; Id User Host db Command Time State Info Progress ID ddicttestuser1 HOST_NAME information_schema Query TIME starting SHOW processlist TIME_MS diff --git 
a/mysql-test/suite/funcs_1/r/processlist_priv_ps.result b/mysql-test/suite/funcs_1/r/processlist_priv_ps.result index 94bc1544c071b..8dff4e171051d 100644 --- a/mysql-test/suite/funcs_1/r/processlist_priv_ps.result +++ b/mysql-test/suite/funcs_1/r/processlist_priv_ps.result @@ -44,7 +44,7 @@ PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` ( `QUERY_ID` bigint(4) NOT NULL, `INFO_BINARY` blob, `TID` bigint(4) NOT NULL -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci SHOW processlist; Id User Host db Command Time State Info Progress ID root HOST_NAME information_schema Query TIME starting SHOW processlist TIME_MS @@ -124,7 +124,7 @@ PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` ( `QUERY_ID` bigint(4) NOT NULL, `INFO_BINARY` blob, `TID` bigint(4) NOT NULL -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci SHOW processlist; Id User Host db Command Time State Info Progress ID ddicttestuser1 HOST_NAME information_schema Query TIME starting SHOW processlist TIME_MS diff --git a/mysql-test/suite/funcs_1/r/processlist_val_no_prot.result b/mysql-test/suite/funcs_1/r/processlist_val_no_prot.result index dba8de65fc035..5153ae313438a 100644 --- a/mysql-test/suite/funcs_1/r/processlist_val_no_prot.result +++ b/mysql-test/suite/funcs_1/r/processlist_val_no_prot.result @@ -30,7 +30,7 @@ PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` ( `QUERY_ID` bigint(4) NOT NULL, `INFO_BINARY` blob, `TID` bigint(4) NOT NULL -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci # Ensure that the information about the own connection is correct. 
#-------------------------------------------------------------------------- diff --git a/mysql-test/suite/funcs_1/r/processlist_val_ps.result b/mysql-test/suite/funcs_1/r/processlist_val_ps.result index 0806f00fc6acc..06ff8bd2d6818 100644 --- a/mysql-test/suite/funcs_1/r/processlist_val_ps.result +++ b/mysql-test/suite/funcs_1/r/processlist_val_ps.result @@ -30,7 +30,7 @@ PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` ( `QUERY_ID` bigint(4) NOT NULL, `INFO_BINARY` blob, `TID` bigint(4) NOT NULL -) DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci +) ENGINE=MEMORY DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci # Ensure that the information about the own connection is correct. #-------------------------------------------------------------------------- diff --git a/mysql-test/suite/heap/blob_dedup.result b/mysql-test/suite/heap/blob_dedup.result new file mode 100644 index 0000000000000..66149ce791a3e --- /dev/null +++ b/mysql-test/suite/heap/blob_dedup.result @@ -0,0 +1,15 @@ +CREATE TABLE t1 (a mediumtext) ENGINE=HEAP; +INSERT INTO t1 VALUES ('abc'),('def'); +SELECT DISTINCT a FROM t1; +a +abc +def +DROP TABLE t1; +CREATE TABLE t1 (a mediumtext); +CREATE TABLE t2 (b varchar(20)); +INSERT INTO t1 VALUES ('a'),('b'); +SELECT left(a,100000000) FROM t1 UNION SELECT b FROM t2; +left(a,100000000) +a +b +DROP TABLE t1, t2; diff --git a/mysql-test/suite/heap/blob_dedup.test b/mysql-test/suite/heap/blob_dedup.test new file mode 100644 index 0000000000000..16892d92ef4b7 --- /dev/null +++ b/mysql-test/suite/heap/blob_dedup.test @@ -0,0 +1,10 @@ +CREATE TABLE t1 (a mediumtext) ENGINE=HEAP; +INSERT INTO t1 VALUES ('abc'),('def'); +SELECT DISTINCT a FROM t1; +DROP TABLE t1; + +CREATE TABLE t1 (a mediumtext); +CREATE TABLE t2 (b varchar(20)); +INSERT INTO t1 VALUES ('a'),('b'); +SELECT left(a,100000000) FROM t1 UNION SELECT b FROM t2; +DROP TABLE t1, t2; diff --git a/mysql-test/suite/heap/heap_blob.result b/mysql-test/suite/heap/heap_blob.result new file mode 100644 index 
0000000000000..83b1c97203774 --- /dev/null +++ b/mysql-test/suite/heap/heap_blob.result @@ -0,0 +1,602 @@ +drop table if exists t1,t2; +# +# Basic CRUD with BLOB column +# +create table t1 (a int not null, b blob, primary key(a)) engine=memory; +insert into t1 values (1, 'hello'), (2, 'world'); +select * from t1 order by a; +a b +1 hello +2 world +select * from t1 where a=1; +a b +1 hello +select * from t1 where a=2; +a b +2 world +update t1 set b='updated' where a=1; +select * from t1 order by a; +a b +1 updated +2 world +delete from t1 where a=2; +select * from t1 order by a; +a b +1 updated +insert into t1 values (3, 'new row'); +select * from t1 order by a; +a b +1 updated +3 new row +drop table t1; +# +# Multiple BLOB/TEXT columns of different types +# +create table t1 ( +id int not null auto_increment, +t tinyblob, +b blob, +m mediumblob, +tx text, +primary key(id) +) engine=memory; +insert into t1 (t, b, m, tx) values +('tiny1', 'blob1', 'medium1', 'text1'), +('tiny2', 'blob2', 'medium2', 'text2'); +select * from t1 order by id; +id t b m tx +1 tiny1 blob1 medium1 text1 +2 tiny2 blob2 medium2 text2 +update t1 set b='blob_updated', tx='text_updated' where id=1; +select * from t1 order by id; +id t b m tx +1 tiny1 blob_updated medium1 text_updated +2 tiny2 blob2 medium2 text2 +delete from t1 where id=2; +select * from t1 order by id; +id t b m tx +1 tiny1 blob_updated medium1 text_updated +drop table t1; +# +# NULL and empty blob values +# +create table t1 (a int not null, b blob, c text, primary key(a)) engine=memory; +insert into t1 values (1, NULL, NULL); +insert into t1 values (2, '', ''); +insert into t1 values (3, 'data', 'text'); +select a, b, c, length(b), length(c) from t1 order by a; +a b c length(b) length(c) +1 NULL NULL NULL NULL +2 0 0 +3 data text 4 4 +update t1 set b=NULL where a=3; +select a, b, c, length(b), length(c) from t1 order by a; +a b c length(b) length(c) +1 NULL NULL NULL NULL +2 0 0 +3 NULL text NULL 4 +update t1 set b='restored' 
where a=3; +select a, b, c, length(b), length(c) from t1 order by a; +a b c length(b) length(c) +1 NULL NULL NULL NULL +2 0 0 +3 restored text 8 4 +drop table t1; +# +# Large BLOBs spanning multiple continuation runs +# For (int, blob): recbuffer=16, visible=15, leaf block ~1021 slots. +# Max run payload ~15305 bytes. Sizes chosen to span multiple runs +# and not align to visible (15 bytes). +# +create table t1 (a int not null, b blob, primary key(a)) engine=memory; +insert into t1 values (1, repeat('A', 1000)); +insert into t1 values (2, repeat('B', 50000)); +insert into t1 values (3, repeat('C', 63001)); +select a, length(b), left(b, 5), right(b, 5) from t1 order by a; +a length(b) left(b, 5) right(b, 5) +1 1000 AAAAA AAAAA +2 50000 BBBBB BBBBB +3 63001 CCCCC CCCCC +select a from t1 where b = repeat('A', 1000); +a +1 +select a from t1 where b = repeat('B', 50000); +a +2 +select a from t1 where b = repeat('C', 63001); +a +3 +update t1 set b=repeat('D', 63001) where a=1; +select a, length(b), left(b, 5), right(b, 5) from t1 order by a; +a length(b) left(b, 5) right(b, 5) +1 63001 DDDDD DDDDD +2 50000 BBBBB BBBBB +3 63001 CCCCC CCCCC +select a from t1 where b = repeat('D', 63001); +a +1 +update t1 set b=repeat('E', 100) where a=2; +select a, length(b), left(b, 5), right(b, 5) from t1 order by a; +a length(b) left(b, 5) right(b, 5) +1 63001 DDDDD DDDDD +2 100 EEEEE EEEEE +3 63001 CCCCC CCCCC +drop table t1; +# +# Mixed operations: insert, delete, insert (free list reuse) +# +create table t1 (a int not null, b blob, primary key(a)) engine=memory; +insert into t1 values (1, repeat('X', 20000)); +insert into t1 values (2, repeat('Y', 50000)); +insert into t1 values (3, repeat('Z', 10000)); +delete from t1 where a=2; +insert into t1 values (4, repeat('W', 40000)); +select a, length(b) from t1 order by a; +a length(b) +1 20000 +3 10000 +4 40000 +select a from t1 where b = repeat('X', 20000); +a +1 +select a from t1 where b = repeat('Z', 10000); +a +3 +select a from t1 
where b = repeat('W', 40000); +a +4 +delete from t1; +insert into t1 values (10, repeat('R', 50000)); +insert into t1 values (20, repeat('S', 50000)); +select a, length(b) from t1 order by a; +a length(b) +10 50000 +20 50000 +drop table t1; +# +# Free list fragmentation: NULL-blob rows interleaved with large-blob rows +# +# When rows with NULL blobs and rows with large blobs are deleted, the +# free list gets primary-record slots (from NULL rows) interleaved between +# continuation slots (from large-blob rows). The peek-then-unlink +# algorithm must find contiguous continuation groups despite these +# interleaving primary slots breaking address contiguity. +# +# After deleting all rows and reinserting, the new blob data must be +# correct — verifying no free list corruption from the fragmented state. +# +create table t1 (a int not null, b blob, primary key(a)) engine=memory; +insert into t1 values (1, NULL); +insert into t1 values (2, repeat('A', 20000)); +insert into t1 values (3, NULL); +insert into t1 values (4, repeat('B', 30000)); +insert into t1 values (5, NULL); +insert into t1 values (6, repeat('C', 25000)); +select a, length(b) from t1 order by a; +a length(b) +1 NULL +2 20000 +3 NULL +4 30000 +5 NULL +6 25000 +delete from t1 where a=2; +delete from t1 where a=4; +delete from t1 where a=6; +delete from t1 where a=1; +delete from t1 where a=3; +delete from t1 where a=5; +insert into t1 values (10, repeat('D', 35000)); +insert into t1 values (20, repeat('E', 20000)); +insert into t1 values (30, repeat('F', 15000)); +select a, length(b) from t1 order by a; +a length(b) +10 35000 +20 20000 +30 15000 +select a from t1 where b = repeat('D', 35000); +a +10 +select a from t1 where b = repeat('E', 20000); +a +20 +select a from t1 where b = repeat('F', 15000); +a +30 +delete from t1 where a=10; +insert into t1 values (40, repeat('G', 40000)); +select a, length(b) from t1 order by a; +a length(b) +20 20000 +30 15000 +40 40000 +select a from t1 where b = repeat('E', 
20000); +a +20 +select a from t1 where b = repeat('F', 15000); +a +30 +select a from t1 where b = repeat('G', 40000); +a +40 +drop table t1; +# +# Free list scavenging with mixed NULL and non-NULL blob columns +# +# Multiple blob columns where some are NULL and others are large. +# This creates rows with partial continuation chains — the NULL +# columns have no chain while the non-NULL columns do. +# +create table t1 ( +a int not null, +b blob, +c blob, +primary key(a) +) engine=memory; +insert into t1 values (1, repeat('X', 15000), NULL); +insert into t1 values (2, NULL, repeat('Y', 25000)); +insert into t1 values (3, repeat('Z', 10000), repeat('W', 20000)); +insert into t1 values (4, NULL, NULL); +select a, length(b), length(c) from t1 order by a; +a length(b) length(c) +1 15000 NULL +2 NULL 25000 +3 10000 20000 +4 NULL NULL +delete from t1 where a=1; +delete from t1 where a=3; +insert into t1 values (5, repeat('P', 18000), repeat('Q', 22000)); +select a, length(b), length(c) from t1 order by a; +a length(b) length(c) +2 NULL 25000 +4 NULL NULL +5 18000 22000 +select a from t1 where b is null and c = repeat('Y', 25000); +a +2 +select a from t1 where b = repeat('P', 18000) and c = repeat('Q', 22000); +a +5 +delete from t1; +insert into t1 values (6, repeat('R', 30000), repeat('S', 30000)); +select a, length(b), length(c) from t1 order by a; +a length(b) length(c) +6 30000 30000 +select a from t1 where b = repeat('R', 30000) and c = repeat('S', 30000); +a +6 +drop table t1; +# +# TRUNCATE with BLOB data +# +create table t1 (a int not null, b blob, primary key(a)) engine=memory; +insert into t1 values (1, repeat('T', 30000)), (2, repeat('U', 30000)); +select count(*) from t1; +count(*) +2 +truncate table t1; +select count(*) from t1; +count(*) +0 +insert into t1 values (1, 'after truncate'); +select * from t1; +a b +1 after truncate +drop table t1; +# +# Full table scan correctness +# +create table t1 (a int not null, b blob) engine=memory; +insert into t1 values 
(1, repeat('a', 500)); +insert into t1 values (2, repeat('b', 23000)); +insert into t1 values (3, repeat('c', 51000)); +insert into t1 values (4, NULL); +insert into t1 values (5, ''); +select a, length(b), left(b, 5) from t1 order by a; +a length(b) left(b, 5) +1 500 aaaaa +2 23000 bbbbb +3 51000 ccccc +4 NULL NULL +5 0 +select count(*) from t1; +count(*) +5 +drop table t1; +# +# Hash index on non-blob column with blob data present +# +create table t1 ( +a int not null, +b varchar(20) not null, +c blob, +primary key(a), +key(b) +) engine=memory; +insert into t1 values (1, 'key1', repeat('h', 20000)); +insert into t1 values (2, 'key2', repeat('i', 33000)); +insert into t1 values (3, 'key1', repeat('j', 10000)); +select a, b, length(c) from t1 where b='key1' order by a; +a b length(c) +1 key1 20000 +3 key1 10000 +select a, b, length(c) from t1 where b='key2'; +a b length(c) +2 key2 33000 +select a, b, length(c) from t1 where a=2; +a b length(c) +2 key2 33000 +drop table t1; +# +# BTREE index on non-blob column with blob data present +# +create table t1 ( +a int not null, +b int not null, +c blob, +key b_idx using btree (b) +) engine=memory; +insert into t1 values (1, 10, repeat('p', 17000)); +insert into t1 values (2, 20, repeat('q', 25000)); +insert into t1 values (3, 30, repeat('r', 41000)); +insert into t1 values (4, 20, repeat('s', 19000)); +select a, b, length(c) from t1 where b=20 order by a; +a b length(c) +2 20 25000 +4 20 19000 +select a, b, length(c) from t1 where b>=20 order by b, a; +a b length(c) +2 20 25000 +4 20 19000 +3 30 41000 +drop table t1; +# +# REPLACE with BLOB column +# +create table t1 (a int not null, b blob, primary key(a)) engine=memory; +insert into t1 values (1, 'original'); +insert into t1 values (2, repeat('x', 30000)); +replace into t1 values (1, repeat('replaced', 5000)); +select a, length(b), left(b, 20) from t1 order by a; +a length(b) left(b, 20) +1 40000 replacedreplacedrepl +2 30000 xxxxxxxxxxxxxxxxxxxx +replace into t1 values 
(2, 'short'); +select a, length(b), left(b, 20) from t1 order by a; +a length(b) left(b, 20) +1 40000 replacedreplacedrepl +2 5 short +drop table t1; +# +# INSERT ... SELECT with BLOB data +# +create table t1 (a int not null, b blob, primary key(a)) engine=memory; +create table t2 (a int not null, b blob, primary key(a)) engine=memory; +insert into t1 values (1, repeat('m', 22000)), (2, repeat('n', 37000)); +insert into t2 select * from t1; +select a, length(b) from t2 order by a; +a length(b) +1 22000 +2 37000 +select a from t2 where b=repeat('m', 22000); +a +1 +select a from t2 where b=repeat('n', 37000); +a +2 +drop table t1, t2; +# +# TINYBLOB NOT NULL edge case (reclength=9, minimal visible_offset) +# +CREATE TABLE t_tiny (b TINYBLOB NOT NULL) ENGINE=MEMORY; +INSERT INTO t_tiny VALUES ('hello'), ('world'); +SELECT * FROM t_tiny; +b +hello +world +DROP TABLE t_tiny; +# +# TINYBLOB NULL edge case (reclength=10) +# +CREATE TABLE t_tiny2 (b TINYBLOB) ENGINE=MEMORY; +INSERT INTO t_tiny2 VALUES ('foo'), ('bar'); +SELECT * FROM t_tiny2; +b +foo +bar +DROP TABLE t_tiny2; +# +# Blob-only table with no primary key +# +create table t1 (b blob) engine=memory; +insert into t1 values (repeat('A', 5000)), (repeat('B', 10000)); +select length(b), left(b, 3) from t1 order by b; +length(b) left(b, 3) +5000 AAA +10000 BBB +delete from t1; +insert into t1 values ('short1'), ('short2'); +select b from t1 order by b; +b +short1 +short2 +drop table t1; +# +# Table-full error with blob data +# +set @save_max= @@max_heap_table_size; +set @@max_heap_table_size= 65536; +create table t1 (a int not null, b blob, primary key(a)) engine=memory; +insert into t1 values (1, repeat('x', 30000)); +insert into t1 values (2, repeat('y', 30000)); +ERROR HY000: The table 't1' is full +insert into t1 values (3, repeat('z', 30000)); +ERROR HY000: The table 't1' is full +select a, length(b) from t1 where a=1; +a length(b) +1 30000 +set @@max_heap_table_size= @save_max; +drop table t1; +# +# Multiple 
blob columns with different sizes in same row +# +create table t1 ( +a int not null, +b tinyblob, +c blob, +d mediumblob, +primary key(a) +) engine=memory; +insert into t1 values (1, repeat('p', 200), repeat('q', 30000), repeat('r', 60000)); +insert into t1 values (2, 'small', repeat('s', 15000), repeat('t', 45000)); +select a, length(b), length(c), length(d), left(b,3), left(c,3), left(d,3) from t1 order by a; +a length(b) length(c) length(d) left(b,3) left(c,3) left(d,3) +1 200 30000 60000 ppp qqq rrr +2 5 15000 45000 sma sss ttt +select a from t1 where b=repeat('p', 200) and c=repeat('q', 30000) and d=repeat('r', 60000); +a +1 +select a from t1 where b='small' and c=repeat('s', 15000) and d=repeat('t', 45000); +a +2 +drop table t1; +# +# UPDATE failure preserves old blob data (table-full during blob grow) +# +set @save_max= @@max_heap_table_size; +set @@max_heap_table_size= 65536; +create table t1 (a int not null, b longblob, primary key(a)) engine=memory; +insert into t1 values (1, repeat('A', 5000)); +insert into t1 values (2, repeat('B', 5000)); +update t1 set b=repeat('X', 200000) where a=1; +ERROR HY000: The table 't1' is full +select a, length(b), left(b, 5), right(b, 5) from t1 order by a; +a length(b) left(b, 5) right(b, 5) +1 5000 AAAAA AAAAA +2 5000 BBBBB BBBBB +select a from t1 where b=repeat('A', 5000); +a +1 +select a from t1 where b=repeat('B', 5000); +a +2 +set @@max_heap_table_size= @save_max; +drop table t1; +# +# Large blob exceeding uint16 run_rec_count cap (65535 records) +# +# With recbuffer=16, visible=15, a 1MB blob needs ~69906 records, +# exceeding the uint16 max of 65535. The free list scavenging must +# split into multiple runs at the cap boundary. +# Delete-then-reinsert exercises scavenging of the freed chain. 
+# +set @save_max= @@max_heap_table_size; +set @@max_heap_table_size= 64 * 1024 * 1024; +create table t1 (a int not null, b longblob, primary key(a)) engine=memory; +insert into t1 values (1, repeat('A', 1048576)); +insert into t1 values (2, repeat('B', 1048576)); +select a, length(b), left(b, 5), right(b, 5) from t1 order by a; +a length(b) left(b, 5) right(b, 5) +1 1048576 AAAAA AAAAA +2 1048576 BBBBB BBBBB +select a from t1 where b = repeat('A', 1048576); +a +1 +select a from t1 where b = repeat('B', 1048576); +a +2 +delete from t1 where a=1; +delete from t1 where a=2; +insert into t1 values (3, repeat('C', 1048576)); +insert into t1 values (4, repeat('D', 1048576)); +select a, length(b), left(b, 5), right(b, 5) from t1 order by a; +a length(b) left(b, 5) right(b, 5) +3 1048576 CCCCC CCCCC +4 1048576 DDDDD DDDDD +select a from t1 where b = repeat('C', 1048576); +a +3 +select a from t1 where b = repeat('D', 1048576); +a +4 +set @@max_heap_table_size= @save_max; +drop table t1; +# +# Zero-copy Case A: tiny blobs fitting in rec 0 payload (wide table) +# +create table t_casea (a int not null, b varchar(480), c blob, primary key(a)) engine=memory; +insert into t_casea values (1, repeat('v', 480), repeat('A', 400)); +insert into t_casea values (2, repeat('w', 480), repeat('B', 100)); +select a, length(c), left(c,3) from t_casea order by a; +a length(c) left(c,3) +1 400 AAA +2 100 BBB +select a from t_casea where c = repeat('A', 400); +a +1 +select a from t_casea where c = repeat('B', 100); +a +2 +drop table t_casea; +# +# Zero-copy Case B: medium blobs (single run, multiple records) +# +create table t_caseb (a int not null, b blob, primary key(a)) engine=memory; +insert into t_caseb values (1, repeat('M', 8000)); +insert into t_caseb values (2, repeat('N', 15000)); +select a, length(b), left(b,3), right(b,3) from t_caseb order by a; +a length(b) left(b,3) right(b,3) +1 8000 MMM MMM +2 15000 NNN NNN +select a from t_caseb where b = repeat('M', 8000); +a +1 +select a 
from t_caseb where b = repeat('N', 15000); +a +2 +delete from t_caseb where a=1; +insert into t_caseb values (3, repeat('O', 12000)); +select a, length(b) from t_caseb order by a; +a length(b) +2 15000 +3 12000 +select a from t_caseb where b = repeat('O', 12000); +a +3 +drop table t_caseb; +# +# Zero-copy Case B->C boundary (large blobs forcing multi-run) +# +create table t_boundary (a int not null, b blob, primary key(a)) engine=memory; +insert into t_boundary values (1, repeat('X', 15000)); +insert into t_boundary values (2, repeat('Y', 50000)); +select a, length(b), left(b,3) from t_boundary order by a; +a length(b) left(b,3) +1 15000 XXX +2 50000 YYY +select a from t_boundary where b = repeat('X', 15000); +a +1 +select a from t_boundary where b = repeat('Y', 50000); +a +2 +drop table t_boundary; +# +# Non-blob table regression: ensure no behavioral change +# +create table t1 (a int not null, b varchar(100), primary key(a)) engine=memory; +insert into t1 values (1, 'no blob here'), (2, 'still no blob'); +select * from t1 order by a; +a b +1 no blob here +2 still no blob +update t1 set b='changed' where a=1; +select * from t1 order by a; +a b +1 changed +2 still no blob +delete from t1 where a=2; +select * from t1 order by a; +a b +1 changed +drop table t1; diff --git a/mysql-test/suite/heap/heap_blob.test b/mysql-test/suite/heap/heap_blob.test new file mode 100644 index 0000000000000..b29611f2c8cef --- /dev/null +++ b/mysql-test/suite/heap/heap_blob.test @@ -0,0 +1,439 @@ +# +# Test BLOB/TEXT column support in HEAP (MEMORY) tables. 
+# + +--disable_warnings +drop table if exists t1,t2; +--enable_warnings + +--echo # +--echo # Basic CRUD with BLOB column +--echo # +create table t1 (a int not null, b blob, primary key(a)) engine=memory; +insert into t1 values (1, 'hello'), (2, 'world'); +select * from t1 order by a; +select * from t1 where a=1; +select * from t1 where a=2; +update t1 set b='updated' where a=1; +select * from t1 order by a; +delete from t1 where a=2; +select * from t1 order by a; +insert into t1 values (3, 'new row'); +select * from t1 order by a; +drop table t1; + +--echo # +--echo # Multiple BLOB/TEXT columns of different types +--echo # +create table t1 ( + id int not null auto_increment, + t tinyblob, + b blob, + m mediumblob, + tx text, + primary key(id) +) engine=memory; +insert into t1 (t, b, m, tx) values + ('tiny1', 'blob1', 'medium1', 'text1'), + ('tiny2', 'blob2', 'medium2', 'text2'); +select * from t1 order by id; +update t1 set b='blob_updated', tx='text_updated' where id=1; +select * from t1 order by id; +delete from t1 where id=2; +select * from t1 order by id; +drop table t1; + +--echo # +--echo # NULL and empty blob values +--echo # +create table t1 (a int not null, b blob, c text, primary key(a)) engine=memory; +insert into t1 values (1, NULL, NULL); +insert into t1 values (2, '', ''); +insert into t1 values (3, 'data', 'text'); +select a, b, c, length(b), length(c) from t1 order by a; +update t1 set b=NULL where a=3; +select a, b, c, length(b), length(c) from t1 order by a; +update t1 set b='restored' where a=3; +select a, b, c, length(b), length(c) from t1 order by a; +drop table t1; + +--echo # +--echo # Large BLOBs spanning multiple continuation runs +--echo # For (int, blob): recbuffer=16, visible=15, leaf block ~1021 slots. +--echo # Max run payload ~15305 bytes. Sizes chosen to span multiple runs +--echo # and not align to visible (15 bytes). 
+--echo # +create table t1 (a int not null, b blob, primary key(a)) engine=memory; +insert into t1 values (1, repeat('A', 1000)); +insert into t1 values (2, repeat('B', 50000)); +insert into t1 values (3, repeat('C', 63001)); +select a, length(b), left(b, 5), right(b, 5) from t1 order by a; +# Verify data integrity +select a from t1 where b = repeat('A', 1000); +select a from t1 where b = repeat('B', 50000); +select a from t1 where b = repeat('C', 63001); +# Update small to large (multi-run) +update t1 set b=repeat('D', 63001) where a=1; +select a, length(b), left(b, 5), right(b, 5) from t1 order by a; +select a from t1 where b = repeat('D', 63001); +# Update large to small +update t1 set b=repeat('E', 100) where a=2; +select a, length(b), left(b, 5), right(b, 5) from t1 order by a; +drop table t1; + +--echo # +--echo # Mixed operations: insert, delete, insert (free list reuse) +--echo # +create table t1 (a int not null, b blob, primary key(a)) engine=memory; +insert into t1 values (1, repeat('X', 20000)); +insert into t1 values (2, repeat('Y', 50000)); +insert into t1 values (3, repeat('Z', 10000)); +delete from t1 where a=2; +# This insert should reuse freed continuation records +insert into t1 values (4, repeat('W', 40000)); +select a, length(b) from t1 order by a; +select a from t1 where b = repeat('X', 20000); +select a from t1 where b = repeat('Z', 10000); +select a from t1 where b = repeat('W', 40000); +# Delete all and reinsert +delete from t1; +insert into t1 values (10, repeat('R', 50000)); +insert into t1 values (20, repeat('S', 50000)); +select a, length(b) from t1 order by a; +drop table t1; + +--echo # +--echo # Free list fragmentation: NULL-blob rows interleaved with large-blob rows +--echo # +--echo # When rows with NULL blobs and rows with large blobs are deleted, the +--echo # free list gets primary-record slots (from NULL rows) interleaved between +--echo # continuation slots (from large-blob rows). 
The peek-then-unlink +--echo # algorithm must find contiguous continuation groups despite these +--echo # interleaving primary slots breaking address contiguity. +--echo # +--echo # After deleting all rows and reinserting, the new blob data must be +--echo # correct — verifying no free list corruption from the fragmented state. +--echo # +create table t1 (a int not null, b blob, primary key(a)) engine=memory; +# Insert alternating: NULL blob, large blob, NULL blob, large blob +# The primary slots for NULL rows will sit between continuation runs +# on the free list after deletion. +insert into t1 values (1, NULL); +insert into t1 values (2, repeat('A', 20000)); +insert into t1 values (3, NULL); +insert into t1 values (4, repeat('B', 30000)); +insert into t1 values (5, NULL); +insert into t1 values (6, repeat('C', 25000)); +select a, length(b) from t1 order by a; +# Delete in an order that creates maximum free list interleaving: +# large blob rows first (their continuation slots go to free list), +# then NULL rows (their primary slots go to free list head, +# interleaving with the continuation slots). +delete from t1 where a=2; +delete from t1 where a=4; +delete from t1 where a=6; +delete from t1 where a=1; +delete from t1 where a=3; +delete from t1 where a=5; +# Reinsert large blobs — these should either scavenge contiguous groups +# from the fragmented free list or fall through to tail allocation. +# Either way, data must be correct. +insert into t1 values (10, repeat('D', 35000)); +insert into t1 values (20, repeat('E', 20000)); +insert into t1 values (30, repeat('F', 15000)); +select a, length(b) from t1 order by a; +select a from t1 where b = repeat('D', 35000); +select a from t1 where b = repeat('E', 20000); +select a from t1 where b = repeat('F', 15000); +# Second cycle: delete and reinsert again to exercise scavenging of +# the runs we just created (which are now interleaved differently). 
+delete from t1 where a=10; +insert into t1 values (40, repeat('G', 40000)); +select a, length(b) from t1 order by a; +select a from t1 where b = repeat('E', 20000); +select a from t1 where b = repeat('F', 15000); +select a from t1 where b = repeat('G', 40000); +drop table t1; + +--echo # +--echo # Free list scavenging with mixed NULL and non-NULL blob columns +--echo # +--echo # Multiple blob columns where some are NULL and others are large. +--echo # This creates rows with partial continuation chains — the NULL +--echo # columns have no chain while the non-NULL columns do. +--echo # +create table t1 ( + a int not null, + b blob, + c blob, + primary key(a) +) engine=memory; +insert into t1 values (1, repeat('X', 15000), NULL); +insert into t1 values (2, NULL, repeat('Y', 25000)); +insert into t1 values (3, repeat('Z', 10000), repeat('W', 20000)); +insert into t1 values (4, NULL, NULL); +select a, length(b), length(c) from t1 order by a; +# Delete rows with different blob patterns to create varied free list state +delete from t1 where a=1; +delete from t1 where a=3; +# Insert new rows that should scavenge from the freed continuation slots +insert into t1 values (5, repeat('P', 18000), repeat('Q', 22000)); +select a, length(b), length(c) from t1 order by a; +select a from t1 where b is null and c = repeat('Y', 25000); +select a from t1 where b = repeat('P', 18000) and c = repeat('Q', 22000); +# Delete everything, reinsert to verify full cleanup +delete from t1; +insert into t1 values (6, repeat('R', 30000), repeat('S', 30000)); +select a, length(b), length(c) from t1 order by a; +select a from t1 where b = repeat('R', 30000) and c = repeat('S', 30000); +drop table t1; + +--echo # +--echo # TRUNCATE with BLOB data +--echo # +create table t1 (a int not null, b blob, primary key(a)) engine=memory; +insert into t1 values (1, repeat('T', 30000)), (2, repeat('U', 30000)); +select count(*) from t1; +truncate table t1; +select count(*) from t1; +insert into t1 values (1, 
'after truncate'); +select * from t1; +drop table t1; + +--echo # +--echo # Full table scan correctness +--echo # +create table t1 (a int not null, b blob) engine=memory; +insert into t1 values (1, repeat('a', 500)); +insert into t1 values (2, repeat('b', 23000)); +insert into t1 values (3, repeat('c', 51000)); +insert into t1 values (4, NULL); +insert into t1 values (5, ''); +# Full scan should return exactly 5 rows, no continuation record leaks +select a, length(b), left(b, 5) from t1 order by a; +select count(*) from t1; +drop table t1; + +--echo # +--echo # Hash index on non-blob column with blob data present +--echo # +create table t1 ( + a int not null, + b varchar(20) not null, + c blob, + primary key(a), + key(b) +) engine=memory; +insert into t1 values (1, 'key1', repeat('h', 20000)); +insert into t1 values (2, 'key2', repeat('i', 33000)); +insert into t1 values (3, 'key1', repeat('j', 10000)); +select a, b, length(c) from t1 where b='key1' order by a; +select a, b, length(c) from t1 where b='key2'; +select a, b, length(c) from t1 where a=2; +drop table t1; + +--echo # +--echo # BTREE index on non-blob column with blob data present +--echo # +create table t1 ( + a int not null, + b int not null, + c blob, + key b_idx using btree (b) +) engine=memory; +insert into t1 values (1, 10, repeat('p', 17000)); +insert into t1 values (2, 20, repeat('q', 25000)); +insert into t1 values (3, 30, repeat('r', 41000)); +insert into t1 values (4, 20, repeat('s', 19000)); +select a, b, length(c) from t1 where b=20 order by a; +select a, b, length(c) from t1 where b>=20 order by b, a; +drop table t1; + +--echo # +--echo # REPLACE with BLOB column +--echo # +create table t1 (a int not null, b blob, primary key(a)) engine=memory; +insert into t1 values (1, 'original'); +insert into t1 values (2, repeat('x', 30000)); +replace into t1 values (1, repeat('replaced', 5000)); +select a, length(b), left(b, 20) from t1 order by a; +replace into t1 values (2, 'short'); +select a, 
length(b), left(b, 20) from t1 order by a; +drop table t1; + +--echo # +--echo # INSERT ... SELECT with BLOB data +--echo # +create table t1 (a int not null, b blob, primary key(a)) engine=memory; +create table t2 (a int not null, b blob, primary key(a)) engine=memory; +insert into t1 values (1, repeat('m', 22000)), (2, repeat('n', 37000)); +insert into t2 select * from t1; +select a, length(b) from t2 order by a; +select a from t2 where b=repeat('m', 22000); +select a from t2 where b=repeat('n', 37000); +drop table t1, t2; + +--echo # +--echo # TINYBLOB NOT NULL edge case (reclength=9, minimal visible_offset) +--echo # +CREATE TABLE t_tiny (b TINYBLOB NOT NULL) ENGINE=MEMORY; +INSERT INTO t_tiny VALUES ('hello'), ('world'); +SELECT * FROM t_tiny; +DROP TABLE t_tiny; + +--echo # +--echo # TINYBLOB NULL edge case (reclength=10) +--echo # +CREATE TABLE t_tiny2 (b TINYBLOB) ENGINE=MEMORY; +INSERT INTO t_tiny2 VALUES ('foo'), ('bar'); +SELECT * FROM t_tiny2; +DROP TABLE t_tiny2; + +--echo # +--echo # Blob-only table with no primary key +--echo # +create table t1 (b blob) engine=memory; +insert into t1 values (repeat('A', 5000)), (repeat('B', 10000)); +select length(b), left(b, 3) from t1 order by b; +delete from t1; +insert into t1 values ('short1'), ('short2'); +select b from t1 order by b; +drop table t1; + +--echo # +--echo # Table-full error with blob data +--echo # +set @save_max= @@max_heap_table_size; +# Variable must be set before CREATE TABLE (limit is captured at creation). 
+set @@max_heap_table_size= 65536; +create table t1 (a int not null, b blob, primary key(a)) engine=memory; +# Insert until table is full; blob data consumes continuation slots +--disable_abort_on_error +insert into t1 values (1, repeat('x', 30000)); +insert into t1 values (2, repeat('y', 30000)); +insert into t1 values (3, repeat('z', 30000)); +--enable_abort_on_error +# At least the first row should be readable +select a, length(b) from t1 where a=1; +set @@max_heap_table_size= @save_max; +drop table t1; + +--echo # +--echo # Multiple blob columns with different sizes in same row +--echo # +create table t1 ( + a int not null, + b tinyblob, + c blob, + d mediumblob, + primary key(a) +) engine=memory; +insert into t1 values (1, repeat('p', 200), repeat('q', 30000), repeat('r', 60000)); +insert into t1 values (2, 'small', repeat('s', 15000), repeat('t', 45000)); +select a, length(b), length(c), length(d), left(b,3), left(c,3), left(d,3) from t1 order by a; +# Verify data integrity +select a from t1 where b=repeat('p', 200) and c=repeat('q', 30000) and d=repeat('r', 60000); +select a from t1 where b='small' and c=repeat('s', 15000) and d=repeat('t', 45000); +drop table t1; + +--echo # +--echo # UPDATE failure preserves old blob data (table-full during blob grow) +--echo # +set @save_max= @@max_heap_table_size; +# Size chosen so two rows with small blobs fit, but updating one to a +# large blob exhausts the table before the new chain is fully written. +# Variable must be set before CREATE TABLE (limit is captured at creation). +# Use LONGBLOB so the 200KB value is accepted by the column type. 
+set @@max_heap_table_size= 65536; +create table t1 (a int not null, b longblob, primary key(a)) engine=memory; +insert into t1 values (1, repeat('A', 5000)); +insert into t1 values (2, repeat('B', 5000)); +# This update should fail: the new blob is too large for the table +--error ER_RECORD_FILE_FULL +update t1 set b=repeat('X', 200000) where a=1; +# Old data must survive intact after the failed update +select a, length(b), left(b, 5), right(b, 5) from t1 order by a; +select a from t1 where b=repeat('A', 5000); +select a from t1 where b=repeat('B', 5000); +set @@max_heap_table_size= @save_max; +drop table t1; + +--echo # +--echo # Large blob exceeding uint16 run_rec_count cap (65535 records) +--echo # +--echo # With recbuffer=16, visible=15, a 1MB blob needs ~69906 records, +--echo # exceeding the uint16 max of 65535. The free list scavenging must +--echo # split into multiple runs at the cap boundary. +--echo # Delete-then-reinsert exercises scavenging of the freed chain. +--echo # +set @save_max= @@max_heap_table_size; +set @@max_heap_table_size= 64 * 1024 * 1024; +create table t1 (a int not null, b longblob, primary key(a)) engine=memory; +insert into t1 values (1, repeat('A', 1048576)); +insert into t1 values (2, repeat('B', 1048576)); +select a, length(b), left(b, 5), right(b, 5) from t1 order by a; +select a from t1 where b = repeat('A', 1048576); +select a from t1 where b = repeat('B', 1048576); +# Delete both rows — puts ~140K contiguous records on free list +delete from t1 where a=1; +delete from t1 where a=2; +# Reinsert — scavenges from free list, must split at uint16 boundary +insert into t1 values (3, repeat('C', 1048576)); +insert into t1 values (4, repeat('D', 1048576)); +select a, length(b), left(b, 5), right(b, 5) from t1 order by a; +select a from t1 where b = repeat('C', 1048576); +select a from t1 where b = repeat('D', 1048576); +set @@max_heap_table_size= @save_max; +drop table t1; + +--echo # +--echo # Zero-copy Case A: tiny blobs fitting in 
rec 0 payload (wide table) +--echo # +create table t_casea (a int not null, b varchar(480), c blob, primary key(a)) engine=memory; +insert into t_casea values (1, repeat('v', 480), repeat('A', 400)); +insert into t_casea values (2, repeat('w', 480), repeat('B', 100)); +select a, length(c), left(c,3) from t_casea order by a; +select a from t_casea where c = repeat('A', 400); +select a from t_casea where c = repeat('B', 100); +drop table t_casea; + +--echo # +--echo # Zero-copy Case B: medium blobs (single run, multiple records) +--echo # +create table t_caseb (a int not null, b blob, primary key(a)) engine=memory; +insert into t_caseb values (1, repeat('M', 8000)); +insert into t_caseb values (2, repeat('N', 15000)); +select a, length(b), left(b,3), right(b,3) from t_caseb order by a; +select a from t_caseb where b = repeat('M', 8000); +select a from t_caseb where b = repeat('N', 15000); +# Delete and reinsert to exercise free list -> tail fallback +delete from t_caseb where a=1; +insert into t_caseb values (3, repeat('O', 12000)); +select a, length(b) from t_caseb order by a; +select a from t_caseb where b = repeat('O', 12000); +drop table t_caseb; + +--echo # +--echo # Zero-copy Case B->C boundary (large blobs forcing multi-run) +--echo # +create table t_boundary (a int not null, b blob, primary key(a)) engine=memory; +# Case B: single run, zero-copy +insert into t_boundary values (1, repeat('X', 15000)); +# Case C: large enough to span multiple leaf blocks +insert into t_boundary values (2, repeat('Y', 50000)); +select a, length(b), left(b,3) from t_boundary order by a; +select a from t_boundary where b = repeat('X', 15000); +select a from t_boundary where b = repeat('Y', 50000); +drop table t_boundary; + +--echo # +--echo # Non-blob table regression: ensure no behavioral change +--echo # +create table t1 (a int not null, b varchar(100), primary key(a)) engine=memory; +insert into t1 values (1, 'no blob here'), (2, 'still no blob'); +select * from t1 order by a; 
+update t1 set b='changed' where a=1; +select * from t1 order by a; +delete from t1 where a=2; +select * from t1 order by a; +drop table t1; diff --git a/mysql-test/suite/heap/heap_geometry.result b/mysql-test/suite/heap/heap_geometry.result new file mode 100644 index 0000000000000..6ff7e65e54428 --- /dev/null +++ b/mysql-test/suite/heap/heap_geometry.result @@ -0,0 +1,75 @@ +# +# Test GEOMETRY columns in MEMORY tables +# Reproduces blob data corruption during INSERT...SELECT doublings +# +set @save_max_heap_table_size= @@max_heap_table_size; +set max_heap_table_size= 128*1024*1024; +create table t1 (c1 int, c2 geometry not null) engine=MEMORY; +# Verify table is using MEMORY engine +select engine from information_schema.tables +where table_schema=database() and table_name='t1'; +engine +MEMORY +INSERT INTO t1 VALUES (1, ST_GeomFromText('LineString(2 2, 150 150)')); +INSERT INTO t1 VALUES (2, ST_GeomFromText('LineString(3 3, 160 160)')); +INSERT INTO t1 VALUES (3, ST_GeomFromText('LineString(4 4, 170 170)')); +INSERT INTO t1 VALUES (4, ST_GeomFromText('LineString(5 5, 180 180)')); +INSERT INTO t1 VALUES (5, ST_GeomFromText('LineString(6 6, 190 190)')); +INSERT INTO t1 VALUES (6, ST_GeomFromText('LineString(7 7, 200 200)')); +INSERT INTO t1 VALUES (7, ST_GeomFromText('LineString(8 8, 210 210)')); +# 7 rows, all valid +select count(*) from t1; +count(*) +7 +select count(*) as null_count from t1 where ST_AsText(c2) is null; +null_count +0 +# Doublings 1-8: no corruption expected +select count(*) as 'expect 1792' from t1; +expect 1792 +1792 +select count(*) as 'expect 0' from t1 where ST_AsText(c2) is null; +expect 0 +0 +# Doubling 9 +insert into t1 select * from t1; +select count(*) as 'expect 3584' from t1; +expect 3584 +3584 +select count(*) as 'expect 0' from t1 where ST_AsText(c2) is null; +expect 0 +0 +# Doubling 10 +insert into t1 select * from t1; +select count(*) as 'expect 7168' from t1; +expect 7168 +7168 +select count(*) as 'expect 0' from t1 where 
ST_AsText(c2) is null; +expect 0 +0 +# Doubling 11 +insert into t1 select * from t1; +select count(*) as 'expect 14336' from t1; +expect 14336 +14336 +select count(*) as 'expect 0' from t1 where ST_AsText(c2) is null; +expect 0 +0 +# Verify all geometry values present with correct counts +select ST_AsText(c2) as geom, count(*) as cnt from t1 +group by geom order by geom; +geom cnt +LINESTRING(2 2,150 150) 2048 +LINESTRING(3 3,160 160) 2048 +LINESTRING(4 4,170 170) 2048 +LINESTRING(5 5,180 180) 2048 +LINESTRING(6 6,190 190) 2048 +LINESTRING(7 7,200 200) 2048 +LINESTRING(8 8,210 210) 2048 +# MBRWithin check +set @g1 = ST_GeomFromText('Polygon((0 0,0 200,200 200,200 0,0 0))'); +select count(*) as 'expect 12288' from t1 where MBRWithin(t1.c2, @g1); +expect 12288 +12288 +drop table t1; +set max_heap_table_size= @save_max_heap_table_size; diff --git a/mysql-test/suite/heap/heap_geometry.test b/mysql-test/suite/heap/heap_geometry.test new file mode 100644 index 0000000000000..9d4fe38ff81b7 --- /dev/null +++ b/mysql-test/suite/heap/heap_geometry.test @@ -0,0 +1,65 @@ +--source include/have_geometry.inc + +--echo # +--echo # Test GEOMETRY columns in MEMORY tables +--echo # Reproduces blob data corruption during INSERT...SELECT doublings +--echo # + +set @save_max_heap_table_size= @@max_heap_table_size; +set max_heap_table_size= 128*1024*1024; + +create table t1 (c1 int, c2 geometry not null) engine=MEMORY; + +--echo # Verify table is using MEMORY engine +select engine from information_schema.tables + where table_schema=database() and table_name='t1'; + +INSERT INTO t1 VALUES (1, ST_GeomFromText('LineString(2 2, 150 150)')); +INSERT INTO t1 VALUES (2, ST_GeomFromText('LineString(3 3, 160 160)')); +INSERT INTO t1 VALUES (3, ST_GeomFromText('LineString(4 4, 170 170)')); +INSERT INTO t1 VALUES (4, ST_GeomFromText('LineString(5 5, 180 180)')); +INSERT INTO t1 VALUES (5, ST_GeomFromText('LineString(6 6, 190 190)')); +INSERT INTO t1 VALUES (6, ST_GeomFromText('LineString(7 7, 200 
200)')); +INSERT INTO t1 VALUES (7, ST_GeomFromText('LineString(8 8, 210 210)')); + +--echo # 7 rows, all valid +select count(*) from t1; +select count(*) as null_count from t1 where ST_AsText(c2) is null; + +--echo # Doublings 1-8: no corruption expected +--let $i= 8 +--disable_query_log +while ($i) +{ + insert into t1 select * from t1; + --dec $i +} +--enable_query_log +select count(*) as 'expect 1792' from t1; +select count(*) as 'expect 0' from t1 where ST_AsText(c2) is null; + +--echo # Doubling 9 +insert into t1 select * from t1; +select count(*) as 'expect 3584' from t1; +select count(*) as 'expect 0' from t1 where ST_AsText(c2) is null; + +--echo # Doubling 10 +insert into t1 select * from t1; +select count(*) as 'expect 7168' from t1; +select count(*) as 'expect 0' from t1 where ST_AsText(c2) is null; + +--echo # Doubling 11 +insert into t1 select * from t1; +select count(*) as 'expect 14336' from t1; +select count(*) as 'expect 0' from t1 where ST_AsText(c2) is null; + +--echo # Verify all geometry values present with correct counts +select ST_AsText(c2) as geom, count(*) as cnt from t1 + group by geom order by geom; + +--echo # MBRWithin check +set @g1 = ST_GeomFromText('Polygon((0 0,0 200,200 200,200 0,0 0))'); +select count(*) as 'expect 12288' from t1 where MBRWithin(t1.c2, @g1); + +drop table t1; +set max_heap_table_size= @save_max_heap_table_size; diff --git a/mysql-test/suite/innodb_fts/r/innodb-fts-ddl.result b/mysql-test/suite/innodb_fts/r/innodb-fts-ddl.result index fe7781a72f50f..eaf5b53f9c012 100644 --- a/mysql-test/suite/innodb_fts/r/innodb-fts-ddl.result +++ b/mysql-test/suite/innodb_fts/r/innodb-fts-ddl.result @@ -120,7 +120,7 @@ INSERT INTO fts_test (title, text) VALUES ANALYZE TABLE fts_test; set @@auto_increment_increment=1; select *, match(title, text) AGAINST ('database') as score -from fts_test order by score desc; +from fts_test order by score desc, FTS_DOC_ID; FTS_DOC_ID title text score 11 MySQL Tutorial DBMS stands for DataBase 
... 0.22764469683170319 51 MySQL vs. YourSQL In the following database comparison ... 0.22764469683170319 diff --git a/mysql-test/suite/innodb_fts/r/misc.result b/mysql-test/suite/innodb_fts/r/misc.result index 4afd9bf1f7485..f290744085dd7 100644 --- a/mysql-test/suite/innodb_fts/r/misc.result +++ b/mysql-test/suite/innodb_fts/r/misc.result @@ -492,7 +492,7 @@ INSERT INTO t1 (a,b) VALUES ('aab MySQL vs. YourSQL','In the following database comparison ...'), ('aaa MySQL Security','When configured properly, MySQL ...'); ALTER TABLE t1 ADD FULLTEXT INDEX idx (a,b); -SELECT * FROM t1 ORDER BY MATCH(a,b) AGAINST ('aac') DESC; +SELECT * FROM t1 ORDER BY MATCH(a,b) AGAINST ('aac') DESC, id; id a b 3 aac Optimizing MySQL In this tutorial we will show ... 4 aac 1001 MySQL Tricks 1. Never run mysqld as root. 2. ... @@ -500,7 +500,7 @@ id a b 2 aas How To Use MySQL Well After you went through a ... 5 aab MySQL vs. YourSQL In the following database comparison ... 6 aaa MySQL Security When configured properly, MySQL ... -SELECT * FROM t1 ORDER BY MATCH(a,b) AGAINST ('aab') DESC; +SELECT * FROM t1 ORDER BY MATCH(a,b) AGAINST ('aab') DESC, id; id a b 1 aab` MySQL Tutorial DBMS stands for DataBase ... 5 aab MySQL vs. YourSQL In the following database comparison ... @@ -1395,7 +1395,7 @@ this year.'),('Peter Pan','Tis a kids story.'),('Test1','nada'),('Database database database','foo database database database'),('Database article title','body with lots of words.'),('myfulltext database', 'my test fulltext database'); -SELECT id, title, body FROM articles ORDER BY MATCH (title,body) AGAINST ('database' IN BOOLEAN MODE) DESC; +SELECT id, title, body FROM articles ORDER BY MATCH (title,body) AGAINST ('database' IN BOOLEAN MODE) DESC, id; id title body 6 Database database database foo database database database @@ -1412,7 +1412,7 @@ this year. 
5 Test1 nada DELETE from articles WHERE title like "myfulltext database"; INSERT INTO articles (title,body) VALUES ('myfulltext database', 'my test fulltext database'); -SELECT id, title, body FROM articles ORDER BY MATCH (title,body) AGAINST ('database' IN BOOLEAN MODE) DESC; +SELECT id, title, body FROM articles ORDER BY MATCH (title,body) AGAINST ('database' IN BOOLEAN MODE) DESC, id; id title body 6 Database database database foo database database database @@ -1428,7 +1428,7 @@ this year. 5 Test1 nada DELETE from articles WHERE title like "myfulltext database"; INSERT INTO articles (title,body) VALUES ('myfulltext database', 'my test fulltext database'); -SELECT id, title, body FROM articles ORDER BY MATCH (title,body) AGAINST ('database' IN BOOLEAN MODE) DESC; +SELECT id, title, body FROM articles ORDER BY MATCH (title,body) AGAINST ('database' IN BOOLEAN MODE) DESC, id; id title body 6 Database database database foo database database database diff --git a/mysql-test/suite/innodb_fts/t/innodb-fts-ddl.test b/mysql-test/suite/innodb_fts/t/innodb-fts-ddl.test index 8d4cbe7b86c27..5efa2972205d9 100644 --- a/mysql-test/suite/innodb_fts/t/innodb-fts-ddl.test +++ b/mysql-test/suite/innodb_fts/t/innodb-fts-ddl.test @@ -187,7 +187,7 @@ ANALYZE TABLE fts_test; set @@auto_increment_increment=1; select *, match(title, text) AGAINST ('database') as score -from fts_test order by score desc; +from fts_test order by score desc, FTS_DOC_ID; drop index idx on fts_test; diff --git a/mysql-test/suite/innodb_fts/t/misc.test b/mysql-test/suite/innodb_fts/t/misc.test index f3c10d1620559..ad4d4e137cebc 100644 --- a/mysql-test/suite/innodb_fts/t/misc.test +++ b/mysql-test/suite/innodb_fts/t/misc.test @@ -493,8 +493,8 @@ ANALYZE TABLE t1; -- enable_result_log -- enable_query_log -SELECT * FROM t1 ORDER BY MATCH(a,b) AGAINST ('aac') DESC; -SELECT * FROM t1 ORDER BY MATCH(a,b) AGAINST ('aab') DESC; +SELECT * FROM t1 ORDER BY MATCH(a,b) AGAINST ('aac') DESC, id; +SELECT * FROM t1 ORDER BY 
MATCH(a,b) AGAINST ('aab') DESC, id; --echo "----------Test7---------" select * from t1 where match(a,b) against ('aaa') @@ -1347,17 +1347,17 @@ database database','foo database database database'),('Database article title','body with lots of words.'),('myfulltext database', 'my test fulltext database'); -SELECT id, title, body FROM articles ORDER BY MATCH (title,body) AGAINST ('database' IN BOOLEAN MODE) DESC; +SELECT id, title, body FROM articles ORDER BY MATCH (title,body) AGAINST ('database' IN BOOLEAN MODE) DESC, id; DELETE from articles WHERE title like "myfulltext database"; INSERT INTO articles (title,body) VALUES ('myfulltext database', 'my test fulltext database'); -SELECT id, title, body FROM articles ORDER BY MATCH (title,body) AGAINST ('database' IN BOOLEAN MODE) DESC; +SELECT id, title, body FROM articles ORDER BY MATCH (title,body) AGAINST ('database' IN BOOLEAN MODE) DESC, id; DELETE from articles WHERE title like "myfulltext database"; INSERT INTO articles (title,body) VALUES ('myfulltext database', 'my test fulltext database'); -SELECT id, title, body FROM articles ORDER BY MATCH (title,body) AGAINST ('database' IN BOOLEAN MODE) DESC; +SELECT id, title, body FROM articles ORDER BY MATCH (title,body) AGAINST ('database' IN BOOLEAN MODE) DESC, id; DROP TABLE articles; diff --git a/mysql-test/suite/perfschema/include/transaction_nested_events_verifier.inc b/mysql-test/suite/perfschema/include/transaction_nested_events_verifier.inc index baee5e840a563..faeef52341042 100644 --- a/mysql-test/suite/perfschema/include/transaction_nested_events_verifier.inc +++ b/mysql-test/suite/perfschema/include/transaction_nested_events_verifier.inc @@ -133,7 +133,7 @@ SELECT THREAD_ID, SQL_TEXT FROM performance_schema.events_statements_history_long s WHERE ((s.thread_id = @con1_thread_id) OR (@all_threads = 1)) -ORDER BY thread_id, r_event_id; +ORDER BY thread_id, r_event_id, r_end_event_id; --echo # --echo ### Clear statement and transaction history diff --git 
a/mysql-test/suite/perfschema/r/transaction_nested_events.result b/mysql-test/suite/perfschema/r/transaction_nested_events.result index 52fa3783a8bb5..699c807ced311 100644 --- a/mysql-test/suite/perfschema/r/transaction_nested_events.result +++ b/mysql-test/suite/perfschema/r/transaction_nested_events.result @@ -145,7 +145,7 @@ RPAD(IFNULL(NESTING_EVENT_TYPE, 'NULL'), 18, ' ') NESTING_EVENT_TYPE, SQL_TEXT FROM performance_schema.events_statements_history_long s WHERE ((s.thread_id = @con1_thread_id) OR (@all_threads = 1)) -ORDER BY thread_id, r_event_id; +ORDER BY thread_id, r_event_id, r_end_event_id; THREAD_ID R_EVENT_ID R_END_EVENT_ID EVENT_NAME R_NESTING_EVENT_ID NESTING_EVENT_TYPE SQL_TXT thread_id 1 2 statement/sql/insert NULL NULL INSERT INTO t1 VALUES (210, "INSERT 210") thread_id 2 2 transaction 1 STATEMENT @@ -265,7 +265,7 @@ RPAD(IFNULL(NESTING_EVENT_TYPE, 'NULL'), 18, ' ') NESTING_EVENT_TYPE, SQL_TEXT FROM performance_schema.events_statements_history_long s WHERE ((s.thread_id = @con1_thread_id) OR (@all_threads = 1)) -ORDER BY thread_id, r_event_id; +ORDER BY thread_id, r_event_id, r_end_event_id; THREAD_ID R_EVENT_ID R_END_EVENT_ID EVENT_NAME R_NESTING_EVENT_ID NESTING_EVENT_TYPE SQL_TXT thread_id 1 2 statement/sql/begin NULL NULL START TRANSACTION thread_id 2 5 transaction 1 STATEMENT @@ -397,7 +397,7 @@ RPAD(IFNULL(NESTING_EVENT_TYPE, 'NULL'), 18, ' ') NESTING_EVENT_TYPE, SQL_TEXT FROM performance_schema.events_statements_history_long s WHERE ((s.thread_id = @con1_thread_id) OR (@all_threads = 1)) -ORDER BY thread_id, r_event_id; +ORDER BY thread_id, r_event_id, r_end_event_id; THREAD_ID R_EVENT_ID R_END_EVENT_ID EVENT_NAME R_NESTING_EVENT_ID NESTING_EVENT_TYPE SQL_TXT thread_id 1 2 statement/sql/create_proc NULL NULL CREATE PROCEDURE tp_update() UPDATE t1 SET s1 = s1 + 1 thread_id 2 2 transaction 1 STATEMENT @@ -537,7 +537,7 @@ RPAD(IFNULL(NESTING_EVENT_TYPE, 'NULL'), 18, ' ') NESTING_EVENT_TYPE, SQL_TEXT FROM 
performance_schema.events_statements_history_long s WHERE ((s.thread_id = @con1_thread_id) OR (@all_threads = 1)) -ORDER BY thread_id, r_event_id; +ORDER BY thread_id, r_event_id, r_end_event_id; THREAD_ID R_EVENT_ID R_END_EVENT_ID EVENT_NAME R_NESTING_EVENT_ID NESTING_EVENT_TYPE SQL_TXT thread_id 1 2 statement/sql/create_proc NULL NULL CREATE PROCEDURE tp_start() START TRANSACTION thread_id 2 2 transaction 1 STATEMENT @@ -697,7 +697,7 @@ RPAD(IFNULL(NESTING_EVENT_TYPE, 'NULL'), 18, ' ') NESTING_EVENT_TYPE, SQL_TEXT FROM performance_schema.events_statements_history_long s WHERE ((s.thread_id = @con1_thread_id) OR (@all_threads = 1)) -ORDER BY thread_id, r_event_id; +ORDER BY thread_id, r_event_id, r_end_event_id; THREAD_ID R_EVENT_ID R_END_EVENT_ID EVENT_NAME R_NESTING_EVENT_ID NESTING_EVENT_TYPE SQL_TXT thread_id 1 2 statement/sql/create_proc NULL NULL CREATE PROCEDURE tp_rollback() ROLLBACK thread_id 2 2 transaction 1 STATEMENT @@ -871,7 +871,7 @@ RPAD(IFNULL(NESTING_EVENT_TYPE, 'NULL'), 18, ' ') NESTING_EVENT_TYPE, SQL_TEXT FROM performance_schema.events_statements_history_long s WHERE ((s.thread_id = @con1_thread_id) OR (@all_threads = 1)) -ORDER BY thread_id, r_event_id; +ORDER BY thread_id, r_event_id, r_end_event_id; THREAD_ID R_EVENT_ID R_END_EVENT_ID EVENT_NAME R_NESTING_EVENT_ID NESTING_EVENT_TYPE SQL_TXT thread_id 1 2 statement/sql/begin NULL NULL START TRANSACTION thread_id 3 3 statement/sql/insert 2 TRANSACTION INSERT INTO t1 VALUES (410, "INSERT 410") @@ -1005,7 +1005,7 @@ RPAD(IFNULL(NESTING_EVENT_TYPE, 'NULL'), 18, ' ') NESTING_EVENT_TYPE, SQL_TEXT FROM performance_schema.events_statements_history_long s WHERE ((s.thread_id = @con1_thread_id) OR (@all_threads = 1)) -ORDER BY thread_id, r_event_id; +ORDER BY thread_id, r_event_id, r_end_event_id; THREAD_ID R_EVENT_ID R_END_EVENT_ID EVENT_NAME R_NESTING_EVENT_ID NESTING_EVENT_TYPE SQL_TXT thread_id 1 2 statement/sql/begin NULL NULL START TRANSACTION thread_id 2 6 transaction 1 STATEMENT @@ -1238,7 
+1238,7 @@ RPAD(IFNULL(NESTING_EVENT_TYPE, 'NULL'), 18, ' ') NESTING_EVENT_TYPE, SQL_TEXT FROM performance_schema.events_statements_history_long s WHERE ((s.thread_id = @con1_thread_id) OR (@all_threads = 1)) -ORDER BY thread_id, r_event_id; +ORDER BY thread_id, r_event_id, r_end_event_id; THREAD_ID R_EVENT_ID R_END_EVENT_ID EVENT_NAME R_NESTING_EVENT_ID NESTING_EVENT_TYPE SQL_TXT thread_id 1 2 statement/sql/begin NULL NULL START TRANSACTION thread_id 2 19 transaction 1 STATEMENT diff --git a/mysql-test/suite/plugins/r/sql_error_log_withdbinfo.result b/mysql-test/suite/plugins/r/sql_error_log_withdbinfo.result index 732e74d851662..8f9de3e5e82e5 100644 --- a/mysql-test/suite/plugins/r/sql_error_log_withdbinfo.result +++ b/mysql-test/suite/plugins/r/sql_error_log_withdbinfo.result @@ -31,9 +31,9 @@ CREATE DATABASE `NULL`; USE `NULL`; DROP DATABASE db; ERROR HY000: Can't drop database 'db'; database doesn't exist -TIME THREAD_ID HOSTNAME `mtr` WARNING 1286: Unknown storage engine 'InnoDB' : SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql -TIME THREAD_ID HOSTNAME `mtr` WARNING 1286: Unknown storage engine 'InnoDB' : SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql -TIME THREAD_ID HOSTNAME `mtr` WARNING 1286: Unknown storage engine 'InnoDB' : SELECT CONCAT(table_schema, '.', table_name) AS 
columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql +TIME THREAD_ID HOSTNAME `mtr` WARNING 1286: Unknown storage engine 'InnoDB' : SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql, ordinal_position +TIME THREAD_ID HOSTNAME `mtr` WARNING 1286: Unknown storage engine 'InnoDB' : SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql, ordinal_position +TIME THREAD_ID HOSTNAME `mtr` WARNING 1286: Unknown storage engine 'InnoDB' : SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql, ordinal_position TIME THREAD_ID HOSTNAME `test` ERROR 1238: Variable 'sql_error_log_with_db_and_thread_info' is a read only variable : SET sql_error_log_with_db_and_thread_info=OFF TIME THREAD_ID HOSTNAME `test` ERROR 
1008: Can't drop database 'db'; database doesn't exist : DROP DATABASE db TIME THREAD_ID HOSTNAME NULL ERROR 1008: Can't drop database 'dbnodb'; database doesn't exist : DROP DATABASE dbnodb diff --git a/mysql-test/suite/sys_vars/r/tmp_disk_table_size_basic.result b/mysql-test/suite/sys_vars/r/tmp_disk_table_size_basic.result index 96314c64de4f8..0220d45e252af 100644 --- a/mysql-test/suite/sys_vars/r/tmp_disk_table_size_basic.result +++ b/mysql-test/suite/sys_vars/r/tmp_disk_table_size_basic.result @@ -151,7 +151,109 @@ ERROR 42S22: Unknown column 'tmp_disk_table_size' in 'SELECT' SET @@tmp_disk_table_size=16384; CREATE VIEW v AS SELECT 'a'; SELECT table_name FROM INFORMATION_SCHEMA.views; -ERROR HY000: The table '(temporary)' is full +table_name +host_summary +host_summary_by_file_io +host_summary_by_file_io_type +host_summary_by_stages +host_summary_by_statement_latency +host_summary_by_statement_type +innodb_buffer_stats_by_schema +innodb_buffer_stats_by_table +innodb_lock_waits +io_by_thread_by_latency +io_global_by_file_by_bytes +io_global_by_file_by_latency +io_global_by_wait_by_bytes +io_global_by_wait_by_latency +latest_file_io +memory_by_host_by_current_bytes +memory_by_thread_by_current_bytes +memory_by_user_by_current_bytes +memory_global_by_current_bytes +memory_global_total +metrics +processlist +ps_check_lost_instrumentation +schema_auto_increment_columns +schema_index_statistics +schema_object_overview +schema_redundant_indexes +schema_table_lock_waits +schema_table_statistics +schema_table_statistics_with_buffer +schema_tables_with_full_table_scans +schema_unused_indexes +session +session_ssl_status +statement_analysis +statements_with_errors_or_warnings +statements_with_full_table_scans +statements_with_runtimes_in_95th_percentile +statements_with_sorting +statements_with_temp_tables +user +user_summary +user_summary_by_file_io +user_summary_by_file_io_type +user_summary_by_stages +user_summary_by_statement_latency +user_summary_by_statement_type 
+v +version +wait_classes_global_by_avg_latency +wait_classes_global_by_latency +waits_by_host_by_latency +waits_by_user_by_latency +waits_global_by_latency +x$host_summary +x$host_summary_by_file_io +x$host_summary_by_file_io_type +x$host_summary_by_stages +x$host_summary_by_statement_latency +x$host_summary_by_statement_type +x$innodb_buffer_stats_by_schema +x$innodb_buffer_stats_by_table +x$innodb_lock_waits +x$io_by_thread_by_latency +x$io_global_by_file_by_bytes +x$io_global_by_file_by_latency +x$io_global_by_wait_by_bytes +x$io_global_by_wait_by_latency +x$latest_file_io +x$memory_by_host_by_current_bytes +x$memory_by_thread_by_current_bytes +x$memory_by_user_by_current_bytes +x$memory_global_by_current_bytes +x$memory_global_total +x$processlist +x$ps_digest_95th_percentile_by_avg_us +x$ps_digest_avg_latency_distribution +x$ps_schema_table_statistics_io +x$schema_flattened_keys +x$schema_index_statistics +x$schema_table_lock_waits +x$schema_table_statistics +x$schema_table_statistics_with_buffer +x$schema_tables_with_full_table_scans +x$session +x$statement_analysis +x$statements_with_errors_or_warnings +x$statements_with_full_table_scans +x$statements_with_runtimes_in_95th_percentile +x$statements_with_sorting +x$statements_with_temp_tables +x$user_summary +x$user_summary_by_file_io +x$user_summary_by_file_io_type +x$user_summary_by_stages +x$user_summary_by_statement_latency +x$user_summary_by_statement_type +x$wait_classes_global_by_avg_latency +x$wait_classes_global_by_latency +x$waits_by_host_by_latency +x$waits_by_user_by_latency +x$waits_global_by_latency DROP VIEW v; # End of 10.4 test SET @@global.tmp_disk_table_size = @start_global_value; diff --git a/mysql-test/suite/sys_vars/t/tmp_disk_table_size_basic.test b/mysql-test/suite/sys_vars/t/tmp_disk_table_size_basic.test index 099be3544865b..454e2cbd6edc6 100644 --- a/mysql-test/suite/sys_vars/t/tmp_disk_table_size_basic.test +++ b/mysql-test/suite/sys_vars/t/tmp_disk_table_size_basic.test @@ -203,7 
+203,7 @@ SELECT tmp_disk_table_size = @@session.tmp_disk_table_size; SET @@tmp_disk_table_size=16384; CREATE VIEW v AS SELECT 'a'; ---error ER_RECORD_FILE_FULL +--sorted_result SELECT table_name FROM INFORMATION_SCHEMA.views; DROP VIEW v; diff --git a/mysql-test/suite/sysschema/r/v_schema_redundant_indexes.result b/mysql-test/suite/sysschema/r/v_schema_redundant_indexes.result index 2199c959cd029..8893726fe128a 100644 --- a/mysql-test/suite/sysschema/r/v_schema_redundant_indexes.result +++ b/mysql-test/suite/sysschema/r/v_schema_redundant_indexes.result @@ -32,6 +32,6 @@ KEY (i, j, k) ); SELECT * FROM sys.schema_redundant_indexes; table_schema table_name redundant_index_name redundant_index_columns redundant_index_non_unique dominant_index_name dominant_index_columns dominant_index_non_unique subpart_exists sql_drop_index -rkey rkey j j 1 j_2 j,k 1 0 ALTER TABLE `rkey`.`rkey` DROP INDEX `j` rkey rkey i i,j,k 1 PRIMARY i 0 0 ALTER TABLE `rkey`.`rkey` DROP INDEX `i` +rkey rkey j j 1 j_2 j,k 1 0 ALTER TABLE `rkey`.`rkey` DROP INDEX `j` DROP DATABASE rkey; diff --git a/sql/item_func.cc b/sql/item_func.cc index 37d63d984f3af..02ffe79ad7769 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -6377,8 +6377,49 @@ bool Item_func_match::fix_fields(THD *thd, Item **ref) } if (!(table->file->ha_table_flags() & HA_CAN_FULLTEXT)) { - my_error(ER_TABLE_CANT_HANDLE_FT, MYF(0), table->file->table_type()); - return 1; + /* + If this is an in-memory tmp table that hasn't been opened yet + (e.g. a derived table being prepared), convert it to a disk-based + engine that supports FULLTEXT. This can happen when HEAP blob + support keeps a table in memory that would previously have been + forced to disk by blob columns alone. + */ + if (table->s->tmp_table && !table->is_created() && + table->s->db_type() == heap_hton) + { + /* + Replace the HEAP handler with a disk-based engine (Aria/MyISAM) + that supports FULLTEXT. 
The table has not been opened yet, so + only the handler object and plugin reference need to be swapped. + This follows the same pattern as + create_internal_tmp_table_from_heap() in sql_select.cc. + */ + delete table->file; + table->file= NULL; + /* Reset ha_share — old HEAP handler already set it via finalize() */ + table->s->ha_share= NULL; + plugin_unlock(0, table->s->db_plugin); + table->s->db_plugin= ha_lock_engine(0, TMP_ENGINE_HTON); + if (!(table->file= get_new_handler(table->s, &table->mem_root, + table->s->db_type()))) + { + my_error(ER_OUTOFMEMORY, MYF(ME_FATAL), + static_cast<int>(sizeof(handler))); + return 1; + } + if (table->file->set_ha_share_ref(&table->s->ha_share)) + { + delete table->file; + table->file= NULL; + return 1; + } + table->file->set_table(table); + } + else + { + my_error(ER_TABLE_CANT_HANDLE_FT, MYF(0), table->file->table_type()); + return 1; + } } table->fulltext_searched=1; return agg_arg_charsets_for_comparison(cmp_collation, args+1, arg_count-1); diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 580a13e1c5445..3e56e801bf4b2 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -801,11 +801,14 @@ bool Aggregator_distinct::setup(THD *thd) table->file->extra(HA_EXTRA_NO_ROWS); // Don't update rows table->no_rows=1; - if (table->s->db_type() == heap_hton) + if (table->s->db_type() == heap_hton && !table->s->blob_fields) { /* - No blobs, otherwise it would have been MyISAM: set up a compare - function and its arguments to use with Unique. + Unique tree compares raw record bytes (simple_raw_key_cmp or + composite_key_cmp). Blob fields store only a pointer in the + record, so raw comparison would compare pointer values, not + actual blob data. Skip the Unique tree path for blob tables + and fall through to the ha_write_tmp_row path below.
*/ qsort_cmp2 compare_key; void* cmp_arg; @@ -1002,9 +1005,21 @@ bool Aggregator_distinct::add() */ return tree->unique_add(table->record[0] + table->s->null_bytes); } - if (unlikely((error= table->file->ha_write_tmp_row(table->record[0]))) && - table->file->is_fatal_error(error, HA_CHECK_DUP)) - return TRUE; + if (unlikely((error= table->file->ha_write_tmp_row(table->record[0])))) + { + if (!table->file->is_fatal_error(error, HA_CHECK_DUP)) + return FALSE; // duplicate, not an error + /* + HEAP table full: convert to on-disk engine. + create_internal_tmp_table_from_heap() copies all existing rows + plus the overflow row (record[0]) to the new table. + */ + if (create_internal_tmp_table_from_heap(table->in_use, table, + tmp_table_param->start_recinfo, + &tmp_table_param->recinfo, + error, 0, NULL)) + return TRUE; + } return FALSE; } else diff --git a/sql/sql_expression_cache.cc b/sql/sql_expression_cache.cc index 34bc1e2b2ca8a..0e584cb0cc041 100644 --- a/sql/sql_expression_cache.cc +++ b/sql/sql_expression_cache.cc @@ -138,6 +138,23 @@ void Expression_cache_tmptable::init() goto error; } + /* + HEAP hash indexes on blob columns use a pointer-based key format + (4-byte length + data pointer). This is incompatible with the SQL + layer's key format (2-byte length + inline data) because + Field_blob::new_key_field() returns a Field_varstring. + + This check is slightly conservative: a blob only in the result + value would not affect the key. However, it matches the pre-blob + behavior where blobs forced Aria, which failed the heap_hton check + above and disabled the cache anyway. 
+ */ + if (cache_table->s->blob_fields) + { + DBUG_PRINT("error", ("blob fields not supported in heap expression cache")); + goto error; + } + field_counter= 1; if (cache_table->alloc_keys(1) || diff --git a/sql/sql_select.cc b/sql/sql_select.cc index d30d3647b48c3..f7195642ffee4 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -21036,11 +21036,16 @@ bool Create_tmp_table::choose_engine(THD *thd, TABLE *table, TABLE_SHARE *share= table->s; DBUG_ENTER("Create_tmp_table::choose_engine"); /* - If result table is small; use a heap, otherwise TMP_TABLE_HTON (Aria) - In the future we should try making storage engine selection more dynamic + If result table is small; use a heap, otherwise TMP_TABLE_HTON (Aria). + HEAP now supports blob columns via continuation chains, so blob_fields + alone no longer forces a disk-based engine. We still fall back to disk + when reclength exceeds HA_MAX_REC_LENGTH (HEAP's fixed-width rows would + waste too much memory for very wide records). + In the future we should try making storage engine selection more dynamic. */ - if (share->blob_fields || m_using_unique_constraint || + if (m_using_unique_constraint || + share->reclength > HA_MAX_REC_LENGTH || (thd->variables.big_tables && !(m_select_options & SELECT_SMALL_RESULT)) || (m_select_options & TMP_TABLE_FORCE_MYISAM) || @@ -21104,9 +21109,14 @@ bool Create_tmp_table::finalize(THD *thd, if (!m_using_unique_constraint) share->reclength+= m_group_null_items; // null flag is stored separately - if (share->blob_fields == 0) + if (share->blob_fields == 0 || share->db_type() == heap_hton) { - /* We need to ensure that first byte is not 0 for the delete link */ + /* + We need to ensure that first byte is not 0 for the delete link. + HEAP uses fixed-width rows even with blobs (blob data lives in + separate continuation records within the same HP_BLOCK, not + inline in the primary record), so it still needs this guard. 
+ */ if (m_field_count[other]) m_null_count[other]++; else @@ -21125,11 +21135,15 @@ bool Create_tmp_table::finalize(THD *thd, if (!share->reclength) share->reclength= 1; // Dummy select share->stored_rec_length= share->reclength; - /* Use packed rows if there is blobs or a lot of space to gain */ - if (share->blob_fields || - (string_total_length() >= STRING_TOTAL_LENGTH_TO_PACK_ROWS && - (share->reclength / string_total_length() <= RATIO_TO_PACK_ROWS || - string_total_length() / string_count() >= AVG_STRING_LENGTH_TO_PACK_ROWS))) + /* + Use packed rows if there is blobs or a lot of space to gain. + HEAP requires fixed-width rows — it cannot use packed row format. + */ + if (share->db_type() != heap_hton && + (share->blob_fields || + (string_total_length() >= STRING_TOTAL_LENGTH_TO_PACK_ROWS && + (share->reclength / string_total_length() <= RATIO_TO_PACK_ROWS || + string_total_length() / string_count() >= AVG_STRING_LENGTH_TO_PACK_ROWS)))) use_packed_rows= 1; { @@ -21160,8 +21174,13 @@ bool Create_tmp_table::finalize(THD *thd, share->null_bytes= share->null_bytes_for_compare= whole_null_pack_length; } - if (share->blob_fields == 0) + if (share->blob_fields == 0 || share->db_type() == heap_hton) { + /* + Same first-byte guard as above: HEAP with blobs still uses + fixed-width rows and needs a non-zero first byte for the + delete-link mechanism. + */ null_counter[(m_field_count[other] ? other : distinct)]++; } @@ -26803,8 +26822,15 @@ JOIN_TAB::remove_duplicates() table->file->info(HA_STATUS_VARIABLE); table->reginfo.lock_type=TL_WRITE; - if (table->s->db_type() == heap_hton || - (!table->s->blob_fields && + /* + remove_dup_with_hash_index() copies field data into a flat key buffer + via field->make_sort_key() and compares with memcmp. Blob fields + store only a pointer in the record, so memcmp would compare pointer + values instead of blob content. Fall back to the row-by-row compare + path for tables with blobs. 
+ */ + if (!table->s->blob_fields && + (table->s->db_type() == heap_hton || ((ALIGN_SIZE(keylength) + HASH_OVERHEAD) * table->file->stats.records < thd->variables.sortbuff_size))) error= remove_dup_with_hash_index(join->thd, table, field_count, @@ -31781,8 +31807,24 @@ test_if_cheaper_ordering(bool in_join_optimizer, and as result we'll choose an index scan when using ref/range access + filesort will be cheaper. */ - select_limit= (ha_rows) (select_limit < fanout ? - 1 : select_limit/fanout); + /* + fanout can be extremely small (close to 0) when + cond_selectivity values are tiny, making select_limit/fanout + overflow to infinity or a value exceeding HA_POS_ERROR. + Casting such a double to ha_rows (unsigned long long) is + undefined behavior. Cap at HA_POS_ERROR to avoid UB. + Note: (double) HA_POS_ERROR rounds up to 2^64 (double can't + represent 2^64-1 exactly), so the >= comparison is safe — + any double that reaches 2^64 is genuinely out of range. + */ + { + double adjusted= (select_limit < fanout) ? 
+ 1.0 : select_limit / fanout; + if (adjusted >= (double) HA_POS_ERROR) + select_limit= HA_POS_ERROR; + else + select_limit= (ha_rows) adjusted; + } /* refkey_rows_estimate is E(#rows) produced by the table access diff --git a/storage/heap/CMakeLists.txt b/storage/heap/CMakeLists.txt index a26124d0c1cae..7f4d53a787900 100644 --- a/storage/heap/CMakeLists.txt +++ b/storage/heap/CMakeLists.txt @@ -13,7 +13,7 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA -SET(HEAP_SOURCES _check.c _rectest.c hp_block.c hp_clear.c hp_close.c hp_create.c +SET(HEAP_SOURCES _check.c _rectest.c hp_block.c hp_blob.c hp_clear.c hp_close.c hp_create.c ha_heap.cc hp_delete.c hp_extra.c hp_hash.c hp_info.c hp_open.c hp_panic.c hp_rename.c hp_rfirst.c hp_rkey.c hp_rlast.c hp_rnext.c hp_rprev.c diff --git a/storage/heap/_check.c b/storage/heap/_check.c index 1a640fa13da86..c87eda3818121 100644 --- a/storage/heap/_check.c +++ b/storage/heap/_check.c @@ -42,7 +42,7 @@ int heap_check_heap(const HP_INFO *info, my_bool print_status) { int error; uint key; - ulong records=0, deleted=0, pos, next_block; + ulong records=0, deleted=0, cont_count=0, pos, next_block; HP_SHARE *share=info->s; uchar *current_ptr= info->current_ptr; DBUG_ENTER("heap_check_heap"); @@ -68,9 +68,9 @@ int heap_check_heap(const HP_INFO *info, my_bool print_status) else { next_block+= share->block.records_in_block; - if (next_block >= share->records+share->deleted) + if (next_block >= share->total_records+share->deleted) { - next_block= share->records+share->deleted; + next_block= share->total_records+share->deleted; if (pos >= next_block) break; /* End of file */ } @@ -79,6 +79,12 @@ int heap_check_heap(const HP_INFO *info, my_bool print_status) if (!current_ptr[share->visible]) deleted++; + else if (hp_is_cont(current_ptr, share->visible)) + { + uint16 run_rec_count= hp_cont_rec_count(current_ptr); + cont_count+= run_rec_count; + pos+= 
run_rec_count - 1; /* -1 because for-loop does pos++ */ + } else records++; } @@ -90,6 +96,13 @@ int heap_check_heap(const HP_INFO *info, my_bool print_status) deleted, (ulong) share->deleted)); error= 1; } + if (records + cont_count != share->total_records) + { + DBUG_PRINT("error",("total_records mismatch: primary %lu + cont %lu != %lu", + records, cont_count, + (ulong) share->total_records)); + error= 1; + } DBUG_RETURN(error); } diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc index 3f4d1ab69efc5..0c8f4aa2a5491 100644 --- a/storage/heap/ha_heap.cc +++ b/storage/heap/ha_heap.cc @@ -25,6 +25,7 @@ #include "sql_plugin.h" #include "ha_heap.h" #include "sql_base.h" +#include "field.h" static handler *heap_create_handler(handlerton *, TABLE_SHARE *, MEM_ROOT *); static int heap_prepare_hp_create_info(TABLE *, bool, HP_CREATE_INFO *); @@ -103,6 +104,7 @@ int ha_heap::open(const char *name, int mode, uint test_if_locked) rc= heap_create(name, &create_info, &internal_share, &created_new_share); my_free(create_info.keydef); + my_free(create_info.blob_descs); if (rc) goto end; @@ -363,6 +365,45 @@ void ha_heap::position(const uchar *record) *(HEAP_PTR*) ref= heap_position(file); // Ref is aligned } +int ha_heap::remember_rnd_pos() +{ + saved_current_record= file->current_record; + position((uchar*) 0); + return 0; +} + +int ha_heap::restart_rnd_next(uchar *buf) +{ + /* + Restore the scan position saved by remember_rnd_pos(). + + heap_scan() uses current_record as a sequential counter and next_block + as a cached upper bound for the current HP_BLOCK segment. Within one + segment, heap_scan() advances current_ptr by recbuffer without calling + hp_find_record(). heap_rrnd() (called via rnd_pos) doesn't update + these, so we restore them here. + + next_block is set to the next records_in_block-aligned boundary after + saved_current_record. 
We MUST then cap it at total_records + deleted + (== block.last_allocated), which is the number of actually allocated + slots in the HP_BLOCK. Without this cap, if the saved position falls + in the last block segment and rows have been deleted between + remember_rnd_pos() and restart_rnd_next() (e.g. by + remove_dup_with_compare), next_block can exceed the allocated range. + heap_scan() would then take the fast path (pos < next_block) and walk + current_ptr past the last allocated slot into unmapped memory, causing + a segfault. + */ + file->current_record= saved_current_record; + file->next_block= saved_current_record - + (saved_current_record % file->s->block.records_in_block) + + file->s->block.records_in_block; + ulong scan_end= file->s->total_records + file->s->deleted; + if (file->next_block > scan_end) + file->next_block= scan_end; + return rnd_pos(buf, ref); +} + int ha_heap::info(uint flag) { HEAPINFO hp_info; @@ -693,6 +734,47 @@ static int heap_prepare_hp_create_info(TABLE *table_arg, bool internal_table, keydef[share->next_number_index].flag|= HA_AUTO_KEY; found_real_auto_increment= share->next_number_key_offset == 0; } + + /* Populate blob column descriptors */ + if (share->blob_fields) + { + HP_BLOB_DESC *blob_descs; + blob_descs= (HP_BLOB_DESC*) my_malloc(hp_key_memory_HP_BLOB, + share->blob_fields * + sizeof(HP_BLOB_DESC), + MYF(MY_WME | MY_THREAD_SPECIFIC)); + if (!blob_descs) + { + my_free(keydef); + return my_errno; + } + { + uint real_blob_count= 0; + for (uint b= 0; b < share->blob_fields; b++) + { + Field *field= table_arg->field[share->blob_field[b]]; + /* + BLOB_FLAG may be set on non-Field_blob fields (e.g. long + Field_string in INFORMATION_SCHEMA temp tables). Only include + true Field_blob types in the HEAP blob descriptor array. + Field_geom (MYSQL_TYPE_GEOMETRY) extends Field_blob and must + also be included. 
+ */ + if (field->type() == MYSQL_TYPE_BLOB || + field->type() == MYSQL_TYPE_GEOMETRY) + { + Field_blob *blob= (Field_blob*) field; + blob_descs[real_blob_count].offset= + (uint) blob->offset(table_arg->record[0]); + blob_descs[real_blob_count].packlength= blob->pack_length_no_ptr(); + real_blob_count++; + } + } + hp_create_info->blob_descs= blob_descs; + hp_create_info->blob_count= real_blob_count; + } + } + hp_create_info->auto_key= auto_key; hp_create_info->auto_key_type= auto_key_type; hp_create_info->max_table_size= MY_MAX(current_thd->variables.max_heap_table_size, sizeof(HP_PTRS)); @@ -734,6 +816,7 @@ int ha_heap::create(const char *name, TABLE *table_arg, create_info->auto_increment_value - 1 : 0); error= heap_create(name, &hp_create_info, &internal_share, &created); my_free(hp_create_info.keydef); + my_free(hp_create_info.blob_descs); DBUG_ASSERT(file == 0); return (error); } @@ -800,7 +883,7 @@ int ha_heap::find_unique_row(uchar *record, uint unique_idx) share->blength, share->records)); do { - if (!hp_rec_key_cmp(keyinfo, pos->ptr_to_rec, record)) + if (!hp_rec_key_cmp(keyinfo, record, pos->ptr_to_rec, file)) { file->current_hash_ptr= pos; file->current_ptr= pos->ptr_to_rec; @@ -810,6 +893,8 @@ int ha_heap::find_unique_row(uchar *record, uint unique_idx) records. 
*/ memcpy(record, file->current_ptr, (size_t) share->reclength); + if (share->blob_count && hp_read_blobs(file, record, file->current_ptr)) + DBUG_RETURN(-1); DBUG_RETURN(0); // found and position set } diff --git a/storage/heap/ha_heap.h b/storage/heap/ha_heap.h index c38ec325740d7..0d0eec530cde6 100644 --- a/storage/heap/ha_heap.h +++ b/storage/heap/ha_heap.h @@ -32,6 +32,7 @@ class ha_heap final : public handler key_map btree_keys; /* number of records changed since last statistics update */ ulong records_changed; + ulong saved_current_record; /* for remember_rnd_pos() / restart_rnd_next() */ uint key_stat_version; my_bool internal_table; public: @@ -47,11 +48,12 @@ class ha_heap final : public handler enum row_type get_row_type() const override { return ROW_TYPE_FIXED; } ulonglong table_flags() const override { - return (HA_FAST_KEY_READ | HA_NO_BLOBS | HA_NULL_IN_KEY | + return (HA_FAST_KEY_READ | HA_NULL_IN_KEY | HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE | HA_CAN_SQL_HANDLER | HA_CAN_ONLINE_BACKUPS | HA_REC_NOT_IN_SEQ | HA_CAN_INSERT_DELAYED | HA_NO_TRANSACTIONS | - HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT | HA_CAN_HASH_KEYS); + HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT | HA_CAN_HASH_KEYS | + HA_CAN_GEOMETRY); } ulong index_flags(uint inx, uint part, bool all_parts) const override { @@ -94,6 +96,8 @@ class ha_heap final : public handler int rnd_next(uchar *buf) override; int rnd_pos(uchar * buf, uchar *pos) override; void position(const uchar *record) override; + int remember_rnd_pos() override; + int restart_rnd_next(uchar *buf) override; int can_continue_handler_scan() override; int info(uint) override; int extra(enum ha_extra_function operation) override; diff --git a/storage/heap/heapdef.h b/storage/heap/heapdef.h index e51fe88d8e2b7..5ef2003d58085 100644 --- a/storage/heap/heapdef.h +++ b/storage/heap/heapdef.h @@ -33,6 +33,93 @@ C_MODE_START #define HP_MIN_RECORDS_IN_BLOCK 16 #define HP_MAX_RECORDS_IN_BLOCK 8192 +#define HP_ROW_ACTIVE 1 /* Bit 
0: record is active (not deleted) */ +#define HP_ROW_HAS_CONT 2 /* Bit 1: primary record has continuation chain(s) */ +#define HP_ROW_IS_CONT 4 /* Bit 2: this record IS a continuation record */ +#define HP_ROW_CONT_ZEROCOPY 8 /* Bit 3: zero-copy layout (data in rec 1..N-1) */ + +/* + Continuation run header: next_cont pointer + run_rec_count. + Stored at the beginning of the first record in each run. +*/ +#define HP_CONT_NEXT_PTR_SIZE sizeof(uchar*) +#define HP_CONT_REC_COUNT_SIZE sizeof(uint16) +#define HP_CONT_HEADER_SIZE (HP_CONT_NEXT_PTR_SIZE + HP_CONT_REC_COUNT_SIZE) + +/* + Minimum contiguous run size parameters. + Runs smaller than this are not worth scavenging from the free list because + the per-run header overhead (10 bytes) becomes a significant fraction of + payload. Skip them and allocate from the tail instead. + + HP_CONT_MIN_RUN_BYTES: absolute floor for minimum run payload. + HP_CONT_RUN_FRACTION_NUM/DEN: minimum run size as a fraction of blob size. + min_run_bytes = MAX(blob_length * NUM / DEN, HP_CONT_MIN_RUN_BYTES) +*/ +/* + Row flags byte predicates. + The flags byte is at offset 'visible' in each primary or run-header record. +*/ + +/* Record is active (not deleted) */ +static inline my_bool hp_is_active(const uchar *rec, uint visible) +{ + return (rec[visible] & HP_ROW_ACTIVE) != 0; +} + +/* Primary record that owns blob continuation chain(s) */ +static inline my_bool hp_has_cont(const uchar *rec, uint visible) +{ + return (rec[visible] & HP_ROW_HAS_CONT) != 0; +} + +/* This record IS a continuation run header (rec 0 of a run) */ +static inline my_bool hp_is_cont(const uchar *rec, uint visible) +{ + return (rec[visible] & HP_ROW_IS_CONT) != 0; +} + +/* + Continuation run header accessors. + Read next_cont pointer and run_rec_count from the first record of a run. 
+*/ +static inline const uchar *hp_cont_next(const uchar *chain) +{ + const uchar *next; + memcpy(&next, chain, HP_CONT_NEXT_PTR_SIZE); + return next; +} + +static inline uint16 hp_cont_rec_count(const uchar *chain) +{ + return uint2korr(chain + HP_CONT_NEXT_PTR_SIZE); +} + +/* + Zero-copy case detection for stored continuation chains. + + Case A: single record, single run — data fits in rec 0 payload after header. + run_rec_count == 1 AND next_cont == NULL. + IMPORTANT: run_rec_count == 1 alone is NOT sufficient — a multi-run + blob can have run_rec_count == 1 in its first run when free-list + fragmentation produces a single-slot fragment. + + Case B: single run, multiple records, zerocopy flag set — data in rec 1..N-1. +*/ +static inline my_bool hp_is_case_a(const uchar *chain) +{ + return hp_cont_rec_count(chain) == 1 && hp_cont_next(chain) == NULL; +} + +static inline my_bool hp_is_case_b(const uchar *chain, uint visible) +{ + return (chain[visible] & HP_ROW_CONT_ZEROCOPY) != 0; +} + +#define HP_CONT_MIN_RUN_BYTES 128 +#define HP_CONT_RUN_FRACTION_NUM 1 +#define HP_CONT_RUN_FRACTION_DEN 10 + /* Some extern variables */ extern LIST *heap_open_list,*heap_share_list; @@ -85,9 +172,12 @@ extern ulong hp_rec_hashnr(HP_KEYDEF *keyinfo,const uchar *rec); extern void hp_movelink(HASH_INFO *pos,HASH_INFO *next_link, HASH_INFO *newlink); extern int hp_rec_key_cmp(HP_KEYDEF *keydef,const uchar *rec1, - const uchar *rec2); + const uchar *rec2, HP_INFO *info); extern int hp_key_cmp(HP_KEYDEF *keydef,const uchar *rec, - const uchar *key); + const uchar *key, HP_INFO *info); +extern const uchar *hp_materialize_one_blob(HP_INFO *info, + const uchar *chain, + uint32 data_len); extern void hp_make_key(HP_KEYDEF *keydef,uchar *key,const uchar *rec); extern uint hp_rb_make_key(HP_KEYDEF *keydef, uchar *key, const uchar *rec, uchar *recpos); @@ -104,12 +194,19 @@ extern ha_rows hp_rows_in_memory(size_t reclength, size_t index_size, size_t memory_limit); extern size_t 
hp_memory_needed_per_row(size_t reclength); +extern uchar *next_free_record_pos(HP_SHARE *info); +extern int hp_write_blobs(HP_INFO *info, const uchar *record, uchar *pos); +extern int hp_read_blobs(HP_INFO *info, uchar *record, const uchar *pos); +extern void hp_free_blobs(HP_SHARE *share, uchar *pos); +extern void hp_free_run_chain(HP_SHARE *share, uchar *chain); + extern mysql_mutex_t THR_LOCK_heap; extern PSI_memory_key hp_key_memory_HP_SHARE; extern PSI_memory_key hp_key_memory_HP_INFO; extern PSI_memory_key hp_key_memory_HP_PTRS; extern PSI_memory_key hp_key_memory_HP_KEYDEF; +extern PSI_memory_key hp_key_memory_HP_BLOB; #ifdef HAVE_PSI_INTERFACE void init_heap_psi_keys(); diff --git a/storage/heap/hp_blob.c b/storage/heap/hp_blob.c new file mode 100644 index 0000000000000..19c8068be5d14 --- /dev/null +++ b/storage/heap/hp_blob.c @@ -0,0 +1,885 @@ +/* Copyright (c) 2025, MariaDB Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */ + +/* + LOB (BLOB/TEXT) support for HEAP tables using variable-length + continuation runs. + + Each blob column's data is stored as a chain of continuation "runs". + A run is a contiguous sequence of recbuffer-sized records in the same + HP_BLOCK. The first record of each run stores a header (next_cont + pointer + run_rec_count); subsequent records carry pure blob payload. + Runs are linked together via the next_cont pointer. 
+
+  This design amortizes the per-run header overhead across many records,
+  giving near-100% space efficiency for typical blob sizes (150 KB and
+  above), even when recbuffer is very small (e.g. 16 bytes).
+*/
+
+#include "heapdef.h"
+#include <my_stacktrace.h>
+#include <stdlib.h>
+
+
+/*
+  Read blob data length from the record buffer.
+*/
+
+static uint32 hp_blob_length(const HP_BLOB_DESC *desc, const uchar *record)
+{
+  switch (desc->packlength)
+  {
+  case 1:
+    return (uint32) record[desc->offset];
+  case 2:
+    return uint2korr(record + desc->offset);
+  case 3:
+    return uint3korr(record + desc->offset);
+  case 4:
+    return uint4korr(record + desc->offset);
+  default:
+    DBUG_ASSERT(0);
+    return 0;
+  }
+}
+
+
+/*
+  Allocate one record from the HP_BLOCK tail, bypassing the free list.
+  Same accounting as next_free_record_pos() but never uses del_link.
+
+  Maintains the scan-boundary invariant:
+    total_records + deleted == block.last_allocated
+  by incrementing both last_allocated and total_records together.
+  heap_scan() relies on this invariant to know when to stop scanning.
+*/
+
+static uchar *hp_alloc_from_tail(HP_SHARE *share)
+{
+  int block_pos;
+  size_t length;
+
+  if (!(block_pos= (share->block.last_allocated %
+                    share->block.records_in_block)))
+  {
+    if ((share->block.last_allocated > share->max_records &&
+         share->max_records) ||
+        (share->data_length + share->index_length >= share->max_table_size))
+    {
+      my_errno= HA_ERR_RECORD_FILE_FULL;
+      return NULL;
+    }
+    if (hp_get_new_block(share, &share->block, &length))
+      return NULL;
+    share->data_length+= length;
+  }
+  share->block.last_allocated++;
+  share->total_records++;
+  return (uchar*) share->block.level_info[0].last_blocks +
+         block_pos * share->block.recbuffer;
+}
+ + Maintains the scan-boundary invariant: + total_records + deleted == block.last_allocated + Each freed slot does total_records-- and deleted++, keeping the sum + constant. heap_scan() relies on this sum to know when to stop. + + @param share Table share + @param chain Pointer to first record of first run (or NULL) +*/ + +void hp_free_run_chain(HP_SHARE *share, uchar *chain) +{ + uint recbuffer= share->block.recbuffer; + + while (chain) + { + uchar *next_run; + uint16 run_rec_count; + uint16 j; + + memcpy(&next_run, chain, HP_CONT_NEXT_PTR_SIZE); + run_rec_count= uint2korr(chain + HP_CONT_NEXT_PTR_SIZE); + + for (j= 0; j < run_rec_count; j++) + { + uchar *pos= chain + j * recbuffer; + *((uchar**) pos)= share->del_link; + share->del_link= pos; + pos[share->visible]= 0; + share->deleted++; + share->total_records--; + } + + chain= next_run; + } +} + + +/* + Write blob data into a contiguous run of records. + + Writes the run header (next_cont=NULL, run_rec_count) in the first + record, then copies blob data across all records in the run, + advancing *offset. + + @param share Table share + @param data Source blob data + @param data_len Total blob data length + @param run_start Pointer to first record of the run + @param run_rec_count Number of consecutive records in this run + @param zerocopy If TRUE, use zero-copy layout: + Case A (run_rec_count==1): data in rec 0 after header. + Case B (run_rec_count>1): data in rec 1..N-1 only, + rec 0 carries only the header (no data payload). + @param offset [in/out] Current offset into blob data + + @note Caller must link runs by overwriting next_cont in the previous run. 
+*/ + +static void hp_write_run_data(HP_SHARE *share, const uchar *data, + uint32 data_len, uchar *run_start, + uint16 run_rec_count, my_bool zerocopy, + uint32 *offset) +{ + uint visible= share->visible; + uint recbuffer= share->block.recbuffer; + uint32 off= *offset; + uint32 remaining= data_len - off; + uint32 chunk; + uint16 rec; + uchar *null_ptr= NULL; + + /* First record: run header + flags byte (always written) */ + memcpy(run_start, &null_ptr, HP_CONT_NEXT_PTR_SIZE); + int2store(run_start + HP_CONT_NEXT_PTR_SIZE, run_rec_count); + run_start[visible]= HP_ROW_ACTIVE | HP_ROW_IS_CONT | + (zerocopy && run_rec_count > 1 ? HP_ROW_CONT_ZEROCOPY : 0); + + /* + Case B (zerocopy && run_rec_count > 1): skip data copy in rec 0. + All data goes into rec 1..N-1 contiguously for zero-copy reads. + Case A (zerocopy && run_rec_count == 1): data fits in rec 0 payload. + Case C (!zerocopy): data starts in rec 0 as before. + */ + if (!zerocopy || run_rec_count == 1) + { + chunk= visible - HP_CONT_HEADER_SIZE; + if (chunk > remaining) + chunk= remaining; + memcpy(run_start + HP_CONT_HEADER_SIZE, data + off, chunk); + off+= chunk; + remaining-= chunk; + } + + /* + Inner records (rec 1..N-1): full recbuffer payload, no flags byte. + This makes data in inner records contiguous, enabling zero-copy reads + for single-run blobs (Case B). + */ + for (rec= 1; rec < run_rec_count && remaining > 0; rec++) + { + uchar *rec_ptr= run_start + rec * recbuffer; + chunk= recbuffer; + if (chunk > remaining) + chunk= remaining; + memcpy(rec_ptr, data + off, chunk); + off+= chunk; + remaining-= chunk; + } + + *offset= off; +} + + +/* + Unlink a contiguous group from the free list and write blob data into it. 
+ + @param share Table share + @param data_ptr Blob data + @param data_len Total blob data length + @param run_start Lowest address of the contiguous group + @param run_count Number of contiguous records in the group + @param visible share->visible + @param recbuffer share->block.recbuffer + @param data_offset [in/out] Current offset into blob data + @param first_run [in/out] Pointer to first run (NULL initially) + @param prev_run_start [in/out] Pointer to previous run's start +*/ + +static void hp_unlink_and_write_run(HP_SHARE *share, const uchar *data_ptr, + uint32 data_len, uchar *run_start, + uint16 run_count, uint visible, + uint recbuffer, uint32 *data_offset, + uchar **first_run, uchar **prev_run_start) +{ + uint32 remaining= data_len - *data_offset; + uint32 records_needed; + uint16 records_to_use; + uint32 unlinked= 0; + uchar **prev_link= &share->del_link; + uchar *cur; + uint32 first_payload= visible - HP_CONT_HEADER_SIZE; + + if (remaining <= first_payload) + records_needed= 1; + else + records_needed= 1 + (remaining - first_payload + recbuffer - 1) / recbuffer; + records_to_use= (records_needed > run_count) ? run_count : + (uint16) records_needed; + + cur= share->del_link; + while (cur && unlinked < records_to_use) + { + uchar *next= *((uchar**) cur); + if (cur >= run_start && + cur < run_start + records_to_use * recbuffer) + { + *prev_link= next; + share->deleted--; + share->total_records++; + unlinked++; + } + else + prev_link= (uchar**) cur; + cur= next; + } + + hp_write_run_data(share, data_ptr, data_len, run_start, + records_to_use, FALSE, data_offset); + + if (*prev_run_start) + memcpy(*prev_run_start, &run_start, sizeof(run_start)); + else + *first_run= run_start; + *prev_run_start= run_start; +} + + +/* + Write one blob column's data into a chain of continuation runs. + + Allocates contiguous runs from the free list and/or block tail, + copies blob data into them, and returns the first run pointer. 
+ On failure, frees any partially allocated chain. + + @param share Table share + @param data_ptr Blob data to write + @param data_len Blob data length (must be > 0) + @param first_run_out [out] Pointer to first run's first record + + @return 0 on success, my_errno on failure +*/ + +static int hp_write_one_blob(HP_SHARE *share, const uchar *data_ptr, + uint32 data_len, uchar **first_run_out) +{ + uint visible= share->visible; + uint recbuffer= share->block.recbuffer; + uint32 min_run_bytes; + uint32 min_run_records; + uchar *first_run= NULL; + uchar *prev_run_start= NULL; + uint32 data_offset= 0; + + /* Calculate minimum acceptable run size */ + min_run_bytes= data_len / HP_CONT_RUN_FRACTION_DEN * + HP_CONT_RUN_FRACTION_NUM; + if (min_run_bytes < HP_CONT_MIN_RUN_BYTES) + min_run_bytes= HP_CONT_MIN_RUN_BYTES; + min_run_records= (min_run_bytes + recbuffer - 1) / recbuffer; + if (min_run_records < 2) + min_run_records= 2; + + /* + Step 1: Try to allocate contiguous runs from the free list. + + Peek at free list records by walking next pointers without unlinking. + Track contiguous groups (descending addresses — LIFO order from + hp_free_run_chain). On discontinuity: if the group qualifies + (>= min_run_records), unlink and use it; if it doesn't, the free + list is too fragmented — stop and fall through to tail allocation. + */ + { + uchar *run_start= NULL; + uint16 run_count= 0; + uchar *prev_pos= NULL; + uchar *pos; + + for (pos= share->del_link; + pos && data_offset < data_len; + pos= *((uchar**) pos)) + { + /* + Only check descending direction: hp_free_run_chain() frees records + in ascending address order (j=0..N), so LIFO pushes them onto the + free list in reverse — consecutive free list entries have descending + addresses. Ascending adjacency from unrelated deletes is ignored + intentionally; we only recover runs that were freed together. 
+ */ + if (prev_pos && pos == prev_pos - recbuffer && run_count < UINT_MAX16) + { + run_start= pos; + run_count++; + prev_pos= pos; + continue; + } + + /* + Discontinuity. If the accumulated group qualifies, use it. + If not, the free list is fragmented — give up entirely. + */ + if (run_count > 0) + { + if (run_count < min_run_records) + break; + hp_unlink_and_write_run(share, data_ptr, data_len, run_start, + run_count, visible, recbuffer, + &data_offset, &first_run, &prev_run_start); + } + + run_start= pos; + run_count= 1; + prev_pos= pos; + } + + /* Handle the last group after the loop ends */ + if (run_count >= min_run_records && data_offset < data_len) + hp_unlink_and_write_run(share, data_ptr, data_len, run_start, + run_count, visible, recbuffer, + &data_offset, &first_run, &prev_run_start); + } + + /* + Step 2: Allocate remaining data from the block tail. + + Tail allocation is always contiguous within a leaf block. + When we hit a block boundary, we start a new run. + */ + while (data_offset < data_len) + { + uchar *run_start; + uint16 run_rec_count; + uint32 remaining= data_len - data_offset; + uint32 run_payload; + my_bool is_only_run; + + run_start= hp_alloc_from_tail(share); + if (!run_start) + goto err; + run_rec_count= 1; + + /* Extend the run with consecutive tail records */ + for (;;) + { + uint block_pos; + + if (run_rec_count == 1) + run_payload= visible - HP_CONT_HEADER_SIZE; + else + run_payload= (visible - HP_CONT_HEADER_SIZE) + + (uint32)(run_rec_count - 1) * recbuffer; + if (run_payload >= remaining) + break; + + /* + Check if the next record would be in the same leaf block. + block_pos == 0 means last_allocated is at a block boundary + and the next allocation would start a new block. + */ + block_pos= share->block.last_allocated % + share->block.records_in_block; + if (block_pos == 0) + break; + + { + uchar *next_rec= hp_alloc_from_tail(share); + if (!next_rec) + break; + /* + Contiguity guard (active in all builds, not just debug). 
+ + Blob continuation runs use pointer arithmetic (run_start + + i * recbuffer) to access inner records in the write, read, + zero-copy, scan-skip, and free paths. Today, contiguity + within a leaf block is guaranteed by hp_get_new_block() + allocating a single flat array of records_in_block * recbuffer + bytes, and hp_alloc_from_tail() handing them out sequentially. + But this is an implementation detail of HP_BLOCK, not a + documented contract. A future change (e.g. sub-block + allocation, memory pooling, or alignment padding between + records) could silently break this assumption, turning every + blob path into a source of data corruption. Abort here so + such a change is caught immediately by any test that exercises + blob writes. + */ + if (unlikely(next_rec != + run_start + (uint32) run_rec_count * recbuffer)) + { + my_safe_printf_stderr( + "HEAP blob: tail allocation not contiguous: " + "expected %p, got %p (run_start=%p, count=%u, recbuffer=%u)\n", + run_start + (uint32) run_rec_count * recbuffer, + next_rec, run_start, (uint) run_rec_count, recbuffer); + abort(); + } + run_rec_count++; + } + } + + is_only_run= (first_run == NULL && prev_run_start == NULL); + + if (is_only_run && run_payload >= remaining) + { + /* + Single-run blob — use zero-copy layout if possible. + Case A: data fits in rec 0 payload (run_rec_count == 1). + Case B: data in rec 1..N-1 only, contiguous for zero-copy reads. + */ + if (run_rec_count == 1) + { + /* Case A: data fits in rec 0 */ + hp_write_run_data(share, data_ptr, data_len, run_start, + run_rec_count, TRUE, &data_offset); + } + else + { + uint32 case_b_payload= (uint32)(run_rec_count - 1) * recbuffer; + if (case_b_payload >= remaining) + { + /* Case B: rec 1..N-1 alone hold all data */ + hp_write_run_data(share, data_ptr, data_len, run_start, + run_rec_count, TRUE, &data_offset); + } + else + { + /* + Case B needs one more record than Case C. Try to extend + if we're not at a block boundary. 
+ */ + uint block_pos= share->block.last_allocated % + share->block.records_in_block; + if (block_pos != 0) + { + uchar *extra= hp_alloc_from_tail(share); + if (extra) + { + /* + Contiguity guard for the Case B extra record, same + rationale as the main extension loop ~60 lines above: + hp_get_new_block() today allocates flat arrays but this + is an HP_BLOCK implementation detail, not a contract. + A future change could break contiguity and silently + corrupt every blob read/write/free path that relies on + run_start + i * recbuffer arithmetic. + */ + if (unlikely(extra != + run_start + (uint32) run_rec_count * recbuffer)) + { + my_safe_printf_stderr( + "HEAP blob: Case B extra allocation not contiguous: " + "expected %p, got %p " + "(run_start=%p, count=%u, recbuffer=%u)\n", + run_start + (uint32) run_rec_count * recbuffer, + extra, run_start, (uint) run_rec_count, recbuffer); + abort(); + } + run_rec_count++; + hp_write_run_data(share, data_ptr, data_len, run_start, + run_rec_count, TRUE, &data_offset); + } + else + hp_write_run_data(share, data_ptr, data_len, run_start, + run_rec_count, FALSE, &data_offset); + } + else + hp_write_run_data(share, data_ptr, data_len, run_start, + run_rec_count, FALSE, &data_offset); + } + } + } + else + { + /* Multi-run (Case C) or not the only run */ + hp_write_run_data(share, data_ptr, data_len, run_start, + run_rec_count, FALSE, &data_offset); + } + + if (prev_run_start) + memcpy(prev_run_start, &run_start, sizeof(run_start)); + else + first_run= run_start; + prev_run_start= run_start; + } + + *first_run_out= first_run; + return 0; + +err: + if (first_run) + hp_free_run_chain(share, first_run); + *first_run_out= NULL; + return my_errno; +} + + +/* + Write blob data from the record buffer into continuation runs. 
+ + For each blob column, reads the (length, pointer) descriptor from + the caller's record buffer, allocates variable-length continuation + runs, copies blob data into them, and overwrites the pointer in + the stored row (pos) to point to the first continuation run. + + @param info Table handle + @param record Source record buffer (caller's data) + @param pos Destination row in HP_BLOCK (already has memcpy'd record) + + @return 0 on success, my_errno on failure +*/ + +int hp_write_blobs(HP_INFO *info, const uchar *record, uchar *pos) +{ + HP_SHARE *share= info->s; + uint i; + my_bool has_blob_data= FALSE; + DBUG_ENTER("hp_write_blobs"); + + for (i= 0; i < share->blob_count; i++) + { + HP_BLOB_DESC *desc= &share->blob_descs[i]; + uint32 data_len; + const uchar *data_ptr; + uchar *first_run; + + data_len= hp_blob_length(desc, record); + + if (data_len == 0) + { + uchar *null_ptr= NULL; + memcpy(pos + desc->offset + desc->packlength, &null_ptr, sizeof(null_ptr)); + continue; + } + + has_blob_data= TRUE; + memcpy(&data_ptr, record + desc->offset + desc->packlength, sizeof(data_ptr)); + + if (hp_write_one_blob(share, data_ptr, data_len, &first_run)) + { + /* Rollback: free all previously completed blob columns */ + uint j; + for (j= 0; j < i; j++) + { + HP_BLOB_DESC *rd= &share->blob_descs[j]; + uchar *chain; + memcpy(&chain, pos + rd->offset + rd->packlength, sizeof(chain)); + if (chain) + hp_free_run_chain(share, chain); + { + uchar *null_ptr= NULL; + memcpy(pos + rd->offset + rd->packlength, &null_ptr, sizeof(null_ptr)); + } + } + { + uchar *null_ptr= NULL; + memcpy(pos + desc->offset + desc->packlength, &null_ptr, + sizeof(null_ptr)); + } + DBUG_RETURN(my_errno); + } + + memcpy(pos + desc->offset + desc->packlength, &first_run, sizeof(first_run)); + } + + pos[share->visible]= has_blob_data ? + (HP_ROW_ACTIVE | HP_ROW_HAS_CONT) : HP_ROW_ACTIVE; + DBUG_RETURN(0); +} + + +/* + Read blob data from continuation runs into the reassembly buffer. 
+ + After memcpy(record, pos, reclength), blob descriptor pointers in + record[] point into HP_BLOCK continuation run chains. This function + walks each chain, reassembles blob data into info->blob_buff, and + rewrites the pointers in record[] to point into blob_buff. + + @param info Table handle + @param record Record buffer (already has memcpy'd row data) + @param pos Row pointer in HP_BLOCK + + @return 0 on success, my_errno on failure +*/ + +int hp_read_blobs(HP_INFO *info, uchar *record, const uchar *pos) +{ + HP_SHARE *share= info->s; + uint i; + uint visible= share->visible; + uint recbuffer= share->block.recbuffer; + uint32 total_copy_size= 0; + uchar *buff_ptr; + DBUG_ENTER("hp_read_blobs"); + + info->has_zerocopy_blobs= FALSE; + + if (!hp_has_cont(pos, share->visible)) + DBUG_RETURN(0); + + /* + Pass 1: sum data_len for blobs that need reassembly (not zero-copy). + Cases A and B (HP_ROW_CONT_ZEROCOPY set, or single-record run) use + zero-copy pointers into HP_BLOCK, no blob_buff needed. 
+ */ + for (i= 0; i < share->blob_count; i++) + { + HP_BLOB_DESC *desc= &share->blob_descs[i]; + uint32 data_len; + const uchar *chain; + + data_len= hp_blob_length(desc, record); + if (data_len == 0) + continue; + + memcpy(&chain, record + desc->offset + desc->packlength, sizeof(chain)); + + /* Zero-copy cases (A or B) need no reassembly buffer space */ + if (hp_is_case_a(chain) || hp_is_case_b(chain, visible)) + { + info->has_zerocopy_blobs= TRUE; + continue; + } + total_copy_size+= data_len; + } + + /* Grow reassembly buffer for Case C blobs */ + if (total_copy_size > 0) + { + if (total_copy_size > info->blob_buff_len) + { + uchar *new_buff= (uchar*) my_realloc(hp_key_memory_HP_BLOB, + info->blob_buff, + total_copy_size, + MYF(MY_ALLOW_ZERO_PTR)); + if (!new_buff) + DBUG_RETURN(my_errno= HA_ERR_OUT_OF_MEM); + info->blob_buff= new_buff; + info->blob_buff_len= total_copy_size; + } + } + + /* Pass 2: process each blob column */ + buff_ptr= info->blob_buff; + for (i= 0; i < share->blob_count; i++) + { + HP_BLOB_DESC *desc= &share->blob_descs[i]; + uint32 data_len; + const uchar *chain; + + data_len= hp_blob_length(desc, record); + if (data_len == 0) + continue; + + memcpy(&chain, record + desc->offset + desc->packlength, sizeof(chain)); + + if (hp_is_case_a(chain)) + { + /* Case A: single-record single-run — zero-copy */ + const uchar *blob_data= chain + HP_CONT_HEADER_SIZE; + memcpy(record + desc->offset + desc->packlength, &blob_data, + sizeof(blob_data)); + } + else if (hp_is_case_b(chain, visible)) + { + /* Case B: data in rec 1..N-1, contiguous — zero-copy */ + const uchar *blob_data= chain + recbuffer; + memcpy(record + desc->offset + desc->packlength, &blob_data, + sizeof(blob_data)); + } + else + { + /* Case C: reassemble into blob_buff */ + uint32 remaining= data_len; + const uchar *next_cont; + while (chain && remaining > 0) + { + uint16 rec; + uint16 run_rec_count; + uint32 chunk; + + next_cont= hp_cont_next(chain); + run_rec_count= 
hp_cont_rec_count(chain); + + /* First record payload (after header) */ + chunk= visible - HP_CONT_HEADER_SIZE; + if (chunk > remaining) + chunk= remaining; + memcpy(buff_ptr, chain + HP_CONT_HEADER_SIZE, chunk); + buff_ptr+= chunk; + remaining-= chunk; + + /* Inner records: recbuffer stride, no flags byte */ + for (rec= 1; rec < run_rec_count && remaining > 0; rec++) + { + const uchar *rec_ptr= chain + rec * recbuffer; + chunk= recbuffer; + if (chunk > remaining) + chunk= remaining; + memcpy(buff_ptr, rec_ptr, chunk); + buff_ptr+= chunk; + remaining-= chunk; + } + + chain= next_cont; + } + + /* Update blob pointer to reassembly buffer */ + { + uchar *blob_data= buff_ptr - data_len; + memcpy(record + desc->offset + desc->packlength, &blob_data, + sizeof(blob_data)); + } + } + } + + DBUG_RETURN(0); +} + + +/* + Materialize a single blob column's data from a continuation chain + into info->blob_buff. + + Used by hash comparison functions when comparing a stored record + (where the blob data pointer has been overwritten with a continuation + chain pointer) against an input record. + + @param info Table handle (provides blob_buff) + @param chain Pointer to first run of the continuation chain + @param data_len Total blob data length (from record's packlength bytes) + + @return Pointer into info->blob_buff with contiguous blob data, + or NULL on allocation failure. 
+*/ + +const uchar *hp_materialize_one_blob(HP_INFO *info, + const uchar *chain, + uint32 data_len) +{ + HP_SHARE *share= info->s; + uint visible= share->visible; + uint recbuffer= share->block.recbuffer; + uint32 remaining; + uchar *buff_ptr; + const uchar *next_cont; + uint16 run_rec_count; + + if (data_len == 0 || !chain) + return chain; + + /* Check for zero-copy cases */ + if (hp_is_case_a(chain)) + return chain + HP_CONT_HEADER_SIZE; /* Case A */ + if (hp_is_case_b(chain, visible)) + return chain + recbuffer; /* Case B */ + + /* Case C: multiple runs, reassemble into blob_buff */ + if (data_len > info->blob_buff_len) + { + uchar *new_buff= (uchar*) my_realloc(hp_key_memory_HP_BLOB, + info->blob_buff, + data_len, + MYF(MY_ALLOW_ZERO_PTR)); + if (!new_buff) + return NULL; + info->blob_buff= new_buff; + info->blob_buff_len= data_len; + } + + buff_ptr= info->blob_buff; + remaining= data_len; + while (chain && remaining > 0) + { + uint16 rec; + uint32 chunk; + + next_cont= hp_cont_next(chain); + run_rec_count= hp_cont_rec_count(chain); + + /* First record payload (after header) */ + chunk= visible - HP_CONT_HEADER_SIZE; + if (chunk > remaining) + chunk= remaining; + memcpy(buff_ptr, chain + HP_CONT_HEADER_SIZE, chunk); + buff_ptr+= chunk; + remaining-= chunk; + + /* Inner records: recbuffer stride, no flags byte */ + for (rec= 1; rec < run_rec_count && remaining > 0; rec++) + { + const uchar *rec_ptr= chain + rec * recbuffer; + chunk= recbuffer; + if (chunk > remaining) + chunk= remaining; + memcpy(buff_ptr, rec_ptr, chunk); + buff_ptr+= chunk; + remaining-= chunk; + } + + chain= next_cont; + } + + return info->blob_buff; +} + + +/* + Free continuation run chains for all blob columns of a row. + + Walks each blob column's run chain and adds all records back to the + free list. 
+ + @param share Table share + @param pos Primary record pointer in HP_BLOCK +*/ + +void hp_free_blobs(HP_SHARE *share, uchar *pos) +{ + uint i; + DBUG_ENTER("hp_free_blobs"); + + if (!hp_has_cont(pos, share->visible)) + DBUG_VOID_RETURN; + + for (i= 0; i < share->blob_count; i++) + { + HP_BLOB_DESC *desc= &share->blob_descs[i]; + uchar *chain; + + memcpy(&chain, pos + desc->offset + desc->packlength, sizeof(chain)); + hp_free_run_chain(share, chain); + } + + DBUG_VOID_RETURN; +} diff --git a/storage/heap/hp_clear.c b/storage/heap/hp_clear.c index b0b263249a881..9efb4170792a7 100644 --- a/storage/heap/hp_clear.c +++ b/storage/heap/hp_clear.c @@ -35,8 +35,9 @@ void hp_clear(HP_SHARE *info) (void) hp_free_level(&info->block,info->block.levels,info->block.root, (uchar*) 0); info->block.levels=0; + info->block.last_allocated=0; hp_clear_keys(info); - info->records= info->deleted= 0; + info->records= info->deleted= info->total_records= 0; info->data_length= 0; info->blength=1; info->changed=0; diff --git a/storage/heap/hp_close.c b/storage/heap/hp_close.c index 82d6186340aa1..aa417f99b5a71 100644 --- a/storage/heap/hp_close.c +++ b/storage/heap/hp_close.c @@ -40,6 +40,7 @@ int hp_close(register HP_INFO *info) heap_open_list=list_delete(heap_open_list,&info->open_list); if (!--info->s->open_count && info->s->delete_on_close) hp_free(info->s); /* Table was deleted */ + my_free(info->blob_buff); my_free(info); DBUG_RETURN(error); } diff --git a/storage/heap/hp_create.c b/storage/heap/hp_create.c index f35e8e3fac9f8..6433b059605d0 100644 --- a/storage/heap/hp_create.c +++ b/storage/heap/hp_create.c @@ -74,7 +74,17 @@ int heap_create(const char *name, HP_CREATE_INFO *create_info, so the visible_offset must be least at sizeof(uchar*) */ visible_offset= MY_MAX(reclength, sizeof (char*)); - + /* + Blob tables store continuation run headers (next_cont pointer + + run_slots count = HP_CONT_HEADER_SIZE bytes) in each run's first + slot. 
Ensure at least 1 byte of payload beyond the header, + otherwise hp_write_run_data() underflows computing + chunk = visible - HP_CONT_HEADER_SIZE. Only matters for + pathological single-TINYBLOB tables (reclength as low as 9). + */ + if (create_info->blob_count) + visible_offset= MY_MAX(visible_offset, HP_CONT_HEADER_SIZE + 1); + for (i= key_segs= max_length= 0, keyinfo= keydef; i < keys; i++, keyinfo++) { bzero((char*) &keyinfo->block,sizeof(keyinfo->block)); @@ -110,6 +120,12 @@ int heap_create(const char *name, HP_CREATE_INFO *create_info, /* fall through */ case HA_KEYTYPE_VARTEXT1: keyinfo->flag|= HA_VAR_LENGTH_KEY; + /* + Real blob fields always enter as VARTEXT2/VARBINARY2, never + as VARTEXT1/VARBINARY1. Strip any spurious HA_BLOB_PART + (e.g. from uninitialized key_part_flag in SJ weedout tables). + */ + keyinfo->seg[j].flag&= ~HA_BLOB_PART; /* For BTREE algorithm, key length, greater than or equal to 255, is packed on 3 bytes. @@ -126,16 +142,78 @@ int heap_create(const char *name, HP_CREATE_INFO *create_info, /* fall_through */ case HA_KEYTYPE_VARTEXT2: keyinfo->flag|= HA_VAR_LENGTH_KEY; + /* + Strip HA_BLOB_PART for key segments that don't correspond + to actual blob fields. HA_BLOB_PART can appear spuriously + from uninitialized key_part_flag (SJ weedout tables) or + from BLOB_FLAG on non-Field_blob types (I_S temp tables). + */ + if (keyinfo->seg[j].flag & HA_BLOB_PART) + { + my_bool real_blob= FALSE; + uint k; + for (k= 0; k < create_info->blob_count; k++) + { + if (create_info->blob_descs[k].offset == + keyinfo->seg[j].start) + { + real_blob= TRUE; + break; + } + } + if (!real_blob) + keyinfo->seg[j].flag&= ~HA_BLOB_PART; + } /* For BTREE algorithm, key length, greater than or equal to 255, is packed on 3 bytes. 
*/ if (keyinfo->algorithm == HA_KEY_ALG_BTREE) length+= size_to_store_key_length(keyinfo->seg[j].length); + else if (keyinfo->seg[j].flag & HA_BLOB_PART) + length+= 4 + sizeof(uchar*); /* 4-byte len + data ptr in key */ else length+= 2; - /* Save number of bytes used to store length */ - keyinfo->seg[j].bit_start= 2; + /* + Save number of bytes used to store length. + For blob segments, bit_start holds the actual blob packlength + (1-4). Some SQL layer paths (DISTINCT) set it explicitly; + others (UNION) leave it 0 and set seg->length to pack_length + (= packlength + sizeof(uchar*)). Derive it when missing. + Also normalize seg->length to 0 ("whole blob") for blob + segments where the SQL layer set it to pack_length. + */ + if (!(keyinfo->seg[j].flag & HA_BLOB_PART)) + keyinfo->seg[j].bit_start= 2; + else + { + if (keyinfo->seg[j].bit_start == 0 && keyinfo->seg[j].length > 0) + keyinfo->seg[j].bit_start= + (uint8)(keyinfo->seg[j].length - sizeof(uchar*)); + keyinfo->seg[j].length= 0; /* "whole blob" */ + /* + Fallback: if bit_start is still 0 after the length-based + derivation above (which requires length > 0), look up the + actual packlength from the blob descriptor array. This + covers any SQL layer path that sets both bit_start=0 and + length=0 for a blob key segment. 
+ */ + if (keyinfo->seg[j].bit_start == 0) + { + uint k; + for (k= 0; k < create_info->blob_count; k++) + { + if (create_info->blob_descs[k].offset == + keyinfo->seg[j].start) + { + keyinfo->seg[j].bit_start= + (uint8) create_info->blob_descs[k].packlength; + break; + } + } + DBUG_ASSERT(keyinfo->seg[j].bit_start > 0); + } + } /* Make future comparison simpler by only having to check for one type @@ -174,7 +252,8 @@ int heap_create(const char *name, HP_CREATE_INFO *create_info, if (!(share= (HP_SHARE*) my_malloc(hp_key_memory_HP_SHARE, sizeof(HP_SHARE)+ keys*sizeof(HP_KEYDEF)+ - key_segs*sizeof(HA_KEYSEG), + key_segs*sizeof(HA_KEYSEG)+ + create_info->blob_count*sizeof(HP_BLOB_DESC), MYF(MY_ZEROFILL | (create_info->internal_table ? MY_THREAD_SPECIFIC : 0))))) @@ -182,6 +261,13 @@ int heap_create(const char *name, HP_CREATE_INFO *create_info, share->keydef= (HP_KEYDEF*) (share + 1); share->key_stat_version= 1; keyseg= (HA_KEYSEG*) (share->keydef + keys); + if (create_info->blob_count) + { + share->blob_descs= (HP_BLOB_DESC*) (keyseg + key_segs); + memcpy(share->blob_descs, create_info->blob_descs, + create_info->blob_count * sizeof(HP_BLOB_DESC)); + share->blob_count= create_info->blob_count; + } init_block(&share->block, hp_memory_needed_per_row(reclength), min_records, max_records); /* Fix keys */ diff --git a/storage/heap/hp_delete.c b/storage/heap/hp_delete.c index 9579fb51a7918..1a4da1fff0e44 100644 --- a/storage/heap/hp_delete.c +++ b/storage/heap/hp_delete.c @@ -42,11 +42,21 @@ int heap_delete(HP_INFO *info, const uchar *record) goto err; } + /* + Free blob continuation chains first (if any), then free the head + record slot. Both hp_free_run_chain() and the code below maintain + the scan-boundary invariant: + total_records + deleted == block.last_allocated + by doing total_records-- and deleted++ for each freed slot. 
+ */ + if (share->blob_count) + hp_free_blobs(share, pos); info->update=HA_STATE_DELETED; *((uchar**) pos)=share->del_link; share->del_link=pos; pos[share->visible]=0; /* Record deleted */ share->deleted++; + share->total_records--; share->key_version++; #if !defined(DBUG_OFF) && defined(EXTRA_HEAP_DEBUG) DBUG_EXECUTE("check_heap",heap_check_heap(info, 0);); @@ -123,7 +133,7 @@ int hp_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo, while (pos->ptr_to_rec != recpos) { - if (flag && !hp_rec_key_cmp(keyinfo, record, pos->ptr_to_rec)) + if (flag && !hp_rec_key_cmp(keyinfo, record, pos->ptr_to_rec, info)) last_ptr=pos; /* Previous same key */ gpos=pos; if (!(pos=pos->next_key)) diff --git a/storage/heap/hp_extra.c b/storage/heap/hp_extra.c index 3c554fe98e780..b54281027032b 100644 --- a/storage/heap/hp_extra.c +++ b/storage/heap/hp_extra.c @@ -59,6 +59,12 @@ int heap_reset(HP_INFO *info) info->current_hash_ptr=0; info->update=0; info->next_block=0; + if (info->blob_buff) + { + my_free(info->blob_buff); + info->blob_buff= NULL; + info->blob_buff_len= 0; + } return 0; } diff --git a/storage/heap/hp_hash.c b/storage/heap/hp_hash.c index a013915173043..c06fa77b9e9d9 100644 --- a/storage/heap/hp_hash.c +++ b/storage/heap/hp_hash.c @@ -29,6 +29,25 @@ hp_charpos(CHARSET_INFO *cs, const uchar *b, const uchar *e, size_t num) static ulong hp_hashnr(HP_KEYDEF *keydef, const uchar *key); + + +/* + Read blob data length using actual packlength stored in seg->bit_start. 
+*/ + +/* Size of a pointer, for use in memcpy to avoid -Wsizeof-pointer-memaccess */ +#define HP_PTR_SIZE sizeof(void*) + +static size_t hp_blob_key_length(uint packlength, const uchar *pos) +{ + switch (packlength) { + case 1: return (size_t) pos[0]; + case 2: return uint2korr(pos); + case 3: return uint3korr(pos); + case 4: return uint4korr(pos); + } + return 0; +} /* Find out how many rows there is in the given range @@ -127,7 +146,7 @@ uchar *hp_search(HP_INFO *info, HP_KEYDEF *keyinfo, const uchar *key, goto not_found; /* Wrong link */ do { - if (!hp_key_cmp(keyinfo, pos->ptr_to_rec, key)) + if (!hp_key_cmp(keyinfo, pos->ptr_to_rec, key, info)) { switch (nextflag) { case 0: /* Search after key */ @@ -188,7 +207,7 @@ uchar *hp_search_next(HP_INFO *info, HP_KEYDEF *keyinfo, const uchar *key, while ((pos= pos->next_key)) { - if (! hp_key_cmp(keyinfo, pos->ptr_to_rec, key)) + if (! hp_key_cmp(keyinfo, pos->ptr_to_rec, key, info)) { info->current_hash_ptr=pos; DBUG_RETURN (info->current_ptr= pos->ptr_to_rec); @@ -238,9 +257,9 @@ static ulong hp_hashnr(HP_KEYDEF *keydef, const uchar *key) if (*pos) /* Found null */ { nr^= (nr << 1) | 1; - /* Add key pack length (2) to key for VARCHAR segments */ + /* Add key pack length to key for VARCHAR/BLOB segments */ if (seg->type == HA_KEYTYPE_VARTEXT1) - key+= 2; + key+= (seg->flag & HA_BLOB_PART) ? 
4 + sizeof(uchar*) : 2; continue; } pos++; @@ -257,6 +276,17 @@ static ulong hp_hashnr(HP_KEYDEF *keydef, const uchar *key) } my_ci_hash_sort(cs, pos, length, &nr, &nr2); } + else if (seg->type == HA_KEYTYPE_VARTEXT1 && (seg->flag & HA_BLOB_PART)) + { + /* Blob segment in pre-built key: 4-byte length + data pointer */ + CHARSET_INFO *cs= seg->charset; + uint32 blob_len= uint4korr(pos); + const uchar *blob_data; + memcpy(&blob_data, pos + 4, HP_PTR_SIZE); + if (blob_data && blob_len > 0) + my_ci_hash_sort(cs, blob_data, blob_len, &nr, &nr2); + key+= 4 + sizeof(uchar*); + } else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */ { CHARSET_INFO *cs= seg->charset; @@ -318,6 +348,17 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const uchar *rec) } my_ci_hash_sort(cs, pos, char_length, &nr, &nr2); } + else if (seg->type == HA_KEYTYPE_VARTEXT1 && (seg->flag & HA_BLOB_PART)) + { + /* Blob segment in input record: dereference data pointer */ + CHARSET_INFO *cs= seg->charset; + uint packlength= seg->bit_start; + size_t blob_len= hp_blob_key_length(packlength, pos); + const uchar *blob_data; + memcpy(&blob_data, pos + packlength, HP_PTR_SIZE); + if (blob_data && blob_len > 0) + my_ci_hash_sort(cs, blob_data, blob_len, &nr, &nr2); + } else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */ { CHARSET_INFO *cs= seg->charset; @@ -361,24 +402,23 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const uchar *rec) /* - Compare keys for two records. Returns 0 if they are identical - - SYNOPSIS - hp_rec_key_cmp() - keydef Key definition - rec1 Record to compare - rec2 Other record to compare - - NOTES - diff_if_only_endspace_difference is used to allow us to insert - 'a' and 'a ' when there is an an unique key. - - RETURN - 0 Key is identical - <> 0 Key differes + Compare two records using key segments. 
+ + @param keydef Key definition + @param rec1 First record (input) — blob fields contain direct data + pointers to caller-owned memory + @param rec2 Second record — when @a info is non-NULL, blob fields + contain continuation chain pointers (stored format) that + are materialized via hp_materialize_one_blob(). + When @a info is NULL, treated same as rec1. + @param info When non-NULL, enables stored-blob materialization for rec2. + Must be NULL when both records are input records. + + @return 0 if records are equal by all key segments, 1 otherwise */ -int hp_rec_key_cmp(HP_KEYDEF *keydef, const uchar *rec1, const uchar *rec2) +int hp_rec_key_cmp(HP_KEYDEF *keydef, const uchar *rec1, const uchar *rec2, + HP_INFO *info) { HA_KEYSEG *seg,*endseg; @@ -416,6 +456,46 @@ int hp_rec_key_cmp(HP_KEYDEF *keydef, const uchar *rec1, const uchar *rec2) pos2, char_length2)) return 1; } + else if (seg->type == HA_KEYTYPE_VARTEXT1 && (seg->flag & HA_BLOB_PART)) + { + /* + Blob segment comparison. + rec1 always has valid blob pointers (input record). + rec2 may be stored (chain pointers) when info != NULL. 
+ */ + uint packlength= seg->bit_start; + uchar *pos1= (uchar*) rec1 + seg->start; + uchar *pos2= (uchar*) rec2 + seg->start; + size_t len1= hp_blob_key_length(packlength, pos1); + size_t len2= hp_blob_key_length(packlength, pos2); + const uchar *data1; + const uchar *data2; + + if (len1 != len2) + return 1; + if (len1 == 0) + continue; + + /* rec1: always input — dereference pointer */ + memcpy(&data1, pos1 + packlength, HP_PTR_SIZE); + + /* rec2: if info != NULL, it's stored — materialize from chain */ + if (info) + { + const uchar *chain2; + memcpy(&chain2, pos2 + packlength, HP_PTR_SIZE); + data2= hp_materialize_one_blob(info, chain2, (uint32) len2); + if (!data2) + return 1; + } + else + { + memcpy(&data2, pos2 + packlength, HP_PTR_SIZE); + } + + if (my_ci_strnncollsp(seg->charset, data1, len1, data2, len2)) + return 1; + } else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */ { uchar *pos1= (uchar*) rec1 + seg->start; @@ -478,7 +558,8 @@ int hp_rec_key_cmp(HP_KEYDEF *keydef, const uchar *rec1, const uchar *rec2) /* Compare a key in a record to a whole key */ -int hp_key_cmp(HP_KEYDEF *keydef, const uchar *rec, const uchar *key) +int hp_key_cmp(HP_KEYDEF *keydef, const uchar *rec, const uchar *key, + HP_INFO *info) { HA_KEYSEG *seg,*endseg; @@ -493,9 +574,9 @@ int hp_key_cmp(HP_KEYDEF *keydef, const uchar *rec, const uchar *key) return 1; if (found_null) { - /* Add key pack length (2) to key for VARCHAR segments */ + /* Add key pack length to key for VARCHAR/BLOB segments */ if (seg->type == HA_KEYTYPE_VARTEXT1) - key+= 2; + key+= (seg->flag & HA_BLOB_PART) ? 
4 + sizeof(uchar*) : 2; continue; } } @@ -518,12 +599,47 @@ int hp_key_cmp(HP_KEYDEF *keydef, const uchar *rec, const uchar *key) char_length_key= seg->length; char_length_rec= seg->length; } - + if (my_ci_strnncollsp(seg->charset, pos, char_length_rec, key, char_length_key)) return 1; } + else if (seg->type == HA_KEYTYPE_VARTEXT1 && (seg->flag & HA_BLOB_PART)) + { + /* + Blob segment: rec side is stored (chain pointers), key side has + 4-byte length + data pointer from hp_make_key. + */ + uint packlength= seg->bit_start; + uchar *pos= (uchar*) rec + seg->start; + size_t rec_blob_len= hp_blob_key_length(packlength, pos); + uint32 key_blob_len= uint4korr(key); + const uchar *key_data; + const uchar *rec_data; + + memcpy(&key_data, key + 4, HP_PTR_SIZE); + key+= 4 + sizeof(uchar*); + + if (rec_blob_len != key_blob_len) + return 1; + if (rec_blob_len == 0) + continue; + + /* rec is stored — materialize from chain */ + { + const uchar *chain; + memcpy(&chain, pos + packlength, HP_PTR_SIZE); + rec_data= hp_materialize_one_blob(info, chain, (uint32) rec_blob_len); + if (!rec_data) + return 1; + } + + if (my_ci_strnncollsp(seg->charset, + rec_data, rec_blob_len, + key_data, key_blob_len)) + return 1; + } else if (seg->type == HA_KEYTYPE_VARTEXT1) /* Any VARCHAR segments */ { uchar *pos= (uchar*) rec + seg->start; @@ -538,7 +654,7 @@ int hp_key_cmp(HP_KEYDEF *keydef, const uchar *rec, const uchar *key) if (cs->mbmaxlen > 1) { size_t char_length1, char_length2; - char_length1= char_length2= seg->length / cs->mbmaxlen; + char_length1= char_length2= seg->length / cs->mbmaxlen; char_length1= hp_charpos(cs, key, key + char_length_key, char_length1); set_if_smaller(char_length_key, char_length1); char_length2= hp_charpos(cs, pos, pos + char_length_rec, char_length2); @@ -586,6 +702,21 @@ void hp_make_key(HP_KEYDEF *keydef, uchar *key, const uchar *rec) uchar *pos= (uchar*) rec + seg->start; if (seg->null_bit) *key++= MY_TEST(rec[seg->null_pos] & seg->null_bit); + if (seg->type 
== HA_KEYTYPE_VARTEXT1 && (seg->flag & HA_BLOB_PART)) + { + /* + Blob segment in input record: store 4-byte length + data pointer + in key buffer for later use by hp_hashnr/hp_key_cmp. + */ + uint packlength= seg->bit_start; + uint32 blob_len= (uint32) hp_blob_key_length(packlength, pos); + const uchar *blob_data; + memcpy(&blob_data, pos + packlength, HP_PTR_SIZE); + int4store(key, blob_len); + memcpy(key + 4, &blob_data, HP_PTR_SIZE); + key+= 4 + sizeof(uchar*); + continue; + } if (cs->mbmaxlen > 1) { char_length= hp_charpos(cs, pos, pos + seg->length, diff --git a/storage/heap/hp_rfirst.c b/storage/heap/hp_rfirst.c index 60596a2c650fd..903fd42a135ed 100644 --- a/storage/heap/hp_rfirst.c +++ b/storage/heap/hp_rfirst.c @@ -38,6 +38,8 @@ int heap_rfirst(HP_INFO *info, uchar *record, int inx) sizeof(uchar*)); info->current_ptr = pos; memcpy(record, pos, (size_t)share->reclength); + if (share->blob_count && hp_read_blobs(info, record, pos)) + DBUG_RETURN(my_errno); /* If we're performing index_first on a table that was taken from table cache, info->lastkey_len is initialized to previous query. 
diff --git a/storage/heap/hp_rkey.c b/storage/heap/hp_rkey.c index 2d9fae4c52097..bc03226f2ba15 100644 --- a/storage/heap/hp_rkey.c +++ b/storage/heap/hp_rkey.c @@ -69,6 +69,8 @@ int heap_rkey(HP_INFO *info, uchar *record, int inx, const uchar *key, memcpy(info->lastkey, key, (size_t) keyinfo->length); } memcpy(record, pos, (size_t) share->reclength); + if (share->blob_count && hp_read_blobs(info, record, pos)) + DBUG_RETURN(my_errno); info->update= HA_STATE_AKTIV; DBUG_RETURN(0); } diff --git a/storage/heap/hp_rlast.c b/storage/heap/hp_rlast.c index ed9c3499d5e84..5b31bfccf07c0 100644 --- a/storage/heap/hp_rlast.c +++ b/storage/heap/hp_rlast.c @@ -38,6 +38,8 @@ int heap_rlast(HP_INFO *info, uchar *record, int inx) sizeof(uchar*)); info->current_ptr = pos; memcpy(record, pos, (size_t)share->reclength); + if (share->blob_count && hp_read_blobs(info, record, pos)) + DBUG_RETURN(my_errno); info->update = HA_STATE_AKTIV; } else diff --git a/storage/heap/hp_rnext.c b/storage/heap/hp_rnext.c index ac21ed83da271..774731624fd96 100644 --- a/storage/heap/hp_rnext.c +++ b/storage/heap/hp_rnext.c @@ -127,6 +127,8 @@ int heap_rnext(HP_INFO *info, uchar *record) DBUG_RETURN(my_errno); } memcpy(record,pos,(size_t) share->reclength); + if (share->blob_count && hp_read_blobs(info, record, pos)) + DBUG_RETURN(my_errno); info->update=HA_STATE_AKTIV | HA_STATE_NEXT_FOUND; DBUG_RETURN(0); } diff --git a/storage/heap/hp_rprev.c b/storage/heap/hp_rprev.c index cc81d179570aa..948d1db15ec53 100644 --- a/storage/heap/hp_rprev.c +++ b/storage/heap/hp_rprev.c @@ -94,6 +94,8 @@ int heap_rprev(HP_INFO *info, uchar *record) DBUG_RETURN(my_errno); } memcpy(record,pos,(size_t) share->reclength); + if (share->blob_count && hp_read_blobs(info, record, pos)) + DBUG_RETURN(my_errno); info->update=HA_STATE_AKTIV | HA_STATE_PREV_FOUND; DBUG_RETURN(0); } diff --git a/storage/heap/hp_rrnd.c b/storage/heap/hp_rrnd.c index 3947946ce6706..045804a94afe7 100644 --- a/storage/heap/hp_rrnd.c +++ 
b/storage/heap/hp_rrnd.c @@ -44,6 +44,8 @@ int heap_rrnd(register HP_INFO *info, uchar *record, uchar *pos) } info->update=HA_STATE_PREV_FOUND | HA_STATE_NEXT_FOUND | HA_STATE_AKTIV; memcpy(record,info->current_ptr,(size_t) share->reclength); + if (share->blob_count && hp_read_blobs(info, record, info->current_ptr)) + DBUG_RETURN(my_errno); DBUG_PRINT("exit", ("found record at %p", info->current_ptr)); info->current_hash_ptr=0; /* Can't use rnext */ DBUG_RETURN(0); diff --git a/storage/heap/hp_rsame.c b/storage/heap/hp_rsame.c index 8bba4cd23a9c1..1ab2511d617ba 100644 --- a/storage/heap/hp_rsame.c +++ b/storage/heap/hp_rsame.c @@ -49,6 +49,8 @@ int heap_rsame(register HP_INFO *info, uchar *record, int inx) } } memcpy(record,info->current_ptr,(size_t) share->reclength); + if (share->blob_count && hp_read_blobs(info, record, info->current_ptr)) + DBUG_RETURN(my_errno); DBUG_RETURN(0); } info->update=0; diff --git a/storage/heap/hp_scan.c b/storage/heap/hp_scan.c index f07efe6cf671c..8ef3d348c8c6d 100644 --- a/storage/heap/hp_scan.c +++ b/storage/heap/hp_scan.c @@ -43,6 +43,26 @@ int heap_scan(register HP_INFO *info, uchar *record) ulong pos; DBUG_ENTER("heap_scan"); + /* + Scan boundary: total_records + deleted == block.last_allocated. + + Every slot in the HP_BLOCK data area is either a live record (counted in + total_records) or a deleted/free slot (counted in deleted). This + includes blob continuation records allocated by hp_alloc_from_tail() + and freed by hp_free_run_chain(), both of which maintain the invariant + total_records + deleted == block.last_allocated. + + next_block is a cached upper bound for the current HP_BLOCK segment: + within one segment, current_ptr can be advanced by recbuffer without + calling hp_find_record(). It MUST satisfy + next_block <= total_records + deleted + at all times, otherwise the scan will walk past the last allocated + slot into unmapped memory. + + The else branch below recomputes next_block and caps it. 
Any code + that manipulates next_block externally (e.g. restart_rnd_next) must + also enforce this cap. + */ pos= ++info->current_record; if (pos < info->next_block) { @@ -50,12 +70,18 @@ int heap_scan(register HP_INFO *info, uchar *record) } else { - /* increase next_block to the next records_in_block boundary */ + /* Advance next_block to the next records_in_block boundary */ ulong rem= info->next_block % share->block.records_in_block; info->next_block+=share->block.records_in_block - rem; - if (info->next_block >= share->records+share->deleted) + /* + Cap next_block at the scan end (total_records + deleted). This is + essential: rows may have been deleted since next_block was last set + (e.g. remove_dup_with_compare deletes duplicates mid-scan), and + block boundaries can extend well past the last allocated slot. + */ + if (info->next_block >= share->total_records+share->deleted) { - info->next_block= share->records+share->deleted; + info->next_block= share->total_records+share->deleted; if (pos >= info->next_block) { info->update= 0; @@ -70,8 +96,27 @@ int heap_scan(register HP_INFO *info, uchar *record) info->update= HA_STATE_PREV_FOUND | HA_STATE_NEXT_FOUND; DBUG_RETURN(my_errno=HA_ERR_RECORD_DELETED); } + /* + Skip blob continuation runs. Rec 0 of each run has the flags byte + with HP_ROW_IS_CONT set; inner records (rec 1..N-1) have no flags + byte. Read run_rec_count from the header and skip the entire run. 
+ */ + if (hp_is_cont(info->current_ptr, share->visible)) + { + uint16 run_rec_count= hp_cont_rec_count(info->current_ptr); + if (run_rec_count > 1) + { + uint skip= run_rec_count - 1; + info->current_record+= skip; + info->current_ptr+= skip * share->block.recbuffer; + } + info->update= HA_STATE_PREV_FOUND | HA_STATE_NEXT_FOUND; + DBUG_RETURN(my_errno=HA_ERR_RECORD_DELETED); + } info->update= HA_STATE_PREV_FOUND | HA_STATE_NEXT_FOUND | HA_STATE_AKTIV; memcpy(record,info->current_ptr,(size_t) share->reclength); + if (share->blob_count && hp_read_blobs(info, record, info->current_ptr)) + DBUG_RETURN(my_errno); info->current_hash_ptr=0; /* Can't use read_next */ DBUG_RETURN(0); } /* heap_scan */ diff --git a/storage/heap/hp_static.c b/storage/heap/hp_static.c index 9a4410eead9ea..07c9f25597122 100644 --- a/storage/heap/hp_static.c +++ b/storage/heap/hp_static.c @@ -28,6 +28,7 @@ PSI_memory_key hp_key_memory_HP_SHARE; PSI_memory_key hp_key_memory_HP_INFO; PSI_memory_key hp_key_memory_HP_PTRS; PSI_memory_key hp_key_memory_HP_KEYDEF; +PSI_memory_key hp_key_memory_HP_BLOB; #ifdef HAVE_PSI_INTERFACE @@ -36,7 +37,8 @@ static PSI_memory_info all_heap_memory[]= { & hp_key_memory_HP_SHARE, "HP_SHARE", 0}, { & hp_key_memory_HP_INFO, "HP_INFO", 0}, { & hp_key_memory_HP_PTRS, "HP_PTRS", 0}, - { & hp_key_memory_HP_KEYDEF, "HP_KEYDEF", 0} + { & hp_key_memory_HP_KEYDEF, "HP_KEYDEF", 0}, + { & hp_key_memory_HP_BLOB, "HP_BLOB", 0} }; void init_heap_psi_keys() diff --git a/storage/heap/hp_update.c b/storage/heap/hp_update.c index ad56ca979deb6..9d885e2bb1b7e 100644 --- a/storage/heap/hp_update.c +++ b/storage/heap/hp_update.c @@ -42,7 +42,7 @@ int heap_update(HP_INFO *info, const uchar *old, const uchar *heap_new) p_lastinx= share->keydef + info->lastinx; for (keydef= share->keydef, end= keydef + share->keys; keydef < end; keydef++) { - if (hp_rec_key_cmp(keydef, old, heap_new)) + if (hp_rec_key_cmp(keydef, old, heap_new, NULL)) { if ((*keydef->delete_key)(info, keydef, old, pos, 
keydef == p_lastinx) || (*keydef->write_key)(info, keydef, heap_new, pos)) @@ -52,7 +52,99 @@ int heap_update(HP_INFO *info, const uchar *old, const uchar *heap_new) } } - memcpy(pos,heap_new,(size_t) share->reclength); + /* + Blob update strategy: write new chains before freeing old ones. + + We must not free old blob chains before the new ones are successfully + written, because hp_write_blobs() can fail (e.g. table full) and then + the old data would be unrecoverable. Instead: + 1. Save old chain head pointers (from pos) before memcpy overwrites them + 2. memcpy new record data into pos + 3. Write new blob chains (hp_write_blobs) + 4. On success: free old chains via saved pointers + On failure: restore old record from 'old' buffer, restore saved + chain pointers, re-set HP_ROW_HAS_CONT flag + */ + if (share->blob_count) + { + my_bool had_cont= hp_has_cont(pos, share->visible); + uchar **saved_chains= NULL; + + if (had_cont) + { + saved_chains= (uchar**) my_safe_alloca( + share->blob_count * sizeof(uchar*)); + for (uint i= 0; i < share->blob_count; i++) + { + HP_BLOB_DESC *desc= &share->blob_descs[i]; + memcpy(&saved_chains[i], pos + desc->offset + desc->packlength, + sizeof(saved_chains[i])); + } + } + memcpy(pos, heap_new, (size_t) share->reclength); + if (hp_write_blobs(info, heap_new, pos)) + { + /* New blobs cleaned up by hp_write_blobs rollback. Restore old record. 
*/ + memcpy(pos, old, (size_t) share->reclength); + if (had_cont) + { + for (uint i= 0; i < share->blob_count; i++) + { + HP_BLOB_DESC *desc= &share->blob_descs[i]; + memcpy(pos + desc->offset + desc->packlength, + &saved_chains[i], sizeof(saved_chains[i])); + } + pos[share->visible]|= HP_ROW_HAS_CONT; + } + my_safe_afree(saved_chains, + share->blob_count * sizeof(uchar*)); + goto err; + } + /* New blobs written — now safe to free old chains */ + if (had_cont) + { + for (uint i= 0; i < share->blob_count; i++) + hp_free_run_chain(share, saved_chains[i]); + my_safe_afree(saved_chains, + share->blob_count * sizeof(uchar*)); + } + /* + Refresh blob pointers in the caller's record buffer when zero-copy + pointers were used. + + hp_write_blobs() stored new chain head pointers in pos, but + heap_new may still have zero-copy pointers from the caller's last + hp_read_blobs() — those point into old chains that were just freed. + Copy new chain pointers from pos into heap_new, then call + hp_read_blobs() to replace them with materialized data pointers. + + Without this, callers that reuse heap_new after update (e.g., the + INTERSECT ALL unfold path in sql_union.cc) would follow dangling + pointers into freed HP_BLOCK records. + + Non-zero-copy blobs (Case C) have pointers into blob_buff which + is not affected by the chain free, so no refresh is needed. 
+ */ + if (info->has_zerocopy_blobs) + { + uchar *new_rec= (uchar*) heap_new; + for (uint i= 0; i < share->blob_count; i++) + { + HP_BLOB_DESC *desc= &share->blob_descs[i]; + { + uchar *chain; + memcpy(&chain, pos + desc->offset + desc->packlength, sizeof(chain)); + memcpy(new_rec + desc->offset + desc->packlength, &chain, + sizeof(chain)); + } + } + hp_read_blobs(info, new_rec, pos); + } + } + else + { + memcpy(pos, heap_new, (size_t) share->reclength); + } if (++(share->records) == share->blength) share->blength+= share->blength; #if !defined(DBUG_OFF) && defined(EXTRA_HEAP_DEBUG) @@ -81,7 +173,7 @@ int heap_update(HP_INFO *info, const uchar *old, const uchar *heap_new) } while (keydef >= share->keydef) { - if (hp_rec_key_cmp(keydef, old, heap_new)) + if (hp_rec_key_cmp(keydef, old, heap_new, NULL)) { if ((*keydef->delete_key)(info, keydef, heap_new, pos, 0) || (*keydef->write_key)(info, keydef, old, pos)) diff --git a/storage/heap/hp_write.c b/storage/heap/hp_write.c index cb079eac75788..9a8b244307de6 100644 --- a/storage/heap/hp_write.c +++ b/storage/heap/hp_write.c @@ -26,7 +26,6 @@ #define HIGHFIND 4 #define HIGHUSED 8 -static uchar *next_free_record_pos(HP_SHARE *info); static HASH_INFO *hp_find_free_hash(HP_SHARE *info, HP_BLOCK *block, ulong records); @@ -54,7 +53,13 @@ int heap_write(HP_INFO *info, const uchar *record) } memcpy(pos,record,(size_t) share->reclength); - pos[share->visible]= 1; /* Mark record as not deleted */ + if (share->blob_count) + { + if (hp_write_blobs(info, record, pos)) + goto err_blob; + } + else + pos[share->visible]= 1; /* Mark record as not deleted */ if (++share->records == share->blength) share->blength+= share->blength; info->s->key_version++; @@ -66,6 +71,33 @@ int heap_write(HP_INFO *info, const uchar *record) heap_update_auto_increment(info, record); DBUG_RETURN(0); +err_blob: + /* + Blob write failed after all keys were written successfully. + Roll back all keys — unlike err: below, no key needs to be skipped. 
+ + Do NOT call hp_free_blobs() here: hp_write_blobs() is self-cleaning + on failure — hp_write_one_blob() frees its own partial chain, and + hp_write_blobs() frees all previously completed columns (0..i-1) and + NULLs every chain pointer in pos. Calling hp_free_blobs() after this + would be both redundant and dangerous: + - The visibility byte pos[share->visible] has not been set yet (it is + only written on hp_write_blobs() success at line 493), so it may + contain uninitialized data from tail allocation with HP_ROW_HAS_CONT + bit set. + - Blob columns after the failed one (i+1..blob_count-1) still have the + SQL layer's original data pointers in pos (from memcpy at line 55), + not continuation chain pointers. hp_free_run_chain() would interpret + those as chain headers and crash. + */ + info->errkey= -1; + for (keydef= end - 1; keydef >= share->keydef; keydef--) + { + if ((*keydef->delete_key)(info, keydef, record, pos, 0)) + break; + } + goto err_common; + err: if (my_errno == HA_ERR_FOUND_DUPP_KEY) DBUG_PRINT("info",("Duplicate key: %d", (int) (keydef - share->keydef))); @@ -85,9 +117,20 @@ int heap_write(HP_INFO *info, const uchar *record) if ((*keydef->delete_key)(info, keydef, record, pos, 0)) break; keydef--; - } + } + + /* + Do NOT call hp_free_blobs here: the err: label is reached when a key + write fails (line 52), which is BEFORE memcpy(pos, record, reclength) + and hp_write_blobs(). The slot at pos still contains stale data from the + free list, so hp_free_blobs would chase garbage chain pointers. + Only err_blob: (above) needs hp_free_blobs, since blobs may have been + partially written there. + */ +err_common: share->deleted++; + share->total_records--; *((uchar**) pos)=share->del_link; share->del_link=pos; pos[share->visible]= 0; /* Record deleted */ @@ -128,9 +171,18 @@ int hp_rb_write_key(HP_INFO *info, HP_KEYDEF *keyinfo, const uchar *record, return 0; } - /* Find where to place new record */ +/* + Find where to place a new record. 
+ + Allocates from the free list (del_link) first; if empty, extends the + HP_BLOCK tail. Both paths maintain the scan-boundary invariant: + total_records + deleted == block.last_allocated + Free-list allocation does deleted-- + total_records++ (sum unchanged). + Tail allocation does last_allocated++ + total_records++ (sum grows by 1, + matching the new slot). heap_scan() relies on this sum to detect EOF. +*/ -static uchar *next_free_record_pos(HP_SHARE *info) +uchar *next_free_record_pos(HP_SHARE *info) { int block_pos; uchar *pos; @@ -142,19 +194,21 @@ static uchar *next_free_record_pos(HP_SHARE *info) pos=info->del_link; info->del_link= *((uchar**) pos); info->deleted--; + info->total_records++; DBUG_PRINT("exit",("Used old position: %p", pos)); DBUG_RETURN(pos); } - if (!(block_pos=(info->records % info->block.records_in_block))) + if (!(block_pos=(info->block.last_allocated % info->block.records_in_block))) { - if ((info->records > info->max_records && info->max_records) || + if ((info->block.last_allocated > info->max_records && + info->max_records) || (info->data_length + info->index_length >= info->max_table_size)) { DBUG_PRINT("error", - ("record file full. records: %lu max_records: %lu " + ("record file full. last_allocated: %lu max_records: %lu " "data_length: %llu index_length: %llu " "max_table_size: %llu", - info->records, info->max_records, + info->block.last_allocated, info->max_records, info->data_length, info->index_length, info->max_table_size)); my_errno=HA_ERR_RECORD_FILE_FULL; @@ -165,6 +219,8 @@ static uchar *next_free_record_pos(HP_SHARE *info) DBUG_RETURN(NULL); info->data_length+=length; } + info->block.last_allocated++; + info->total_records++; DBUG_PRINT("exit",("Used new position: %p", ((uchar*) info->block.level_info[0].last_blocks+ block_pos * info->block.recbuffer))); @@ -385,7 +441,7 @@ int hp_write_key(HP_INFO *info, HP_KEYDEF *keyinfo, do { if (pos->hash_of_key == hash_of_key && - ! 
hp_rec_key_cmp(keyinfo, record, pos->ptr_to_rec)) + ! hp_rec_key_cmp(keyinfo, record, pos->ptr_to_rec, info)) { DBUG_RETURN(my_errno=HA_ERR_FOUND_DUPP_KEY); } From c736807d95f9f2ca3da0d1d513aa9a28d5b87b5c Mon Sep 17 00:00:00 2001 From: Arcadiy Ivanov Date: Sun, 8 Mar 2026 20:25:38 -0400 Subject: [PATCH 2/3] Cap `min_run_records` for small blob free-list reuse The free-list allocator's minimum contiguous run threshold (`min_run_records`) could exceed the total records a small blob actually needs, making free-list reuse impossible on narrow tables. For example, with `recbuffer=16` the 128-byte floor produced `min_run_records=8`, but a 32-byte blob only needs 3 records. Any contiguous free-list group of 3 would be rejected, forcing unnecessary tail allocation. Cap both `min_run_bytes` at `data_len` and `min_run_records` at `total_records_needed` so small blobs can reuse free-list slots when a sufficient contiguous group exists. --- storage/heap/hp_blob.c | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/storage/heap/hp_blob.c b/storage/heap/hp_blob.c index 19c8068be5d14..776345b8b2034 100644 --- a/storage/heap/hp_blob.c +++ b/storage/heap/hp_blob.c @@ -302,14 +302,42 @@ static int hp_write_one_blob(HP_SHARE *share, const uchar *data_ptr, uchar *prev_run_start= NULL; uint32 data_offset= 0; - /* Calculate minimum acceptable run size */ + /* + Calculate minimum acceptable contiguous run size for free-list reuse. + + The free-list walk (Step 1 below) rejects contiguous groups smaller + than min_run_records, bailing to tail allocation instead. This + prevents excessive chain fragmentation for large blobs: accepting + tiny fragments would produce long chains of many short runs, each + with its own 10-byte header overhead and pointer dereference on read. 
+ + The threshold is the larger of: + - 1/10 of the blob size (caps chain length at ~10 runs) + - 128 bytes absolute floor (HP_CONT_MIN_RUN_BYTES) + - 2 records minimum (a single-slot run is pure overhead) + + For small blobs whose total bytes or records needed is below this + threshold, the fragmentation concern doesn't apply — the entire blob + fits in one short run. Cap both min_run_bytes and min_run_records + so the free list can satisfy the allocation without falling through + to the tail unnecessarily. + */ min_run_bytes= data_len / HP_CONT_RUN_FRACTION_DEN * HP_CONT_RUN_FRACTION_NUM; if (min_run_bytes < HP_CONT_MIN_RUN_BYTES) min_run_bytes= HP_CONT_MIN_RUN_BYTES; + if (min_run_bytes > data_len) + min_run_bytes= data_len; min_run_records= (min_run_bytes + recbuffer - 1) / recbuffer; if (min_run_records < 2) min_run_records= 2; + { + uint32 first_payload= visible - HP_CONT_HEADER_SIZE; + uint32 total_records_needed= data_len <= first_payload ? 1 : + 1 + (data_len - first_payload + recbuffer - 1) / recbuffer; + if (total_records_needed < min_run_records) + min_run_records= total_records_needed; + } /* Step 1: Try to allocate contiguous runs from the free list. From 005a981449175943293443dd5d5175d70f5586b6 Mon Sep 17 00:00:00 2001 From: Arcadiy Ivanov Date: Mon, 9 Mar 2026 23:11:34 -0400 Subject: [PATCH 3/3] MDEV-38975: Add hash pre-check to skip expensive blob materialization in hash chain traversal `hp_search()`, `hp_search_next()`, `hp_delete_key()`, and `find_unique_row()` walk hash chains calling `hp_key_cmp()` or `hp_rec_key_cmp()` for every entry. For blob key segments, each comparison triggers `hp_materialize_one_blob()` which reassembles blob data from continuation chain records. Since each `HASH_INFO` already stores `hash_of_key`, compare it against the search key's hash before the full key comparison. When hashes differ the keys are guaranteed different, skipping the expensive materialization. 
This pattern already existed in `hp_write_key()` for duplicate detection but was missing from the four read/delete paths. `HP_INFO::last_hash_of_key` is added so `hp_search_next()` can reuse the hash computed by `hp_search()` without recomputing it. --- include/heap.h | 1 + storage/heap/ha_heap.cc | 7 +++++-- storage/heap/hp_delete.c | 9 ++++++--- storage/heap/hp_hash.c | 15 ++++++++++++--- 4 files changed, 24 insertions(+), 8 deletions(-) diff --git a/include/heap.h b/include/heap.h index 633a33e53fd0f..439b437cf306f 100644 --- a/include/heap.h +++ b/include/heap.h @@ -193,6 +193,7 @@ typedef struct st_heap_info uchar *blob_buff; /* Reassembly buffer for blob reads */ uint32 blob_buff_len; /* Current allocated size of blob_buff */ my_bool has_zerocopy_blobs; /* Last hp_read_blobs produced zero-copy ptrs */ + ulong last_hash_of_key; /* Hash from last hp_search(), reused by hp_search_next() */ THR_LOCK_DATA lock; LIST open_list; } HP_INFO; diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc index 0c8f4aa2a5491..59497dc2f437e 100644 --- a/storage/heap/ha_heap.cc +++ b/storage/heap/ha_heap.cc @@ -878,12 +878,15 @@ int ha_heap::find_unique_row(uchar *record, uint unique_idx) DBUG_ASSERT(keyinfo->flag & HA_NOSAME); if (!share->records) DBUG_RETURN(1); // not found + ulong rec_hash= hp_rec_hashnr(keyinfo, record); HASH_INFO *pos= hp_find_hash(&keyinfo->block, - hp_mask(hp_rec_hashnr(keyinfo, record), + hp_mask(rec_hash, share->blength, share->records)); do { - if (!hp_rec_key_cmp(keyinfo, record, pos->ptr_to_rec, file)) + /* Hash pre-check avoids expensive blob materialization for non-matching entries */ + if (pos->hash_of_key == rec_hash && + !hp_rec_key_cmp(keyinfo, record, pos->ptr_to_rec, file)) { file->current_hash_ptr= pos; file->current_ptr= pos->ptr_to_rec; diff --git a/storage/heap/hp_delete.c b/storage/heap/hp_delete.c index 1a4da1fff0e44..f7538843d6946 100644 --- a/storage/heap/hp_delete.c +++ b/storage/heap/hp_delete.c @@ -114,7 +114,7 @@ int 
hp_rb_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo, int hp_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo, const uchar *record, uchar *recpos, int flag) { - ulong blength, pos2, pos_hashnr, lastpos_hashnr, key_pos; + ulong blength, pos2, pos_hashnr, lastpos_hashnr, key_pos, rec_hash; HASH_INFO *lastpos,*gpos,*pos,*pos3,*empty,*last_ptr; HP_SHARE *share=info->s; DBUG_ENTER("hp_delete_key"); @@ -126,14 +126,17 @@ int hp_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo, last_ptr=0; /* Search after record with key */ - key_pos= hp_mask(hp_rec_hashnr(keyinfo, record), blength, share->records + 1); + rec_hash= hp_rec_hashnr(keyinfo, record); + key_pos= hp_mask(rec_hash, blength, share->records + 1); pos= hp_find_hash(&keyinfo->block, key_pos); gpos = pos3 = 0; while (pos->ptr_to_rec != recpos) { - if (flag && !hp_rec_key_cmp(keyinfo, record, pos->ptr_to_rec, info)) + /* Hash pre-check avoids expensive blob materialization for non-matching entries */ + if (flag && pos->hash_of_key == rec_hash && + !hp_rec_key_cmp(keyinfo, record, pos->ptr_to_rec, info)) last_ptr=pos; /* Previous same key */ gpos=pos; if (!(pos=pos->next_key)) diff --git a/storage/heap/hp_hash.c b/storage/heap/hp_hash.c index c06fa77b9e9d9..772f5307134b5 100644 --- a/storage/heap/hp_hash.c +++ b/storage/heap/hp_hash.c @@ -138,15 +138,23 @@ uchar *hp_search(HP_INFO *info, HP_KEYDEF *keyinfo, const uchar *key, if (share->records) { + ulong key_hash= hp_hashnr(keyinfo, key); ulong search_pos= - hp_mask(hp_hashnr(keyinfo, key), share->blength, share->records); + hp_mask(key_hash, share->blength, share->records); pos=hp_find_hash(&keyinfo->block, search_pos); if (search_pos != hp_mask(pos->hash_of_key, share->blength, share->records)) goto not_found; /* Wrong link */ + /* + Save hash for hp_search_next() to reuse without recomputing. + Pre-check hash_of_key before hp_key_cmp() to avoid expensive + blob materialization for non-matching entries. 
+ */ + info->last_hash_of_key= key_hash; do { - if (!hp_key_cmp(keyinfo, pos->ptr_to_rec, key, info)) + if (pos->hash_of_key == key_hash && + !hp_key_cmp(keyinfo, pos->ptr_to_rec, key, info)) { switch (nextflag) { case 0: /* Search after key */ @@ -207,7 +215,8 @@ uchar *hp_search_next(HP_INFO *info, HP_KEYDEF *keyinfo, const uchar *key, while ((pos= pos->next_key)) { - if (! hp_key_cmp(keyinfo, pos->ptr_to_rec, key, info)) + if (pos->hash_of_key == info->last_hash_of_key && + ! hp_key_cmp(keyinfo, pos->ptr_to_rec, key, info)) { info->current_hash_ptr=pos; DBUG_RETURN (info->current_ptr= pos->ptr_to_rec);