From 6e23edb1391c72c4f8e196af8218154a7d3095ff Mon Sep 17 00:00:00 2001
From: Mats Kindahl
Date: Tue, 7 Jan 2025 15:54:36 +0100
Subject: [PATCH] Add error for DELETE triggers with transition tables

DELETE triggers with transition tables cannot be supported on
compressed tables that do not use the hypercore table access method:
a DELETE can remove an entire compressed segment, and no transition
table is built in that case. Raise an error when such a trigger is
created.

Also adds tests for triggers with transition tables on normal
compressed tables.
---
 src/hypertable.c                          |   4 +
 src/hypertable.h                          |   1 +
 src/process_utility.c                     |  15 +++
 tsl/test/expected/compression_trigger.out | 150 ++++++++++++++++++++++
 tsl/test/sql/CMakeLists.txt               |   1 +
 tsl/test/sql/compression_trigger.sql      |  99 ++++++++++++++
 tsl/test/sql/hypercore_trigger.sql        |   9 ++
 7 files changed, 279 insertions(+)
 create mode 100644 tsl/test/expected/compression_trigger.out
 create mode 100644 tsl/test/sql/compression_trigger.sql

diff --git a/src/hypertable.c b/src/hypertable.c
index ac3c2c5e765..100a163c42b 100644
--- a/src/hypertable.c
+++ b/src/hypertable.c
@@ -240,6 +240,7 @@ Hypertable *
 ts_hypertable_from_tupleinfo(const TupleInfo *ti)
 {
 	Hypertable *h = MemoryContextAllocZero(ti->mctx, sizeof(Hypertable));
+	char relkind;
 
 	ts_hypertable_formdata_fill(&h->fd, ti);
 	h->main_table_relid =
@@ -249,6 +250,9 @@ ts_hypertable_from_tupleinfo(const TupleInfo *ti)
 	ts_subspace_store_init(h->space, ti->mctx, ts_guc_max_cached_chunks_per_hypertable);
 	h->chunk_sizing_func = get_chunk_sizing_func_oid(&h->fd);
 
+	if (OidIsValid(h->main_table_relid))
+		ts_get_rel_info(h->main_table_relid, &h->amoid, &relkind);
+
 	if (ts_guc_enable_chunk_skipping)
 	{
 		h->range_space =
diff --git a/src/hypertable.h b/src/hypertable.h
index 9b961b801e3..77168b94fa3 100644
--- a/src/hypertable.h
+++ b/src/hypertable.h
@@ -51,6 +51,7 @@ typedef struct Hypertable
 	FormData_hypertable fd;
 	Oid main_table_relid;
 	Oid chunk_sizing_func;
+	Oid amoid;
 	Hyperspace *space;
 	SubspaceStore *chunk_cache;
 	ChunkRangeSpace *range_space;
diff --git a/src/process_utility.c b/src/process_utility.c
index 1aee1d67243..a2da3e42c9a 100644
--- a/src/process_utility.c
+++ b/src/process_utility.c
@@ -4482,6 +4482,21 @@ process_create_trigger_start(ProcessUtilityArgs *args)
 				 errmsg("ROW triggers with transition tables are not supported on hypertables")));
 	}
 
+	/*
+	 * We currently cannot support DELETE triggers on compressed tables that
+	 * are not using the hypercore table access method, since deleting a
+	 * complete segment will not build a transition table for the delete.
+	 */
+	if (stmt->transitionRels && TRIGGER_FOR_DELETE(tgtype) &&
+		TS_HYPERTABLE_HAS_COMPRESSION_ENABLED(ht) && !ts_is_hypercore_am(ht->amoid))
+	{
+		ts_cache_release(hcache);
+		ereport(ERROR,
+				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+				 errmsg("DELETE triggers with transition tables are only supported on compressed "
+						"hypertables using the \"hypercore\" table access method")));
+	}
+
 	add_hypertable_to_process_args(args, ht);
 
 	/*
diff --git a/tsl/test/expected/compression_trigger.out b/tsl/test/expected/compression_trigger.out
new file mode 100644
index 00000000000..c24dfb34fd6
--- /dev/null
+++ b/tsl/test/expected/compression_trigger.out
@@ -0,0 +1,150 @@
+-- This file and its contents are licensed under the Timescale License.
+-- Please see the included NOTICE for copyright information and
+-- LICENSE-TIMESCALE for a copy of the license.
+-- This is copied from hypercore_trigger.sql +set client_min_messages to warning; +create table readings( + metric_id serial, + created_at timestamptz not null unique, + location_id smallint, --segmentby attribute with index + owner_id bigint, --segmentby attribute without index + device_id bigint, --non-segmentby attribute + temp float8, + humidity float4 +); +select create_hypertable('readings', by_range('created_at')); + create_hypertable +------------------- + (1,t) +(1 row) + +select setseed(1); + setseed +--------- + +(1 row) + +insert into readings(created_at, location_id, device_id, owner_id, temp, humidity) +select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100 +from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t; +alter table readings set ( + timescaledb.compress, + timescaledb.compress_orderby = 'created_at', + timescaledb.compress_segmentby = 'location_id, owner_id' +); +select compress_chunk(show_chunks('readings')); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk + _timescaledb_internal._hyper_1_4_chunk + _timescaledb_internal._hyper_1_5_chunk + _timescaledb_internal._hyper_1_6_chunk +(6 rows) + +create table saved_rows (like readings, new_row bool not null, kind text); +create function save_transition_table() returns trigger as $$ +begin + case tg_op + when 'INSERT' then + insert into saved_rows select n.*, true, tg_op from new_table n; + when 'DELETE' then + insert into saved_rows select o.*, false, tg_op from old_table o; + when 'UPDATE' then + insert into saved_rows select n.*, true, tg_op from new_table n; + insert into saved_rows select o.*, false, tg_op from old_table o; + end case; + return null; +end; +$$ language plpgsql; +create trigger save_insert_transition_table_trg + after insert on readings + referencing new table as new_table + for each statement execute function save_transition_table(); +insert into readings(created_at, location_id, device_id, owner_id, temp, humidity) +values ('2022-06-01 00:01:23', 999, 666, 111, 3.14, 3.14), + ('2022-06-01 00:02:23', 999, 666, 112, 3.14, 3.14); +select * from saved_rows order by metric_id; + metric_id | created_at | location_id | owner_id | device_id | temp | humidity | new_row | kind +-----------+------------------------------+-------------+----------+-----------+------+----------+---------+-------- + 8642 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | t | INSERT + 8643 | Wed Jun 01 00:02:23 2022 PDT | 999 | 112 | 666 | 3.14 | 3.14 | t | INSERT +(2 rows) + +truncate saved_rows; +select compress_chunk(show_chunks('readings')); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk + _timescaledb_internal._hyper_1_4_chunk + _timescaledb_internal._hyper_1_5_chunk + _timescaledb_internal._hyper_1_6_chunk +(6 rows) + +copy readings(created_at, location_id, device_id, owner_id, temp, humidity) from stdin with (format csv); +select * from saved_rows order by metric_id; + metric_id | created_at | location_id | owner_id | device_id | temp | humidity | new_row | kind +-----------+------------------------------+-------------+----------+-----------+------+----------+---------+-------- + 8644 | Wed Jun 01 00:01:35 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | t | INSERT +(1 row) + +truncate saved_rows; 
+select compress_chunk(show_chunks('readings'));
+ compress_chunk
+----------------------------------------
+ _timescaledb_internal._hyper_1_1_chunk
+ _timescaledb_internal._hyper_1_2_chunk
+ _timescaledb_internal._hyper_1_3_chunk
+ _timescaledb_internal._hyper_1_4_chunk
+ _timescaledb_internal._hyper_1_5_chunk
+ _timescaledb_internal._hyper_1_6_chunk
+(6 rows)
+
+create trigger save_update_transition_table_trg
+  after update on readings
+  referencing new table as new_table old table as old_table
+  for each statement execute function save_transition_table();
+select * from readings where location_id = 999 order by metric_id;
+ metric_id | created_at | location_id | owner_id | device_id | temp | humidity
+-----------+------------------------------+-------------+----------+-----------+------+----------
+ 8642 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14
+ 8643 | Wed Jun 01 00:02:23 2022 PDT | 999 | 112 | 666 | 3.14 | 3.14
+ 8644 | Wed Jun 01 00:01:35 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14
+(3 rows)
+
+update readings set humidity = 99.99 where location_id = 999;
+select * from saved_rows order by metric_id;
+ metric_id | created_at | location_id | owner_id | device_id | temp | humidity | new_row | kind
+-----------+------------------------------+-------------+----------+-----------+------+----------+---------+--------
+ 8642 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 99.99 | t | UPDATE
+ 8642 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | f | UPDATE
+ 8643 | Wed Jun 01 00:02:23 2022 PDT | 999 | 112 | 666 | 3.14 | 99.99 | t | UPDATE
+ 8643 | Wed Jun 01 00:02:23 2022 PDT | 999 | 112 | 666 | 3.14 | 3.14 | f | UPDATE
+ 8644 | Wed Jun 01 00:01:35 2022 PDT | 999 | 111 | 666 | 3.14 | 99.99 | t | UPDATE
+ 8644 | Wed Jun 01 00:01:35 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | f | UPDATE
+(6 rows)
+
+truncate saved_rows;
+select compress_chunk(show_chunks('readings'));
+ compress_chunk
+----------------------------------------
+ _timescaledb_internal._hyper_1_1_chunk
+ _timescaledb_internal._hyper_1_2_chunk
+ _timescaledb_internal._hyper_1_3_chunk
+ _timescaledb_internal._hyper_1_4_chunk
+ _timescaledb_internal._hyper_1_5_chunk
+ _timescaledb_internal._hyper_1_6_chunk
+(6 rows)
+
+-- This is not supported since it is possible to delete an entire
+-- segment without executing the trigger.
+\set ON_ERROR_STOP 0
+create trigger save_delete_transition_table_trg
+  after delete on readings
+  referencing old table as old_table
+  for each statement execute function save_transition_table();
+ERROR: DELETE triggers with transition tables are only supported on compressed hypertables using the "hypercore" table access method
+\set ON_ERROR_STOP 1
diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt
index 2b3a007d7a2..b2ea71019e7 100644
--- a/tsl/test/sql/CMakeLists.txt
+++ b/tsl/test/sql/CMakeLists.txt
@@ -33,6 +33,7 @@ set(TEST_FILES
   compression_settings.sql
   compression_sorted_merge_columns.sql
   compression_sorted_merge_distinct.sql
+  compression_trigger.sql
   decompress_index.sql
   foreign_keys.sql
   move.sql
diff --git a/tsl/test/sql/compression_trigger.sql b/tsl/test/sql/compression_trigger.sql
new file mode 100644
index 00000000000..668874e7c8f
--- /dev/null
+++ b/tsl/test/sql/compression_trigger.sql
@@ -0,0 +1,99 @@
+-- This file and its contents are licensed under the Timescale License.
+-- Please see the included NOTICE for copyright information and
+-- LICENSE-TIMESCALE for a copy of the license.
+ +-- This is copied from hypercore_trigger.sql + +set client_min_messages to warning; + +create table readings( + metric_id serial, + created_at timestamptz not null unique, + location_id smallint, --segmentby attribute with index + owner_id bigint, --segmentby attribute without index + device_id bigint, --non-segmentby attribute + temp float8, + humidity float4 +); + +select create_hypertable('readings', by_range('created_at')); + +select setseed(1); + +insert into readings(created_at, location_id, device_id, owner_id, temp, humidity) +select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100 +from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t; + +alter table readings set ( + timescaledb.compress, + timescaledb.compress_orderby = 'created_at', + timescaledb.compress_segmentby = 'location_id, owner_id' +); + +select compress_chunk(show_chunks('readings')); + +create table saved_rows (like readings, new_row bool not null, kind text); + +create function save_transition_table() returns trigger as $$ +begin + case tg_op + when 'INSERT' then + insert into saved_rows select n.*, true, tg_op from new_table n; + when 'DELETE' then + insert into saved_rows select o.*, false, tg_op from old_table o; + when 'UPDATE' then + insert into saved_rows select n.*, true, tg_op from new_table n; + insert into saved_rows select o.*, false, tg_op from old_table o; + end case; + return null; +end; +$$ language plpgsql; + +create trigger save_insert_transition_table_trg + after insert on readings + referencing new table as new_table + for each statement execute function save_transition_table(); + +insert into readings(created_at, location_id, device_id, owner_id, temp, humidity) +values ('2022-06-01 00:01:23', 999, 666, 111, 3.14, 3.14), + ('2022-06-01 00:02:23', 999, 666, 112, 3.14, 3.14); + +select * from saved_rows order by metric_id; + +truncate saved_rows; + +select compress_chunk(show_chunks('readings')); + +copy readings(created_at, location_id, device_id, owner_id, temp, humidity) from stdin with (format csv); +"2022-06-01 00:01:35",999,666,111,3.14,3.14 +\. + +select * from saved_rows order by metric_id; + +truncate saved_rows; + +select compress_chunk(show_chunks('readings')); + +create trigger save_update_transition_table_trg + after update on readings + referencing new table as new_table old table as old_table + for each statement execute function save_transition_table(); + +select * from readings where location_id = 999 order by metric_id; + +update readings set humidity = 99.99 where location_id = 999; + +select * from saved_rows order by metric_id; + +truncate saved_rows; + +select compress_chunk(show_chunks('readings')); + +-- This is not supported since it is possible to delete an entire +-- segment without executing the trigger. +\set ON_ERROR_STOP 0 +create trigger save_delete_transition_table_trg + after delete on readings + referencing old table as old_table + for each statement execute function save_transition_table(); +\set ON_ERROR_STOP 1 diff --git a/tsl/test/sql/hypercore_trigger.sql b/tsl/test/sql/hypercore_trigger.sql index 7c590961ae8..5a6d5bea9e5 100644 --- a/tsl/test/sql/hypercore_trigger.sql +++ b/tsl/test/sql/hypercore_trigger.sql @@ -207,6 +207,9 @@ select * from saved_rows; truncate saved_rows; +-- Compress the data again to make sure that it is fully compressed. 
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true); + copy readings(created_at, location_id, device_id, owner_id, temp, humidity) from stdin with (format csv); "2022-06-01 00:01:35",999,666,111,3.14,3.14 \. @@ -215,6 +218,9 @@ select * from saved_rows; truncate saved_rows; +-- Compress the data again to make sure that it is fully compressed. +select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true); + create trigger save_update_transition_table_trg after update on readings referencing new table as new_table old table as old_table @@ -228,6 +234,9 @@ select * from saved_rows; truncate saved_rows; +-- Compress the data again to make sure that it is fully compressed. +select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true); + create trigger save_delete_transition_table_trg after delete on readings referencing old table as old_table
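
Not part of the patch: a minimal sketch of the behavior the new check enforces, reusing the "readings" table and save_transition_table() function from the tests above. The "alter table ... set access method hypercore" step is an assumption about how a hypertable is switched to the hypercore table access method; this patch itself only exercises the error path, and hypercore_trigger.sql enables hypercore through compress_chunk(..., hypercore_use_access_method => true).

-- On a compressed hypertable that does not use the hypercore table
-- access method, creating the trigger now fails:
create trigger save_delete_transition_table_trg
  after delete on readings
  referencing old table as old_table
  for each statement execute function save_transition_table();
-- ERROR: DELETE triggers with transition tables are only supported on
-- compressed hypertables using the "hypercore" table access method

-- Assumed workaround: switch the hypertable to the hypercore table
-- access method (syntax is an assumption, not shown in this patch),
-- after which the same trigger definition is accepted:
alter table readings set access method hypercore;
create trigger save_delete_transition_table_trg
  after delete on readings
  referencing old table as old_table
  for each statement execute function save_transition_table();
-- A statement-level DELETE now records the deleted rows through the
-- old_table transition table:
delete from readings where location_id = 999;
select * from saved_rows order by metric_id;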