Add error for DELETE triggers with transition tables
DELETE triggers with transition tables cannot be supported on compressed
tables that do not use the hypercore table access method: a DELETE can
remove an entire compressed segment, and no transition table is built in
that case. We therefore generate an error when such a trigger is created.

Adds a set of tests for triggers with transition tables on normal
compressed tables.
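
For context, a minimal sketch of the now-rejected case (condensed from the
compression_trigger.sql test below; save_transition_table() is the trigger
function defined there):

create table readings(created_at timestamptz not null unique, temp float8);
select create_hypertable('readings', by_range('created_at'));
alter table readings set (
    timescaledb.compress,
    timescaledb.compress_orderby = 'created_at'
);
-- Rejected: the hypertable has compression enabled but does not use the
-- hypercore table access method.
create trigger save_delete_transition_table_trg
    after delete on readings
    referencing old table as old_table
    for each statement execute function save_transition_table();
-- ERROR:  DELETE triggers with transition tables only supported on compressed
-- hypertables using the "hypercore" table access method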
mkindahl committed Jan 10, 2025
1 parent 3170014 commit 6e23edb
Showing 7 changed files with 279 additions and 0 deletions.
4 changes: 4 additions & 0 deletions src/hypertable.c
@@ -240,6 +240,7 @@ Hypertable *
ts_hypertable_from_tupleinfo(const TupleInfo *ti)
{
Hypertable *h = MemoryContextAllocZero(ti->mctx, sizeof(Hypertable));
char relkind;

ts_hypertable_formdata_fill(&h->fd, ti);
h->main_table_relid =
@@ -249,6 +250,9 @@ ts_hypertable_from_tupleinfo(const TupleInfo *ti)
ts_subspace_store_init(h->space, ti->mctx, ts_guc_max_cached_chunks_per_hypertable);
h->chunk_sizing_func = get_chunk_sizing_func_oid(&h->fd);

if (OidIsValid(h->main_table_relid))
ts_get_rel_info(h->main_table_relid, &h->amoid, &relkind);

if (ts_guc_enable_chunk_skipping)
{
h->range_space =
1 change: 1 addition & 0 deletions src/hypertable.h
@@ -51,6 +51,7 @@ typedef struct Hypertable
FormData_hypertable fd;
Oid main_table_relid;
Oid chunk_sizing_func;
Oid amoid;
Hyperspace *space;
SubspaceStore *chunk_cache;
ChunkRangeSpace *range_space;
15 changes: 15 additions & 0 deletions src/process_utility.c
@@ -4482,6 +4482,21 @@ process_create_trigger_start(ProcessUtilityArgs *args)
errmsg("ROW triggers with transition tables are not supported on hypertables")));
}

/*
* We currently cannot support DELETE triggers on compressed tables that
* are not using the hypercore table access method, since deleting a
* complete segment will not build a transition table for the delete.
*/
if (stmt->transitionRels && TRIGGER_FOR_DELETE(tgtype) &&
TS_HYPERTABLE_HAS_COMPRESSION_ENABLED(ht) && !ts_is_hypercore_am(ht->amoid))
{
ts_cache_release(hcache);
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("DELETE triggers with transition tables only supported on compressed "
"hypertables using the \"hypercore\" table access method")));
}

add_hypertable_to_process_args(args, ht);

/*
150 changes: 150 additions & 0 deletions tsl/test/expected/compression_trigger.out
@@ -0,0 +1,150 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- This is copied from hypercore_trigger.sql
set client_min_messages to warning;
create table readings(
metric_id serial,
created_at timestamptz not null unique,
location_id smallint, --segmentby attribute with index
owner_id bigint, --segmentby attribute without index
device_id bigint, --non-segmentby attribute
temp float8,
humidity float4
);
select create_hypertable('readings', by_range('created_at'));
create_hypertable
-------------------
(1,t)
(1 row)

select setseed(1);
setseed
---------

(1 row)

insert into readings(created_at, location_id, device_id, owner_id, temp, humidity)
select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
alter table readings set (
timescaledb.compress,
timescaledb.compress_orderby = 'created_at',
timescaledb.compress_segmentby = 'location_id, owner_id'
);
select compress_chunk(show_chunks('readings'));
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
_timescaledb_internal._hyper_1_5_chunk
_timescaledb_internal._hyper_1_6_chunk
(6 rows)

create table saved_rows (like readings, new_row bool not null, kind text);
create function save_transition_table() returns trigger as $$
begin
case tg_op
when 'INSERT' then
insert into saved_rows select n.*, true, tg_op from new_table n;
when 'DELETE' then
insert into saved_rows select o.*, false, tg_op from old_table o;
when 'UPDATE' then
insert into saved_rows select n.*, true, tg_op from new_table n;
insert into saved_rows select o.*, false, tg_op from old_table o;
end case;
return null;
end;
$$ language plpgsql;
create trigger save_insert_transition_table_trg
after insert on readings
referencing new table as new_table
for each statement execute function save_transition_table();
insert into readings(created_at, location_id, device_id, owner_id, temp, humidity)
values ('2022-06-01 00:01:23', 999, 666, 111, 3.14, 3.14),
('2022-06-01 00:02:23', 999, 666, 112, 3.14, 3.14);
select * from saved_rows order by metric_id;
metric_id | created_at | location_id | owner_id | device_id | temp | humidity | new_row | kind
-----------+------------------------------+-------------+----------+-----------+------+----------+---------+--------
8642 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | t | INSERT
8643 | Wed Jun 01 00:02:23 2022 PDT | 999 | 112 | 666 | 3.14 | 3.14 | t | INSERT
(2 rows)

truncate saved_rows;
select compress_chunk(show_chunks('readings'));
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
_timescaledb_internal._hyper_1_5_chunk
_timescaledb_internal._hyper_1_6_chunk
(6 rows)

copy readings(created_at, location_id, device_id, owner_id, temp, humidity) from stdin with (format csv);
select * from saved_rows order by metric_id;
metric_id | created_at | location_id | owner_id | device_id | temp | humidity | new_row | kind
-----------+------------------------------+-------------+----------+-----------+------+----------+---------+--------
8644 | Wed Jun 01 00:01:35 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | t | INSERT
(1 row)

truncate saved_rows;
select compress_chunk(show_chunks('readings'));
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
_timescaledb_internal._hyper_1_5_chunk
_timescaledb_internal._hyper_1_6_chunk
(6 rows)

create trigger save_update_transition_table_trg
after update on readings
referencing new table as new_table old table as old_table
for each statement execute function save_transition_table();
select * from readings where location_id = 999 order by metric_id;
metric_id | created_at | location_id | owner_id | device_id | temp | humidity
-----------+------------------------------+-------------+----------+-----------+------+----------
8642 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14
8643 | Wed Jun 01 00:02:23 2022 PDT | 999 | 112 | 666 | 3.14 | 3.14
8644 | Wed Jun 01 00:01:35 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14
(3 rows)

update readings set humidity = 99.99 where location_id = 999;
select * from saved_rows order by metric_id;
metric_id | created_at | location_id | owner_id | device_id | temp | humidity | new_row | kind
-----------+------------------------------+-------------+----------+-----------+------+----------+---------+--------
8642 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 99.99 | t | UPDATE
8642 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | f | UPDATE
8643 | Wed Jun 01 00:02:23 2022 PDT | 999 | 112 | 666 | 3.14 | 99.99 | t | UPDATE
8643 | Wed Jun 01 00:02:23 2022 PDT | 999 | 112 | 666 | 3.14 | 3.14 | f | UPDATE
8644 | Wed Jun 01 00:01:35 2022 PDT | 999 | 111 | 666 | 3.14 | 99.99 | t | UPDATE
8644 | Wed Jun 01 00:01:35 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | f | UPDATE
(6 rows)

truncate saved_rows;
select compress_chunk(show_chunks('readings'));
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
_timescaledb_internal._hyper_1_5_chunk
_timescaledb_internal._hyper_1_6_chunk
(6 rows)

-- This is not supported since it is possible to delete an entire
-- segment without executing the trigger.
\set ON_ERROR_STOP 0
create trigger save_delete_transition_table_trg
after delete on readings
referencing old table as old_table
for each statement execute function save_transition_table();
ERROR: DELETE triggers with transition tables only supported on compressed hypertables using the "hypercore" table access method
\set ON_ERROR_STOP 1
1 change: 1 addition & 0 deletions tsl/test/sql/CMakeLists.txt
@@ -33,6 +33,7 @@ set(TEST_FILES
compression_settings.sql
compression_sorted_merge_columns.sql
compression_sorted_merge_distinct.sql
compression_trigger.sql
decompress_index.sql
foreign_keys.sql
move.sql
99 changes: 99 additions & 0 deletions tsl/test/sql/compression_trigger.sql
@@ -0,0 +1,99 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.

-- This is copied from hypercore_trigger.sql

set client_min_messages to warning;

create table readings(
metric_id serial,
created_at timestamptz not null unique,
location_id smallint, --segmentby attribute with index
owner_id bigint, --segmentby attribute without index
device_id bigint, --non-segmentby attribute
temp float8,
humidity float4
);

select create_hypertable('readings', by_range('created_at'));

select setseed(1);

insert into readings(created_at, location_id, device_id, owner_id, temp, humidity)
select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;

alter table readings set (
timescaledb.compress,
timescaledb.compress_orderby = 'created_at',
timescaledb.compress_segmentby = 'location_id, owner_id'
);

select compress_chunk(show_chunks('readings'));

create table saved_rows (like readings, new_row bool not null, kind text);

create function save_transition_table() returns trigger as $$
begin
case tg_op
when 'INSERT' then
insert into saved_rows select n.*, true, tg_op from new_table n;
when 'DELETE' then
insert into saved_rows select o.*, false, tg_op from old_table o;
when 'UPDATE' then
insert into saved_rows select n.*, true, tg_op from new_table n;
insert into saved_rows select o.*, false, tg_op from old_table o;
end case;
return null;
end;
$$ language plpgsql;

create trigger save_insert_transition_table_trg
after insert on readings
referencing new table as new_table
for each statement execute function save_transition_table();

insert into readings(created_at, location_id, device_id, owner_id, temp, humidity)
values ('2022-06-01 00:01:23', 999, 666, 111, 3.14, 3.14),
('2022-06-01 00:02:23', 999, 666, 112, 3.14, 3.14);

select * from saved_rows order by metric_id;

truncate saved_rows;

select compress_chunk(show_chunks('readings'));

copy readings(created_at, location_id, device_id, owner_id, temp, humidity) from stdin with (format csv);
"2022-06-01 00:01:35",999,666,111,3.14,3.14
\.

select * from saved_rows order by metric_id;

truncate saved_rows;

select compress_chunk(show_chunks('readings'));

create trigger save_update_transition_table_trg
after update on readings
referencing new table as new_table old table as old_table
for each statement execute function save_transition_table();

select * from readings where location_id = 999 order by metric_id;

update readings set humidity = 99.99 where location_id = 999;

select * from saved_rows order by metric_id;

truncate saved_rows;

select compress_chunk(show_chunks('readings'));

-- This is not supported since it is possible to delete an entire
-- segment without executing the trigger.
\set ON_ERROR_STOP 0
create trigger save_delete_transition_table_trg
after delete on readings
referencing old table as old_table
for each statement execute function save_transition_table();
\set ON_ERROR_STOP 1
9 changes: 9 additions & 0 deletions tsl/test/sql/hypercore_trigger.sql
@@ -207,6 +207,9 @@ select * from saved_rows;

truncate saved_rows;

-- Compress the data again to make sure that it is fully compressed.
select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);

copy readings(created_at, location_id, device_id, owner_id, temp, humidity) from stdin with (format csv);
"2022-06-01 00:01:35",999,666,111,3.14,3.14
\.
@@ -215,6 +218,9 @@ select * from saved_rows;

truncate saved_rows;

-- Compress the data again to make sure that it is fully compressed.
select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);

create trigger save_update_transition_table_trg
after update on readings
referencing new table as new_table old table as old_table
@@ -228,6 +234,9 @@ select * from saved_rows;

truncate saved_rows;

-- Compress the data again to make sure that it is fully compressed.
select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);

create trigger save_delete_transition_table_trg
after delete on readings
referencing old table as old_table
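By contrast, the same trigger definition is accepted when the chunks use the
hypercore table access method, which the hypercore_trigger.sql test above
exercises. A condensed sketch reusing names from that test:

select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
-- Accepted: per the commit message, the unsupported case is specific to
-- compressed tables *not* using the hypercore table access method.
create trigger save_delete_transition_table_trg
    after delete on readings
    referencing old table as old_table
    for each statement execute function save_transition_table();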
