Commit 36f796f

Fix broken tests

kpan2034 committed Jan 7, 2025
1 parent afcc18e commit 36f796f

Showing 16 changed files with 442 additions and 610 deletions.
10 changes: 5 additions & 5 deletions tsl/test/expected/compression.out
@@ -2794,12 +2794,12 @@ COPY compressed_table (time,a,b,c) FROM stdin;
SELECT compress_chunk(i, if_not_compressed => true) FROM show_chunks('compressed_table') i;
compress_chunk
-------------------------------------------
-_timescaledb_internal._hyper_49_108_chunk
+_timescaledb_internal._hyper_49_107_chunk
(1 row)

\set ON_ERROR_STOP 0
COPY compressed_table (time,a,b,c) FROM stdin;
-ERROR: duplicate key value violates unique constraint "_hyper_49_108_chunk_compressed_table_index"
+ERROR: duplicate key value violates unique constraint "_hyper_49_107_chunk_compressed_table_index"
\set ON_ERROR_STOP 1
COPY compressed_table (time,a,b,c) FROM stdin;
SELECT * FROM compressed_table;
@@ -2813,7 +2813,7 @@ SELECT * FROM compressed_table;
SELECT compress_chunk(i, if_not_compressed => true) FROM show_chunks('compressed_table') i;
compress_chunk
-------------------------------------------
-_timescaledb_internal._hyper_49_108_chunk
+_timescaledb_internal._hyper_49_107_chunk
(1 row)

-- Check DML decompression limit
@@ -2837,15 +2837,15 @@ NOTICE: default order by for hypertable "hyper_84" is set to ""time" DESC"
SELECT compress_chunk(ch) FROM show_chunks('hyper_84') ch;
compress_chunk
-------------------------------------------
-_timescaledb_internal._hyper_51_110_chunk
+_timescaledb_internal._hyper_51_109_chunk
(1 row)

-- indexscan for decompression: UPDATE
UPDATE hyper_84 SET temp = 100 where device = 1;
SELECT compress_chunk(ch) FROM show_chunks('hyper_84') ch;
compress_chunk
-------------------------------------------
-_timescaledb_internal._hyper_51_110_chunk
+_timescaledb_internal._hyper_51_109_chunk
(1 row)

-- indexscan for decompression: DELETE
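
The compression.out hunks above are pure chunk renumbering (108 becomes 107, 110 becomes 109). For orientation, here is a minimal sketch of the pattern this test exercises, a unique index that DML against compressed chunks must still enforce; the table layout and index definition are illustrative, not the test's exact setup:

CREATE TABLE compressed_table(time timestamptz NOT NULL, a int, b int, c int);
SELECT create_hypertable('compressed_table', 'time');
-- Illustrative unique index; DML on compressed chunks must still enforce it.
CREATE UNIQUE INDEX compressed_table_index ON compressed_table (time, a);
ALTER TABLE compressed_table SET (timescaledb.compress, timescaledb.compress_segmentby = 'a');
SELECT compress_chunk(i, if_not_compressed => true) FROM show_chunks('compressed_table') i;
-- A COPY that repeats an existing (time, a) pair should fail with the
-- duplicate-key error shown in the expected output above.
COPY compressed_table (time, a, b, c) FROM stdin;
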
12 changes: 6 additions & 6 deletions tsl/test/expected/compression_bgw.out
@@ -175,8 +175,8 @@ WHERE compression_status LIKE 'Compressed'
ORDER BY chunk_name;
chunk_name | before_compression_total_bytes | after_compression_total_bytes
------------------+--------------------------------+-------------------------------
-_hyper_3_5_chunk | 24576 | 24576
-_hyper_3_6_chunk | 24576 | 24576
+_hyper_3_5_chunk | 24576 | 40960
+_hyper_3_6_chunk | 24576 | 40960
(2 rows)

--integer tests
@@ -215,8 +215,8 @@ WHERE compression_status LIKE 'Compressed'
ORDER BY chunk_name;
chunk_name | before_compression_total_bytes | after_compression_total_bytes
-------------------+--------------------------------+-------------------------------
-_hyper_5_12_chunk | 24576 | 24576
-_hyper_5_13_chunk | 24576 | 24576
+_hyper_5_12_chunk | 24576 | 40960
+_hyper_5_13_chunk | 24576 | 40960
(2 rows)

--bigint test
@@ -255,8 +255,8 @@ WHERE compression_status LIKE 'Compressed'
ORDER BY chunk_name;
chunk_name | before_compression_total_bytes | after_compression_total_bytes
-------------------+--------------------------------+-------------------------------
-_hyper_7_19_chunk | 24576 | 24576
-_hyper_7_20_chunk | 24576 | 24576
+_hyper_7_19_chunk | 24576 | 40960
+_hyper_7_20_chunk | 24576 | 40960
(2 rows)

--TEST 8
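
The compression_bgw.out changes are confined to after_compression_total_bytes, which grows from 24576 to 40960 (16 kB, i.e. two extra 8 kB pages per chunk), presumably the footprint of the compressed-chunk metadata index that the plans elsewhere in this commit now scan. A sketch of how such figures are queried with the stock stats function; the hypertable name is a placeholder:

SELECT chunk_name,
       before_compression_total_bytes,
       after_compression_total_bytes
FROM chunk_compression_stats('metrics')  -- 'metrics' is a placeholder hypertable
WHERE compression_status LIKE 'Compressed'
ORDER BY chunk_name;
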
236 changes: 82 additions & 154 deletions tsl/test/expected/compression_ddl.out

Large diffs are not rendered by default.

60 changes: 25 additions & 35 deletions tsl/test/expected/compression_insert.out
@@ -790,13 +790,11 @@ SELECT compress_chunk(format('%I.%I',chunk_schema,chunk_name), true) FROM timesc

-- should be ordered append
:PREFIX SELECT * FROM test_ordering ORDER BY 1;
-                                               QUERY PLAN
---------------------------------------------------------------------------------------------------------
+                                                          QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------
 Custom Scan (DecompressChunk) on _hyper_13_20_chunk
-   ->  Sort
-         Sort Key: compress_hyper_14_21_chunk._ts_meta_min_1, compress_hyper_14_21_chunk._ts_meta_max_1
-         ->  Seq Scan on compress_hyper_14_21_chunk
-(4 rows)
+   ->  Index Scan Backward using compress_hyper_14_21_chunk__ts_meta_min_1__ts_meta_max_1_idx on compress_hyper_14_21_chunk
+(2 rows)

INSERT INTO test_ordering SELECT 1;
-- should not be ordered append
@@ -807,39 +805,35 @@ INSERT INTO test_ordering SELECT 1;
-- It was hard to include a path without pushed down sort for consideration, as `add_path` would reject
-- the path with sort pushdown, which is desirable in most cases
:PREFIX SELECT * FROM test_ordering ORDER BY 1;
-                                                     QUERY PLAN
---------------------------------------------------------------------------------------------------------------------
+                                                                QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------
 Custom Scan (ChunkAppend) on test_ordering
   Order: test_ordering."time"
   ->  Merge Append
         Sort Key: _hyper_13_20_chunk."time"
         ->  Custom Scan (DecompressChunk) on _hyper_13_20_chunk
-              ->  Sort
-                    Sort Key: compress_hyper_14_21_chunk._ts_meta_min_1, compress_hyper_14_21_chunk._ts_meta_max_1
-                    ->  Seq Scan on compress_hyper_14_21_chunk
+              ->  Index Scan Backward using compress_hyper_14_21_chunk__ts_meta_min_1__ts_meta_max_1_idx on compress_hyper_14_21_chunk
         ->  Sort
               Sort Key: _hyper_13_20_chunk."time"
               ->  Seq Scan on _hyper_13_20_chunk
-(11 rows)
+(9 rows)

INSERT INTO test_ordering VALUES (105),(104),(103);
-- should be ordered append
:PREFIX SELECT * FROM test_ordering ORDER BY 1;
-                                                     QUERY PLAN
---------------------------------------------------------------------------------------------------------------------
+                                                                QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------
 Custom Scan (ChunkAppend) on test_ordering
   Order: test_ordering."time"
   ->  Merge Append
         Sort Key: _hyper_13_20_chunk."time"
         ->  Custom Scan (DecompressChunk) on _hyper_13_20_chunk
-              ->  Sort
-                    Sort Key: compress_hyper_14_21_chunk._ts_meta_min_1, compress_hyper_14_21_chunk._ts_meta_max_1
-                    ->  Seq Scan on compress_hyper_14_21_chunk
+              ->  Index Scan Backward using compress_hyper_14_21_chunk__ts_meta_min_1__ts_meta_max_1_idx on compress_hyper_14_21_chunk
         ->  Sort
               Sort Key: _hyper_13_20_chunk."time"
              ->  Seq Scan on _hyper_13_20_chunk
   ->  Index Only Scan Backward using _hyper_13_22_chunk_test_ordering_time_idx on _hyper_13_22_chunk
-(12 rows)
+(10 rows)

--insert into compressed + uncompressed chunk
INSERT INTO test_ordering VALUES (21), (22),(113);
@@ -881,19 +875,15 @@ SELECT compress_chunk(format('%I.%I',chunk_schema,chunk_name), true) FROM timesc

-- should be ordered append
:PREFIX SELECT * FROM test_ordering ORDER BY 1;
-                                                  QUERY PLAN
---------------------------------------------------------------------------------------------------------------
+                                                             QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------
 Custom Scan (ChunkAppend) on test_ordering
   Order: test_ordering."time"
   ->  Custom Scan (DecompressChunk) on _hyper_13_20_chunk
-        ->  Sort
-              Sort Key: compress_hyper_14_23_chunk._ts_meta_min_1, compress_hyper_14_23_chunk._ts_meta_max_1
-              ->  Seq Scan on compress_hyper_14_23_chunk
+        ->  Index Scan Backward using compress_hyper_14_21_chunk__ts_meta_min_1__ts_meta_max_1_idx on compress_hyper_14_21_chunk
   ->  Custom Scan (DecompressChunk) on _hyper_13_22_chunk
-        ->  Sort
-              Sort Key: compress_hyper_14_24_chunk._ts_meta_min_1, compress_hyper_14_24_chunk._ts_meta_max_1
-              ->  Seq Scan on compress_hyper_14_24_chunk
-(10 rows)
+        ->  Index Scan Backward using compress_hyper_14_23_chunk__ts_meta_min_1__ts_meta_max_1_idx on compress_hyper_14_23_chunk
+(6 rows)

SET timescaledb.enable_decompression_sorted_merge = 1;
-- TEST cagg triggers with insert into compressed chunk
@@ -930,15 +920,15 @@ NOTICE: default order by for hypertable "conditions" is set to "timec DESC"
SELECT compress_chunk(ch) FROM show_chunks('conditions') ch;
compress_chunk
------------------------------------------
-_timescaledb_internal._hyper_15_25_chunk
+_timescaledb_internal._hyper_15_24_chunk
(1 row)

SELECT chunk_name, range_start, range_end, is_compressed
FROM timescaledb_information.chunks
WHERE hypertable_name = 'conditions';
chunk_name | range_start | range_end | is_compressed
--------------------+------------------------------+------------------------------+---------------
-_hyper_15_25_chunk | Wed Dec 30 16:00:00 2009 PST | Wed Jan 06 16:00:00 2010 PST | t
+_hyper_15_24_chunk | Wed Dec 30 16:00:00 2009 PST | Wed Jan 06 16:00:00 2010 PST | t
(1 row)

--now insert into compressed chunk
@@ -1091,11 +1081,11 @@ SET timescaledb.max_tuples_decompressed_per_dml_transaction = 1;
\set ON_ERROR_STOP 0
-- Inserting in the same period should decompress tuples
INSERT INTO test_limit SELECT t, 2 FROM generate_series(1,6000,1000) t;
-ERROR: duplicate key value violates unique constraint "_hyper_24_54_chunk_timestamp_id_idx"
+ERROR: duplicate key value violates unique constraint "_hyper_24_53_chunk_timestamp_id_idx"
-- Setting to 0 should remove the limit.
SET timescaledb.max_tuples_decompressed_per_dml_transaction = 0;
INSERT INTO test_limit SELECT t, 2 FROM generate_series(1,6000,1000) t;
-ERROR: duplicate key value violates unique constraint "_hyper_24_54_chunk_timestamp_id_idx"
+ERROR: duplicate key value violates unique constraint "_hyper_24_53_chunk_timestamp_id_idx"
\set ON_ERROR_STOP 1
DROP TABLE test_limit;
RESET timescaledb.max_tuples_decompressed_per_dml_transaction;
@@ -1119,13 +1109,13 @@ SELECT count(compress_chunk(c)) FROM show_chunks('multi_unique') c;
\set ON_ERROR_STOP 0
-- all INSERTS should fail with constraint violation
BEGIN; INSERT INTO multi_unique VALUES('2024-01-01', 0, 0, 1.0); ROLLBACK;
-ERROR: duplicate key value violates unique constraint "76_1_multi_unique_time_u1_key"
+ERROR: duplicate key value violates unique constraint "75_1_multi_unique_time_u1_key"
DETAIL: Key ("time", u1)=(Mon Jan 01 00:00:00 2024 PST, 0) already exists.
BEGIN; INSERT INTO multi_unique VALUES('2024-01-01', 0, 1, 1.0); ROLLBACK;
-ERROR: duplicate key value violates unique constraint "76_1_multi_unique_time_u1_key"
+ERROR: duplicate key value violates unique constraint "75_1_multi_unique_time_u1_key"
DETAIL: Key ("time", u1)=(Mon Jan 01 00:00:00 2024 PST, 0) already exists.
BEGIN; INSERT INTO multi_unique VALUES('2024-01-01', 1, 0, 1.0); ROLLBACK;
-ERROR: duplicate key value violates unique constraint "76_2_multi_unique_time_u2_key"
+ERROR: duplicate key value violates unique constraint "75_2_multi_unique_time_u2_key"
DETAIL: Key ("time", u2)=(Mon Jan 01 00:00:00 2024 PST, 0) already exists.
\set ON_ERROR_STOP 1
DROP TABLE multi_unique;
@@ -1149,7 +1139,7 @@ SELECT count(compress_chunk(c)) FROM show_chunks('unique_null') c;
\set ON_ERROR_STOP 0
-- all INSERTS should fail with constraint violation
BEGIN; INSERT INTO unique_null VALUES('2024-01-01', 0, 0, 1.0); ROLLBACK;
-ERROR: duplicate key value violates unique constraint "78_3_unique_null_time_u1_u2_key"
+ERROR: duplicate key value violates unique constraint "77_3_unique_null_time_u1_u2_key"
\set ON_ERROR_STOP 1
-- neither of these should need to decompress
:ANALYZE INSERT INTO unique_null VALUES('2024-01-01', NULL, 1, 1.0);
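
The substantive change throughout compression_insert.out is the plan shape: ordered scans of a compressed chunk now use an Index Scan Backward on its (_ts_meta_min_1, _ts_meta_max_1) index instead of a Sort over a Seq Scan; the remaining diffs are chunk renumbering. A quick way to reproduce the plan shape; the test's :PREFIX macro is assumed here to expand to roughly this EXPLAIN form:

EXPLAIN (COSTS OFF) SELECT * FROM test_ordering ORDER BY 1;
-- Expect DecompressChunk fed by an Index Scan Backward on
-- compress_hyper_*__ts_meta_min_1__ts_meta_max_1_idx rather than Sort -> Seq Scan.
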
4 changes: 2 additions & 2 deletions tsl/test/expected/compression_qualpushdown.out
@@ -135,9 +135,9 @@ order by factorid, end_dt;
Output: _hyper_3_4_chunk.factorid, _hyper_3_4_chunk.end_dt, _hyper_3_4_chunk.logret
Filter: ((_hyper_3_4_chunk.end_dt >= '12-10-2012'::date) AND (_hyper_3_4_chunk.end_dt <= '12-11-2012'::date))
Vectorized Filter: (_hyper_3_4_chunk.fmid = 56)
-      ->  Seq Scan on _timescaledb_internal.compress_hyper_4_5_chunk
+      ->  Index Scan using compress_hyper_4_5_chunk__ts_meta_min_1__ts_meta_max_1_idx on _timescaledb_internal.compress_hyper_4_5_chunk
             Output: compress_hyper_4_5_chunk._ts_meta_count, compress_hyper_4_5_chunk.fmid, compress_hyper_4_5_chunk.factorid, compress_hyper_4_5_chunk.start_dt, compress_hyper_4_5_chunk._ts_meta_min_1, compress_hyper_4_5_chunk._ts_meta_max_1, compress_hyper_4_5_chunk.end_dt, compress_hyper_4_5_chunk.interval_number, compress_hyper_4_5_chunk.logret, compress_hyper_4_5_chunk.knowledge_date
-            Filter: ((compress_hyper_4_5_chunk._ts_meta_max_1 >= '12-10-2012'::date) AND (compress_hyper_4_5_chunk._ts_meta_min_1 <= '12-11-2012'::date))
+            Index Cond: ((compress_hyper_4_5_chunk._ts_meta_min_1 <= '12-11-2012'::date) AND (compress_hyper_4_5_chunk._ts_meta_max_1 >= '12-10-2012'::date))
(10 rows)

--no pushdown here
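
In compression_qualpushdown.out the pushed-down min/max qual moves from a Filter over a Seq Scan to an Index Cond on the same metadata index. A hedged reconstruction of the kind of query being explained; the hypertable name is a placeholder, while the columns and constants come from the plan output above:

EXPLAIN (VERBOSE, COSTS OFF)
SELECT factorid, end_dt, logret
FROM metadata_pushdown_tab  -- placeholder name
WHERE fmid = 56
  AND end_dt BETWEEN '2012-12-10' AND '2012-12-11'
ORDER BY factorid, end_dt;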