diff --git a/.unreleased/pr_7521 b/.unreleased/pr_7521 new file mode 100644 index 00000000000..a230550a94c --- /dev/null +++ b/.unreleased/pr_7521 @@ -0,0 +1 @@ +Implements: #7521 Add optional `force` argument to `refresh_continuous_aggregate` diff --git a/sql/ddl_api.sql b/sql/ddl_api.sql index 7f07a1fd432..5048ddadfee 100644 --- a/sql/ddl_api.sql +++ b/sql/ddl_api.sql @@ -212,6 +212,7 @@ AS '@MODULE_PATHNAME@', 'ts_tablespace_show' LANGUAGE C VOLATILE STRICT; CREATE OR REPLACE PROCEDURE @extschema@.refresh_continuous_aggregate( continuous_aggregate REGCLASS, window_start "any", - window_end "any" + window_end "any", + force BOOLEAN = FALSE ) LANGUAGE C AS '@MODULE_PATHNAME@', 'ts_continuous_agg_refresh'; diff --git a/sql/updates/latest-dev.sql b/sql/updates/latest-dev.sql index d233b0ae5f8..5255e9e3903 100644 --- a/sql/updates/latest-dev.sql +++ b/sql/updates/latest-dev.sql @@ -114,3 +114,14 @@ CREATE FUNCTION @extschema@.hypertable_columnstore_stats (hypertable REGCLASS) STABLE STRICT AS 'SELECT * FROM @extschema@.hypertable_compression_stats($1)' SET search_path TO pg_catalog, pg_temp; + +-- Recreate `refresh_continuous_aggregate` procedure to add `force` argument +DROP PROCEDURE IF EXISTS @extschema@.refresh_continuous_aggregate (continuous_aggregate REGCLASS, window_start "any", window_end "any"); + +CREATE PROCEDURE @extschema@.refresh_continuous_aggregate( + continuous_aggregate REGCLASS, + window_start "any", + window_end "any", + force BOOLEAN = FALSE +) LANGUAGE C AS '@MODULE_PATHNAME@', 'ts_update_placeholder'; + diff --git a/sql/updates/reverse-dev.sql b/sql/updates/reverse-dev.sql index 6b75bc1c851..845fbe823c1 100644 --- a/sql/updates/reverse-dev.sql +++ b/sql/updates/reverse-dev.sql @@ -57,3 +57,11 @@ ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.chunk_columnstore_ DROP VIEW timescaledb_information.hypertable_columnstore_settings; DROP VIEW timescaledb_information.chunk_columnstore_settings; +-- Recreate `refresh_continuous_aggregate` procedure to remove the `force` argument +DROP PROCEDURE IF EXISTS @extschema@.refresh_continuous_aggregate (continuous_aggregate REGCLASS, window_start "any", window_end "any", force BOOLEAN); + +CREATE PROCEDURE @extschema@.refresh_continuous_aggregate( + continuous_aggregate REGCLASS, + window_start "any", + window_end "any" +) LANGUAGE C AS '@MODULE_PATHNAME@', 'ts_continuous_agg_refresh'; diff --git a/tsl/src/bgw_policy/job.c b/tsl/src/bgw_policy/job.c index 706789e748f..f4a9dfdd966 100644 --- a/tsl/src/bgw_policy/job.c +++ b/tsl/src/bgw_policy/job.c @@ -377,7 +377,8 @@ policy_refresh_cagg_execute(int32 job_id, Jsonb *config) &policy_data.refresh_window, CAGG_REFRESH_POLICY, policy_data.start_is_null, - policy_data.end_is_null); + policy_data.end_is_null, + false); return true; } diff --git a/tsl/src/continuous_aggs/create.c b/tsl/src/continuous_aggs/create.c index f290cd38956..7c7bd044010 100644 --- a/tsl/src/continuous_aggs/create.c +++ b/tsl/src/continuous_aggs/create.c @@ -940,7 +940,12 @@ tsl_process_continuous_agg_viewstmt(Node *node, const char *query_string, void * refresh_window.start = cagg_get_time_min(cagg); refresh_window.end = ts_time_get_noend_or_max(refresh_window.type); - continuous_agg_refresh_internal(cagg, &refresh_window, CAGG_REFRESH_CREATION, true, true); + continuous_agg_refresh_internal(cagg, + &refresh_window, + CAGG_REFRESH_CREATION, + true, + true, + false); } return DDL_DONE; diff --git a/tsl/src/continuous_aggs/invalidation.c b/tsl/src/continuous_aggs/invalidation.c index 
339e6997b89..374a95ef8a7 100644 --- a/tsl/src/continuous_aggs/invalidation.c +++ b/tsl/src/continuous_aggs/invalidation.c @@ -140,7 +140,8 @@ static Invalidation cut_cagg_invalidation_and_compute_remainder( const CaggInvalidationState *state, const InternalTimeRange *refresh_window, const Invalidation *mergedentry, const Invalidation *current_remainder); static void clear_cagg_invalidations_for_refresh(const CaggInvalidationState *state, - const InternalTimeRange *refresh_window); + const InternalTimeRange *refresh_window, + const bool force); static void invalidation_state_init(CaggInvalidationState *state, const ContinuousAgg *cagg, Oid dimtype, const CaggsInfo *all_caggs); static void invalidation_state_cleanup(const CaggInvalidationState *state); @@ -878,7 +879,7 @@ cut_cagg_invalidation_and_compute_remainder(const CaggInvalidationState *state, */ static void clear_cagg_invalidations_for_refresh(const CaggInvalidationState *state, - const InternalTimeRange *refresh_window) + const InternalTimeRange *refresh_window, const bool force) { ScanIterator iterator; int32 cagg_hyper_id = state->mat_hypertable_id; @@ -892,6 +893,20 @@ clear_cagg_invalidations_for_refresh(const CaggInvalidationState *state, MemoryContextReset(state->per_tuple_mctx); + /* Force refresh within the entire window */ + if (force) + { + Invalidation logentry; + + logentry.hyper_id = cagg_hyper_id; + logentry.lowest_modified_value = refresh_window->start; + logentry.greatest_modified_value = refresh_window->end; + logentry.is_modified = false; + ItemPointerSet(&logentry.tid, InvalidBlockNumber, 0); + + save_invalidation_for_refresh(state, &logentry); + } + /* Process all invalidations for the continuous aggregate */ ts_scanner_foreach(&iterator) { @@ -981,7 +996,7 @@ InvalidationStore * invalidation_process_cagg_log(const ContinuousAgg *cagg, const InternalTimeRange *refresh_window, const CaggsInfo *all_caggs_info, const long max_materializations, bool *do_merged_refresh, InternalTimeRange *ret_merged_refresh_window, - const CaggRefreshCallContext callctx) + const CaggRefreshCallContext callctx, const bool force) { CaggInvalidationState state; InvalidationStore *store = NULL; @@ -991,7 +1006,7 @@ invalidation_process_cagg_log(const ContinuousAgg *cagg, const InternalTimeRange invalidation_state_init(&state, cagg, refresh_window->type, all_caggs_info); state.invalidations = tuplestore_begin_heap(false, false, work_mem); - clear_cagg_invalidations_for_refresh(&state, refresh_window); + clear_cagg_invalidations_for_refresh(&state, refresh_window, force); count = tuplestore_tuple_count(state.invalidations); if (count == 0) diff --git a/tsl/src/continuous_aggs/invalidation.h b/tsl/src/continuous_aggs/invalidation.h index 71b8b07f7e5..3fa324eb5bd 100644 --- a/tsl/src/continuous_aggs/invalidation.h +++ b/tsl/src/continuous_aggs/invalidation.h @@ -49,6 +49,6 @@ extern InvalidationStore * invalidation_process_cagg_log(const ContinuousAgg *cagg, const InternalTimeRange *refresh_window, const CaggsInfo *all_caggs_info, const long max_materializations, bool *do_merged_refresh, InternalTimeRange *ret_merged_refresh_window, - const CaggRefreshCallContext callctx); + const CaggRefreshCallContext callctx, const bool force); extern void invalidation_store_free(InvalidationStore *store); diff --git a/tsl/src/continuous_aggs/refresh.c b/tsl/src/continuous_aggs/refresh.c index b6097dbc8c5..99f79675ca1 100644 --- a/tsl/src/continuous_aggs/refresh.c +++ b/tsl/src/continuous_aggs/refresh.c @@ -77,7 +77,7 @@ static void 
emit_up_to_date_notice(const ContinuousAgg *cagg, const CaggRefreshC static bool process_cagg_invalidations_and_refresh(const ContinuousAgg *cagg, const InternalTimeRange *refresh_window, const CaggRefreshCallContext callctx, - int32 chunk_id); + int32 chunk_id, const bool force); static void fill_bucket_offset_origin(const ContinuousAgg *cagg, const InternalTimeRange *const refresh_window, NullableDatum *offset, NullableDatum *origin); @@ -628,6 +628,7 @@ Datum continuous_agg_refresh(PG_FUNCTION_ARGS) { Oid cagg_relid = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0); + bool force = PG_ARGISNULL(3) ? false : PG_GETARG_BOOL(3); ContinuousAgg *cagg; InternalTimeRange refresh_window = { .type = InvalidOid, @@ -659,7 +660,8 @@ continuous_agg_refresh(PG_FUNCTION_ARGS) &refresh_window, CAGG_REFRESH_WINDOW, PG_ARGISNULL(1), - PG_ARGISNULL(2)); + PG_ARGISNULL(2), + force); PG_RETURN_VOID(); } @@ -703,7 +705,8 @@ continuous_agg_calculate_merged_refresh_window(const ContinuousAgg *cagg, static bool process_cagg_invalidations_and_refresh(const ContinuousAgg *cagg, const InternalTimeRange *refresh_window, - const CaggRefreshCallContext callctx, int32 chunk_id) + const CaggRefreshCallContext callctx, int32 chunk_id, + const bool force) { InvalidationStore *invalidations; Oid hyper_relid = ts_hypertable_id_to_relid(cagg->data.mat_hypertable_id, false); @@ -727,7 +730,8 @@ process_cagg_invalidations_and_refresh(const ContinuousAgg *cagg, ts_guc_cagg_max_individual_materializations, &do_merged_refresh, &merged_refresh_window, - callctx); + callctx, + force); if (invalidations != NULL || do_merged_refresh) { @@ -759,7 +763,7 @@ void continuous_agg_refresh_internal(const ContinuousAgg *cagg, const InternalTimeRange *refresh_window_arg, const CaggRefreshCallContext callctx, const bool start_isnull, - const bool end_isnull) + const bool end_isnull, const bool force) { int32 mat_id = cagg->data.mat_hypertable_id; InternalTimeRange refresh_window = *refresh_window_arg; @@ -881,7 +885,11 @@ continuous_agg_refresh_internal(const ContinuousAgg *cagg, cagg = ts_continuous_agg_find_by_mat_hypertable_id(mat_id, false); - if (!process_cagg_invalidations_and_refresh(cagg, &refresh_window, callctx, INVALID_CHUNK_ID)) + if (!process_cagg_invalidations_and_refresh(cagg, + &refresh_window, + callctx, + INVALID_CHUNK_ID, + force)) emit_up_to_date_notice(cagg, callctx); /* Restore search_path */ diff --git a/tsl/src/continuous_aggs/refresh.h b/tsl/src/continuous_aggs/refresh.h index c050dfefb32..f789dccd9a9 100644 --- a/tsl/src/continuous_aggs/refresh.h +++ b/tsl/src/continuous_aggs/refresh.h @@ -20,4 +20,5 @@ extern void continuous_agg_calculate_merged_refresh_window( extern void continuous_agg_refresh_internal(const ContinuousAgg *cagg, const InternalTimeRange *refresh_window, const CaggRefreshCallContext callctx, - const bool start_isnull, const bool end_isnull); + const bool start_isnull, const bool end_isnull, + const bool force); diff --git a/tsl/test/expected/cagg_refresh.out b/tsl/test/expected/cagg_refresh.out index 291ce335272..5b753f76da3 100644 --- a/tsl/test/expected/cagg_refresh.out +++ b/tsl/test/expected/cagg_refresh.out @@ -223,6 +223,49 @@ psql:include/cagg_refresh_common.sql:105: ERROR: invalid time argument type "te CALL refresh_continuous_aggregate('daily_temp', 0, '2020-05-01'); psql:include/cagg_refresh_common.sql:106: ERROR: invalid time argument type "integer" \set ON_ERROR_STOP 1 +-- Test forceful refresh.
Here we simulate the situation that we've seen +-- with tiered data when `timescaledb.enable_tiered_reads` was disabled at the +-- server level. In that case we would not see materialized tiered data and +-- we wouldn't be able to re-materialize the data using a normal refresh call +-- because it would skip previously materialized ranges, but it should be +-- possible with the `force=>true` parameter. To simulate this use case we +-- clear the materialization hypertable and forcefully re-materialize it. +SELECT ht.schema_name || '.' || ht.table_name AS mat_ht, mat_hypertable_id FROM _timescaledb_catalog.continuous_agg cagg +JOIN _timescaledb_catalog.hypertable ht ON cagg.mat_hypertable_id = ht.id +WHERE user_view_name = 'daily_temp' \gset +-- Delete the data from the materialization hypertable +DELETE FROM :mat_ht; +-- Run a regular refresh; it should not touch the previously materialized range +-- CALL refresh_continuous_aggregate('daily_temp', '2020-05-04 00:00 UTC', '2020-05-05 00:00 UTC'); +CALL refresh_continuous_aggregate('daily_temp', '2020-05-02', '2020-05-05 17:00'); +psql:include/cagg_refresh_common.sql:125: NOTICE: continuous aggregate "daily_temp" is already up-to-date +SELECT * FROM daily_temp +ORDER BY day DESC, device; + day | device | avg_temp +-----+--------+---------- +(0 rows) + +-- Run it again with force=>true; the data should be re-materialized +-- CALL refresh_continuous_aggregate('daily_temp', '2020-05-04 00:00 UTC', '2020-05-05 00:00 UTC', force=>true); +CALL refresh_continuous_aggregate('daily_temp', '2020-05-02', '2020-05-05 17:00', force=>true); +SELECT * FROM daily_temp +ORDER BY day DESC, device; + day | device | avg_temp +------------------------------+--------+------------------ + Mon May 04 17:00:00 2020 PDT | 0 | 19.3846153846154 + Mon May 04 17:00:00 2020 PDT | 1 | 16.5555555555556 + Mon May 04 17:00:00 2020 PDT | 2 | 18.5714285714286 + Mon May 04 17:00:00 2020 PDT | 3 | 23.5714285714286 + Sun May 03 17:00:00 2020 PDT | 0 | 15.7647058823529 + Sun May 03 17:00:00 2020 PDT | 1 | 24.3142857142857 + Sun May 03 17:00:00 2020 PDT | 2 | 14.8205128205128 + Sun May 03 17:00:00 2020 PDT | 3 | 18.1111111111111 + Sat May 02 17:00:00 2020 PDT | 0 | 17 + Sat May 02 17:00:00 2020 PDT | 1 | 18.75 + Sat May 02 17:00:00 2020 PDT | 2 | 20 + Sat May 02 17:00:00 2020 PDT | 3 | 21.5217391304348 +(12 rows) + -- Test different time types CREATE TABLE conditions_date (time date NOT NULL, device int, temp float); SELECT create_hypertable('conditions_date', 'time'); @@ -268,7 +311,7 @@ AS SELECT time_bucket(SMALLINT '20', time) AS bucket, device, avg(temp) AS avg_temp FROM conditions_smallint c GROUP BY 1,2 WITH NO DATA; -psql:include/cagg_refresh_common.sql:150: ERROR: custom time function required on hypertable "conditions_smallint" +psql:include/cagg_refresh_common.sql:175: ERROR: custom time function required on hypertable "conditions_smallint" \set ON_ERROR_STOP 1 SELECT set_integer_now_func('conditions_smallint', 'smallint_now'); set_integer_now_func @@ -423,7 +466,7 @@ AS SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp FROM conditions GROUP BY 1,2 WITH DATA; -psql:include/cagg_refresh_common.sql:255: NOTICE: refreshing continuous aggregate "weekly_temp_with_data" +psql:include/cagg_refresh_common.sql:280: NOTICE: refreshing continuous aggregate "weekly_temp_with_data" SELECT * FROM weekly_temp_without_data; day | device | avg_temp -----+--------+---------- @@ -445,7 +488,7 @@ SELECT * FROM weekly_temp_with_data ORDER BY 1,2; \set ON_ERROR_STOP 0 -- REFRESH
MATERIALIZED VIEW is blocked on continuous aggregates REFRESH MATERIALIZED VIEW weekly_temp_without_data; -psql:include/cagg_refresh_common.sql:262: ERROR: operation not supported on continuous aggregate +psql:include/cagg_refresh_common.sql:287: ERROR: operation not supported on continuous aggregate -- These should fail since we do not allow refreshing inside a -- transaction, not even as part of CREATE MATERIALIZED VIEW. DO LANGUAGE PLPGSQL $$ BEGIN @@ -457,7 +500,7 @@ SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp FROM conditions GROUP BY 1,2 WITH DATA; END $$; -psql:include/cagg_refresh_common.sql:274: ERROR: CREATE MATERIALIZED VIEW ... WITH DATA cannot be executed from a function +psql:include/cagg_refresh_common.sql:299: ERROR: CREATE MATERIALIZED VIEW ... WITH DATA cannot be executed from a function BEGIN; CREATE MATERIALIZED VIEW weekly_conditions WITH (timescaledb.continuous, @@ -466,7 +509,7 @@ AS SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp FROM conditions GROUP BY 1,2 WITH DATA; -psql:include/cagg_refresh_common.sql:283: ERROR: CREATE MATERIALIZED VIEW ... WITH DATA cannot run inside a transaction block +psql:include/cagg_refresh_common.sql:308: ERROR: CREATE MATERIALIZED VIEW ... WITH DATA cannot run inside a transaction block COMMIT; \set ON_ERROR_STOP 1 -- This should not fail since we do not refresh the continuous diff --git a/tsl/test/expected/cagg_refresh_using_merge.out b/tsl/test/expected/cagg_refresh_using_merge.out index 6ad8fa3a46b..8d6f3b3d486 100644 --- a/tsl/test/expected/cagg_refresh_using_merge.out +++ b/tsl/test/expected/cagg_refresh_using_merge.out @@ -224,6 +224,49 @@ psql:include/cagg_refresh_common.sql:105: ERROR: invalid time argument type "te CALL refresh_continuous_aggregate('daily_temp', 0, '2020-05-01'); psql:include/cagg_refresh_common.sql:106: ERROR: invalid time argument type "integer" \set ON_ERROR_STOP 1 +-- Test forceful refresh. Here we simulate the situation that we've seen +-- with tiered data when `timescaledb.enable_tiered_reads` was disabled at the +-- server level. In that case we would not see materialized tiered data and +-- we wouldn't be able to re-materialize the data using a normal refresh call +-- because it would skip previously materialized ranges, but it should be +-- possible with the `force=>true` parameter. To simulate this use case we +-- clear the materialization hypertable and forcefully re-materialize it. +SELECT ht.schema_name || '.'
|| ht.table_name AS mat_ht, mat_hypertable_id FROM _timescaledb_catalog.continuous_agg cagg +JOIN _timescaledb_catalog.hypertable ht ON cagg.mat_hypertable_id = ht.id +WHERE user_view_name = 'daily_temp' \gset +-- Delete the data from the materialization hypertable +DELETE FROM :mat_ht; +-- Run a regular refresh; it should not touch the previously materialized range +-- CALL refresh_continuous_aggregate('daily_temp', '2020-05-04 00:00 UTC', '2020-05-05 00:00 UTC'); +CALL refresh_continuous_aggregate('daily_temp', '2020-05-02', '2020-05-05 17:00'); +psql:include/cagg_refresh_common.sql:125: NOTICE: continuous aggregate "daily_temp" is already up-to-date +SELECT * FROM daily_temp +ORDER BY day DESC, device; + day | device | avg_temp +-----+--------+---------- +(0 rows) + +-- Run it again with force=>true; the data should be re-materialized +-- CALL refresh_continuous_aggregate('daily_temp', '2020-05-04 00:00 UTC', '2020-05-05 00:00 UTC', force=>true); +CALL refresh_continuous_aggregate('daily_temp', '2020-05-02', '2020-05-05 17:00', force=>true); +SELECT * FROM daily_temp +ORDER BY day DESC, device; + day | device | avg_temp +------------------------------+--------+------------------ + Mon May 04 17:00:00 2020 PDT | 0 | 19.3846153846154 + Mon May 04 17:00:00 2020 PDT | 1 | 16.5555555555556 + Mon May 04 17:00:00 2020 PDT | 2 | 18.5714285714286 + Mon May 04 17:00:00 2020 PDT | 3 | 23.5714285714286 + Sun May 03 17:00:00 2020 PDT | 0 | 15.7647058823529 + Sun May 03 17:00:00 2020 PDT | 1 | 24.3142857142857 + Sun May 03 17:00:00 2020 PDT | 2 | 14.8205128205128 + Sun May 03 17:00:00 2020 PDT | 3 | 18.1111111111111 + Sat May 02 17:00:00 2020 PDT | 0 | 17 + Sat May 02 17:00:00 2020 PDT | 1 | 18.75 + Sat May 02 17:00:00 2020 PDT | 2 | 20 + Sat May 02 17:00:00 2020 PDT | 3 | 21.5217391304348 +(12 rows) + -- Test different time types CREATE TABLE conditions_date (time date NOT NULL, device int, temp float); SELECT create_hypertable('conditions_date', 'time'); @@ -269,7 +312,7 @@ AS SELECT time_bucket(SMALLINT '20', time) AS bucket, device, avg(temp) AS avg_temp FROM conditions_smallint c GROUP BY 1,2 WITH NO DATA; -psql:include/cagg_refresh_common.sql:150: ERROR: custom time function required on hypertable "conditions_smallint" +psql:include/cagg_refresh_common.sql:175: ERROR: custom time function required on hypertable "conditions_smallint" \set ON_ERROR_STOP 1 SELECT set_integer_now_func('conditions_smallint', 'smallint_now'); set_integer_now_func @@ -424,7 +467,7 @@ AS SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp FROM conditions GROUP BY 1,2 WITH DATA; -psql:include/cagg_refresh_common.sql:255: NOTICE: refreshing continuous aggregate "weekly_temp_with_data" +psql:include/cagg_refresh_common.sql:280: NOTICE: refreshing continuous aggregate "weekly_temp_with_data" SELECT * FROM weekly_temp_without_data; day | device | avg_temp -----+--------+---------- @@ -446,7 +489,7 @@ SELECT * FROM weekly_temp_with_data ORDER BY 1,2; \set ON_ERROR_STOP 0 -- REFRESH MATERIALIZED VIEW is blocked on continuous aggregates REFRESH MATERIALIZED VIEW weekly_temp_without_data; -psql:include/cagg_refresh_common.sql:262: ERROR: operation not supported on continuous aggregate +psql:include/cagg_refresh_common.sql:287: ERROR: operation not supported on continuous aggregate -- These should fail since we do not allow refreshing inside a -- transaction, not even as part of CREATE MATERIALIZED VIEW.
DO LANGUAGE PLPGSQL $$ BEGIN @@ -458,7 +501,7 @@ SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp FROM conditions GROUP BY 1,2 WITH DATA; END $$; -psql:include/cagg_refresh_common.sql:274: ERROR: CREATE MATERIALIZED VIEW ... WITH DATA cannot be executed from a function +psql:include/cagg_refresh_common.sql:299: ERROR: CREATE MATERIALIZED VIEW ... WITH DATA cannot be executed from a function BEGIN; CREATE MATERIALIZED VIEW weekly_conditions WITH (timescaledb.continuous, @@ -467,7 +510,7 @@ AS SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp FROM conditions GROUP BY 1,2 WITH DATA; -psql:include/cagg_refresh_common.sql:283: ERROR: CREATE MATERIALIZED VIEW ... WITH DATA cannot run inside a transaction block +psql:include/cagg_refresh_common.sql:308: ERROR: CREATE MATERIALIZED VIEW ... WITH DATA cannot run inside a transaction block COMMIT; \set ON_ERROR_STOP 1 -- This should not fail since we do not refresh the continuous diff --git a/tsl/test/expected/chunk_utils_internal.out b/tsl/test/expected/chunk_utils_internal.out index bcaead5d1eb..520484a8651 100644 --- a/tsl/test/expected/chunk_utils_internal.out +++ b/tsl/test/expected/chunk_utils_internal.out @@ -785,6 +785,40 @@ SET timescaledb.enable_tiered_reads=true; Index Cond: (timec < 'Sun Jan 01 01:00:00 2023 PST'::timestamp with time zone) (4 rows) +-- Test forceful refresh. Here we simulate the situation that we've seen +-- with tiered data when `timescaledb.enable_tiered_reads` was disabled at the +-- server level. In that case we would not see materialized tiered data and +-- we wouldn't be able to re-materialize the data using a normal refresh call +-- because it would skip previously materialized ranges, but it should be +-- possible with the `force=>true` parameter.
+CREATE MATERIALIZED VIEW ht_try_weekly +WITH (timescaledb.continuous) AS +SELECT time_bucket(interval '1 week', timec) AS ts_bucket, avg(value) +FROM ht_try +GROUP BY 1 +WITH NO DATA; +SELECT * FROM ht_try_weekly; + ts_bucket | avg +-----------+----- +(0 rows) + +SET timescaledb.enable_tiered_reads=false; +CALL refresh_continuous_aggregate('ht_try_weekly', '2019-12-29', '2020-01-10', force=>false); +SELECT * FROM ht_try_weekly; + ts_bucket | avg +-----------+----- +(0 rows) + +SET timescaledb.enable_tiered_reads=true; +CALL refresh_continuous_aggregate('ht_try_weekly', '2019-12-29', '2020-01-10', force=>true); +SELECT * FROM ht_try_weekly; + ts_bucket | avg +------------------------------+----------------------- + Sun Dec 29 16:00:00 2019 PST | 1000.0000000000000000 +(1 row) + +DROP MATERIALIZED VIEW ht_try_weekly; +NOTICE: drop cascades to table _timescaledb_internal._hyper_6_12_chunk -- This test verifies that a bugfix regarding the way `ROWID_VAR`s are adjusted -- in the chunks' targetlists on DELETE/UPDATE works (including partially -- compressed chunks) @@ -797,7 +831,7 @@ SELECT compress_chunk(show_chunks('ht_try', newer_than => '2021-01-01'::timestam compress_chunk ----------------------------------------- _timescaledb_internal._hyper_5_10_chunk - _timescaledb_internal._hyper_5_12_chunk + _timescaledb_internal._hyper_5_13_chunk (2 rows) INSERT INTO ht_try VALUES ('2021-06-05 01:00', 10, 222); @@ -900,7 +934,7 @@ Indexes: Triggers: ts_insert_blocker BEFORE INSERT ON ht_try FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker() Child tables: _timescaledb_internal._hyper_5_10_chunk, - _timescaledb_internal._hyper_5_12_chunk + _timescaledb_internal._hyper_5_13_chunk -- verify that still can read from the table after catalog manipulations EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT * FROM ht_try; @@ -908,10 +942,10 @@ EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT * FROM ht_try; ---------------------------------------------------------------------------------- Append (actual rows=3 loops=1) -> Custom Scan (DecompressChunk) on _hyper_5_10_chunk (actual rows=1 loops=1) - -> Seq Scan on compress_hyper_6_13_chunk (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_5_12_chunk (actual rows=1 loops=1) - -> Seq Scan on compress_hyper_6_14_chunk (actual rows=1 loops=1) - -> Seq Scan on _hyper_5_12_chunk (actual rows=1 loops=1) + -> Seq Scan on compress_hyper_7_14_chunk (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_5_13_chunk (actual rows=1 loops=1) + -> Seq Scan on compress_hyper_7_15_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_5_13_chunk (actual rows=1 loops=1) (6 rows) ROLLBACK; @@ -964,7 +998,7 @@ RESTRICT SELECT create_hypertable('hyper_constr', 'time', chunk_time_interval => 10); create_hypertable --------------------------- - (7,public,hyper_constr,t) + (8,public,hyper_constr,t) (1 row) INSERT INTO hyper_constr VALUES( 10, 200, 22, 1, 111, 44); @@ -997,7 +1031,7 @@ WHERE hypertable_id IN (SELECT id from _timescaledb_catalog.hypertable ORDER BY table_name; table_name | status | osm_chunk --------------------+--------+----------- - _hyper_7_15_chunk | 0 | f + _hyper_8_16_chunk | 0 | f child_hyper_constr | 0 | t (2 rows) @@ -1085,15 +1119,15 @@ where hypertable_id = (Select id from _timescaledb_catalog.hypertable where tabl ORDER BY id; id | table_name ----+-------------------- - 15 | _hyper_7_15_chunk - 16 | child_hyper_constr + 16 | _hyper_8_16_chunk + 17 | child_hyper_constr (2 rows) -- 
show_chunks will not show the OSM chunk which is visible via the above query SELECT show_chunks('hyper_constr'); show_chunks ----------------------------------------- - _timescaledb_internal._hyper_7_15_chunk + _timescaledb_internal._hyper_8_16_chunk (1 row) ROLLBACK; @@ -1125,7 +1159,7 @@ CREATE TABLE test1.copy_test ( SELECT create_hypertable('test1.copy_test', 'time', chunk_time_interval => interval '1 day'); create_hypertable ----------------------- - (8,test1,copy_test,t) + (9,test1,copy_test,t) (1 row) COPY test1.copy_test FROM STDIN DELIMITER ','; @@ -1146,13 +1180,13 @@ SELECT table_name, status FROM _timescaledb_catalog.chunk WHERE table_name = :'COPY_CHUNK_NAME'; table_name | status -------------------+-------- - _hyper_8_17_chunk | 4 + _hyper_9_18_chunk | 4 (1 row) \set ON_ERROR_STOP 0 -- Copy should fail because one of che chunks is frozen COPY test1.copy_test FROM STDIN DELIMITER ','; -ERROR: cannot INSERT into frozen chunk "_hyper_8_17_chunk" +ERROR: cannot INSERT into frozen chunk "_hyper_9_18_chunk" \set ON_ERROR_STOP 1 -- Count existing rows SELECT COUNT(*) FROM test1.copy_test; @@ -1166,13 +1200,13 @@ SELECT table_name, status FROM _timescaledb_catalog.chunk WHERE table_name = :'COPY_CHUNK_NAME'; table_name | status -------------------+-------- - _hyper_8_17_chunk | 4 + _hyper_9_18_chunk | 4 (1 row) \set ON_ERROR_STOP 0 -- Copy should fail because one of che chunks is frozen COPY test1.copy_test FROM STDIN DELIMITER ','; -ERROR: cannot INSERT into frozen chunk "_hyper_8_17_chunk" +ERROR: cannot INSERT into frozen chunk "_hyper_9_18_chunk" \set ON_ERROR_STOP 1 -- Count existing rows SELECT COUNT(*) FROM test1.copy_test; @@ -1193,7 +1227,7 @@ SELECT table_name, status FROM _timescaledb_catalog.chunk WHERE table_name = :'COPY_CHUNK_NAME'; table_name | status -------------------+-------- - _hyper_8_17_chunk | 0 + _hyper_9_18_chunk | 0 (1 row) -- Copy should work now @@ -1296,12 +1330,12 @@ WHERE ht.table_name LIKE 'osm%' ORDER BY 2,3; table_name | id | dimension_id | range_start | range_end ------------+----+--------------+---------------------+--------------------- - osm_int2 | 16 | 7 | 9223372036854775806 | 9223372036854775807 - osm_int4 | 17 | 8 | 9223372036854775806 | 9223372036854775807 - osm_int8 | 18 | 9 | 9223372036854775806 | 9223372036854775807 - osm_date | 19 | 10 | 9223372036854775806 | 9223372036854775807 - osm_ts | 20 | 11 | 9223372036854775806 | 9223372036854775807 - osm_tstz | 21 | 12 | 9223372036854775806 | 9223372036854775807 + osm_int2 | 17 | 8 | 9223372036854775806 | 9223372036854775807 + osm_int4 | 18 | 9 | 9223372036854775806 | 9223372036854775807 + osm_int8 | 19 | 10 | 9223372036854775806 | 9223372036854775807 + osm_date | 20 | 11 | 9223372036854775806 | 9223372036854775807 + osm_ts | 21 | 12 | 9223372036854775806 | 9223372036854775807 + osm_tstz | 22 | 13 | 9223372036854775806 | 9223372036854775807 (6 rows) -- test that correct slice is found and updated for table with multiple chunk constraints @@ -1314,8 +1348,8 @@ _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc WHERE c.h AND c.id = cc.chunk_id; id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk | chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name ----+---------------+-----------------------+--------------------+---------------------+---------+--------+-----------+----------+--------------------+-----------------------------+---------------------------- - 25 | 15 | _timescaledb_internal | 
_hyper_15_25_chunk | | f | 0 | f | 25 | | 25_5_test_multicon_time_key | test_multicon_time_key - 25 | 15 | _timescaledb_internal | _hyper_15_25_chunk | | f | 0 | f | 25 | 22 | constraint_22 | + 26 | 16 | _timescaledb_internal | _hyper_16_26_chunk | | f | 0 | f | 26 | | 26_5_test_multicon_time_key | test_multicon_time_key + 26 | 16 | _timescaledb_internal | _hyper_16_26_chunk | | f | 0 | f | 26 | 23 | constraint_23 | (2 rows) \c :TEST_DBNAME :ROLE_SUPERUSER ; @@ -1333,7 +1367,7 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id; chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end ----------+--------------------+--------+-----------+--------------------+------------------+------------------ - 25 | _hyper_15_25_chunk | 0 | t | 22 | 1577955600000000 | 1578128400000000 + 26 | _hyper_16_26_chunk | 0 | t | 23 | 1577955600000000 | 1578128400000000 (1 row) -- check that range was reset to default - infinity @@ -1361,7 +1395,7 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id; chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end ----------+--------------------+--------+-----------+--------------------+---------------------+--------------------- - 25 | _hyper_15_25_chunk | 0 | t | 22 | 9223372036854775806 | 9223372036854775807 + 26 | _hyper_16_26_chunk | 0 | t | 23 | 9223372036854775806 | 9223372036854775807 (1 row) -- TEST for orderedappend that depends on hypertable_osm_range_update functionality @@ -1386,9 +1420,9 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id; chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end ----------+-------------------------+--------+-----------+--------------------+---------------------+--------------------- - 26 | _hyper_16_26_chunk | 0 | f | 23 | 1577836800000000 | 1577923200000000 - 27 | _hyper_16_27_chunk | 0 | f | 24 | 1577923200000000 | 1578009600000000 - 28 | test_chunkapp_fdw_child | 0 | t | 25 | 9223372036854775806 | 9223372036854775807 + 27 | _hyper_17_27_chunk | 0 | f | 24 | 1577836800000000 | 1577923200000000 + 28 | _hyper_17_28_chunk | 0 | f | 25 | 1577923200000000 | 1578009600000000 + 29 | test_chunkapp_fdw_child | 0 | t | 26 | 9223372036854775806 | 9223372036854775807 (3 rows) -- attempt to update overlapping range, should fail @@ -1409,9 +1443,9 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id; chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end ----------+-------------------------+--------+-----------+--------------------+------------------+------------------ - 26 | _hyper_16_26_chunk | 0 | f | 23 | 1577836800000000 | 1577923200000000 - 27 | _hyper_16_27_chunk | 0 | f | 24 | 1577923200000000 | 1578009600000000 - 28 | test_chunkapp_fdw_child | 0 | t | 25 | 1578038400000000 | 1578124800000000 + 27 | _hyper_17_27_chunk | 0 | f | 24 | 1577836800000000 | 1577923200000000 + 28 | _hyper_17_28_chunk | 0 | f | 25 | 1577923200000000 | 1578009600000000 + 29 | 
test_chunkapp_fdw_child | 0 | t | 26 | 1578038400000000 | 1578124800000000 (3 rows) -- ordered append should be possible as ranges do not overlap @@ -1420,8 +1454,8 @@ WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_sl ------------------------------------------------------------------------------------------------- Custom Scan (ChunkAppend) on test_chunkapp Order: test_chunkapp."time" - -> Index Scan Backward using _hyper_16_26_chunk_test_chunkapp_time_idx on _hyper_16_26_chunk - -> Index Scan Backward using _hyper_16_27_chunk_test_chunkapp_time_idx on _hyper_16_27_chunk + -> Index Scan Backward using _hyper_17_27_chunk_test_chunkapp_time_idx on _hyper_17_27_chunk + -> Index Scan Backward using _hyper_17_28_chunk_test_chunkapp_time_idx on _hyper_17_28_chunk -> Foreign Scan on test_chunkapp_fdw_child (5 rows) @@ -1462,9 +1496,9 @@ SELECT _timescaledb_functions.hypertable_osm_range_update('test_chunkapp',empty: QUERY PLAN ------------------------------------------------------------------------------------------------- Merge Append - Sort Key: _hyper_16_26_chunk."time" - -> Index Scan Backward using _hyper_16_26_chunk_test_chunkapp_time_idx on _hyper_16_26_chunk - -> Index Scan Backward using _hyper_16_27_chunk_test_chunkapp_time_idx on _hyper_16_27_chunk + Sort Key: _hyper_17_27_chunk."time" + -> Index Scan Backward using _hyper_17_27_chunk_test_chunkapp_time_idx on _hyper_17_27_chunk + -> Index Scan Backward using _hyper_17_28_chunk_test_chunkapp_time_idx on _hyper_17_28_chunk -> Foreign Scan on test_chunkapp_fdw_child (5 rows) @@ -1481,9 +1515,9 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id; chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end ----------+-------------------------+--------+-----------+--------------------+---------------------+--------------------- - 26 | _hyper_16_26_chunk | 0 | f | 23 | 1577836800000000 | 1577923200000000 - 27 | _hyper_16_27_chunk | 0 | f | 24 | 1577923200000000 | 1578009600000000 - 28 | test_chunkapp_fdw_child | 0 | t | 25 | 9223372036854775806 | 9223372036854775807 + 27 | _hyper_17_27_chunk | 0 | f | 24 | 1577836800000000 | 1577923200000000 + 28 | _hyper_17_28_chunk | 0 | f | 25 | 1577923200000000 | 1578009600000000 + 29 | test_chunkapp_fdw_child | 0 | t | 26 | 9223372036854775806 | 9223372036854775807 (3 rows) -- but also, OSM chunk should be included in the scan, since range is invalid and chunk is not empty @@ -1491,10 +1525,10 @@ WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_sl QUERY PLAN ------------------------------------------------------------------------------------------------- Merge Append - Sort Key: _hyper_16_26_chunk."time" - -> Index Scan Backward using _hyper_16_26_chunk_test_chunkapp_time_idx on _hyper_16_26_chunk + Sort Key: _hyper_17_27_chunk."time" + -> Index Scan Backward using _hyper_17_27_chunk_test_chunkapp_time_idx on _hyper_17_27_chunk Index Cond: ("time" < 'Sun Jan 01 00:00:00 2023 PST'::timestamp with time zone) - -> Index Scan Backward using _hyper_16_27_chunk_test_chunkapp_time_idx on _hyper_16_27_chunk + -> Index Scan Backward using _hyper_17_28_chunk_test_chunkapp_time_idx on _hyper_17_28_chunk Index Cond: ("time" < 'Sun Jan 01 00:00:00 2023 PST'::timestamp with time zone) -> Foreign Scan on test_chunkapp_fdw_child (7 rows) @@ -1522,8 +1556,8 @@ SELECT 
_timescaledb_functions.hypertable_osm_range_update('test_chunkapp', NULL: ------------------------------------------------------------------------------------------------- Custom Scan (ChunkAppend) on test_chunkapp Order: test_chunkapp."time" - -> Index Scan Backward using _hyper_16_26_chunk_test_chunkapp_time_idx on _hyper_16_26_chunk - -> Index Scan Backward using _hyper_16_27_chunk_test_chunkapp_time_idx on _hyper_16_27_chunk + -> Index Scan Backward using _hyper_17_27_chunk_test_chunkapp_time_idx on _hyper_17_27_chunk + -> Index Scan Backward using _hyper_17_28_chunk_test_chunkapp_time_idx on _hyper_17_28_chunk -> Foreign Scan on test_chunkapp_fdw_child (5 rows) @@ -1540,9 +1574,9 @@ SELECT * FROM test_chunkapp ORDER BY 1; ------------------------------------------------------------------------------------------------- Custom Scan (ChunkAppend) on test_chunkapp Order: test_chunkapp."time" - -> Index Scan Backward using _hyper_16_26_chunk_test_chunkapp_time_idx on _hyper_16_26_chunk + -> Index Scan Backward using _hyper_17_27_chunk_test_chunkapp_time_idx on _hyper_17_27_chunk Index Cond: ("time" < 'Sun Jan 01 00:00:00 2023 PST'::timestamp with time zone) - -> Index Scan Backward using _hyper_16_27_chunk_test_chunkapp_time_idx on _hyper_16_27_chunk + -> Index Scan Backward using _hyper_17_28_chunk_test_chunkapp_time_idx on _hyper_17_28_chunk Index Cond: ("time" < 'Sun Jan 01 00:00:00 2023 PST'::timestamp with time zone) (6 rows) @@ -1579,7 +1613,7 @@ CREATE TABLE test2(time timestamptz not null, a int); SELECT create_hypertable('test2', 'time'); create_hypertable --------------------- - (17,public,test2,t) + (18,public,test2,t) (1 row) INSERT INTO test2 VALUES ('2020-01-01'::timestamptz, 1); @@ -1590,7 +1624,7 @@ psql:include/chunk_utils_internal_orderedappend.sql:138: NOTICE: default order SELECT compress_chunk(show_chunks('test2')); compress_chunk ------------------------------------------ - _timescaledb_internal._hyper_17_29_chunk + _timescaledb_internal._hyper_18_30_chunk (1 row) -- find internal compression table, call API function on it @@ -1599,7 +1633,7 @@ FROM _timescaledb_catalog.hypertable ht, _timescaledb_catalog.hypertable cht WHERE ht.table_name = 'test2' and cht.id = ht.compressed_hypertable_id \gset \set ON_ERROR_STOP 0 SELECT _timescaledb_functions.hypertable_osm_range_update(:'COMPRESSION_TBLNM'::regclass, '2020-01-01'::timestamptz); -psql:include/chunk_utils_internal_orderedappend.sql:145: ERROR: could not find time dimension for hypertable _timescaledb_internal._compressed_hypertable_18 +psql:include/chunk_utils_internal_orderedappend.sql:145: ERROR: could not find time dimension for hypertable _timescaledb_internal._compressed_hypertable_19 \set ON_ERROR_STOP 1 -- test wrong/incompatible data types with hypertable time dimension -- update range of int2 with int4 diff --git a/tsl/test/sql/chunk_utils_internal.sql b/tsl/test/sql/chunk_utils_internal.sql index 1ac6ed7c817..feeb83bc54b 100644 --- a/tsl/test/sql/chunk_utils_internal.sql +++ b/tsl/test/sql/chunk_utils_internal.sql @@ -419,6 +419,27 @@ SET timescaledb.enable_tiered_reads=true; :EXPLAIN SELECT * from ht_try WHERE timec > '2022-01-01 01:00'; :EXPLAIN SELECT * from ht_try WHERE timec < '2023-01-01 01:00'; +-- Test forceful refresh. Here we simulate the situation that we've seen +-- with tiered data when `timescaledb.enable_tiered_reads` was disabled at the +-- server level.
In that case we would not see materialized tiered data and +-- we wouldn't be able to re-materialize the data using a normal refresh call +-- because it would skip previously materialized ranges, but it should be +-- possible with the `force=>true` parameter. +CREATE MATERIALIZED VIEW ht_try_weekly +WITH (timescaledb.continuous) AS +SELECT time_bucket(interval '1 week', timec) AS ts_bucket, avg(value) +FROM ht_try +GROUP BY 1 +WITH NO DATA; +SELECT * FROM ht_try_weekly; +SET timescaledb.enable_tiered_reads=false; +CALL refresh_continuous_aggregate('ht_try_weekly', '2019-12-29', '2020-01-10', force=>false); +SELECT * FROM ht_try_weekly; +SET timescaledb.enable_tiered_reads=true; +CALL refresh_continuous_aggregate('ht_try_weekly', '2019-12-29', '2020-01-10', force=>true); +SELECT * FROM ht_try_weekly; +DROP MATERIALIZED VIEW ht_try_weekly; + -- This test verifies that a bugfix regarding the way `ROWID_VAR`s are adjusted -- in the chunks' targetlists on DELETE/UPDATE works (including partially -- compressed chunks) diff --git a/tsl/test/sql/include/cagg_refresh_common.sql b/tsl/test/sql/include/cagg_refresh_common.sql index 83400d9a245..88cc5b53fd2 100644 --- a/tsl/test/sql/include/cagg_refresh_common.sql +++ b/tsl/test/sql/include/cagg_refresh_common.sql @@ -106,6 +106,31 @@ CALL refresh_continuous_aggregate('daily_temp', '2020-05-01'::text, '2020-05-03' CALL refresh_continuous_aggregate('daily_temp', 0, '2020-05-01'); \set ON_ERROR_STOP 1 +-- Test forceful refresh. Here we simulate the situation that we've seen +-- with tiered data when `timescaledb.enable_tiered_reads` was disabled at the +-- server level. In that case we would not see materialized tiered data and +-- we wouldn't be able to re-materialize the data using a normal refresh call +-- because it would skip previously materialized ranges, but it should be +-- possible with the `force=>true` parameter. To simulate this use case we +-- clear the materialization hypertable and forcefully re-materialize it. +SELECT ht.schema_name || '.' || ht.table_name AS mat_ht, mat_hypertable_id FROM _timescaledb_catalog.continuous_agg cagg +JOIN _timescaledb_catalog.hypertable ht ON cagg.mat_hypertable_id = ht.id +WHERE user_view_name = 'daily_temp' \gset + +-- Delete the data from the materialization hypertable +DELETE FROM :mat_ht; + +-- Run a regular refresh; it should not touch the previously materialized range +-- CALL refresh_continuous_aggregate('daily_temp', '2020-05-04 00:00 UTC', '2020-05-05 00:00 UTC'); +CALL refresh_continuous_aggregate('daily_temp', '2020-05-02', '2020-05-05 17:00'); +SELECT * FROM daily_temp +ORDER BY day DESC, device; +-- Run it again with force=>true; the data should be re-materialized +-- CALL refresh_continuous_aggregate('daily_temp', '2020-05-04 00:00 UTC', '2020-05-05 00:00 UTC', force=>true); +CALL refresh_continuous_aggregate('daily_temp', '2020-05-02', '2020-05-05 17:00', force=>true); +SELECT * FROM daily_temp +ORDER BY day DESC, device; + -- Test different time types CREATE TABLE conditions_date (time date NOT NULL, device int, temp float); SELECT create_hypertable('conditions_date', 'time');
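
For reference, a minimal usage sketch of the new `force` argument introduced by this patch (illustrative SQL, not part of the diff; it reuses the `daily_temp` continuous aggregate and window values from the tests above):

-- Default call: `force` defaults to FALSE, so ranges that were already
-- materialized are skipped and existing callers are unaffected.
CALL refresh_continuous_aggregate('daily_temp', '2020-05-02', '2020-05-05 17:00');

-- Forced call: the whole window is re-materialized even if it was
-- materialized before, because the refresh queues an invalidation
-- spanning the entire window before processing the invalidation log.
CALL refresh_continuous_aggregate('daily_temp', '2020-05-02', '2020-05-05 17:00', force => true);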