From 50db3cc39f7a88149b4b2158bc9dd782a8214894 Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Sat, 28 Dec 2024 00:39:24 +0530 Subject: [PATCH 01/13] feat(releaser): update releaser workflow based on new enhancements (#6729) * feat(releaser): update releaser workflow based on new enhancements * ci(releaser): set release type to minor if run by cron schedule * feat(releaser): pass signoz project name for releaser --------- Signed-off-by: Prashant Shahi --- .github/workflows/releaser-signoz.yaml | 27 ++++++++++++++++++++++++++ .github/workflows/releaser.yaml | 16 --------------- 2 files changed, 27 insertions(+), 16 deletions(-) create mode 100644 .github/workflows/releaser-signoz.yaml delete mode 100644 .github/workflows/releaser.yaml diff --git a/.github/workflows/releaser-signoz.yaml b/.github/workflows/releaser-signoz.yaml new file mode 100644 index 0000000000..8b061b7227 --- /dev/null +++ b/.github/workflows/releaser-signoz.yaml @@ -0,0 +1,27 @@ +name: releaser-signoz + +on: + # schedule every wednesday 9:30 AM UTC (3pm IST) + schedule: + - cron: '30 9 * * 3' + + # allow manual triggering of the workflow by a maintainer with no inputs + workflow_dispatch: + inputs: + release_type: + description: "Type of the release" + type: choice + required: true + options: + - 'patch' + - 'minor' + - 'major' + +jobs: + signoz: + uses: signoz/primus.workflows/.github/workflows/releaser.yaml@main + secrets: inherit + with: + PRIMUS_REF: main + PROJECT_NAME: signoz + RELEASE_TYPE: ${{ inputs.release_type || 'minor' }} diff --git a/.github/workflows/releaser.yaml b/.github/workflows/releaser.yaml deleted file mode 100644 index 51e18cc263..0000000000 --- a/.github/workflows/releaser.yaml +++ /dev/null @@ -1,16 +0,0 @@ -name: releaser - -on: - # schedule every wednesday 9:30 AM UTC (3pm IST) - schedule: - - cron: '30 9 * * 3' - - # allow manual triggering of the workflow by a maintainer with no inputs - workflow_dispatch: {} - -jobs: - releaser: - uses: signoz/primus.workflows/.github/workflows/releaser-signoz.yaml@main - secrets: inherit - with: - PRIMUS_REF: main From f11161ddb8445d8a6e8896373375e7f8fe770d37 Mon Sep 17 00:00:00 2001 From: "primus-bot[bot]" <171087277+primus-bot[bot]@users.noreply.github.com> Date: Sat, 28 Dec 2024 01:07:49 +0530 Subject: [PATCH 02/13] chore(release): bump to v0.65.1 (#6731) #### Summary - Release SigNoz v0.65.1 - Bump SigNoz OTel Collector to v0.111.18 Created by [Primus-Bot](https://github.com/apps/primus-bot) --- .../clickhouse-setup/docker-compose.yaml | 8 ++-- .../clickhouse-setup/docker-compose-core.yaml | 40 +++++-------------- .../docker-compose-minimal.yaml | 10 ++--- .../docker-compose.testing.yaml | 8 ++-- 4 files changed, 23 insertions(+), 43 deletions(-) diff --git a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml index e23c9abd7c..e7d56c0194 100644 --- a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml @@ -130,7 +130,7 @@ services: restart_policy: condition: on-failure query-service: - image: signoz/query-service:0.65.0 + image: signoz/query-service:0.65.1 command: ["-config=/root/config/prometheus.yml", "--use-logs-new-schema=true", "--use-trace-new-schema=true"] # ports: # - "6060:6060" # pprof port @@ -158,7 +158,7 @@ services: condition: on-failure !!merge <<: *db-depend frontend: - image: signoz/frontend:0.65.0 + image: signoz/frontend:0.65.1 deploy: restart_policy: condition: on-failure @@ -170,7 +170,7 @@ 
services: volumes: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector: - image: signoz/signoz-otel-collector:0.111.16 + image: signoz/signoz-otel-collector:0.111.18 command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"] user: root # required for reading docker container logs volumes: @@ -202,7 +202,7 @@ services: - otel-collector-migrator - query-service otel-collector-migrator: - image: signoz/signoz-schema-migrator:0.111.16 + image: signoz/signoz-schema-migrator:0.111.18 deploy: restart_policy: condition: on-failure diff --git a/deploy/docker/clickhouse-setup/docker-compose-core.yaml b/deploy/docker/clickhouse-setup/docker-compose-core.yaml index b6a6fdacad..3cc2dd8a35 100644 --- a/deploy/docker/clickhouse-setup/docker-compose-core.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose-core.yaml @@ -1,8 +1,6 @@ version: "2.4" - include: - test-app-docker-compose.yaml - services: zookeeper-1: image: bitnami/zookeeper:3.7.1 @@ -20,7 +18,6 @@ services: # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888 - ALLOW_ANONYMOUS_LOGIN=yes - ZOO_AUTOPURGE_INTERVAL=1 - clickhouse: image: clickhouse/clickhouse-server:24.1.2-alpine container_name: signoz-clickhouse @@ -43,18 +40,10 @@ services: max-file: "3" healthcheck: # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'" - test: - [ - "CMD", - "wget", - "--spider", - "-q", - "0.0.0.0:8123/ping" - ] + test: ["CMD", "wget", "--spider", "-q", "0.0.0.0:8123/ping"] interval: 30s timeout: 5s retries: 3 - alertmanager: container_name: signoz-alertmanager image: signoz/alertmanager:0.23.7 @@ -67,33 +56,25 @@ services: command: - --queryService.url=http://query-service:8085 - --storage.path=/data - otel-collector-migrator: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.18} container_name: otel-migrator command: - - "sync" + - "sync" - "--dsn=tcp://clickhouse:9000" - - "--up=" + - "--up=" depends_on: clickhouse: condition: service_healthy - # clickhouse-2: - # condition: service_healthy - # clickhouse-3: - # condition: service_healthy - + # clickhouse-2: + # condition: service_healthy + # clickhouse-3: + # condition: service_healthy # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. 
Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` otel-collector: container_name: signoz-otel-collector - image: signoz/signoz-otel-collector:0.111.16 - command: - [ - "--config=/etc/otel-collector-config.yaml", - "--manager-config=/etc/manager-config.yaml", - "--copy-path=/var/tmp/collector-config.yaml", - "--feature-gates=-pkg.translator.prometheus.NormalizeName" - ] + image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.18} + command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"] # user: root # required for reading docker container logs volumes: - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml @@ -122,7 +103,6 @@ services: condition: service_completed_successfully query-service: condition: service_healthy - logspout: image: "gliderlabs/logspout:v3.2.14" container_name: signoz-logspout diff --git a/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml b/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml index 7df95924f3..0c8f5fe75f 100644 --- a/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml @@ -145,7 +145,7 @@ services: - --storage.path=/data # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` query-service: - image: signoz/query-service:${DOCKER_TAG:-0.65.0} + image: signoz/query-service:${DOCKER_TAG:-0.65.1} container_name: signoz-query-service command: ["-config=/root/config/prometheus.yml", "--use-logs-new-schema=true", "--use-trace-new-schema=true"] # ports: @@ -172,7 +172,7 @@ services: retries: 3 !!merge <<: *db-depend frontend: - image: signoz/frontend:${DOCKER_TAG:-0.65.0} + image: signoz/frontend:${DOCKER_TAG:-0.65.1} container_name: signoz-frontend restart: on-failure depends_on: @@ -183,7 +183,7 @@ services: volumes: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector-migrator-sync: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.18} container_name: otel-migrator-sync command: - "sync" @@ -197,7 +197,7 @@ services: # clickhouse-3: # condition: service_healthy otel-collector-migrator-async: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.18} container_name: otel-migrator-async command: - "async" @@ -213,7 +213,7 @@ services: # clickhouse-3: # condition: service_healthy otel-collector: - image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.16} + image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.18} container_name: signoz-otel-collector command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"] user: root # required for reading docker container logs diff --git a/deploy/docker/clickhouse-setup/docker-compose.testing.yaml b/deploy/docker/clickhouse-setup/docker-compose.testing.yaml index 5317b1a8ce..0e0e9fab66 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.testing.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.testing.yaml @@ -148,7 +148,7 @@ services: - --storage.path=/data # Notes for Maintainers/Contributors 
who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` query-service: - image: signoz/query-service:${DOCKER_TAG:-0.65.0} + image: signoz/query-service:${DOCKER_TAG:-0.65.1} container_name: signoz-query-service command: ["-config=/root/config/prometheus.yml", "-gateway-url=https://api.staging.signoz.cloud", "--use-logs-new-schema=true", "--use-trace-new-schema=true"] # ports: @@ -176,7 +176,7 @@ services: retries: 3 !!merge <<: *db-depend frontend: - image: signoz/frontend:${DOCKER_TAG:-0.65.0} + image: signoz/frontend:${DOCKER_TAG:-0.65.1} container_name: signoz-frontend restart: on-failure depends_on: @@ -187,7 +187,7 @@ services: volumes: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector-migrator: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.18} container_name: otel-migrator command: - "--dsn=tcp://clickhouse:9000" @@ -199,7 +199,7 @@ services: # clickhouse-3: # condition: service_healthy otel-collector: - image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.16} + image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.18} container_name: signoz-otel-collector command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"] user: root # required for reading docker container logs From 93e9d15004b2ea5bcf68a0d7c610d74aa7483e0f Mon Sep 17 00:00:00 2001 From: aniketio-ctrl Date: Sat, 28 Dec 2024 22:25:19 +0530 Subject: [PATCH 03/13] fix: removed caching for all other panel type for expression except time series (#6720) Co-authored-by: Aniket Co-authored-by: Srikanth Chekuri --- .../app/queryBuilder/query_builder.go | 3 + .../app/queryBuilder/query_builder_test.go | 81 +++++++++++++++++++ 2 files changed, 84 insertions(+) diff --git a/pkg/query-service/app/queryBuilder/query_builder.go b/pkg/query-service/app/queryBuilder/query_builder.go index e824c15ac3..9b5be469af 100644 --- a/pkg/query-service/app/queryBuilder/query_builder.go +++ b/pkg/query-service/app/queryBuilder/query_builder.go @@ -441,6 +441,9 @@ func (c *cacheKeyGenerator) GenerateKeys(params *v3.QueryRangeParamsV3) map[stri // Build keys for each expression for _, query := range params.CompositeQuery.BuilderQueries { if query.Expression != query.QueryName { + if params.Version != "v4" && params.CompositeQuery.PanelType != v3.PanelTypeGraph { + continue + } expression, _ := govaluate.NewEvaluableExpressionWithFunctions(query.Expression, EvalFuncs) if !isMetricExpression(expression, params) && !isLogExpression(expression, params) { diff --git a/pkg/query-service/app/queryBuilder/query_builder_test.go b/pkg/query-service/app/queryBuilder/query_builder_test.go index eba5a56868..36365060c3 100644 --- a/pkg/query-service/app/queryBuilder/query_builder_test.go +++ b/pkg/query-service/app/queryBuilder/query_builder_test.go @@ -1206,6 +1206,87 @@ func TestGenerateCacheKeysMetricsBuilder(t *testing.T) { "A": "source=metrics&step=60&aggregate=sum_rate&timeAggregation=&spaceAggregation=&aggregateAttribute=signoz_latency_bucket---false&filter-0=key:service_name---false,op:=,value:A&groupBy-0=service_name---false&groupBy-1=le---false&having-0=column:value,op:>,value:100", }, }, + { + name: "version=v3;panelType=value;dataSource=metrics;queryType=builder with expression", //not caching panel type value for v3 
+ query: &v3.QueryRangeParamsV3{ + Version: "v3", + CompositeQuery: &v3.CompositeQuery{ + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeValue, + FillGaps: false, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateOperator: v3.AggregateOperatorNoOp, + AggregateAttribute: v3.AttributeKey{ + Key: "system_memory_usage", + DataType: v3.AttributeKeyDataTypeFloat64, + Type: v3.AttributeKeyType("Gauge"), + IsColumn: true, + }, + TimeAggregation: v3.TimeAggregationUnspecified, + SpaceAggregation: v3.SpaceAggregationSum, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "state", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + Operator: v3.FilterOperatorEqual, + Value: "cached", + }, + }, + }, + Expression: "A", + Disabled: true, + StepInterval: 60, + }, + "B": { + QueryName: "B", + DataSource: v3.DataSourceMetrics, + AggregateOperator: v3.AggregateOperatorNoOp, + AggregateAttribute: v3.AttributeKey{ + Key: "system_memory_usage", + DataType: v3.AttributeKeyDataTypeFloat64, + Type: v3.AttributeKeyType("Gauge"), + IsColumn: true, + }, + TimeAggregation: v3.TimeAggregationUnspecified, + SpaceAggregation: v3.SpaceAggregationSum, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "state", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + Operator: v3.FilterOperatorEqual, + Value: "cached", + }, + }, + }, + Expression: "B", + Disabled: true, + StepInterval: 60, + }, + "F1": { + QueryName: "F1", + Expression: "A+B", + Disabled: false, + }, + }, + }, + }, + expectedCacheKeys: map[string]string{}, + }, { name: "version=v4;panelType=table;dataSource=metrics;queryType=builder", query: &v3.QueryRangeParamsV3{ From 196b17dd1e65b546b814405fd43f52c54071e5cf Mon Sep 17 00:00:00 2001 From: Prashant Shahi Date: Mon, 30 Dec 2024 10:19:09 +0530 Subject: [PATCH 04/13] ci(releaser): trigger charts releaser workflow on new release (#6732) ### Summary - GH workflow to trigger releaser workflow in charts repository on new SigNoz release Signed-off-by: Prashant Shahi --- ...{releaser-signoz.yaml => prereleaser.yaml} | 4 +- .github/workflows/releaser.yaml | 39 +++++++++++++++++++ 2 files changed, 41 insertions(+), 2 deletions(-) rename .github/workflows/{releaser-signoz.yaml => prereleaser.yaml} (84%) create mode 100644 .github/workflows/releaser.yaml diff --git a/.github/workflows/releaser-signoz.yaml b/.github/workflows/prereleaser.yaml similarity index 84% rename from .github/workflows/releaser-signoz.yaml rename to .github/workflows/prereleaser.yaml index 8b061b7227..c65beb3fd1 100644 --- a/.github/workflows/releaser-signoz.yaml +++ b/.github/workflows/prereleaser.yaml @@ -1,11 +1,11 @@ -name: releaser-signoz +name: prereleaser on: # schedule every wednesday 9:30 AM UTC (3pm IST) schedule: - cron: '30 9 * * 3' - # allow manual triggering of the workflow by a maintainer with no inputs + # allow manual triggering of the workflow by a maintainer workflow_dispatch: inputs: release_type: diff --git a/.github/workflows/releaser.yaml b/.github/workflows/releaser.yaml new file mode 100644 index 0000000000..5e0faefe90 --- /dev/null +++ b/.github/workflows/releaser.yaml @@ -0,0 +1,39 @@ +name: releaser + +on: + # trigger on new latest release + release: + types: [published] + +jobs: + charts: + runs-on: ubuntu-latest + steps: + - id: token + 
name: github-token-gen + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ secrets.PRIMUS_APP_ID }} + private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }} + owner: ${{ github.repository_owner }} + - name: trigger-charts-prereleaser + run: | + # Variables + repo_owner="signoz" + repo_name="charts" + event_type="prereleaser" + + # identify the release type + release_tag=${{ github.event.release.tag_name }} + patch_number=$(echo $release_tag | awk -F. '{print $3}') + release_type="minor" + if [[ $patch_number -ne 0 ]]; then + release_type="patch" + fi + + # trigger the releaser workflow in signoz/charts repo + curl -L -X POST \ + -H "Accept: application/vnd.github.v3+json" \ + -H "Authorization: Bearer ${{ steps.token.outputs.token }}" \ + "https://api.github.com/repos/${repo_owner}/${repo_name}/dispatches" \ + -d "{\"event_type\": \"$event_type\", \"client_payload\": {\"release_type\": \"$release_type\"}}" From 03fb388cd1dc3bde21f8af874c33219c76ba5b97 Mon Sep 17 00:00:00 2001 From: Eng Zer Jun Date: Mon, 30 Dec 2024 19:13:14 +0800 Subject: [PATCH 05/13] chore(deps): update `cespare/xxhash` to v2 version (#6714) --- go.mod | 3 +-- go.sum | 7 ------- pkg/query-service/utils/labels/labels.go | 2 +- 3 files changed, 2 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index 866498192d..5931d118f8 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974 github.com/antonmedv/expr v1.15.3 github.com/auth0/go-jwt-middleware v1.0.1 - github.com/cespare/xxhash v1.1.0 + github.com/cespare/xxhash/v2 v2.3.0 github.com/coreos/go-oidc/v3 v3.11.0 github.com/dustin/go-humanize v1.0.1 github.com/go-co-op/gocron v1.30.1 @@ -89,7 +89,6 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect diff --git a/go.sum b/go.sum index 1b0b407b45..b97fc0ff95 100644 --- a/go.sum +++ b/go.sum @@ -64,8 +64,6 @@ github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7Oputl github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd h1:Bk43AsDYe0fhkbj57eGXx8H3ZJ4zhmQXBnrW523ktj8= github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd/go.mod h1:nxRcH/OEdM8QxzH37xkGzomr1O0JpYBRS6pwjsWW6Pc= github.com/SigNoz/prometheus v1.12.0 h1:+BXeIHyMOOWWa+xjhJ+x80JFva7r1WzWIfIhQ5PUmIE= @@ -125,8 +123,6 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 
h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= @@ -714,9 +710,6 @@ github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sS github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= diff --git a/pkg/query-service/utils/labels/labels.go b/pkg/query-service/utils/labels/labels.go index 59f10bbb57..a04d3ce58c 100644 --- a/pkg/query-service/utils/labels/labels.go +++ b/pkg/query-service/utils/labels/labels.go @@ -7,7 +7,7 @@ import ( "strconv" "strings" - "github.com/cespare/xxhash" + "github.com/cespare/xxhash/v2" ) const sep = '\xff' From 05c9dd68ddcbec01ce7a7fdfb505665af08728f7 Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Tue, 31 Dec 2024 18:43:29 +0530 Subject: [PATCH 06/13] fix(ci): fix jest coverage since github workflow (#6735) --- frontend/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/package.json b/frontend/package.json index 0379ac9bff..6d6184a35d 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -21,7 +21,7 @@ "husky:configure": "cd .. 
&& husky install frontend/.husky && cd frontend && chmod ug+x .husky/*", "commitlint": "commitlint --edit $1", "test": "jest --coverage", - "test:changedsince": "jest --changedSince=develop --coverage --silent" + "test:changedsince": "jest --changedSince=main --coverage --silent" }, "engines": { "node": ">=16.15.0" From 3e675bb9a59a36544eabbb2d9b945fe325dc2645 Mon Sep 17 00:00:00 2001 From: "primus-bot[bot]" <171087277+primus-bot[bot]@users.noreply.github.com> Date: Wed, 1 Jan 2025 15:51:52 +0530 Subject: [PATCH 07/13] chore(release): bump to v0.66.0 (#6737) #### Summary - Release SigNoz v0.66.0 - Bump SigNoz OTel Collector to v0.111.21 Created by [Primus-Bot](https://github.com/apps/primus-bot) --- .../docker-swarm/clickhouse-setup/docker-compose.yaml | 8 ++++---- .../docker/clickhouse-setup/docker-compose-core.yaml | 4 ++-- .../clickhouse-setup/docker-compose-minimal.yaml | 10 +++++----- .../clickhouse-setup/docker-compose.testing.yaml | 8 ++++---- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml index e7d56c0194..8290969be2 100644 --- a/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml +++ b/deploy/docker-swarm/clickhouse-setup/docker-compose.yaml @@ -130,7 +130,7 @@ services: restart_policy: condition: on-failure query-service: - image: signoz/query-service:0.65.1 + image: signoz/query-service:0.66.0 command: ["-config=/root/config/prometheus.yml", "--use-logs-new-schema=true", "--use-trace-new-schema=true"] # ports: # - "6060:6060" # pprof port @@ -158,7 +158,7 @@ services: condition: on-failure !!merge <<: *db-depend frontend: - image: signoz/frontend:0.65.1 + image: signoz/frontend:0.66.0 deploy: restart_policy: condition: on-failure @@ -170,7 +170,7 @@ services: volumes: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector: - image: signoz/signoz-otel-collector:0.111.18 + image: signoz/signoz-otel-collector:0.111.21 command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"] user: root # required for reading docker container logs volumes: @@ -202,7 +202,7 @@ services: - otel-collector-migrator - query-service otel-collector-migrator: - image: signoz/signoz-schema-migrator:0.111.18 + image: signoz/signoz-schema-migrator:0.111.21 deploy: restart_policy: condition: on-failure diff --git a/deploy/docker/clickhouse-setup/docker-compose-core.yaml b/deploy/docker/clickhouse-setup/docker-compose-core.yaml index 3cc2dd8a35..c4864aee0e 100644 --- a/deploy/docker/clickhouse-setup/docker-compose-core.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose-core.yaml @@ -57,7 +57,7 @@ services: - --queryService.url=http://query-service:8085 - --storage.path=/data otel-collector-migrator: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.18} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.21} container_name: otel-migrator command: - "sync" @@ -73,7 +73,7 @@ services: # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. 
Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` otel-collector: container_name: signoz-otel-collector - image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.18} + image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.21} command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"] # user: root # required for reading docker container logs volumes: diff --git a/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml b/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml index 0c8f5fe75f..a1ecb30228 100644 --- a/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose-minimal.yaml @@ -145,7 +145,7 @@ services: - --storage.path=/data # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` query-service: - image: signoz/query-service:${DOCKER_TAG:-0.65.1} + image: signoz/query-service:${DOCKER_TAG:-0.66.0} container_name: signoz-query-service command: ["-config=/root/config/prometheus.yml", "--use-logs-new-schema=true", "--use-trace-new-schema=true"] # ports: @@ -172,7 +172,7 @@ services: retries: 3 !!merge <<: *db-depend frontend: - image: signoz/frontend:${DOCKER_TAG:-0.65.1} + image: signoz/frontend:${DOCKER_TAG:-0.66.0} container_name: signoz-frontend restart: on-failure depends_on: @@ -183,7 +183,7 @@ services: volumes: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector-migrator-sync: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.18} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.21} container_name: otel-migrator-sync command: - "sync" @@ -197,7 +197,7 @@ services: # clickhouse-3: # condition: service_healthy otel-collector-migrator-async: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.18} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.21} container_name: otel-migrator-async command: - "async" @@ -213,7 +213,7 @@ services: # clickhouse-3: # condition: service_healthy otel-collector: - image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.18} + image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.21} container_name: signoz-otel-collector command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"] user: root # required for reading docker container logs diff --git a/deploy/docker/clickhouse-setup/docker-compose.testing.yaml b/deploy/docker/clickhouse-setup/docker-compose.testing.yaml index 0e0e9fab66..1a80fae771 100644 --- a/deploy/docker/clickhouse-setup/docker-compose.testing.yaml +++ b/deploy/docker/clickhouse-setup/docker-compose.testing.yaml @@ -148,7 +148,7 @@ services: - --storage.path=/data # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. 
Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md` query-service: - image: signoz/query-service:${DOCKER_TAG:-0.65.1} + image: signoz/query-service:${DOCKER_TAG:-0.66.0} container_name: signoz-query-service command: ["-config=/root/config/prometheus.yml", "-gateway-url=https://api.staging.signoz.cloud", "--use-logs-new-schema=true", "--use-trace-new-schema=true"] # ports: @@ -176,7 +176,7 @@ services: retries: 3 !!merge <<: *db-depend frontend: - image: signoz/frontend:${DOCKER_TAG:-0.65.1} + image: signoz/frontend:${DOCKER_TAG:-0.66.0} container_name: signoz-frontend restart: on-failure depends_on: @@ -187,7 +187,7 @@ services: volumes: - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf otel-collector-migrator: - image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.18} + image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.21} container_name: otel-migrator command: - "--dsn=tcp://clickhouse:9000" @@ -199,7 +199,7 @@ services: # clickhouse-3: # condition: service_healthy otel-collector: - image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.18} + image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.21} container_name: signoz-otel-collector command: ["--config=/etc/otel-collector-config.yaml", "--manager-config=/etc/manager-config.yaml", "--copy-path=/var/tmp/collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"] user: root # required for reading docker container logs From 28d27bc5c17fe6c5b51ac5c9a78d5ae0729b2389 Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Thu, 2 Jan 2025 13:30:31 +0530 Subject: [PATCH 08/13] fix(frontend): do not use redirects outside the react router (#6739) * fix(frontend): use history.replace to something went wrong instead of redirects * fix(frontend): update the something went wrong page to error boundary fallback --- frontend/src/AppRoutes/Private.tsx | 2 +- frontend/src/AppRoutes/index.tsx | 15 +++++++++++---- frontend/src/AppRoutes/pageComponents.ts | 2 +- .../TopNav/DateTimeSelectionV2/config.ts | 1 + 4 files changed, 14 insertions(+), 6 deletions(-) diff --git a/frontend/src/AppRoutes/Private.tsx b/frontend/src/AppRoutes/Private.tsx index 35c4f11e20..ab46d96a8e 100644 --- a/frontend/src/AppRoutes/Private.tsx +++ b/frontend/src/AppRoutes/Private.tsx @@ -189,7 +189,7 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element { if (fromPathname) { history.push(fromPathname); setLocalStorageApi(LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT, ''); - } else { + } else if (pathname !== ROUTES.SOMETHING_WENT_WRONG) { history.push(ROUTES.APPLICATION); } } else { diff --git a/frontend/src/AppRoutes/index.tsx b/frontend/src/AppRoutes/index.tsx index cd77215682..5875a31d87 100644 --- a/frontend/src/AppRoutes/index.tsx +++ b/frontend/src/AppRoutes/index.tsx @@ -22,7 +22,7 @@ import { IUser } from 'providers/App/types'; import { DashboardProvider } from 'providers/Dashboard/Dashboard'; import { QueryBuilderProvider } from 'providers/QueryBuilder'; import { Suspense, useCallback, useEffect, useState } from 'react'; -import { Redirect, Route, Router, Switch } from 'react-router-dom'; +import { Route, Router, Switch } from 'react-router-dom'; import { CompatRouter } from 'react-router-dom-v5-compat'; import { extractDomain, isCloudUser, isEECloudUser } from 'utils/app'; @@ -240,12 +240,19 @@ function App(): JSX.Element { // if the required calls fails then return a something went wrong error // this needs to be on top of data missing error because if there is an error, data 
will never be loaded and it will // move to indefinitive loading - if (userFetchError || licensesFetchError) { - return ; + if ( + (userFetchError || licensesFetchError) && + pathname !== ROUTES.SOMETHING_WENT_WRONG + ) { + history.replace(ROUTES.SOMETHING_WENT_WRONG); } // if all of the data is not set then return a spinner, this is required because there is some gap between loading states and data setting - if (!licenses || !user.email || !featureFlags) { + if ( + (!licenses || !user.email || !featureFlags) && + !userFetchError && + !licensesFetchError + ) { return ; } } diff --git a/frontend/src/AppRoutes/pageComponents.ts b/frontend/src/AppRoutes/pageComponents.ts index e623357ab5..fb63e865cb 100644 --- a/frontend/src/AppRoutes/pageComponents.ts +++ b/frontend/src/AppRoutes/pageComponents.ts @@ -180,7 +180,7 @@ export const PasswordReset = Loadable( export const SomethingWentWrong = Loadable( () => import( - /* webpackChunkName: "SomethingWentWrong" */ 'pages/SomethingWentWrong' + /* webpackChunkName: "ErrorBoundaryFallback" */ 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback' ), ); diff --git a/frontend/src/container/TopNav/DateTimeSelectionV2/config.ts b/frontend/src/container/TopNav/DateTimeSelectionV2/config.ts index 408ed6c11e..a84e1b2c7e 100644 --- a/frontend/src/container/TopNav/DateTimeSelectionV2/config.ts +++ b/frontend/src/container/TopNav/DateTimeSelectionV2/config.ts @@ -214,6 +214,7 @@ export const routesToSkip = [ ROUTES.MESSAGING_QUEUES, ROUTES.MESSAGING_QUEUES_DETAIL, ROUTES.INFRASTRUCTURE_MONITORING_HOSTS, + ROUTES.SOMETHING_WENT_WRONG, ]; export const routesToDisable = [ROUTES.LOGS_EXPLORER, ROUTES.LIVE_LOGS]; From dad72dd2950919d1d54716507b8bbe0546de6f04 Mon Sep 17 00:00:00 2001 From: SagarRajput-7 <162284829+SagarRajput-7@users.noreply.github.com> Date: Thu, 2 Jan 2025 15:45:01 +0530 Subject: [PATCH 09/13] feat: updated the logic for variable update queue (#6586) * feat: updated the logic for variable update queue * feat: added API limiting to reduce unnecessary api call for dashboard variables (#6609) * feat: added API limiting to reduce unneccesary api call for dashboard variables * feat: fixed dropdown open triggering the api calls for single-select and misc * feat: add jest test cases for new logic's utils, functions and processors - dashboardVariables (#6621) * feat: added API limiting to reduce unneccesary api call for dashboard variables * feat: fixed dropdown open triggering the api calls for single-select and misc * feat: add jest test cases for new logic's utils, functions and processors - dashboardVariables * feat: added test for checkAPIInvocation * feat: refactor code * feat: added more test on graph utilities * feat: resolved comments and removed mount related handlings * feat: fixed test cases and added multiple variable formats --------- Co-authored-by: Srikanth Chekuri --- .../DashboardVariableSelection.tsx | 86 ++++-- .../VariableItem.test.tsx | 30 +++ .../VariableItem.tsx | 148 +++++------ .../__test__/dashboardVariables.test.tsx | 241 +++++++++++++++++ .../__test__/mock.ts | 251 ++++++++++++++++++ .../DashboardVariablesSelection/util.ts | 176 ++++++++++++ 6 files changed, 825 insertions(+), 107 deletions(-) create mode 100644 frontend/src/container/NewDashboard/DashboardVariablesSelection/__test__/dashboardVariables.test.tsx create mode 100644 frontend/src/container/NewDashboard/DashboardVariablesSelection/__test__/mock.ts diff --git a/frontend/src/container/NewDashboard/DashboardVariablesSelection/DashboardVariableSelection.tsx 
b/frontend/src/container/NewDashboard/DashboardVariablesSelection/DashboardVariableSelection.tsx index 813185e0b6..5a3c48ad46 100644 --- a/frontend/src/container/NewDashboard/DashboardVariablesSelection/DashboardVariableSelection.tsx +++ b/frontend/src/container/NewDashboard/DashboardVariablesSelection/DashboardVariableSelection.tsx @@ -1,9 +1,19 @@ import { Row } from 'antd'; -import { isNull } from 'lodash-es'; +import { isEmpty } from 'lodash-es'; import { useDashboard } from 'providers/Dashboard/Dashboard'; import { memo, useEffect, useState } from 'react'; +import { useSelector } from 'react-redux'; +import { AppState } from 'store/reducers'; import { IDashboardVariable } from 'types/api/dashboard/getAll'; - +import { GlobalReducer } from 'types/reducer/globalTime'; + +import { + buildDependencies, + buildDependencyGraph, + buildParentDependencyGraph, + IDependencyData, + onUpdateVariableNode, +} from './util'; import VariableItem from './VariableItem'; function DashboardVariableSelection(): JSX.Element | null { @@ -21,6 +31,14 @@ function DashboardVariableSelection(): JSX.Element | null { const [variablesTableData, setVariablesTableData] = useState([]); + const [dependencyData, setDependencyData] = useState( + null, + ); + + const { maxTime, minTime } = useSelector( + (state) => state.globalTime, + ); + useEffect(() => { if (variables) { const tableRowData = []; @@ -43,35 +61,37 @@ function DashboardVariableSelection(): JSX.Element | null { } }, [variables]); - const onVarChanged = (name: string): void => { - /** - * this function takes care of adding the dependent variables to current update queue and removing - * the updated variable name from the queue - */ - const dependentVariables = variablesTableData - ?.map((variable: any) => { - if (variable.type === 'QUERY') { - const re = new RegExp(`\\{\\{\\s*?\\.${name}\\s*?\\}\\}`); // regex for `{{.var}}` - const queryValue = variable.queryValue || ''; - const dependVarReMatch = queryValue.match(re); - if (dependVarReMatch !== null && dependVarReMatch.length > 0) { - return variable.name; - } - } - return null; - }) - .filter((val: string | null) => !isNull(val)); - setVariablesToGetUpdated((prev) => [ - ...prev.filter((v) => v !== name), - ...dependentVariables, - ]); - }; + useEffect(() => { + if (variablesTableData.length > 0) { + const depGrp = buildDependencies(variablesTableData); + const { order, graph } = buildDependencyGraph(depGrp); + const parentDependencyGraph = buildParentDependencyGraph(graph); + setDependencyData({ + order, + graph, + parentDependencyGraph, + }); + } + }, [setVariablesToGetUpdated, variables, variablesTableData]); + + // this handles the case where the dependency order changes i.e. variable list updated via creation or deletion etc. 
and we need to refetch the variables + // also trigger when the global time changes + useEffect( + () => { + if (!isEmpty(dependencyData?.order)) { + setVariablesToGetUpdated(dependencyData?.order || []); + } + }, + // eslint-disable-next-line react-hooks/exhaustive-deps + [JSON.stringify(dependencyData?.order), minTime, maxTime], + ); const onValueUpdate = ( name: string, id: string, value: IDashboardVariable['selectedValue'], allSelected: boolean, + // isMountedCall?: boolean, // eslint-disable-next-line sonarjs/cognitive-complexity ): void => { if (id) { @@ -111,7 +131,20 @@ function DashboardVariableSelection(): JSX.Element | null { }); } - onVarChanged(name); + if (dependencyData) { + const updatedVariables: string[] = []; + onUpdateVariableNode( + name, + dependencyData.graph, + dependencyData.order, + (node) => updatedVariables.push(node), + ); + setVariablesToGetUpdated((prev) => [ + ...new Set([...prev, ...updatedVariables.filter((v) => v !== name)]), + ]); + } else { + setVariablesToGetUpdated((prev) => prev.filter((v) => v !== name)); + } } }; @@ -139,6 +172,7 @@ function DashboardVariableSelection(): JSX.Element | null { onValueUpdate={onValueUpdate} variablesToGetUpdated={variablesToGetUpdated} setVariablesToGetUpdated={setVariablesToGetUpdated} + dependencyData={dependencyData} /> ))} diff --git a/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.test.tsx b/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.test.tsx index 1cb89d6b95..823cf53923 100644 --- a/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.test.tsx +++ b/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.test.tsx @@ -49,6 +49,11 @@ describe('VariableItem', () => { onValueUpdate={mockOnValueUpdate} variablesToGetUpdated={[]} setVariablesToGetUpdated={(): void => {}} + dependencyData={{ + order: [], + graph: {}, + parentDependencyGraph: {}, + }} /> , ); @@ -65,6 +70,11 @@ describe('VariableItem', () => { onValueUpdate={mockOnValueUpdate} variablesToGetUpdated={[]} setVariablesToGetUpdated={(): void => {}} + dependencyData={{ + order: [], + graph: {}, + parentDependencyGraph: {}, + }} /> , ); @@ -80,6 +90,11 @@ describe('VariableItem', () => { onValueUpdate={mockOnValueUpdate} variablesToGetUpdated={[]} setVariablesToGetUpdated={(): void => {}} + dependencyData={{ + order: [], + graph: {}, + parentDependencyGraph: {}, + }} /> , ); @@ -109,6 +124,11 @@ describe('VariableItem', () => { onValueUpdate={mockOnValueUpdate} variablesToGetUpdated={[]} setVariablesToGetUpdated={(): void => {}} + dependencyData={{ + order: [], + graph: {}, + parentDependencyGraph: {}, + }} /> , ); @@ -133,6 +153,11 @@ describe('VariableItem', () => { onValueUpdate={mockOnValueUpdate} variablesToGetUpdated={[]} setVariablesToGetUpdated={(): void => {}} + dependencyData={{ + order: [], + graph: {}, + parentDependencyGraph: {}, + }} /> , ); @@ -149,6 +174,11 @@ describe('VariableItem', () => { onValueUpdate={mockOnValueUpdate} variablesToGetUpdated={[]} setVariablesToGetUpdated={(): void => {}} + dependencyData={{ + order: [], + graph: {}, + parentDependencyGraph: {}, + }} /> , ); diff --git a/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx b/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx index 398ade8259..7e6c050653 100644 --- a/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx +++ 
b/frontend/src/container/NewDashboard/DashboardVariablesSelection/VariableItem.tsx @@ -35,12 +35,10 @@ import { popupContainer } from 'utils/selectPopupContainer'; import { variablePropsToPayloadVariables } from '../utils'; import { SelectItemStyle } from './styles'; -import { areArraysEqual } from './util'; +import { areArraysEqual, checkAPIInvocation, IDependencyData } from './util'; const ALL_SELECT_VALUE = '__ALL__'; -const variableRegexPattern = /\{\{\s*?\.([^\s}]+)\s*?\}\}/g; - enum ToggleTagValue { Only = 'Only', All = 'All', @@ -57,6 +55,7 @@ interface VariableItemProps { ) => void; variablesToGetUpdated: string[]; setVariablesToGetUpdated: React.Dispatch>; + dependencyData: IDependencyData | null; } const getSelectValue = ( @@ -79,6 +78,7 @@ function VariableItem({ onValueUpdate, variablesToGetUpdated, setVariablesToGetUpdated, + dependencyData, }: VariableItemProps): JSX.Element { const [optionsData, setOptionsData] = useState<(string | number | boolean)[]>( [], @@ -88,60 +88,20 @@ function VariableItem({ (state) => state.globalTime, ); - useEffect(() => { - if (variableData.allSelected && variableData.type === 'QUERY') { - setVariablesToGetUpdated((prev) => { - const variablesQueue = [...prev.filter((v) => v !== variableData.name)]; - if (variableData.name) { - variablesQueue.push(variableData.name); - } - return variablesQueue; - }); + const validVariableUpdate = (): boolean => { + if (!variableData.name) { + return false; } - // eslint-disable-next-line react-hooks/exhaustive-deps - }, [minTime, maxTime]); - const [errorMessage, setErrorMessage] = useState(null); - - const getDependentVariables = (queryValue: string): string[] => { - const matches = queryValue.match(variableRegexPattern); - - // Extract variable names from the matches array without {{ . }} - return matches - ? 
matches.map((match) => match.replace(variableRegexPattern, '$1')) - : []; - }; - - const getQueryKey = (variableData: IDashboardVariable): string[] => { - let dependentVariablesStr = ''; - - const dependentVariables = getDependentVariables( - variableData.queryValue || '', + // variableData.name is present as the top element or next in the queue - variablesToGetUpdated + return Boolean( + variablesToGetUpdated.length && + variablesToGetUpdated[0] === variableData.name, ); - - const variableName = variableData.name || ''; - - dependentVariables?.forEach((element) => { - const [, variable] = - Object.entries(existingVariables).find( - ([, value]) => value.name === element, - ) || []; - - dependentVariablesStr += `${element}${variable?.selectedValue}`; - }); - - const variableKey = dependentVariablesStr.replace(/\s/g, ''); - - // added this time dependency for variables query as API respects the passed time range now - return [ - REACT_QUERY_KEY.DASHBOARD_BY_ID, - variableName, - variableKey, - `${minTime}`, - `${maxTime}`, - ]; }; + const [errorMessage, setErrorMessage] = useState(null); + // eslint-disable-next-line sonarjs/cognitive-complexity const getOptions = (variablesRes: VariableResponseProps | null): void => { if (variablesRes && variableData.type === 'QUERY') { @@ -184,9 +144,7 @@ function VariableItem({ if ( variableData.type === 'QUERY' && variableData.name && - (variablesToGetUpdated.includes(variableData.name) || - valueNotInList || - variableData.allSelected) + (validVariableUpdate() || valueNotInList || variableData.allSelected) ) { let value = variableData.selectedValue; let allSelected = false; @@ -224,36 +182,64 @@ function VariableItem({ } }; - const { isLoading } = useQuery(getQueryKey(variableData), { - enabled: variableData && variableData.type === 'QUERY', - queryFn: () => - dashboardVariablesQuery({ - query: variableData.queryValue || '', - variables: variablePropsToPayloadVariables(existingVariables), - }), - refetchOnWindowFocus: false, - onSuccess: (response) => { - getOptions(response.payload); - }, - onError: (error: { - details: { - error: string; - }; - }) => { - const { details } = error; - - if (details.error) { - let message = details.error; - if (details.error.includes('Syntax error:')) { - message = - 'Please make sure query is valid and dependent variables are selected'; + const { isLoading } = useQuery( + [ + REACT_QUERY_KEY.DASHBOARD_BY_ID, + variableData.name || '', + `${minTime}`, + `${maxTime}`, + JSON.stringify(dependencyData?.order), + ], + { + enabled: + variableData && + variableData.type === 'QUERY' && + checkAPIInvocation( + variablesToGetUpdated, + variableData, + dependencyData?.parentDependencyGraph, + ), + queryFn: () => + dashboardVariablesQuery({ + query: variableData.queryValue || '', + variables: variablePropsToPayloadVariables(existingVariables), + }), + refetchOnWindowFocus: false, + onSuccess: (response) => { + getOptions(response.payload); + setVariablesToGetUpdated((prev) => + prev.filter((v) => v !== variableData.name), + ); + }, + onError: (error: { + details: { + error: string; + }; + }) => { + const { details } = error; + + if (details.error) { + let message = details.error; + if (details.error.includes('Syntax error:')) { + message = + 'Please make sure query is valid and dependent variables are selected'; + } + setErrorMessage(message); } - setErrorMessage(message); - } + }, }, - }); + ); const handleChange = (value: string | string[]): void => { + // if value is equal to selected value then return + if ( + value === 
variableData.selectedValue || + (Array.isArray(value) && + Array.isArray(variableData.selectedValue) && + areArraysEqual(value, variableData.selectedValue)) + ) { + return; + } if (variableData.name) { if ( value === ALL_SELECT_VALUE || diff --git a/frontend/src/container/NewDashboard/DashboardVariablesSelection/__test__/dashboardVariables.test.tsx b/frontend/src/container/NewDashboard/DashboardVariablesSelection/__test__/dashboardVariables.test.tsx new file mode 100644 index 0000000000..0add4c5cad --- /dev/null +++ b/frontend/src/container/NewDashboard/DashboardVariablesSelection/__test__/dashboardVariables.test.tsx @@ -0,0 +1,241 @@ +import { + buildDependencies, + buildDependencyGraph, + buildParentDependencyGraph, + checkAPIInvocation, + onUpdateVariableNode, + VariableGraph, +} from '../util'; +import { + buildDependenciesMock, + buildGraphMock, + checkAPIInvocationMock, + onUpdateVariableNodeMock, +} from './mock'; + +describe('dashboardVariables - utilities and processors', () => { + describe('onUpdateVariableNode', () => { + const { graph, topologicalOrder } = onUpdateVariableNodeMock; + const testCases = [ + { + scenario: 'root element', + nodeToUpdate: 'deployment_environment', + expected: [ + 'deployment_environment', + 'service_name', + 'endpoint', + 'http_status_code', + ], + }, + { + scenario: 'middle child', + nodeToUpdate: 'k8s_node_name', + expected: ['k8s_node_name', 'k8s_namespace_name'], + }, + { + scenario: 'leaf element', + nodeToUpdate: 'http_status_code', + expected: ['http_status_code'], + }, + { + scenario: 'node not in graph', + nodeToUpdate: 'unknown', + expected: [], + }, + { + scenario: 'node not in topological order', + nodeToUpdate: 'unknown', + expected: [], + }, + ]; + + test.each(testCases)( + 'should update variable node when $scenario', + ({ nodeToUpdate, expected }) => { + const updatedVariables: string[] = []; + const callback = (node: string): void => { + updatedVariables.push(node); + }; + + onUpdateVariableNode(nodeToUpdate, graph, topologicalOrder, callback); + + expect(updatedVariables).toEqual(expected); + }, + ); + + it('should return empty array when topological order is empty', () => { + const updatedVariables: string[] = []; + onUpdateVariableNode('http_status_code', graph, [], (node) => + updatedVariables.push(node), + ); + expect(updatedVariables).toEqual([]); + }); + }); + + describe('checkAPIInvocation', () => { + const { + variablesToGetUpdated, + variableData, + parentDependencyGraph, + } = checkAPIInvocationMock; + + const mockRootElement = { + name: 'deployment_environment', + key: '036a47cd-9ffc-47de-9f27-0329198964a8', + id: '036a47cd-9ffc-47de-9f27-0329198964a8', + modificationUUID: '5f71b591-f583-497c-839d-6a1590c3f60f', + selectedValue: 'production', + type: 'QUERY', + // ... 
other properties omitted for brevity + } as any; + + describe('edge cases', () => { + it('should return false when variableData is empty', () => { + expect( + checkAPIInvocation( + variablesToGetUpdated, + variableData, + parentDependencyGraph, + ), + ).toBeFalsy(); + }); + + it('should return true when parentDependencyGraph is empty', () => { + expect( + checkAPIInvocation(variablesToGetUpdated, variableData, {}), + ).toBeFalsy(); + }); + }); + + describe('variable sequences', () => { + it('should return true for valid sequence', () => { + expect( + checkAPIInvocation( + ['k8s_node_name', 'k8s_namespace_name'], + variableData, + parentDependencyGraph, + ), + ).toBeTruthy(); + }); + + it('should return false for invalid sequence', () => { + expect( + checkAPIInvocation( + ['k8s_cluster_name', 'k8s_node_name', 'k8s_namespace_name'], + variableData, + parentDependencyGraph, + ), + ).toBeFalsy(); + }); + + it('should return false when variableData is not in sequence', () => { + expect( + checkAPIInvocation( + ['deployment_environment', 'service_name', 'endpoint'], + variableData, + parentDependencyGraph, + ), + ).toBeFalsy(); + }); + }); + + describe('root element behavior', () => { + it('should return true for valid root element sequence', () => { + expect( + checkAPIInvocation( + [ + 'deployment_environment', + 'service_name', + 'endpoint', + 'http_status_code', + ], + mockRootElement, + parentDependencyGraph, + ), + ).toBeTruthy(); + }); + + it('should return true for empty variablesToGetUpdated array', () => { + expect( + checkAPIInvocation([], mockRootElement, parentDependencyGraph), + ).toBeTruthy(); + }); + }); + }); + + describe('Graph Building Utilities', () => { + const { graph } = buildGraphMock; + const { variables } = buildDependenciesMock; + + describe('buildParentDependencyGraph', () => { + it('should build parent dependency graph with correct relationships', () => { + const expected = { + deployment_environment: [], + service_name: ['deployment_environment'], + endpoint: ['deployment_environment', 'service_name'], + http_status_code: ['endpoint'], + k8s_cluster_name: [], + k8s_node_name: ['k8s_cluster_name'], + k8s_namespace_name: ['k8s_cluster_name', 'k8s_node_name'], + environment: [], + }; + + expect(buildParentDependencyGraph(graph)).toEqual(expected); + }); + + it('should handle empty graph', () => { + expect(buildParentDependencyGraph({})).toEqual({}); + }); + }); + + describe('buildDependencyGraph', () => { + it('should build complete dependency graph with correct structure and order', () => { + const expected = { + graph: { + deployment_environment: ['service_name', 'endpoint'], + service_name: ['endpoint'], + endpoint: ['http_status_code'], + http_status_code: [], + k8s_cluster_name: ['k8s_node_name', 'k8s_namespace_name'], + k8s_node_name: ['k8s_namespace_name'], + k8s_namespace_name: [], + environment: [], + }, + order: [ + 'deployment_environment', + 'k8s_cluster_name', + 'environment', + 'service_name', + 'k8s_node_name', + 'endpoint', + 'k8s_namespace_name', + 'http_status_code', + ], + }; + + expect(buildDependencyGraph(graph)).toEqual(expected); + }); + }); + + describe('buildDependencies', () => { + it('should build dependency map from variables array', () => { + const expected: VariableGraph = { + deployment_environment: ['service_name', 'endpoint'], + service_name: ['endpoint'], + endpoint: ['http_status_code'], + http_status_code: [], + k8s_cluster_name: ['k8s_node_name', 'k8s_namespace_name'], + k8s_node_name: ['k8s_namespace_name'], + k8s_namespace_name: 
[], + environment: [], + }; + + expect(buildDependencies(variables)).toEqual(expected); + }); + + it('should handle empty variables array', () => { + expect(buildDependencies([])).toEqual({}); + }); + }); + }); +}); diff --git a/frontend/src/container/NewDashboard/DashboardVariablesSelection/__test__/mock.ts b/frontend/src/container/NewDashboard/DashboardVariablesSelection/__test__/mock.ts new file mode 100644 index 0000000000..c39841fcf4 --- /dev/null +++ b/frontend/src/container/NewDashboard/DashboardVariablesSelection/__test__/mock.ts @@ -0,0 +1,251 @@ +/* eslint-disable sonarjs/no-duplicate-string */ +export const checkAPIInvocationMock = { + variablesToGetUpdated: [], + variableData: { + name: 'k8s_node_name', + key: '4d71d385-beaf-4434-8dbf-c62be68049fc', + allSelected: false, + customValue: '', + description: '', + id: '4d71d385-beaf-4434-8dbf-c62be68049fc', + modificationUUID: '77233d3c-96d7-4ccb-aa9d-11b04d563068', + multiSelect: false, + order: 6, + queryValue: + "SELECT JSONExtractString(labels, 'k8s_node_name') AS k8s_node_name\nFROM signoz_metrics.distributed_time_series_v4_1day\nWHERE metric_name = 'k8s_node_cpu_time' AND JSONExtractString(labels, 'k8s_cluster_name') = {{.k8s_cluster_name}}\nGROUP BY k8s_node_name", + selectedValue: 'gke-signoz-saas-si-consumer-bsc-e2sd4-a6d430fa-gvm2', + showALLOption: false, + sort: 'DISABLED', + textboxValue: '', + type: 'QUERY', + }, + parentDependencyGraph: { + deployment_environment: [], + service_name: ['deployment_environment'], + endpoint: ['deployment_environment', 'service_name'], + http_status_code: ['endpoint'], + k8s_cluster_name: [], + environment: [], + k8s_node_name: ['k8s_cluster_name'], + k8s_namespace_name: ['k8s_cluster_name', 'k8s_node_name'], + }, +} as any; + +export const onUpdateVariableNodeMock = { + nodeToUpdate: 'deployment_environment', + graph: { + deployment_environment: ['service_name', 'endpoint'], + service_name: ['endpoint'], + endpoint: ['http_status_code'], + http_status_code: [], + k8s_cluster_name: ['k8s_node_name', 'k8s_namespace_name'], + environment: [], + k8s_node_name: ['k8s_namespace_name'], + k8s_namespace_name: [], + }, + topologicalOrder: [ + 'deployment_environment', + 'k8s_cluster_name', + 'environment', + 'service_name', + 'k8s_node_name', + 'endpoint', + 'k8s_namespace_name', + 'http_status_code', + ], + callback: jest.fn(), +}; + +export const buildGraphMock = { + graph: { + deployment_environment: ['service_name', 'endpoint'], + service_name: ['endpoint'], + endpoint: ['http_status_code'], + http_status_code: [], + k8s_cluster_name: ['k8s_node_name', 'k8s_namespace_name'], + environment: [], + k8s_node_name: ['k8s_namespace_name'], + k8s_namespace_name: [], + }, +}; + +export const buildDependenciesMock = { + variables: [ + { + key: '036a47cd-9ffc-47de-9f27-0329198964a8', + name: 'deployment_environment', + allSelected: false, + customValue: '', + description: '', + id: '036a47cd-9ffc-47de-9f27-0329198964a8', + modificationUUID: '5f71b591-f583-497c-839d-6a1590c3f60f', + multiSelect: false, + order: 0, + queryValue: + "SELECT DISTINCT JSONExtractString(labels, 'deployment_environment') AS deployment_environment\nFROM signoz_metrics.distributed_time_series_v4_1day\nWHERE metric_name = 'signoz_calls_total'", + selectedValue: 'production', + showALLOption: false, + sort: 'DISABLED', + textboxValue: '', + type: 'QUERY', + }, + { + key: 'eed5c917-1860-4c7e-bf6d-a05b97bafbc9', + name: 'service_name', + allSelected: true, + customValue: '', + description: '', + id: 
'eed5c917-1860-4c7e-bf6d-a05b97bafbc9', + modificationUUID: '85db928b-ac9b-4e9f-b274-791112102fdf', + multiSelect: true, + order: 1, + queryValue: + "SELECT DISTINCT JSONExtractString(labels, 'service_name') FROM signoz_metrics.distributed_time_series_v4_1day\n WHERE metric_name = 'signoz_calls_total' and JSONExtractString(labels, 'deployment_environment') = {{.deployment_environment}}", + selectedValue: ['otelgateway'], + showALLOption: true, + sort: 'ASC', + textboxValue: '', + type: 'QUERY', + }, + { + key: '4022d3c1-e845-4952-8984-78f25f575c7a', + name: 'endpoint', + allSelected: true, + customValue: '', + description: '', + id: '4022d3c1-e845-4952-8984-78f25f575c7a', + modificationUUID: 'c0107fa1-ebb7-4dd3-aa9d-6ba08ecc594d', + multiSelect: true, + order: 2, + queryValue: + "SELECT DISTINCT JSONExtractString(labels, 'operation') FROM signoz_metrics.distributed_time_series_v4_1day\n WHERE metric_name = 'signoz_calls_total' AND JSONExtractString(labels, 'service_name') IN {{.service_name}} and JSONExtractString(labels, 'deployment_environment') = {{.deployment_environment}}", + selectedValue: [ + '//v1/traces', + '/logs/heroku', + '/logs/json', + '/logs/vector', + '/v1/logs', + '/v1/metrics', + '/v1/traces', + 'SELECT', + 'exporter/signozkafka/logs', + 'exporter/signozkafka/metrics', + 'exporter/signozkafka/traces', + 'extension/signozkeyauth/Authenticate', + 'get', + 'hmget', + 'opentelemetry.proto.collector.logs.v1.LogsService/Export', + 'opentelemetry.proto.collector.metrics.v1.MetricsService/Export', + 'opentelemetry.proto.collector.trace.v1.TraceService/Export', + 'processor/signozlimiter/LogsProcessed', + 'processor/signozlimiter/MetricsProcessed', + 'processor/signozlimiter/TracesProcessed', + 'receiver/otlp/LogsReceived', + 'receiver/otlp/MetricsReceived', + 'receiver/otlp/TraceDataReceived', + 'receiver/signozhttplog/heroku/LogsReceived', + 'receiver/signozhttplog/json/LogsReceived', + 'receiver/signozhttplog/vector/LogsReceived', + 'redis.dial', + 'redis.pipeline eval', + 'sadd', + 'set', + 'sismember', + ], + showALLOption: true, + sort: 'ASC', + textboxValue: '', + type: 'QUERY', + }, + { + key: '5e8a3cd9-3cd9-42df-a76c-79471a0f75bd', + name: 'http_status_code', + customValue: '', + description: '', + id: '5e8a3cd9-3cd9-42df-a76c-79471a0f75bd', + modificationUUID: '9a4021cc-a80a-4f15-8899-78892b763ca7', + multiSelect: true, + order: 3, + queryValue: + "SELECT DISTINCT JSONExtractString(labels, 'http_status_code') FROM signoz_metrics.distributed_time_series_v4_1day\n WHERE metric_name = 'signoz_calls_total' AND JSONExtractString(labels, 'operation') IN {{.endpoint}}", + showALLOption: true, + sort: 'ASC', + textboxValue: '', + type: 'QUERY', + selectedValue: ['', '200', '301', '400', '401', '405', '415', '429'], + allSelected: true, + }, + { + key: '48e9aa64-05ca-41c2-a1bd-6c8aeca659f1', + name: 'k8s_cluster_name', + allSelected: false, + customValue: 'test-1,\ntest-2,\ntest-3', + description: '', + id: '48e9aa64-05ca-41c2-a1bd-6c8aeca659f1', + modificationUUID: '44722322-368c-4613-bb7f-d0b12867d57a', + multiSelect: false, + order: 4, + queryValue: + "SELECT JSONExtractString(labels, 'k8s_cluster_name') AS k8s_cluster_name\nFROM signoz_metrics.distributed_time_series_v4_1day\nWHERE metric_name = 'k8s_node_cpu_time'\nGROUP BY k8s_cluster_name", + selectedValue: 'saasmonitor-cluster', + showALLOption: false, + sort: 'DISABLED', + textboxValue: '', + type: 'QUERY', + }, + { + key: '3ea18ba2-30cf-4220-b03b-720b5eaf35f8', + name: 'environment', + allSelected: false, + customValue: 
'', + description: '', + id: '3ea18ba2-30cf-4220-b03b-720b5eaf35f8', + modificationUUID: '9f76cb06-1b9f-460f-a174-0b210bb3cf93', + multiSelect: false, + order: 5, + queryValue: + "SELECT DISTINCT JSONExtractString(labels, 'deployment_environment') AS environment\nFROM signoz_metrics.distributed_time_series_v4_1day\nWHERE metric_name = 'signoz_calls_total'", + selectedValue: 'production', + showALLOption: false, + sort: 'DISABLED', + textboxValue: '', + type: 'QUERY', + }, + { + key: '4d71d385-beaf-4434-8dbf-c62be68049fc', + name: 'k8s_node_name', + allSelected: false, + customValue: '', + description: '', + id: '4d71d385-beaf-4434-8dbf-c62be68049fc', + modificationUUID: '77233d3c-96d7-4ccb-aa9d-11b04d563068', + multiSelect: false, + order: 6, + queryValue: + "SELECT JSONExtractString(labels, 'k8s_node_name') AS k8s_node_name\nFROM signoz_metrics.distributed_time_series_v4_1day\nWHERE metric_name = 'k8s_node_cpu_time' AND JSONExtractString(labels, 'k8s_cluster_name') = {{.k8s_cluster_name}}\nGROUP BY k8s_node_name", + selectedValue: 'gke-signoz-saas-si-consumer-bsc-e2sd4-a6d430fa-gvm2', + showALLOption: false, + sort: 'DISABLED', + textboxValue: '', + type: 'QUERY', + }, + { + key: '937ecbae-b24b-4d6d-8cc4-5d5b8d53569b', + name: 'k8s_namespace_name', + customValue: '', + description: '', + id: '937ecbae-b24b-4d6d-8cc4-5d5b8d53569b', + modificationUUID: '8ad2442d-8b4d-4c64-848e-af847d1d0eec', + multiSelect: false, + order: 7, + queryValue: + "SELECT JSONExtractString(labels, 'k8s_namespace_name') AS k8s_namespace_name\nFROM signoz_metrics.distributed_time_series_v4_1day\nWHERE metric_name = 'k8s_pod_cpu_time' AND JSONExtractString(labels, 'k8s_cluster_name') = {{.k8s_cluster_name}} AND JSONExtractString(labels, 'k8s_node_name') IN {{.k8s_node_name}}\nGROUP BY k8s_namespace_name", + showALLOption: false, + sort: 'DISABLED', + textboxValue: '', + type: 'QUERY', + selectedValue: 'saasmonitor', + allSelected: false, + }, + ] as any, +}; diff --git a/frontend/src/container/NewDashboard/DashboardVariablesSelection/util.ts b/frontend/src/container/NewDashboard/DashboardVariablesSelection/util.ts index a3fe59ccd8..03c6f2c585 100644 --- a/frontend/src/container/NewDashboard/DashboardVariablesSelection/util.ts +++ b/frontend/src/container/NewDashboard/DashboardVariablesSelection/util.ts @@ -1,3 +1,4 @@ +import { isEmpty } from 'lodash-es'; import { Dashboard, IDashboardVariable } from 'types/api/dashboard/getAll'; export function areArraysEqual( @@ -29,3 +30,178 @@ export const convertVariablesToDbFormat = ( result[id] = obj; return result; }, {}); + +const getDependentVariables = (queryValue: string): string[] => { + // Combined pattern for all formats: + // {{.variable_name}} - original format + // $variable_name - dollar prefix format + // [[variable_name]] - square bracket format + // {{variable_name}} - without dot format + const variableRegexPattern = /(?:\{\{\s*\.?([^\s}]+)\s*\}\}|\$([^\s\W]+)|\[\[([^\]]+)\]\])/g; + + const matches = queryValue.match(variableRegexPattern); + + // Extract variable names from the matches array, handling all formats + return matches + ? 
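+ // Illustrative sketch, not part of the original change: for a query like + //   "... WHERE env = {{.deployment_environment}} AND host = $host_name AND ns = [[namespace]]" + // match() yields ['{{.deployment_environment}}', '$host_name', '[[namespace]]'], + // and the map below strips the delimiters to recover the bare variable names.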
matches.map((match) => { + if (match.startsWith('$')) { + return match.slice(1); // Remove $ prefix + } + if (match.startsWith('[[')) { + return match.slice(2, -2); // Remove [[ and ]] + } + // Handle both {{.var}} and {{var}} formats + return match.replace(/\{\{\s*\.?([^\s}]+)\s*\}\}/, '$1'); + }) + : []; +}; +export type VariableGraph = Record<string, string[]>; + +export const buildDependencies = ( + variables: IDashboardVariable[], +): VariableGraph => { + const graph: VariableGraph = {}; + + // Initialize empty arrays for all variables first + variables.forEach((variable) => { + if (variable.name && variable.type === 'QUERY') { + graph[variable.name] = []; + } + }); + + // For each QUERY variable, add it as a dependent to its referenced variables + variables.forEach((variable) => { + if (variable.type === 'QUERY' && variable.name) { + const dependentVariables = getDependentVariables(variable.queryValue || ''); + + // For each referenced variable, add the current query as a dependent + dependentVariables.forEach((referencedVar) => { + if (graph[referencedVar]) { + graph[referencedVar].push(variable.name as string); + } else { + graph[referencedVar] = [variable.name as string]; + } + }); + } + }); + + return graph; +}; + +// Function to build the dependency graph +export const buildDependencyGraph = ( + dependencies: VariableGraph, +): { order: string[]; graph: VariableGraph } => { + const inDegree: Record<string, number> = {}; + const adjList: VariableGraph = {}; + + // Initialize in-degree and adjacency list + Object.keys(dependencies).forEach((node) => { + if (!inDegree[node]) inDegree[node] = 0; + if (!adjList[node]) adjList[node] = []; + dependencies[node].forEach((child) => { + if (!inDegree[child]) inDegree[child] = 0; + inDegree[child]++; + adjList[node].push(child); + }); + }); + + // Topological sort using Kahn's Algorithm + const queue: string[] = Object.keys(inDegree).filter( + (node) => inDegree[node] === 0, + ); + const topologicalOrder: string[] = []; + + while (queue.length > 0) { + const current = queue.shift(); + if (current === undefined) { + break; + } + topologicalOrder.push(current); + + adjList[current].forEach((neighbor) => { + inDegree[neighbor]--; + if (inDegree[neighbor] === 0) queue.push(neighbor); + }); + } + + if (topologicalOrder.length !== Object.keys(dependencies).length) { + console.error('Cycle detected in the dependency graph!'); + } + + return { order: topologicalOrder, graph: adjList }; +}; + +export const onUpdateVariableNode = ( + nodeToUpdate: string, + graph: VariableGraph, + topologicalOrder: string[], + callback: (node: string) => void, +): void => { + const visited = new Set<string>(); + + // Start processing from the node to update + topologicalOrder.forEach((node) => { + if (node === nodeToUpdate || visited.has(node)) { + visited.add(node); + callback(node); + (graph[node] || []).forEach((child) => { + visited.add(child); + }); + } + }); +}; + +export const buildParentDependencyGraph = ( + graph: VariableGraph, +): VariableGraph => { + const parentGraph: VariableGraph = {}; + + // Initialize empty arrays for all nodes + Object.keys(graph).forEach((node) => { + parentGraph[node] = []; + }); + + // For each node and its children in the original graph + Object.entries(graph).forEach(([node, children]) => { + // For each child, add the current node as its parent + children.forEach((child) => { + parentGraph[child].push(node); + }); + }); + + return parentGraph; +}; + +export const checkAPIInvocation = ( + variablesToGetUpdated: string[], + variableData: IDashboardVariable,
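+ // parentDependencyGraph maps each variable to its parents, as produced by + // buildParentDependencyGraph above; it is optional, hence the isEmpty guard below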
parentDependencyGraph?: VariableGraph, +): boolean => { + if (isEmpty(variableData.name)) { + return false; + } + + if (isEmpty(parentDependencyGraph)) { + return false; + } + + // if no dependency then true + const haveDependency = + parentDependencyGraph?.[variableData.name || '']?.length > 0; + if (!haveDependency) { + return true; + } + + // if variable is in the list and has dependency then check if its the top element in the queue then true else false + return ( + variablesToGetUpdated.length > 0 && + variablesToGetUpdated[0] === variableData.name + ); +}; + +export interface IDependencyData { + order: string[]; + graph: VariableGraph; + parentDependencyGraph: VariableGraph; +} From d48cdbfc4a5b386fc72aaaf98da2f01c1049b911 Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Thu, 2 Jan 2025 22:55:30 +0530 Subject: [PATCH 10/13] feat: enable the new where clause for all the log based queries (#6671) * feat: enable the new where clause for logs dashboards --- .../QueryBuilder/components/Query/Query.tsx | 6 +----- .../QueryBuilderSearchV2/QueryBuilderSearchV2.tsx | 15 ++++++--------- 2 files changed, 7 insertions(+), 14 deletions(-) diff --git a/frontend/src/container/QueryBuilder/components/Query/Query.tsx b/frontend/src/container/QueryBuilder/components/Query/Query.tsx index e35086f3b0..23c5f09528 100644 --- a/frontend/src/container/QueryBuilder/components/Query/Query.tsx +++ b/frontend/src/container/QueryBuilder/components/Query/Query.tsx @@ -82,10 +82,6 @@ export const Query = memo(function Query({ entityVersion: version, }); - const isLogsExplorerPage = useMemo(() => pathname === ROUTES.LOGS_EXPLORER, [ - pathname, - ]); - const handleChangeAggregateEvery = useCallback( (value: IBuilderQuery['stepInterval']) => { handleChangeQueryData('stepInterval', value); @@ -457,7 +453,7 @@ export const Query = memo(function Query({ )} - {isLogsExplorerPage ? ( + {query.dataSource === DataSource.LOGS ? 
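+ // annotation (not part of the original patch): this branch now keys off the query's own data source instead of the current route, so log-based queries outside the Logs Explorer (e.g. in dashboards) also get the new search component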
( (false); - const { pathname } = useLocation(); - const isLogsExplorerPage = useMemo(() => pathname === ROUTES.LOGS_EXPLORER, [ - pathname, + const isLogsDataSource = useMemo(() => query.dataSource === DataSource.LOGS, [ + query.dataSource, ]); const memoizedSearchParams = useMemo( @@ -235,7 +232,7 @@ function QueryBuilderSearchV2( }, { queryKey: [searchParams], - enabled: isQueryEnabled && !isLogsExplorerPage, + enabled: isQueryEnabled && !isLogsDataSource, }, ); @@ -250,7 +247,7 @@ function QueryBuilderSearchV2( }, { queryKey: [suggestionsParams], - enabled: isQueryEnabled && isLogsExplorerPage, + enabled: isQueryEnabled && isLogsDataSource, }, ); @@ -651,7 +648,7 @@ function QueryBuilderSearchV2( useEffect(() => { if (currentState === DropdownState.ATTRIBUTE_KEY) { const { tagKey } = getTagToken(searchValue); - if (isLogsExplorerPage) { + if (isLogsDataSource) { // add the user typed option in the dropdown to select that and move ahead irrespective of the matches and all setDropdownOptions([ ...(!isEmpty(tagKey) && @@ -756,7 +753,7 @@ function QueryBuilderSearchV2( currentFilterItem?.key?.dataType, currentState, data?.payload?.attributeKeys, - isLogsExplorerPage, + isLogsDataSource, searchValue, suggestionsData?.payload?.attributes, ]); From 9feee6ff46daf602d6ce3917aefc12bb6efb42a6 Mon Sep 17 00:00:00 2001 From: Srikanth Chekuri Date: Fri, 3 Jan 2025 09:36:52 +0530 Subject: [PATCH 11/13] chore: add option skip web (#6736) --- ee/query-service/app/server.go | 9 ++++++--- ee/query-service/main.go | 6 ++++-- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/ee/query-service/app/server.go b/ee/query-service/app/server.go index 5f36aaaf66..afd9dad4c5 100644 --- a/ee/query-service/app/server.go +++ b/ee/query-service/app/server.go @@ -79,6 +79,7 @@ type ServerOptions struct { GatewayUrl string UseLogsNewSchema bool UseTraceNewSchema bool + SkipWebFrontend bool } // Server runs HTTP api service @@ -383,9 +384,11 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web *web.Web) (* handler = handlers.CompressHandler(handler) - err := web.AddToRouter(r) - if err != nil { - return nil, err + if !s.serverOptions.SkipWebFrontend { + err := web.AddToRouter(r) + if err != nil { + return nil, err + } } return &http.Server{ diff --git a/ee/query-service/main.go b/ee/query-service/main.go index 5fbbca1e18..3514376213 100644 --- a/ee/query-service/main.go +++ b/ee/query-service/main.go @@ -108,6 +108,7 @@ func main() { var dialTimeout time.Duration var gatewayUrl string var useLicensesV3 bool + var skipWebFrontend bool flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs") flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces") @@ -125,7 +126,7 @@ func main() { flag.StringVar(&cluster, "cluster", "cluster", "(cluster name - defaults to 'cluster')") flag.StringVar(&gatewayUrl, "gateway-url", "", "(url to the gateway)") flag.BoolVar(&useLicensesV3, "use-licenses-v3", false, "use licenses_v3 schema for licenses") - + flag.BoolVar(&skipWebFrontend, "skip-web-frontend", false, "skip web frontend") flag.Parse() loggerMgr := initZapLog(enableQueryServiceLogOTLPExport) @@ -148,7 +149,7 @@ func main() { } web, err := signozweb.New(zap.L(), config.Web) - if err != nil { + if err != nil && !skipWebFrontend { zap.L().Fatal("Failed to create web", zap.Error(err)) } @@ -169,6 +170,7 @@ func main() { GatewayUrl: gatewayUrl, UseLogsNewSchema: useLogsNewSchema, UseTraceNewSchema: useTraceNewSchema, + SkipWebFrontend: 
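+ // set from the new -skip-web-frontend flag above; when true, createPublicServer skips registering the static web routes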
skipWebFrontend, } // Read the jwt secret key From c5938b6c10ffa5c66cfc2e3cd3e74fa24c640c2f Mon Sep 17 00:00:00 2001 From: aniketio-ctrl Date: Fri, 3 Jan 2025 10:52:55 +0530 Subject: [PATCH 12/13] fix: added backticks for tags containing dots while generating query … (#6727) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/query-service/app/http_handler.go | 2 +- .../app/metrics/v3/query_builder.go | 5 + .../app/metrics/v3/query_builder_test.go | 160 ++++++++++++++++++ .../app/metrics/v4/cumulative/table_test.go | 55 ++++++ .../metrics/v4/cumulative/timeseries_test.go | 55 ++++++ .../app/metrics/v4/delta/table_test.go | 48 ++++++ .../app/metrics/v4/delta/time_series_test.go | 55 ++++++ .../app/metrics/v4/helpers/clauses.go | 7 + .../app/metrics/v4/helpers/sub_query.go | 4 +- .../app/metrics/v4/query_builder_test.go | 69 ++++++++ .../app/queryBuilder/query_builder_test.go | 130 ++++++++++++++ pkg/query-service/utils/format.go | 16 ++ 12 files changed, 603 insertions(+), 3 deletions(-) diff --git a/pkg/query-service/app/http_handler.go b/pkg/query-service/app/http_handler.go index a1833c534b..4161a665ce 100644 --- a/pkg/query-service/app/http_handler.go +++ b/pkg/query-service/app/http_handler.go @@ -4196,7 +4196,7 @@ func (aH *APIHandler) autocompleteAggregateAttributes(w http.ResponseWriter, r * switch req.DataSource { case v3.DataSourceMetrics: - response, err = aH.reader.GetMetricAggregateAttributes(r.Context(), req, true) + response, err = aH.reader.GetMetricAggregateAttributes(r.Context(), req, false) case v3.DataSourceLogs: response, err = aH.reader.GetLogAggregateAttributes(r.Context(), req) case v3.DataSourceTraces: diff --git a/pkg/query-service/app/metrics/v3/query_builder.go b/pkg/query-service/app/metrics/v3/query_builder.go index 25f5990148..9013b0461a 100644 --- a/pkg/query-service/app/metrics/v3/query_builder.go +++ b/pkg/query-service/app/metrics/v3/query_builder.go @@ -217,6 +217,7 @@ func buildMetricQuery(start, end, step int64, mq *v3.BuilderQuery) (string, erro // groupingSets returns a string of comma separated tags for group by clause // `ts` is always added to the group by clause func groupingSets(tags ...string) string { + tags = utils.AddBackTickToFormatTags(tags...) withTs := append(tags, "ts") return strings.Join(withTs, ", ") } @@ -224,12 +225,14 @@ func groupingSets(tags ...string) string { // groupBy returns a string of comma separated tags for group by clause // `ts` is always added to the group by clause func groupBy(tags ...string) string { + tags = utils.AddBackTickToFormatTags(tags...) tags = append(tags, "ts") return strings.Join(tags, ",") } // groupSelect returns a string of comma separated tags for select clause func groupSelect(tags ...string) string { + tags = utils.AddBackTickToFormatTags(tags...)
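+ // illustrative, not part of the original patch: AddBackTickToFormatTags only quotes names containing a dot, e.g. ["os.type", "state"] -> ["`os.type`", "state"], keeping dotted attribute names valid in the generated ClickHouse SQL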
groupTags := strings.Join(tags, ",") if len(tags) != 0 { groupTags += ", " @@ -270,11 +273,13 @@ func orderBy(items []v3.OrderBy, tags []string) string { for _, item := range items { if item.ColumnName == tag { found = true + item.ColumnName = utils.AddBackTickToFormatTag(item.ColumnName) orderBy = append(orderBy, fmt.Sprintf("%s %s", item.ColumnName, item.Order)) break } } if !found { + tag = utils.AddBackTickToFormatTag(tag) orderBy = append(orderBy, fmt.Sprintf("%s ASC", tag)) } } diff --git a/pkg/query-service/app/metrics/v3/query_builder_test.go b/pkg/query-service/app/metrics/v3/query_builder_test.go index 5b85036007..78d4901234 100644 --- a/pkg/query-service/app/metrics/v3/query_builder_test.go +++ b/pkg/query-service/app/metrics/v3/query_builder_test.go @@ -380,3 +380,163 @@ func TestBuildQueryAdjustedTimes(t *testing.T) { }) } } + +func TestBuildQueryWithDotInMetricAndAttributes(t *testing.T) { + cases := []struct { + name string + params *v3.QueryRangeParamsV3 + expected string + }{ + { + name: "TestBuildQueryWithDotInMetricAndAttributes with dot in metric and attributes", + params: &v3.QueryRangeParamsV3{ + Start: 1735036101000, + End: 1735637901000, + Step: 60, + Variables: map[string]interface{}{ + "SIGNOZ_START_TIME": 1735034992000, + "SIGNOZ_END_TIME": 1735036792000, + }, + FormatForWeb: false, + CompositeQuery: &v3.CompositeQuery{ + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeValue, + FillGaps: false, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateOperator: v3.AggregateOperatorAvg, + AggregateAttribute: v3.AttributeKey{ + Key: "system.memory.usage", + DataType: v3.AttributeKeyDataTypeFloat64, + Type: v3.AttributeKeyType("Gauge"), + IsColumn: true, + }, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "os.type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + Operator: v3.FilterOperatorEqual, + Value: "linux", + }, + }, + }, + Expression: "A", + Disabled: false, + StepInterval: 60, + OrderBy: []v3.OrderBy{ + { + ColumnName: "os.type", + Order: v3.DirectionAsc, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "os.type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + }, + Legend: "", + ReduceTo: v3.ReduceToOperatorAvg, + Having: []v3.Having{}, + }, + }, + }, + }, + expected: "SELECT *, now() AS ts FROM (SELECT avgIf(value, toUnixTimestamp(ts) != 0) as value, anyIf(ts, toUnixTimestamp(ts) != 0) AS timestamp FROM (SELECT `os.type`, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'os.type') as `os.type`, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'system.memory.usage' AND temporality = '' AND unix_milli >= 1734998400000 AND unix_milli < 1735637880000 AND JSONExtractString(labels, 'os.type') = 'linux') as filtered_time_series USING fingerprint WHERE metric_name = 'system.memory.usage' AND unix_milli >= 1735036080000 AND unix_milli < 1735637880000 GROUP BY `os.type`, ts ORDER BY `os.type` asc, ts) )", + }, + { + name: "TestBuildQueryWithDotInMetricAndAttributes with dot in metric and attributes 
with rate_avg aggregation", + params: &v3.QueryRangeParamsV3{ + Start: 1735036101000, + End: 1735637901000, + Step: 60, + Variables: map[string]interface{}{ + "SIGNOZ_START_TIME": 1735034992000, + "SIGNOZ_END_TIME": 1735036792000, + }, + FormatForWeb: false, + CompositeQuery: &v3.CompositeQuery{ + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeValue, + FillGaps: false, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateOperator: v3.AggregateOperatorRateAvg, + AggregateAttribute: v3.AttributeKey{ + Key: "system.memory.usage", + DataType: v3.AttributeKeyDataTypeFloat64, + Type: v3.AttributeKeyType("Gauge"), + IsColumn: true, + }, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "os.type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + Operator: v3.FilterOperatorEqual, + Value: "linux", + }, + }, + }, + Expression: "A", + Disabled: false, + StepInterval: 60, + OrderBy: []v3.OrderBy{ + { + ColumnName: "os.type", + Order: v3.DirectionAsc, + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "os.type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + }, + Legend: "", + ReduceTo: v3.ReduceToOperatorAvg, + Having: []v3.Having{}, + }, + }, + }, + }, + expected: "SELECT *, now() AS ts FROM (SELECT avgIf(value, toUnixTimestamp(ts) != 0) as value, anyIf(ts, toUnixTimestamp(ts) != 0) AS timestamp FROM (SELECT `os.type`, ts, If((value - lagInFrame(value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (value - lagInFrame(value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as value FROM(SELECT `os.type`, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'os.type') as `os.type`, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'system.memory.usage' AND temporality = '' AND unix_milli >= 1734998400000 AND unix_milli < 1735637880000 AND JSONExtractString(labels, 'os.type') = 'linux') as filtered_time_series USING fingerprint WHERE metric_name = 'system.memory.usage' AND unix_milli >= 1735036020000 AND unix_milli < 1735637880000 GROUP BY `os.type`, ts ORDER BY `os.type` asc, ts) WINDOW rate_window as (PARTITION BY `os.type` ORDER BY `os.type`, ts) ) )", + }, + } + for _, testCase := range cases { + t.Run(testCase.name, func(t *testing.T) { + q := testCase.params + query, err := PrepareMetricQuery(q.Start, q.End, q.CompositeQuery.QueryType, q.CompositeQuery.PanelType, q.CompositeQuery.BuilderQueries["A"], Options{PreferRPM: false}) + require.NoError(t, err) + + require.Contains(t, query, testCase.expected) + }) + } +} diff --git a/pkg/query-service/app/metrics/v4/cumulative/table_test.go b/pkg/query-service/app/metrics/v4/cumulative/table_test.go index 70613c9b44..c1a9042b8a 100644 --- a/pkg/query-service/app/metrics/v4/cumulative/table_test.go +++ b/pkg/query-service/app/metrics/v4/cumulative/table_test.go @@ -95,6 +95,61 @@ func TestPrepareTableQuery(t *testing.T) { end: 1701796780000, expectedQueryContains: "SELECT service_name, 
ts, sum(per_series_value) as value FROM (SELECT service_name, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'http_requests' AND temporality = 'Cumulative' AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND like(JSONExtractString(labels, 'service_name'), '%payment_service%')) as filtered_time_series USING fingerprint WHERE metric_name = 'http_requests' AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC", }, + { + name: "test time aggregation = avg, space aggregation = avg, temporality = unspecified, testing metrics and attribute name with dot", + builderQuery: &v3.BuilderQuery{ + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateOperator: v3.AggregateOperatorAvg, + AggregateAttribute: v3.AttributeKey{ + Key: "system.memory.usage", + DataType: v3.AttributeKeyDataTypeFloat64, + Type: v3.AttributeKeyType("Gauge"), + IsColumn: true, + }, + Temporality: v3.Unspecified, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationAvg, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "host.name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + Operator: v3.FilterOperatorEqual, + Value: "signoz-host", + }, + }, + }, + Expression: "A", + Disabled: false, + StepInterval: 60, + OrderBy: []v3.OrderBy{ + { + ColumnName: "state", + Order: v3.DirectionDesc, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "state", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + }, + Legend: "", + ReduceTo: v3.ReduceToOperatorAvg, + Having: []v3.Having{}, + }, + start: 1735295140000, + end: 1735554340000, + expectedQueryContains: "SELECT state, ts, avg(per_series_value) as value FROM (SELECT fingerprint, any(state) as state, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum) / sum(count) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'state') as state, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'system.memory.usage' AND temporality = 'Unspecified' AND unix_milli >= 1735257600000 AND unix_milli < 1735554340000 AND JSONExtractString(labels, 'host.name') = 'signoz-host') as filtered_time_series USING fingerprint WHERE metric_name = 'system.memory.usage' AND unix_milli >= 1735295140000 AND unix_milli < 1735554340000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY state, ts ORDER BY state desc, ts ASC", + }, } for _, testCase := range testCases { diff 
--git a/pkg/query-service/app/metrics/v4/cumulative/timeseries_test.go b/pkg/query-service/app/metrics/v4/cumulative/timeseries_test.go index ce47e10b10..68930ea841 100644 --- a/pkg/query-service/app/metrics/v4/cumulative/timeseries_test.go +++ b/pkg/query-service/app/metrics/v4/cumulative/timeseries_test.go @@ -212,6 +212,61 @@ func TestPrepareTimeseriesQuery(t *testing.T) { end: 1701796780000, expectedQueryContains: "SELECT service_name, ts, sum(per_series_value) as value FROM (SELECT service_name, ts, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, nan, If((ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window) >= 86400, nan, (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDate('1970-01-01')) OVER rate_window))) as per_series_value FROM (SELECT fingerprint, any(service_name) as service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'http_requests' AND temporality = 'Cumulative' AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND like(JSONExtractString(labels, 'service_name'), '%payment_service%')) as filtered_time_series USING fingerprint WHERE metric_name = 'http_requests' AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WINDOW rate_window as (PARTITION BY fingerprint ORDER BY fingerprint, ts)) WHERE isNaN(per_series_value) = 0 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC", }, + { + name: "test time aggregation = avg, space aggregation = avg, temporality = unspecified, testing metrics and attribute name with dot", + builderQuery: &v3.BuilderQuery{ + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateOperator: v3.AggregateOperatorAvg, + AggregateAttribute: v3.AttributeKey{ + Key: "system.memory.usage", + DataType: v3.AttributeKeyDataTypeFloat64, + Type: v3.AttributeKeyType("Gauge"), + IsColumn: true, + }, + Temporality: v3.Unspecified, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationAvg, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "host.name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + Operator: v3.FilterOperatorEqual, + Value: "signoz-host", + }, + }, + }, + Expression: "A", + Disabled: false, + StepInterval: 60, + OrderBy: []v3.OrderBy{ + { + ColumnName: "state", + Order: v3.DirectionDesc, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "state", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + }, + Legend: "", + ReduceTo: v3.ReduceToOperatorAvg, + Having: []v3.Having{}, + }, + start: 1735295140000, + end: 1735554340000, + expectedQueryContains: "SELECT state, ts, avg(per_series_value) as value FROM (SELECT fingerprint, any(state) as state, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum) / sum(count) as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'state') as state, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'system.memory.usage' AND temporality = 'Unspecified' AND 
unix_milli >= 1735257600000 AND unix_milli < 1735554340000 AND JSONExtractString(labels, 'host.name') = 'signoz-host') as filtered_time_series USING fingerprint WHERE metric_name = 'system.memory.usage' AND unix_milli >= 1735295140000 AND unix_milli < 1735554340000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY state, ts ORDER BY state desc, ts ASC", + }, } for _, testCase := range testCases { diff --git a/pkg/query-service/app/metrics/v4/delta/table_test.go b/pkg/query-service/app/metrics/v4/delta/table_test.go index 40b9297dc2..59687c3429 100644 --- a/pkg/query-service/app/metrics/v4/delta/table_test.go +++ b/pkg/query-service/app/metrics/v4/delta/table_test.go @@ -97,6 +97,54 @@ func TestPrepareTableQuery(t *testing.T) { end: 1701796780000, expectedQueryContains: "SELECT service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'http_requests' AND temporality = 'Delta' AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND like(JSONExtractString(labels, 'service_name'), '%payment_service%')) as filtered_time_series USING fingerprint WHERE metric_name = 'http_requests' AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC", }, + { + name: "test time aggregation = rate, space aggregation = avg, temporality = delta, testing metrics and attribute name with dot", + builderQuery: &v3.BuilderQuery{ + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateOperator: v3.AggregateOperatorRate, + AggregateAttribute: v3.AttributeKey{ + Key: "signoz.latency.sum", + DataType: v3.AttributeKeyDataTypeFloat64, + Type: v3.AttributeKeyType("Sum"), + IsColumn: true, + }, + Temporality: v3.Delta, + TimeAggregation: v3.TimeAggregationRate, + SpaceAggregation: v3.SpaceAggregationAvg, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "host.name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + Operator: v3.FilterOperatorEqual, + Value: "4f6ec470feea", + }, + }, + }, + Expression: "A", + Disabled: false, + StepInterval: 60, + OrderBy: []v3.OrderBy{ + { + ColumnName: "status.code", + Order: v3.DirectionAsc, + }, + }, + GroupBy: []v3.AttributeKey{}, + Legend: "", + ReduceTo: v3.ReduceToOperatorLast, + Having: []v3.Having{}, + }, + start: 1701794980000, + end: 1701796780000, + expectedQueryContains: "SELECT ts, avg(per_series_value) as value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value)/60 as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'signoz.latency.sum' AND temporality = 'Delta' AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000 AND JSONExtractString(labels, 'host.name') = '4f6ec470feea') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz.latency.sum' AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY ts ORDER BY ts ASC", + }, } for _, testCase := range testCases { diff --git 
a/pkg/query-service/app/metrics/v4/delta/time_series_test.go b/pkg/query-service/app/metrics/v4/delta/time_series_test.go index 5b6d71b3de..f55dfc8b08 100644 --- a/pkg/query-service/app/metrics/v4/delta/time_series_test.go +++ b/pkg/query-service/app/metrics/v4/delta/time_series_test.go @@ -246,6 +246,61 @@ func TestPrepareTimeseriesQuery(t *testing.T) { end: 1701796780000, expectedQueryContains: "SELECT service_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, quantilesDDMerge(0.01, 0.990000)(sketch)[1] as value FROM signoz_metrics.distributed_exp_hist INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'service_name') as service_name, fingerprint FROM signoz_metrics.time_series_v4 WHERE metric_name = 'signoz_latency' AND temporality = 'Delta' AND unix_milli >= 1701792000000 AND unix_milli < 1701796780000) as filtered_time_series USING fingerprint WHERE metric_name = 'signoz_latency' AND unix_milli >= 1701794980000 AND unix_milli < 1701796780000 GROUP BY service_name, ts ORDER BY service_name ASC, ts ASC", }, + { + name: "test time aggregation = rate, space aggregation = max, temporality = delta, testing metrics and attribute name with dot", + builderQuery: &v3.BuilderQuery{ + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateOperator: v3.AggregateOperatorRate, + AggregateAttribute: v3.AttributeKey{ + Key: "signoz.latency.sum", + DataType: v3.AttributeKeyDataTypeFloat64, + Type: v3.AttributeKeyType("Sum"), + IsColumn: true, + }, + Temporality: v3.Delta, + TimeAggregation: v3.TimeAggregationRate, + SpaceAggregation: v3.SpaceAggregationMax, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "host_name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + Operator: v3.FilterOperatorEqual, + Value: "4f6ec470feea", + }, + }, + }, + Expression: "A", + Disabled: false, + StepInterval: 60, + OrderBy: []v3.OrderBy{ + { + ColumnName: "status.code", + Order: v3.DirectionAsc, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "host.name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + }, + Legend: "", + ReduceTo: v3.ReduceToOperatorAvg, + Having: []v3.Having{}, + }, + start: 1735036101000, + end: 1735637901000, + expectedQueryContains: "SELECT `host.name`, ts, max(per_series_value) as value FROM (SELECT fingerprint, any(`host.name`) as `host.name`, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(sum)/60 as per_series_value FROM signoz_metrics.distributed_samples_v4_agg_5m INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'host.name') as `host.name`, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'signoz.latency.sum' AND temporality = 'Delta' AND unix_milli >= 1734998400000 AND unix_milli < 1735637901000 AND JSONExtractString(labels, 'host_name') = '4f6ec470feea') as filtered_time_series USING fingerprint WHERE metric_name = 'signoz.latency.sum' AND unix_milli >= 1735036101000 AND unix_milli < 1735637901000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY `host.name`, ts ORDER BY `host.name` ASC, ts ASC", + }, } for _, testCase := range testCases { diff --git a/pkg/query-service/app/metrics/v4/helpers/clauses.go b/pkg/query-service/app/metrics/v4/helpers/clauses.go index e99951df69..3787b40a33 100644 --- a/pkg/query-service/app/metrics/v4/helpers/clauses.go +++ 
b/pkg/query-service/app/metrics/v4/helpers/clauses.go @@ -2,6 +2,7 @@ package helpers import ( "fmt" + "go.signoz.io/signoz/pkg/query-service/utils" "strings" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" @@ -18,6 +19,7 @@ func groupingSets(tags ...string) string { func GroupingSetsByAttributeKeyTags(tags ...v3.AttributeKey) string { groupTags := []string{} for _, tag := range tags { + tag.Key = utils.AddBackTickToFormatTag(tag.Key) groupTags = append(groupTags, tag.Key) } return groupingSets(groupTags...) @@ -27,6 +29,7 @@ func GroupingSetsByAttributeKeyTags(tags ...v3.AttributeKey) string { func GroupByAttributeKeyTags(tags ...v3.AttributeKey) string { groupTags := []string{} for _, tag := range tags { + tag.Key = utils.AddBackTickToFormatTag(tag.Key) groupTags = append(groupTags, tag.Key) } groupTags = append(groupTags, "ts") @@ -42,11 +45,13 @@ func OrderByAttributeKeyTags(items []v3.OrderBy, tags []v3.AttributeKey) string for _, item := range items { if item.ColumnName == tag.Key { found = true + item.ColumnName = utils.AddBackTickToFormatTag(item.ColumnName) orderBy = append(orderBy, fmt.Sprintf("%s %s", item.ColumnName, item.Order)) break } } if !found { + tag.Key = utils.AddBackTickToFormatTag(tag.Key) orderBy = append(orderBy, fmt.Sprintf("%s ASC", tag.Key)) } } @@ -59,6 +64,7 @@ func OrderByAttributeKeyTags(items []v3.OrderBy, tags []v3.AttributeKey) string func SelectLabelsAny(tags []v3.AttributeKey) string { var selectLabelsAny []string for _, tag := range tags { + tag.Key = utils.AddBackTickToFormatTag(tag.Key) selectLabelsAny = append(selectLabelsAny, fmt.Sprintf("any(%s) as %s,", tag.Key, tag.Key)) } return strings.Join(selectLabelsAny, " ") @@ -67,6 +73,7 @@ func SelectLabelsAny(tags []v3.AttributeKey) string { func SelectLabels(tags []v3.AttributeKey) string { var selectLabels []string for _, tag := range tags { + tag.Key = utils.AddBackTickToFormatTag(tag.Key) selectLabels = append(selectLabels, fmt.Sprintf("%s,", tag.Key)) } return strings.Join(selectLabels, " ") diff --git a/pkg/query-service/app/metrics/v4/helpers/sub_query.go b/pkg/query-service/app/metrics/v4/helpers/sub_query.go index 3ce933661a..793cf2c375 100644 --- a/pkg/query-service/app/metrics/v4/helpers/sub_query.go +++ b/pkg/query-service/app/metrics/v4/helpers/sub_query.go @@ -322,7 +322,7 @@ func PrepareTimeseriesFilterQuery(start, end int64, mq *v3.BuilderQuery) (string var selectLabels string for _, tag := range groupTags { - selectLabels += fmt.Sprintf("JSONExtractString(labels, '%s') as %s, ", tag.Key, tag.Key) + selectLabels += fmt.Sprintf("JSONExtractString(labels, '%s') as %s, ", tag.Key, utils.AddBackTickToFormatTag(tag.Key)) } // The table JOIN key always exists @@ -406,7 +406,7 @@ func PrepareTimeseriesFilterQueryV3(start, end int64, mq *v3.BuilderQuery) (stri selectLabels += "labels, " } else { for _, tag := range groupTags { - selectLabels += fmt.Sprintf("JSONExtractString(labels, '%s') as %s, ", tag.Key, tag.Key) + selectLabels += fmt.Sprintf("JSONExtractString(labels, '%s') as %s, ", tag.Key, utils.AddBackTickToFormatTag(tag.Key)) } } diff --git a/pkg/query-service/app/metrics/v4/query_builder_test.go b/pkg/query-service/app/metrics/v4/query_builder_test.go index 2fc83a9e1f..be1f5f65ba 100644 --- a/pkg/query-service/app/metrics/v4/query_builder_test.go +++ b/pkg/query-service/app/metrics/v4/query_builder_test.go @@ -533,6 +533,75 @@ func TestPrepareMetricQueryGauge(t *testing.T) { }, expectedQueryContains: "SELECT host_name, ts, sum(per_series_value) as value FROM (SELECT fingerprint, 
any(host_name) as host_name, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'host_name') as host_name, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'system_cpu_usage' AND temporality = 'Unspecified' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000) as filtered_time_series USING fingerprint WHERE metric_name = 'system_cpu_usage' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY host_name, ts ORDER BY host_name ASC, ts ASC", }, + { + name: "test gauge query with multiple group by with metric and attribute name containing dot", + builderQuery: &v3.BuilderQuery{ + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateOperator: v3.AggregateOperatorMax, + AggregateAttribute: v3.AttributeKey{ + Key: "system.memory.usage", + DataType: v3.AttributeKeyDataTypeFloat64, + Type: v3.AttributeKeyType("Gauge"), + IsColumn: true, + }, + Temporality: v3.Unspecified, + TimeAggregation: v3.TimeAggregationMax, + SpaceAggregation: v3.SpaceAggregationMax, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "host.name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + Operator: v3.FilterOperatorEqual, + Value: "signoz-host", + }, + }, + }, + Expression: "A", + Disabled: false, + StepInterval: 60, + OrderBy: []v3.OrderBy{ + { + ColumnName: "os.type", + Order: v3.DirectionDesc, + }, + { + ColumnName: "state", + Order: v3.DirectionAsc, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "os.type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + { + Key: "state", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + { + Key: "host.name", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + }, + Legend: "", + ReduceTo: v3.ReduceToOperatorAvg, + Having: []v3.Having{}, + }, + expectedQueryContains: "SELECT `os.type`, state, `host.name`, ts, max(per_series_value) as value FROM (SELECT fingerprint, any(`os.type`) as `os.type`, any(state) as state, any(`host.name`) as `host.name`, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, max(value) as per_series_value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'os.type') as `os.type`, JSONExtractString(labels, 'state') as state, JSONExtractString(labels, 'host.name') as `host.name`, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'system.memory.usage' AND temporality = 'Unspecified' AND unix_milli >= 1650931200000 AND unix_milli < 1651078380000 AND JSONExtractString(labels, 'host.name') = 'signoz-host') as filtered_time_series USING fingerprint WHERE metric_name = 'system.memory.usage' AND unix_milli >= 1650991980000 AND unix_milli < 1651078380000 GROUP BY fingerprint, ts ORDER BY fingerprint, ts) WHERE isNaN(per_series_value) = 0 GROUP BY `os.type`, state, `host.name`, ts ORDER BY `os.type` desc, state asc, `host.name` ASC, ts ASC", + }, } for _, testCase := range testCases { diff --git a/pkg/query-service/app/queryBuilder/query_builder_test.go b/pkg/query-service/app/queryBuilder/query_builder_test.go index 
36365060c3..1ee73cd095 100644 --- a/pkg/query-service/app/queryBuilder/query_builder_test.go +++ b/pkg/query-service/app/queryBuilder/query_builder_test.go @@ -216,6 +216,136 @@ func TestBuildQueryWithThreeOrMoreQueriesRefAndFormula(t *testing.T) { // So(queries["F5"], ShouldContainSubstring, "SELECT A.ts as ts, ((A.value - B.value) / B.value) * 100") // So(strings.Count(queries["F5"], " ON "), ShouldEqual, 1) }) + t.Run("TestBuildQueryWithDotMetricNameAndAttribute", func(t *testing.T) { + q := &v3.QueryRangeParamsV3{ + Start: 1735036101000, + End: 1735637901000, + Step: 60, + Variables: map[string]interface{}{ + "SIGNOZ_START_TIME": 1735034992000, + "SIGNOZ_END_TIME": 1735036792000, + }, + FormatForWeb: false, + CompositeQuery: &v3.CompositeQuery{ + QueryType: v3.QueryTypeBuilder, + PanelType: v3.PanelTypeGraph, + FillGaps: false, + BuilderQueries: map[string]*v3.BuilderQuery{ + "A": { + QueryName: "A", + DataSource: v3.DataSourceMetrics, + AggregateOperator: v3.AggregateOperatorAvg, + AggregateAttribute: v3.AttributeKey{ + Key: "system.memory.usage", + DataType: v3.AttributeKeyDataTypeFloat64, + Type: v3.AttributeKeyType("Gauge"), + IsColumn: true, + }, + TimeAggregation: v3.TimeAggregationAvg, + SpaceAggregation: v3.SpaceAggregationSum, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{ + { + Key: v3.AttributeKey{ + Key: "os.type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + Operator: v3.FilterOperatorEqual, + Value: "linux", + }, + }, + }, + Expression: "A", + Disabled: true, + StepInterval: 60, + OrderBy: []v3.OrderBy{ + { + ColumnName: "#SIGNOZ_VALUE", + Order: v3.DirectionAsc, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "os.type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + }, + Legend: "", + ReduceTo: v3.ReduceToOperatorAvg, + Having: []v3.Having{}, + }, + "B": { + QueryName: "B", + DataSource: v3.DataSourceMetrics, + AggregateOperator: v3.AggregateOperatorSum, + AggregateAttribute: v3.AttributeKey{ + Key: "system.network.io", + DataType: v3.AttributeKeyDataTypeFloat64, + Type: v3.AttributeKeyType("Sum"), + IsColumn: true, + }, + TimeAggregation: v3.TimeAggregationIncrease, + SpaceAggregation: v3.SpaceAggregationSum, + Filters: &v3.FilterSet{ + Operator: "AND", + Items: []v3.FilterItem{}, + }, + Expression: "B", + Disabled: true, + StepInterval: 60, + OrderBy: []v3.OrderBy{ + { + Key: "os.type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + }, + GroupBy: []v3.AttributeKey{ + { + Key: "os.type", + DataType: v3.AttributeKeyDataTypeString, + Type: v3.AttributeKeyTypeTag, + IsColumn: false, + }, + }, + Legend: "", + ReduceTo: v3.ReduceToOperatorAvg, + Having: []v3.Having{ + { + ColumnName: "SUM(system.network.io)", + Operator: v3.HavingOperatorGreaterThan, + Value: 4, + }, + }, + }, + "F1": { + QueryName: "F1", + Expression: "A + B", + Disabled: false, + Legend: "", + OrderBy: []v3.OrderBy{}, + Limit: 2, + }, + }, + }, + } + qbOptions := QueryBuilderOptions{ + BuildMetricQuery: metricsv3.PrepareMetricQuery, + } + fm := featureManager.StartManager() + qb := NewQueryBuilder(qbOptions, fm) + + queries, err := qb.PrepareQueries(q) + require.Contains(t, queries["F1"], "SELECT A.`os.type` as `os.type`, A.`ts` as `ts`, A.value + B.value as value FROM (SELECT `os.type`, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, avg(value) as value FROM 
signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'os.type') as `os.type`, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'system.memory.usage' AND temporality = '' AND unix_milli >= 1734998400000 AND unix_milli < 1735637880000 AND JSONExtractString(labels, 'os.type') = 'linux') as filtered_time_series USING fingerprint WHERE metric_name = 'system.memory.usage' AND unix_milli >= 1735036080000 AND unix_milli < 1735637880000 GROUP BY `os.type`, ts ORDER BY `os.type` ASC, ts) as A INNER JOIN (SELECT * FROM (SELECT `os.type`, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), INTERVAL 60 SECOND) as ts, sum(value) as value FROM signoz_metrics.distributed_samples_v4 INNER JOIN (SELECT DISTINCT JSONExtractString(labels, 'os.type') as `os.type`, fingerprint FROM signoz_metrics.time_series_v4_1day WHERE metric_name = 'system.network.io' AND temporality = '' AND unix_milli >= 1734998400000 AND unix_milli < 1735637880000) as filtered_time_series USING fingerprint WHERE metric_name = 'system.network.io' AND unix_milli >= 1735036020000 AND unix_milli < 1735637880000 GROUP BY `os.type`, ts ORDER BY `os.type` ASC, ts) HAVING value > 4) as B ON A.`os.type` = B.`os.type` AND A.`ts` = B.`ts`") + require.NoError(t, err) + + }) } func TestDeltaQueryBuilder(t *testing.T) { diff --git a/pkg/query-service/utils/format.go b/pkg/query-service/utils/format.go index 3533f803a5..3a34d3d80f 100644 --- a/pkg/query-service/utils/format.go +++ b/pkg/query-service/utils/format.go @@ -230,6 +230,22 @@ func ClickHouseFormattedValue(v interface{}) string { } } +func AddBackTickToFormatTag(str string) string { + if strings.Contains(str, ".") { + return "`" + str + "`" + } else { + return str + } +} + +func AddBackTickToFormatTags(inputs ...string) []string { + result := make([]string, len(inputs)) + for i, str := range inputs { + result[i] = AddBackTickToFormatTag(str) + } + return result +} + func getPointerValue(v interface{}) interface{} { switch x := v.(type) { case *uint8: From 4967696da856349257c92d54b5b64bdb8a925d47 Mon Sep 17 00:00:00 2001 From: Vikrant Gupta Date: Sat, 4 Jan 2025 01:28:54 +0530 Subject: [PATCH 13/13] feat: added new cache package for query service (#6733) * feat: added new cache package for query service * feat: handle type checking for inmemory * feat: some copy corrections * feat: added inmemory test cases * chore: some renaming * feat: added redis handling * chore: add redis tests * feat(cache): refactor the code * feat(cache): refactor the code * feat(cache): added defaults for redis config * feat(cache): update makefile to run all tetss * feat(cache): update tests and docs * feat(cache): update tests and docs * feat(cache): handle signoz web flag * feat(cache): handle signoz web flag * feat(cache): handle signoz web flag --- Makefile | 2 +- conf/defaults.yaml | 23 +- ee/query-service/app/server.go | 6 +- ee/query-service/main.go | 11 +- pkg/cache/cache.go | 71 ++++++ pkg/cache/config.go | 49 +++++ pkg/cache/strategy/memory/memory.go | 96 +++++++++ pkg/cache/strategy/memory/memory_test.go | 264 +++++++++++++++++++++++ pkg/cache/strategy/redis/redis.go | 120 +++++++++++ pkg/cache/strategy/redis/redis_test.go | 139 ++++++++++++ pkg/config/config.go | 3 + pkg/config/config_test.go | 43 ++-- pkg/signoz/signoz.go | 37 ++++ 13 files changed, 830 insertions(+), 34 deletions(-) create mode 100644 pkg/cache/cache.go create mode 100644 pkg/cache/config.go create mode 100644 pkg/cache/strategy/memory/memory.go create mode 100644 
pkg/cache/strategy/memory/memory_test.go create mode 100644 pkg/cache/strategy/redis/redis.go create mode 100644 pkg/cache/strategy/redis/redis_test.go create mode 100644 pkg/signoz/signoz.go diff --git a/Makefile b/Makefile index c7dd0e4a9a..8e460b3042 100644 --- a/Makefile +++ b/Makefile @@ -190,4 +190,4 @@ check-no-ee-references: fi test: - go test ./pkg/query-service/... + go test ./pkg/... diff --git a/conf/defaults.yaml b/conf/defaults.yaml index dd571e89fb..9239005fdf 100644 --- a/conf/defaults.yaml +++ b/conf/defaults.yaml @@ -8,4 +8,25 @@ web: # The prefix to serve web on prefix: / # The directory containing the static build files. - directory: /etc/signoz/web \ No newline at end of file + directory: /etc/signoz/web + +##################### Cache ##################### +cache: + # specifies the caching provider to use. + provider: memory + # memory: Uses in-memory caching. + memory: + # Time-to-live for cache entries in memory. Specify the duration in ns + ttl: 60000000000 + # The interval at which the cache will be cleaned up + cleanupInterval: + # redis: Uses Redis as the caching backend. + redis: + # The hostname or IP address of the Redis server. + host: localhost + # The port on which the Redis server is running. Default is usually 6379. + port: 6379 + # The password for authenticating with the Redis server, if required. + password: + # The Redis database number to use + db: 0 \ No newline at end of file diff --git a/ee/query-service/app/server.go b/ee/query-service/app/server.go index afd9dad4c5..1b17ca43d0 100644 --- a/ee/query-service/app/server.go +++ b/ee/query-service/app/server.go @@ -32,6 +32,7 @@ import ( baseauth "go.signoz.io/signoz/pkg/query-service/auth" "go.signoz.io/signoz/pkg/query-service/migrate" v3 "go.signoz.io/signoz/pkg/query-service/model/v3" + "go.signoz.io/signoz/pkg/signoz" "go.signoz.io/signoz/pkg/web" licensepkg "go.signoz.io/signoz/ee/query-service/license" @@ -62,6 +63,7 @@ import ( const AppDbEngine = "sqlite" type ServerOptions struct { + SigNoz *signoz.SigNoz PromConfigPath string SkipTopLvlOpsPath string HTTPHostPort string @@ -109,7 +111,7 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status { } // NewServer creates and initializes Server -func NewServer(serverOptions *ServerOptions, web *web.Web) (*Server, error) { +func NewServer(serverOptions *ServerOptions) (*Server, error) { modelDao, err := dao.InitDao("sqlite", baseconst.RELATIONAL_DATASOURCE_PATH) if err != nil { @@ -291,7 +293,7 @@ func NewServer(serverOptions *ServerOptions, web *web.Web) (*Server, error) { usageManager: usageManager, } - httpServer, err := s.createPublicServer(apiHandler, web) + httpServer, err := s.createPublicServer(apiHandler, serverOptions.SigNoz.Web) if err != nil { return nil, err diff --git a/ee/query-service/main.go b/ee/query-service/main.go index 3514376213..f9713c22bd 100644 --- a/ee/query-service/main.go +++ b/ee/query-service/main.go @@ -20,7 +20,7 @@ import ( baseconst "go.signoz.io/signoz/pkg/query-service/constants" "go.signoz.io/signoz/pkg/query-service/migrate" "go.signoz.io/signoz/pkg/query-service/version" - signozweb "go.signoz.io/signoz/pkg/web" + "go.signoz.io/signoz/pkg/signoz" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -148,12 +148,13 @@ func main() { zap.L().Fatal("Failed to create config", zap.Error(err)) } - web, err := signozweb.New(zap.L(), config.Web) - if err != nil && !skipWebFrontend { - zap.L().Fatal("Failed to create web", zap.Error(err)) + signoz, err := signoz.New(config, 
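+ // annotation: signoz.New (introduced in this change) wires up shared components such as the web handler; the skip flag is forwarded so web-frontend setup can be bypassed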
+	if err != nil {
+		zap.L().Fatal("Failed to create signoz struct", zap.Error(err))
 	}
 
 	serverOptions := &app.ServerOptions{
+		SigNoz:            signoz,
 		HTTPHostPort:      baseconst.HTTPHostPort,
 		PromConfigPath:    promConfigPath,
 		SkipTopLvlOpsPath: skipTopLvlOpsPath,
@@ -188,7 +189,7 @@ func main() {
 		zap.L().Info("Migration successful")
 	}
 
-	server, err := app.NewServer(serverOptions, web)
+	server, err := app.NewServer(serverOptions)
 	if err != nil {
 		zap.L().Fatal("Failed to create server", zap.Error(err))
 	}
diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go
new file mode 100644
index 0000000000..c064883f96
--- /dev/null
+++ b/pkg/cache/cache.go
@@ -0,0 +1,71 @@
+package cache
+
+import (
+	"context"
+	"encoding"
+	"fmt"
+	"reflect"
+	"time"
+)
+
+// cacheable entity
+type CacheableEntity interface {
+	encoding.BinaryMarshaler
+	encoding.BinaryUnmarshaler
+}
+
+func WrapCacheableEntityErrors(rt reflect.Type, caller string) error {
+	if rt == nil {
+		return fmt.Errorf("%s: (nil)", caller)
+	}
+
+	if rt.Kind() != reflect.Pointer {
+		return fmt.Errorf("%s: (non-pointer \"%s\")", caller, rt.String())
+	}
+
+	return fmt.Errorf("%s: (nil \"%s\")", caller, rt.String())
+
+}
+
+// cache status
+type RetrieveStatus int
+
+const (
+	RetrieveStatusHit = RetrieveStatus(iota)
+	RetrieveStatusPartialHit
+	RetrieveStatusRangeMiss
+	RetrieveStatusKeyMiss
+	RetrieveStatusRevalidated
+
+	RetrieveStatusError
+)
+
+func (s RetrieveStatus) String() string {
+	switch s {
+	case RetrieveStatusHit:
+		return "hit"
+	case RetrieveStatusPartialHit:
+		return "partial hit"
+	case RetrieveStatusRangeMiss:
+		return "range miss"
+	case RetrieveStatusKeyMiss:
+		return "key miss"
+	case RetrieveStatusRevalidated:
+		return "revalidated"
+	case RetrieveStatusError:
+		return "error"
+	default:
+		return "unknown"
+	}
+}
+
+// cache interface
+type Cache interface {
+	Connect(ctx context.Context) error
+	Store(ctx context.Context, cacheKey string, data CacheableEntity, ttl time.Duration) error
+	Retrieve(ctx context.Context, cacheKey string, dest CacheableEntity, allowExpired bool) (RetrieveStatus, error)
+	SetTTL(ctx context.Context, cacheKey string, ttl time.Duration)
+	Remove(ctx context.Context, cacheKey string)
+	BulkRemove(ctx context.Context, cacheKeys []string)
+	Close(ctx context.Context) error
+}
diff --git a/pkg/cache/config.go b/pkg/cache/config.go
new file mode 100644
index 0000000000..213fcaba0e
--- /dev/null
+++ b/pkg/cache/config.go
@@ -0,0 +1,49 @@
+package cache
+
+import (
+	"time"
+
+	go_cache "github.com/patrickmn/go-cache"
+	"go.signoz.io/signoz/pkg/confmap"
+)
+
+// Config satisfies the confmap.Config interface
+var _ confmap.Config = (*Config)(nil)
+
+type Memory struct {
+	TTL             time.Duration `mapstructure:"ttl"`
+	CleanupInterval time.Duration `mapstructure:"cleanupInterval"`
+}
+
+type Redis struct {
+	Host     string `mapstructure:"host"`
+	Port     int    `mapstructure:"port"`
+	Password string `mapstructure:"password"`
+	DB       int    `mapstructure:"db"`
+}
+
+type Config struct {
+	Provider string `mapstructure:"provider"`
+	Memory   Memory `mapstructure:"memory"`
+	Redis    Redis  `mapstructure:"redis"`
+}
+
+func (c *Config) NewWithDefaults() confmap.Config {
+	return &Config{
+		Provider: "memory",
+		Memory: Memory{
+			TTL:             go_cache.NoExpiration,
+			CleanupInterval: 1 * time.Minute,
+		},
+		Redis: Redis{
+			Host:     "localhost",
+			Port:     6379,
+			Password: "",
+			DB:       0,
+		},
+	}
+}
+
+func (c *Config) Validate() error {
+	return nil
+}
diff --git a/pkg/cache/strategy/memory/memory.go b/pkg/cache/strategy/memory/memory.go
new file mode 100644
index 0000000000..5649eecf54
--- /dev/null
+++ b/pkg/cache/strategy/memory/memory.go
@@ -0,0 +1,96 @@
+package memory
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"time"
+
+	go_cache "github.com/patrickmn/go-cache"
+	_cache "go.signoz.io/signoz/pkg/cache"
+)
+
+type cache struct {
+	cc *go_cache.Cache
+}
+
+func New(opts *_cache.Memory) *cache {
+	return &cache{cc: go_cache.New(opts.TTL, opts.CleanupInterval)}
+}
+
+// Connect does nothing
+func (c *cache) Connect(_ context.Context) error {
+	return nil
+}
+
+// Store stores the data in the cache
+func (c *cache) Store(_ context.Context, cacheKey string, data _cache.CacheableEntity, ttl time.Duration) error {
+	// check if the data being passed is a pointer and is not nil
+	rv := reflect.ValueOf(data)
+	if rv.Kind() != reflect.Pointer || rv.IsNil() {
+		return _cache.WrapCacheableEntityErrors(reflect.TypeOf(data), "inmemory")
+	}
+
+	c.cc.Set(cacheKey, data, ttl)
+	return nil
+}
+
+// Retrieve retrieves the data from the cache
+func (c *cache) Retrieve(_ context.Context, cacheKey string, dest _cache.CacheableEntity, allowExpired bool) (_cache.RetrieveStatus, error) {
+	// check if the destination being passed is a pointer and is not nil
+	dstv := reflect.ValueOf(dest)
+	if dstv.Kind() != reflect.Pointer || dstv.IsNil() {
+		return _cache.RetrieveStatusError, _cache.WrapCacheableEntityErrors(reflect.TypeOf(dest), "inmemory")
+	}
+
+	// check if the destination value is settable
+	if !dstv.Elem().CanSet() {
+		return _cache.RetrieveStatusError, fmt.Errorf("destination value is not settable, %s", dstv.Elem())
+	}
+
+	data, found := c.cc.Get(cacheKey)
+	if !found {
+		return _cache.RetrieveStatusKeyMiss, nil
+	}
+
+	// check the type compatibility between the src and dest
+	srcv := reflect.ValueOf(data)
+	if !srcv.Type().AssignableTo(dstv.Type()) {
+		return _cache.RetrieveStatusError, fmt.Errorf("src type is not assignable to dst type")
+	}
+
+	// copy the value from src to dest
+	dstv.Elem().Set(srcv.Elem())
+	return _cache.RetrieveStatusHit, nil
+}
+
+// SetTTL sets the TTL for the cache entry
+func (c *cache) SetTTL(_ context.Context, cacheKey string, ttl time.Duration) {
+	item, found := c.cc.Get(cacheKey)
+	if !found {
+		return
+	}
+	c.cc.Replace(cacheKey, item, ttl)
+}
+
+// Remove removes the cache entry
+func (c *cache) Remove(_ context.Context, cacheKey string) {
+	c.cc.Delete(cacheKey)
+}
+
+// BulkRemove removes the cache entries
+func (c *cache) BulkRemove(_ context.Context, cacheKeys []string) {
+	for _, cacheKey := range cacheKeys {
+		c.cc.Delete(cacheKey)
+	}
+}
+
+// Close does nothing
+func (c *cache) Close(_ context.Context) error {
+	return nil
+}
+
+// Configuration returns the cache configuration
+func (c *cache) Configuration() *_cache.Memory {
+	return nil
+}
diff --git a/pkg/cache/strategy/memory/memory_test.go b/pkg/cache/strategy/memory/memory_test.go
new file mode 100644
index 0000000000..d8434e6b2e
--- /dev/null
+++ b/pkg/cache/strategy/memory/memory_test.go
@@ -0,0 +1,264 @@
+package memory
+
+import (
+	"context"
+	"encoding/json"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	_cache "go.signoz.io/signoz/pkg/cache"
+)
+
+// TestNew tests the New function
+func TestNew(t *testing.T) {
+	opts := &_cache.Memory{
+		TTL:             10 * time.Second,
+		CleanupInterval: 10 * time.Second,
+	}
+	c := New(opts)
+	assert.NotNil(t, c)
+	assert.NotNil(t, c.cc)
+	assert.NoError(t, c.Connect(context.Background()))
+}
+
+type CacheableEntity struct {
+	Key    string
+	Value  int
+	Expiry time.Duration
+}
+
+func (ce CacheableEntity) MarshalBinary() ([]byte, error) {
+	return json.Marshal(ce)
+}
+
+func (ce CacheableEntity) UnmarshalBinary(data []byte) error {
+	return nil
+}
+
+type DCacheableEntity struct {
+	Key    string
+	Value  int
+	Expiry time.Duration
+}
+
+func (dce DCacheableEntity) MarshalBinary() ([]byte, error) {
+	return json.Marshal(dce)
+}
+
+func (dce DCacheableEntity) UnmarshalBinary(data []byte) error {
+	return nil
+}
+
+// TestStore tests the Store function
+// this should fail because of nil pointer error
+func TestStoreWithNilPointer(t *testing.T) {
+	opts := &_cache.Memory{
+		TTL:             10 * time.Second,
+		CleanupInterval: 10 * time.Second,
+	}
+	c := New(opts)
+	var storeCacheableEntity *CacheableEntity
+	assert.Error(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
+}
+
+// this should fail because of no pointer error
+func TestStoreWithStruct(t *testing.T) {
+	opts := &_cache.Memory{
+		TTL:             10 * time.Second,
+		CleanupInterval: 10 * time.Second,
+	}
+	c := New(opts)
+	var storeCacheableEntity CacheableEntity
+	assert.Error(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
+}
+
+func TestStoreWithNonNilPointer(t *testing.T) {
+	opts := &_cache.Memory{
+		TTL:             10 * time.Second,
+		CleanupInterval: 10 * time.Second,
+	}
+	c := New(opts)
+	storeCacheableEntity := &CacheableEntity{
+		Key:    "some-random-key",
+		Value:  1,
+		Expiry: time.Microsecond,
+	}
+	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
+}
+
+// TestRetrieve tests the Retrieve function
+func TestRetrieveWithNilPointer(t *testing.T) {
+	opts := &_cache.Memory{
+		TTL:             10 * time.Second,
+		CleanupInterval: 10 * time.Second,
+	}
+	c := New(opts)
+	storeCacheableEntity := &CacheableEntity{
+		Key:    "some-random-key",
+		Value:  1,
+		Expiry: time.Microsecond,
+	}
+	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
+
+	var retrieveCacheableEntity *CacheableEntity
+
+	retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
+	assert.Error(t, err)
+	assert.Equal(t, retrieveStatus, _cache.RetrieveStatusError)
+}
+
+func TestRetrieveWithNonPointer(t *testing.T) {
+	opts := &_cache.Memory{
+		TTL:             10 * time.Second,
+		CleanupInterval: 10 * time.Second,
+	}
+	c := New(opts)
+	storeCacheableEntity := &CacheableEntity{
+		Key:    "some-random-key",
+		Value:  1,
+		Expiry: time.Microsecond,
+	}
+	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
+
+	var retrieveCacheableEntity CacheableEntity
+
+	retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
+	assert.Error(t, err)
+	assert.Equal(t, retrieveStatus, _cache.RetrieveStatusError)
+}
+
+func TestRetrieveWithDifferentTypes(t *testing.T) {
+	opts := &_cache.Memory{
+		TTL:             10 * time.Second,
+		CleanupInterval: 10 * time.Second,
+	}
+	c := New(opts)
+	storeCacheableEntity := &CacheableEntity{
+		Key:    "some-random-key",
+		Value:  1,
+		Expiry: time.Microsecond,
+	}
+	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
+
+	retrieveCacheableEntity := new(DCacheableEntity)
+	retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
+	assert.Error(t, err)
+	assert.Equal(t, retrieveStatus, _cache.RetrieveStatusError)
+}
+
+func TestRetrieveWithSameTypes(t *testing.T) {
+	opts := &_cache.Memory{
+		TTL:             10 * time.Second,
+		CleanupInterval: 10 * time.Second,
+	}
+	c := New(opts)
+	storeCacheableEntity := &CacheableEntity{
+		Key:    "some-random-key",
+		Value:  1,
+		Expiry: time.Microsecond,
+	}
+	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
+
+	retrieveCacheableEntity := new(CacheableEntity)
+	retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
+	assert.NoError(t, err)
+	assert.Equal(t, retrieveStatus, _cache.RetrieveStatusHit)
+	assert.Equal(t, storeCacheableEntity, retrieveCacheableEntity)
+}
+
+// TestSetTTL tests the SetTTL function
+func TestSetTTL(t *testing.T) {
+	c := New(&_cache.Memory{TTL: 10 * time.Second, CleanupInterval: 1 * time.Second})
+	storeCacheableEntity := &CacheableEntity{
+		Key:    "some-random-key",
+		Value:  1,
+		Expiry: time.Microsecond,
+	}
+	retrieveCacheableEntity := new(CacheableEntity)
+	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 2*time.Second))
+	time.Sleep(3 * time.Second)
+	retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
+	assert.NoError(t, err)
+	assert.Equal(t, retrieveStatus, _cache.RetrieveStatusKeyMiss)
+	assert.Equal(t, new(CacheableEntity), retrieveCacheableEntity)
+
+	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 2*time.Second))
+	c.SetTTL(context.Background(), "key", 4*time.Second)
+	time.Sleep(3 * time.Second)
+	retrieveStatus, err = c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
+	assert.NoError(t, err)
+	assert.Equal(t, retrieveStatus, _cache.RetrieveStatusHit)
+	assert.Equal(t, retrieveCacheableEntity, storeCacheableEntity)
+}
+
+// TestRemove tests the Remove function
+func TestRemove(t *testing.T) {
+	opts := &_cache.Memory{
+		TTL:             10 * time.Second,
+		CleanupInterval: 10 * time.Second,
+	}
+	c := New(opts)
+	storeCacheableEntity := &CacheableEntity{
+		Key:    "some-random-key",
+		Value:  1,
+		Expiry: time.Microsecond,
+	}
+	retrieveCacheableEntity := new(CacheableEntity)
+	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
+	c.Remove(context.Background(), "key")
+
+	retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
+	assert.NoError(t, err)
+	assert.Equal(t, retrieveStatus, _cache.RetrieveStatusKeyMiss)
+	assert.Equal(t, new(CacheableEntity), retrieveCacheableEntity)
+}
+
+// TestBulkRemove tests the BulkRemove function
+func TestBulkRemove(t *testing.T) {
+	opts := &_cache.Memory{
+		TTL:             10 * time.Second,
+		CleanupInterval: 10 * time.Second,
+	}
+	c := New(opts)
+	storeCacheableEntity := &CacheableEntity{
+		Key:    "some-random-key",
+		Value:  1,
+		Expiry: time.Microsecond,
+	}
+	retrieveCacheableEntity := new(CacheableEntity)
+	assert.NoError(t, c.Store(context.Background(), "key1", storeCacheableEntity, 10*time.Second))
+	assert.NoError(t, c.Store(context.Background(), "key2", storeCacheableEntity, 10*time.Second))
+	c.BulkRemove(context.Background(), []string{"key1", "key2"})
+
+	retrieveStatus, err := c.Retrieve(context.Background(), "key1", retrieveCacheableEntity, false)
+	assert.NoError(t, err)
+	assert.Equal(t, retrieveStatus, _cache.RetrieveStatusKeyMiss)
+	assert.Equal(t, new(CacheableEntity), retrieveCacheableEntity)
+
+	retrieveStatus, err = c.Retrieve(context.Background(), "key2", retrieveCacheableEntity, false)
+	assert.NoError(t, err)
+	assert.Equal(t, retrieveStatus, _cache.RetrieveStatusKeyMiss)
+	assert.Equal(t, new(CacheableEntity), retrieveCacheableEntity)
+}
+
+// TestCache tests the cache
+func TestCache(t *testing.T) {
+	opts := &_cache.Memory{
+		TTL:             10 * time.Second,
+		CleanupInterval: 10 * time.Second,
+	}
+	c := New(opts)
+	storeCacheableEntity := &CacheableEntity{
+		Key:    "some-random-key",
+		Value:  1,
+		Expiry: time.Microsecond,
+	}
+	retrieveCacheableEntity := new(CacheableEntity)
+	assert.NoError(t, c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second))
+	retrieveStatus, err := c.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
+	assert.NoError(t, err)
+	assert.Equal(t, retrieveStatus, _cache.RetrieveStatusHit)
+	assert.Equal(t, storeCacheableEntity, retrieveCacheableEntity)
+	c.Remove(context.Background(), "key")
+}
diff --git a/pkg/cache/strategy/redis/redis.go b/pkg/cache/strategy/redis/redis.go
new file mode 100644
index 0000000000..0309072656
--- /dev/null
+++ b/pkg/cache/strategy/redis/redis.go
@@ -0,0 +1,120 @@
+package redis
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/go-redis/redis/v8"
+	_cache "go.signoz.io/signoz/pkg/cache"
+	"go.uber.org/zap"
+)
+
+type cache struct {
+	client *redis.Client
+	opts   *_cache.Redis
+}
+
+func New(opts *_cache.Redis) *cache {
+	return &cache{opts: opts}
+}
+
+// WithClient creates a new cache with the given client
+func WithClient(client *redis.Client) *cache {
+	return &cache{client: client}
+}
+
+// Connect connects to the redis server
+func (c *cache) Connect(_ context.Context) error {
+	c.client = redis.NewClient(&redis.Options{
+		Addr:     fmt.Sprintf("%s:%d", c.opts.Host, c.opts.Port),
+		Password: c.opts.Password,
+		DB:       c.opts.DB,
+	})
+	return nil
+}
+
+// Store stores the data in the cache
+func (c *cache) Store(ctx context.Context, cacheKey string, data _cache.CacheableEntity, ttl time.Duration) error {
+	return c.client.Set(ctx, cacheKey, data, ttl).Err()
+}
+
+// Retrieve retrieves the data from the cache
+func (c *cache) Retrieve(ctx context.Context, cacheKey string, dest _cache.CacheableEntity, allowExpired bool) (_cache.RetrieveStatus, error) {
+	err := c.client.Get(ctx, cacheKey).Scan(dest)
+	if err != nil {
+		if errors.Is(err, redis.Nil) {
+			return _cache.RetrieveStatusKeyMiss, nil
+		}
+		return _cache.RetrieveStatusError, err
+	}
+	return _cache.RetrieveStatusHit, nil
+}
+
+// SetTTL sets the TTL for the cache entry
+func (c *cache) SetTTL(ctx context.Context, cacheKey string, ttl time.Duration) {
+	err := c.client.Expire(ctx, cacheKey, ttl).Err()
+	if err != nil {
+		zap.L().Error("error setting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Duration("ttl", ttl), zap.Error(err))
+	}
+}
+
+// Remove removes the cache entry
+func (c *cache) Remove(ctx context.Context, cacheKey string) {
+	c.BulkRemove(ctx, []string{cacheKey})
+}
+
+// BulkRemove removes the cache entries
+func (c *cache) BulkRemove(ctx context.Context, cacheKeys []string) {
+	if err := c.client.Del(ctx, cacheKeys...).Err(); err != nil {
+		zap.L().Error("error deleting cache keys", zap.Strings("cacheKeys", cacheKeys), zap.Error(err))
+	}
+}
+
+// Close closes the connection to the redis server
+func (c *cache) Close(_ context.Context) error {
+	return c.client.Close()
+}
+
+// Ping pings the redis server
+func (c *cache) Ping(ctx context.Context) error {
+	return c.client.Ping(ctx).Err()
+}
+
+// GetClient returns the redis client
+func (c *cache) GetClient() *redis.Client {
+	return c.client
+}
+
+// GetOptions returns the options
+func (c *cache) GetOptions() *_cache.Redis {
+	return c.opts
+}
+
+// GetTTL returns the TTL for the cache entry
+func (c *cache) GetTTL(ctx context.Context, cacheKey string) time.Duration {
+	ttl, err := c.client.TTL(ctx, cacheKey).Result()
+	if err != nil {
+		zap.L().Error("error getting TTL for cache key", zap.String("cacheKey", cacheKey), zap.Error(err))
+	}
+	return ttl
+}
+
+// GetKeys returns the keys matching the pattern
+func (c *cache) GetKeys(ctx context.Context, pattern string) ([]string, error) {
+	return c.client.Keys(ctx, pattern).Result()
+}
+
+// GetKeysWithTTL returns the keys matching the pattern with their TTL
+func (c *cache) GetKeysWithTTL(ctx context.Context, pattern string) (map[string]time.Duration, error) {
+	keys, err := c.GetKeys(ctx, pattern)
+	if err != nil {
+		return nil, err
+	}
+	result := make(map[string]time.Duration)
+	for _, key := range keys {
+		result[key] = c.GetTTL(ctx, key)
+	}
+	return result, nil
+}
diff --git a/pkg/cache/strategy/redis/redis_test.go b/pkg/cache/strategy/redis/redis_test.go
new file mode 100644
index 0000000000..2b1539f2bd
--- /dev/null
+++ b/pkg/cache/strategy/redis/redis_test.go
@@ -0,0 +1,139 @@
+package redis
+
+import (
+	"context"
+	"encoding/json"
+	"testing"
+	"time"
+
+	"github.com/go-redis/redismock/v8"
+	"github.com/stretchr/testify/assert"
+	_cache "go.signoz.io/signoz/pkg/cache"
+)
+
+type CacheableEntity struct {
+	Key    string
+	Value  int
+	Expiry time.Duration
+}
+
+func (ce *CacheableEntity) MarshalBinary() ([]byte, error) {
+	return json.Marshal(ce)
+}
+
+func (ce *CacheableEntity) UnmarshalBinary(data []byte) error {
+	return json.Unmarshal(data, ce)
+}
+
+func TestStore(t *testing.T) {
+	db, mock := redismock.NewClientMock()
+	cache := WithClient(db)
+	storeCacheableEntity := &CacheableEntity{
+		Key:    "some-random-key",
+		Value:  1,
+		Expiry: time.Microsecond,
+	}
+
+	mock.ExpectSet("key", storeCacheableEntity, 10*time.Second).RedisNil()
+	cache.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second)
+
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
+
+func TestRetrieve(t *testing.T) {
+	db, mock := redismock.NewClientMock()
+	cache := WithClient(db)
+	storeCacheableEntity := &CacheableEntity{
+		Key:    "some-random-key",
+		Value:  1,
+		Expiry: time.Microsecond,
+	}
+	retrieveCacheableEntity := new(CacheableEntity)
+
+	mock.ExpectSet("key", storeCacheableEntity, 10*time.Second).RedisNil()
+	cache.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second)
+
+	data, err := storeCacheableEntity.MarshalBinary()
+	assert.NoError(t, err)
+
+	mock.ExpectGet("key").SetVal(string(data))
+	retrieveStatus, err := cache.Retrieve(context.Background(), "key", retrieveCacheableEntity, false)
+	if err != nil {
+		t.Errorf("unexpected error: %s", err)
+	}
+
+	if retrieveStatus != _cache.RetrieveStatusHit {
+		t.Errorf("expected status %d, got %d", _cache.RetrieveStatusHit, retrieveStatus)
+	}
+
+	assert.Equal(t, storeCacheableEntity, retrieveCacheableEntity)
+
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
+
+func TestSetTTL(t *testing.T) {
+	db, mock := redismock.NewClientMock()
+	cache := WithClient(db)
+	storeCacheableEntity := &CacheableEntity{
+		Key:    "some-random-key",
+		Value:  1,
+		Expiry: time.Microsecond,
+	}
+
+	mock.ExpectSet("key", storeCacheableEntity, 10*time.Second).RedisNil()
+	cache.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second)
+
+	mock.ExpectExpire("key", 4*time.Second).RedisNil()
+	cache.SetTTL(context.Background(), "key", 4*time.Second)
+
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
+
+func TestRemove(t *testing.T) {
+	db, mock := redismock.NewClientMock()
+	c := WithClient(db)
+	storeCacheableEntity := &CacheableEntity{
+		Key:    "some-random-key",
+		Value:  1,
+		Expiry: time.Microsecond,
+	}
+
+	mock.ExpectSet("key", storeCacheableEntity, 10*time.Second).RedisNil()
+	c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second)
+
+	mock.ExpectDel("key").RedisNil()
+	c.Remove(context.Background(), "key")
+
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
+
+func TestBulkRemove(t *testing.T) {
+	db, mock := redismock.NewClientMock()
+	c := WithClient(db)
+	storeCacheableEntity := &CacheableEntity{
+		Key:    "some-random-key",
+		Value:  1,
+		Expiry: time.Microsecond,
+	}
+
+	mock.ExpectSet("key", storeCacheableEntity, 10*time.Second).RedisNil()
+	c.Store(context.Background(), "key", storeCacheableEntity, 10*time.Second)
+
+	mock.ExpectSet("key2", storeCacheableEntity, 10*time.Second).RedisNil()
+	c.Store(context.Background(), "key2", storeCacheableEntity, 10*time.Second)
+
+	mock.ExpectDel("key", "key2").RedisNil()
+	c.BulkRemove(context.Background(), []string{"key", "key2"})
+
+	if err := mock.ExpectationsWereMet(); err != nil {
+		t.Errorf("there were unfulfilled expectations: %s", err)
+	}
+}
diff --git a/pkg/config/config.go b/pkg/config/config.go
index 6d88cacb61..a1333a89da 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -3,6 +3,7 @@ package config
 import (
 	"context"
 
+	"go.signoz.io/signoz/pkg/cache"
 	signozconfmap "go.signoz.io/signoz/pkg/confmap"
 	"go.signoz.io/signoz/pkg/instrumentation"
 	"go.signoz.io/signoz/pkg/web"
@@ -13,6 +14,7 @@ var (
 	defaults = map[string]signozconfmap.Config{
 		"instrumentation": &instrumentation.Config{},
 		"web":             &web.Config{},
+		"cache":           &cache.Config{},
 	}
 )
 
@@ -20,6 +22,7 @@ var (
 type Config struct {
 	Instrumentation instrumentation.Config `mapstructure:"instrumentation"`
 	Web             web.Config             `mapstructure:"web"`
+	Cache           cache.Config           `mapstructure:"cache"`
 }
 
 func New(ctx context.Context, settings ProviderSettings) (*Config, error) {
diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go
index b3e3007bb4..ac2ce3e762 100644
--- a/pkg/config/config_test.go
+++ b/pkg/config/config_test.go
@@ -3,22 +3,22 @@ package config
 import (
 	"context"
 	"testing"
+	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"go.opentelemetry.io/collector/confmap"
-	contribsdkconfig "go.opentelemetry.io/contrib/config"
+	"go.signoz.io/signoz/pkg/cache"
 	"go.signoz.io/signoz/pkg/confmap/provider/signozenvprovider"
-	"go.signoz.io/signoz/pkg/instrumentation"
 	"go.signoz.io/signoz/pkg/web"
 )
 
 func TestNewWithSignozEnvProvider(t *testing.T) {
-	t.Setenv("SIGNOZ__INSTRUMENTATION__LOGS__ENABLED", "true")
-	t.Setenv("SIGNOZ__INSTRUMENTATION__LOGS__PROCESSORS__BATCH__EXPORTER__OTLP__ENDPOINT", "0.0.0.0:4317")
-	t.Setenv("SIGNOZ__INSTRUMENTATION__LOGS__PROCESSORS__BATCH__EXPORT_TIMEOUT", "10")
+	t.Setenv("SIGNOZ__WEB__PREFIX", "/web")
 	t.Setenv("SIGNOZ__WEB__DIRECTORY", "/build")
+	t.Setenv("SIGNOZ__CACHE__PROVIDER", "redis")
+	t.Setenv("SIGNOZ__CACHE__REDIS__HOST", "127.0.0.1")
 
 	config, err := New(context.Background(), ProviderSettings{
 		ResolverSettings: confmap.ResolverSettings{
@@ -30,31 +30,24 @@ func TestNewWithSignozEnvProvider(t *testing.T) {
 	})
 	require.NoError(t, err)
 
-	i := 10
 	expected := &Config{
-		Instrumentation: instrumentation.Config{
-			Logs: instrumentation.LogsConfig{
-				Enabled: true,
-				LoggerProvider: contribsdkconfig.LoggerProvider{
-					Processors: []contribsdkconfig.LogRecordProcessor{
-						{
-							Batch: &contribsdkconfig.BatchLogRecordProcessor{
-								ExportTimeout: &i,
-								Exporter: contribsdkconfig.LogRecordExporter{
-									OTLP: &contribsdkconfig.OTLP{
-										Endpoint: "0.0.0.0:4317",
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-		},
 		Web: web.Config{
 			Prefix:    "/web",
 			Directory: "/build",
 		},
+		Cache: cache.Config{
+			Provider: "redis",
+			Memory: cache.Memory{
+				TTL:             time.Duration(-1),
+				CleanupInterval: 1 * time.Minute,
+			},
+			Redis: cache.Redis{
+				Host:     "127.0.0.1",
+				Port:     6379,
+				Password: "",
+				DB:       0,
+			},
+		},
 	}
 
 	assert.Equal(t, expected, config)
diff --git a/pkg/signoz/signoz.go b/pkg/signoz/signoz.go
new file mode 100644
index 0000000000..a2a42d3073
--- /dev/null
+++ b/pkg/signoz/signoz.go
@@ -0,0 +1,37 @@
+package signoz
+
+import (
+	"go.signoz.io/signoz/pkg/cache"
+	"go.signoz.io/signoz/pkg/cache/strategy/memory"
+	"go.signoz.io/signoz/pkg/cache/strategy/redis"
+	"go.signoz.io/signoz/pkg/config"
+	"go.signoz.io/signoz/pkg/web"
+	"go.uber.org/zap"
+)
+
+type SigNoz struct {
+	Cache cache.Cache
+	Web   *web.Web
+}
+
+func New(config *config.Config, skipWebFrontend bool) (*SigNoz, error) {
+	var cache cache.Cache
+
+	// init for the cache
+	switch config.Cache.Provider {
+	case "memory":
+		cache = memory.New(&config.Cache.Memory)
+	case "redis":
+		cache = redis.New(&config.Cache.Redis)
+	}
+
+	web, err := web.New(zap.L(), config.Web)
+	if err != nil && !skipWebFrontend {
+		return nil, err
+	}
+
+	return &SigNoz{
+		Cache: cache,
+		Web:   web,
+	}, nil
+}
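
For reference, a minimal end-to-end sketch of how the new pkg/cache API introduced in this patch composes, using only the types shown above (cache.Cache, cache.Memory, memory.New); the entry type, key names, and TTL values here are illustrative and not part of the patch. Two behaviors worth noting from the code itself: the provider switch in pkg/signoz/signoz.go has no default case, so an unrecognized cache.provider value leaves SigNoz.Cache nil, and the redis strategy only builds its client in Connect, which therefore has to run before the first Store or Retrieve.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"go.signoz.io/signoz/pkg/cache"
	"go.signoz.io/signoz/pkg/cache/strategy/memory"
)

// entry is a hypothetical type used only for this sketch; any type that
// implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler
// satisfies cache.CacheableEntity.
type entry struct {
	Name  string
	Count int
}

func (e *entry) MarshalBinary() ([]byte, error)    { return json.Marshal(e) }
func (e *entry) UnmarshalBinary(data []byte) error { return json.Unmarshal(data, e) }

func main() {
	// Mirrors the "memory" branch of pkg/signoz/signoz.go; the "redis"
	// strategy would additionally need Connect(ctx) to dial the server.
	var c cache.Cache = memory.New(&cache.Memory{
		TTL:             time.Minute,
		CleanupInterval: time.Minute,
	})

	ctx := context.Background()

	// Store requires a non-nil pointer, per the reflection checks in memory.go.
	if err := c.Store(ctx, "org:1", &entry{Name: "signoz", Count: 1}, 30*time.Second); err != nil {
		panic(err)
	}

	// Retrieve copies the cached value into dest and reports a hit/miss status.
	got := new(entry)
	status, err := c.Retrieve(ctx, "org:1", got, false)
	if err != nil {
		panic(err)
	}
	fmt.Printf("status=%s name=%s count=%d\n", status, got.Name, got.Count)
}

The two strategies differ in how values cross the cache boundary: the memory strategy stores the pointer itself and copies it back via reflection on Retrieve, so MarshalBinary/UnmarshalBinary are never invoked, while the redis strategy round-trips values through encoding.BinaryMarshaler/BinaryUnmarshaler. That is why the redis tests implement UnmarshalBinary for real while the memory tests can leave it as a no-op.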