diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
deleted file mode 100644
index 7f7cec9cda..0000000000
--- a/.github/CODEOWNERS
+++ /dev/null
@@ -1,10 +0,0 @@
-/web/ui @juliusv
-/web/ui/module @juliusv @nexucis
-/storage/remote @cstyan @bwplotka @tomwilkie
-/storage/remote/otlptranslator @aknuds1 @jesusvazquez
-/discovery/kubernetes @brancz
-/tsdb @jesusvazquez
-/promql @roidelapluie
-/cmd/promtool @dgl
-/documentation/prometheus-mixin @metalmatze
-
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
deleted file mode 100644
index 99a9ce05a4..0000000000
--- a/.github/dependabot.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-version: 2
-updates:
- - package-ecosystem: "docker"
- directory: "/"
- schedule:
- interval: "monthly"
- - package-ecosystem: "github-actions"
- directories:
- - "/"
- - "/scripts"
- schedule:
- interval: "monthly"
- - package-ecosystem: "gomod"
- directories:
- - "/"
- - "/documentation/examples/remote_storage"
- - "/internal/tools"
- schedule:
- interval: "monthly"
- groups:
- aws:
- patterns:
- - "github.com/aws/*"
- azure:
- patterns:
- - "github.com/Azure/*"
- k8s.io:
- patterns:
- - "k8s.io/*"
- go.opentelemetry.io:
- patterns:
- - "go.opentelemetry.io/*"
- open-pull-requests-limit: 20
diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml
index 4e942f1f3b..7b835b36f8 100644
--- a/.github/workflows/buf-lint.yml
+++ b/.github/workflows/buf-lint.yml
@@ -12,7 +12,7 @@ jobs:
name: lint
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0
diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml
index add72cc89c..5de3c133b9 100644
--- a/.github/workflows/buf.yml
+++ b/.github/workflows/buf.yml
@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'prometheus'
steps:
- - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0
@@ -25,7 +25,7 @@ jobs:
with:
input: 'prompb'
against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD~1,subdir=prompb'
- - uses: bufbuild/buf-push-action@a654ff18effe4641ebea4a4ce242c49800728459 # v1.1.1
+ - uses: bufbuild/buf-push-action@a654ff18effe4641ebea4a4ce242c49800728459 # v1.2.0
with:
input: 'prompb'
buf_token: ${{ secrets.BUF_TOKEN }}
diff --git a/.github/workflows/check_release_notes.yml b/.github/workflows/check_release_notes.yml
index b8381aff07..171af5f213 100644
--- a/.github/workflows/check_release_notes.yml
+++ b/.github/workflows/check_release_notes.yml
@@ -20,7 +20,7 @@ jobs:
# Don't run it on dependabot PRs either as humans would take control in case a bump introduces a breaking change.
if: (github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community') && github.event.pull_request.user.login != 'dependabot[bot]'
steps:
- - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- env:
PR_DESCRIPTION: ${{ github.event.pull_request.body }}
run: |
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ed4cfbf356..8d25176252 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -16,10 +16,10 @@ jobs:
# should also be updated.
image: quay.io/prometheus/golang-builder:1.25-base
steps:
- - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
+ - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
- uses: ./.github/promci/actions/setup_environment
with:
enable_npm: true
@@ -34,10 +34,10 @@ jobs:
container:
image: quay.io/prometheus/golang-builder:1.25-base
steps:
- - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
+ - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
- uses: ./.github/promci/actions/setup_environment
- run: go test --tags=dedupelabels ./...
- run: go test --tags=slicelabels -race ./cmd/prometheus ./model/textparse ./prompb/...
@@ -57,9 +57,9 @@ jobs:
GOEXPERIMENT: synctest
container:
# The go version in this image should be N-1 wrt test_go.
- image: quay.io/prometheus/golang-builder:1.24-base
+ image: quay.io/prometheus/golang-builder:1.25-base
steps:
- - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- run: make build
@@ -78,10 +78,10 @@ jobs:
image: quay.io/prometheus/golang-builder:1.25-base
steps:
- - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
+ - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
- uses: ./.github/promci/actions/setup_environment
with:
enable_go: false
@@ -97,10 +97,10 @@ jobs:
name: Go tests on Windows
runs-on: windows-latest
steps:
- - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
+ - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: 1.25.x
- run: |
@@ -116,7 +116,7 @@ jobs:
container:
image: quay.io/prometheus/golang-builder:1.25-base
steps:
- - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- run: go install ./cmd/promtool/.
@@ -143,10 +143,10 @@ jobs:
matrix:
thread: [ 0, 1, 2 ]
steps:
- - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
+ - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
- uses: ./.github/promci/actions/build
with:
promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
@@ -170,10 +170,10 @@ jobs:
# Whenever the Go version is updated here, .promu.yml
# should also be updated.
steps:
- - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
+ - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
- uses: ./.github/promci/actions/build
with:
parallelism: 12
@@ -202,30 +202,32 @@ jobs:
if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }}
run: exit 1
check_generated_parser:
+ # Checks generated parser and UI functions list. Not renaming as it is a required check.
name: Check generated parser
runs-on: ubuntu-latest
+ container:
+ image: quay.io/prometheus/golang-builder:1.25-base
steps:
- name: Checkout repository
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - name: Install Go
- uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
+ - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+ - uses: ./.github/promci/actions/setup_environment
with:
- cache: false
- go-version: 1.25.x
- - name: Run goyacc and check for diff
- run: make install-goyacc check-generated-parser
+ enable_npm: true
+ - run: make install-goyacc check-generated-parser
+ - run: make check-generated-promql-functions
golangci:
name: golangci-lint
runs-on: ubuntu-latest
steps:
- name: Checkout repository
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- name: Install Go
- uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
+ uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: 1.25.x
- name: Install snmp_exporter/generator dependencies
@@ -235,18 +237,18 @@ jobs:
id: golangci-lint-version
run: echo "version=$(make print-golangci-lint-version)" >> $GITHUB_OUTPUT
- name: Lint
- uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
+ uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
with:
args: --verbose
version: ${{ steps.golangci-lint-version.outputs.version }}
- name: Lint with slicelabels
- uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
+ uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
with:
# goexperiment.synctest to ensure we don't miss files that depend on it.
args: --verbose --build-tags=slicelabels,goexperiment.synctest
version: ${{ steps.golangci-lint-version.outputs.version }}
- name: Lint with dedupelabels
- uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
+ uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
with:
args: --verbose --build-tags=dedupelabels
version: ${{ steps.golangci-lint-version.outputs.version }}
@@ -265,10 +267,10 @@ jobs:
needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
steps:
- - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
+ - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
- uses: ./.github/promci/actions/publish_main
with:
docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -284,10 +286,10 @@ jobs:
||
(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
steps:
- - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
+ - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
- uses: ./.github/promci/actions/publish_release
with:
docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -301,16 +303,16 @@ jobs:
needs: [test_ui, codeql]
steps:
- name: Checkout
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
+ - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
- name: Install nodejs
- uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
+ uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
with:
node-version-file: "web/ui/.nvmrc"
registry-url: "https://registry.npmjs.org"
- - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
+ - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
with:
path: ~/.npm
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 2e2143f4c8..8dfa6049f2 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -24,17 +24,17 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- name: Initialize CodeQL
- uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
+ uses: github/codeql-action/init@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
with:
languages: ${{ matrix.language }}
- name: Autobuild
- uses: github/codeql-action/autobuild@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
+ uses: github/codeql-action/autobuild@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
+ uses: github/codeql-action/analyze@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
diff --git a/.github/workflows/container_description.yml b/.github/workflows/container_description.yml
index 7de8bb8da7..7b46e9532f 100644
--- a/.github/workflows/container_description.yml
+++ b/.github/workflows/container_description.yml
@@ -18,7 +18,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- name: Set docker hub repo name
@@ -42,7 +42,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- name: Set quay.io org name
diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml
index 3d3aa82d1c..f9f7abafd6 100644
--- a/.github/workflows/fuzzing.yml
+++ b/.github/workflows/fuzzing.yml
@@ -10,12 +10,12 @@ jobs:
steps:
- name: Build Fuzzers
id: build
- uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@cafd7a0eb8ecb4e007c56897996a9b65c49c972f # master
+ uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@4bf20ff8dfda18ad651583ebca9fb17a7ce1940a # master
with:
oss-fuzz-project-name: "prometheus"
dry-run: false
- name: Run Fuzzers
- uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@cafd7a0eb8ecb4e007c56897996a9b65c49c972f # master
+ uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@4bf20ff8dfda18ad651583ebca9fb17a7ce1940a # master
# Note: Regularly check for updates to the pinned commit hash at:
# https://github.com/google/oss-fuzz/tree/master/infra/cifuzz/actions/run_fuzzers
with:
@@ -23,7 +23,7 @@ jobs:
fuzz-seconds: 600
dry-run: false
- name: Upload Crash
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+ uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
if: failure() && steps.build.outcome == 'success'
with:
name: artifacts
diff --git a/.github/workflows/lock.yml b/.github/workflows/lock.yml
index e7e813e3b6..8f34aad204 100644
--- a/.github/workflows/lock.yml
+++ b/.github/workflows/lock.yml
@@ -16,7 +16,7 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'prometheus'
steps:
- - uses: dessant/lock-threads@1bf7ec25051fe7c00bdd17e6a7cf3d7bfb7dc771 # v5.0.1
+ - uses: dessant/lock-threads@7266a7ce5c1df01b1c6db85bf8cd86c737dadbe7 # v6.0.0
with:
process-only: 'issues'
issue-inactive-days: '180'
diff --git a/.github/workflows/repo_sync.yml b/.github/workflows/repo_sync.yml
index fea1422fdc..afc589c6d7 100644
--- a/.github/workflows/repo_sync.yml
+++ b/.github/workflows/repo_sync.yml
@@ -14,7 +14,7 @@ jobs:
container:
image: quay.io/prometheus/golang-builder
steps:
- - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- run: ./scripts/sync_repo_files.sh
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 658e140f27..81dcbf5c2a 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -21,7 +21,7 @@ jobs:
steps:
- name: "Checkout code"
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # tag=v6.0.1
with:
persist-credentials: false
@@ -37,7 +37,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # tag=v5.0.0
+ uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # tag=v6.0.0
with:
name: SARIF file
path: results.sarif
@@ -45,6 +45,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
- uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # tag=v4.31.2
+ uses: github/codeql-action/upload-sarif@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # tag=v4.31.9
with:
sarif_file: results.sarif
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 86deb94097..947e670fd8 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -11,7 +11,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
runs-on: ubuntu-latest
steps:
- - uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
+ - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
# opt out of defaults to avoid marking issues as stale and closing them
diff --git a/.gitignore b/.gitignore
index 0d99305f69..f64f775993 100644
--- a/.gitignore
+++ b/.gitignore
@@ -26,6 +26,7 @@ npm_licenses.tar.bz2
/vendor
/.build
+/go.work.sum
/**/node_modules
diff --git a/.golangci.yml b/.golangci.yml
index 22c89a6beb..0c866611e9 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -31,6 +31,7 @@ linters:
- govet
- loggercheck
- misspell
+ - modernize
- nilnesserr
# TODO(bwplotka): Enable once https://github.com/golangci/golangci-lint/issues/3228 is fixed.
# - nolintlint
@@ -117,6 +118,12 @@ linters:
- shadow
- fieldalignment
enable-all: true
+ modernize:
+ disable:
+ # Suggest replacing omitempty with omitzero for struct fields.
+ # Disable this check for now since it introduces too many changes in our existing codebase.
+ # See https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#hdr-Analyzer_omitzero for more details.
+ - omitzero
perfsprint:
# Optimizes even if it requires an int or uint type cast.
int-conversion: true
@@ -175,6 +182,11 @@ linters:
- name: unused-receiver
- name: var-declaration
- name: var-naming
+ # TODO(SuperQ): See: https://github.com/prometheus/prometheus/issues/17766
+ arguments:
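+ # revive var-naming arguments: an allowlist of initialisms, a blocklist, then an options map.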
+ - []
+ - []
+ - - skip-package-name-checks: true
testifylint:
disable:
- float-compare
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 01da079725..d43bb24720 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,9 +1,87 @@
# Changelog
-## main / unreleased
+## 3.9.1 / 2026-01-07
-* [FEATURE] Templates: Add urlQueryEscape to template functions. #17403
-* [BUGFIX] TSDB: Register `prometheus_tsdb_sample_ooo_delta` metric properly. #17477
+- [BUGFIX] Agent: Fix crash shortly after startup caused by an invalid object type. #17802
+- [BUGFIX] Scraping: Fix relabel keep/drop not working. #17807
+
+## 3.9.0 / 2026-01-06
+
+- [CHANGE] Native Histograms are no longer experimental! Make the `native-histogram` feature flag a no-op. Use `scrape_native_histograms` config option instead. #17528
+- [CHANGE] API: Add maximum limit of 10,000 sets of statistics to TSDB status endpoint. #17647
+- [FEATURE] API: Add /api/v1/features for clients to understand which features are supported. #17427
+- [FEATURE] Promtool: Add `start_timestamp` field for unit tests. #17636
+- [FEATURE] Promtool: Add `--format seriesjson` option to `tsdb dump` to output just series labels in JSON format. #13409
+- [FEATURE] Add `--storage.tsdb.delay-compact-file.path` flag for better interoperability with Thanos. #17435
+- [FEATURE] UI: Add an option on the query drop-down menu to duplicate that query panel. #17714
+- [ENHANCEMENT] TSDB: Add flag `--storage.tsdb.block-reload-interval` to configure the TSDB block reload interval. #16728
+- [ENHANCEMENT] UI: Add graph option to start the chart's Y axis at zero. #17565
+- [ENHANCEMENT] Scraping: Classic protobuf format no longer requires the unit in the metric name. #16834
+- [ENHANCEMENT] PromQL, Rules, SD, Scraping: Add native histograms to complement existing summaries. #17374
+- [ENHANCEMENT] Notifications: Add a histogram `prometheus_notifications_latency_histogram_seconds` to complement the existing summary. #16637
+- [ENHANCEMENT] Remote-write: Add custom scope support for AzureAD authentication. #17483
+- [ENHANCEMENT] SD: add a `config` label with job name for most `prometheus_sd_refresh` metrics. #17138
+- [ENHANCEMENT] TSDB: New histogram `prometheus_tsdb_sample_ooo_delta`, the distribution of out-of-order samples in seconds. Collected for all samples, accepted or not. #17477
+- [ENHANCEMENT] Remote-read: Validate histograms received via remote-read. #17561
+- [PERF] TSDB: Small optimizations to postings index. #17439
+- [PERF] Scraping: Speed up relabelling of series. #17530
+- [PERF] PromQL: Small optimisations in binary operators. #17524, #17519
+- [BUGFIX] UI: PromQL autocomplete now shows the correct type and HELP text for OpenMetrics counters whose samples end in `_total`. #17682
+- [BUGFIX] UI: Fixed codemirror-promql incorrectly showing label completion suggestions after the closing curly brace of a vector selector. #17602
+- [BUGFIX] UI: Query editor no longer suggests a duration unit if one is already present after a number. #17605
+- [BUGFIX] PromQL: Fix some "vector cannot contain metrics with the same labelset" errors when experimental delayed name removal is enabled. #17678
+- [BUGFIX] PromQL: Fix possible corruption of PromQL text if the query had an empty `ignoring()` and non-empty grouping. #17643
+- [BUGFIX] PromQL: Fix resets/changes to return empty results for anchored selectors when all samples are outside the range. #17479
+- [BUGFIX] PromQL: Check more consistently for many-to-one matching in filter binary operators. #17668
+- [BUGFIX] PromQL: Fix collision in unary negation with non-overlapping series. #17708
+- [BUGFIX] PromQL: Fix collision in label_join and label_replace with non-overlapping series. #17703
+- [BUGFIX] PromQL: Fix bug with inconsistent results for queries with OR expression when experimental delayed name removal is enabled. #17161
+- [BUGFIX] PromQL: Ensure that `rate`/`increase`/`delta` of histograms results in a gauge histogram. #17608
+- [BUGFIX] PromQL: Do not panic while iterating over invalid histograms. #17559
+- [BUGFIX] TSDB: Reject chunk files whose encoded chunk length overflows int. #17533
+- [BUGFIX] TSDB: Do not panic during resolution reduction of invalid histograms. #17561
+- [BUGFIX] Remote-write Receive: Avoid duplicate labels when experimental type-and-unit-label feature is enabled. #17546
+- [BUGFIX] OTLP Receiver: Only write metadata to disk when experimental metadata-wal-records feature is enabled. #17472
+
+## 3.8.1 / 2025-12-16
+
+* [BUGFIX] remote: Fix Remote Write receiver so it does not send wrong response headers for the v1 flow and cause Prometheus senders to emit false partial-error logs and metrics. #17683
+
+## 3.8.0 / 2025-11-28
+
+* [CHANGE] Remote-write: Update receiving to [2.0-rc.4 spec](https://github.com/prometheus/docs/blob/60c24e450010df38cfcb4f65df874f6f9b26dbcb/docs/specs/prw/remote_write_spec_2_0.md). "created timestamp" (CT) is now called "start timestamp" (ST). #17411
+* [CHANGE] TSDB: Native Histogram Custom Bounds with a NaN threshold are now rejected. #17287
+* [FEATURE] OAuth2: support jwt-bearer grant-type (RFC7523 3.1). #17592
+* [FEATURE] Dockerfile: Add OpenContainers spec labels to Dockerfile. #16483
+* [FEATURE] SD: Add unified AWS service discovery for ec2, lightsail and ecs services. #17406
+* [FEATURE] Native histograms are now a stable, but optional, feature; use the `scrape_native_histogram` config setting. #17232 #17315
+* [FEATURE] UI: Support anchored and smoothed keyword in promql editor. #17239
+* [FEATURE] UI: Show detailed relabeling steps for each discovered target. #17337
+* [FEATURE] Alerting: Add urlQueryEscape to template functions. #17403
+* [FEATURE] Promtool: Add Remote-Write 2.0 support to `promtool push metrics` via the `--protobuf_message` flag. #17417
+* [ENHANCEMENT] Clarify the docs about handling negative native histograms. #17249
+* [ENHANCEMENT] Mixin: Add static UID to the remote-write dashboard. #17256
+* [ENHANCEMENT] PromQL: Reconcile mismatched NHCB bounds in `Add` and `Sub`. #17278
+* [ENHANCEMENT] Alerting: Add "unknown" state for alerting rules that haven't been evaluated yet. #17282
+* [ENHANCEMENT] Scrape: Allow simultaneous use of classic histogram → NHCB conversion and zero-timestamp ingestion. #17305
+* [ENHANCEMENT] UI: Add smoothed/anchored in explain. #17334
+* [ENHANCEMENT] OTLP: De-duplicate any `target_info` samples with the same timestamp for the same series. #17400
+* [ENHANCEMENT] Document `use_fips_sts_endpoint` in `sigv4` config sections. #17304
+* [ENHANCEMENT] Document Prometheus Agent. #14519
+* [PERF] PromQL: Speed up parsing of variadic functions. #17316
+* [PERF] UI: Speed up alerts/rules/... pages by not rendering collapsed content. #17485
+* [PERF] UI: Performance improvement when getting label name and values in promql editor. #17194
+* [PERF] UI: Speed up /alerts for many firing alerts via virtual scrolling. #17254
+* [BUGFIX] PromQL: Fix slice indexing bug in info function on churning series. #17199
+* [BUGFIX] API: Reduce lock contention on `/api/v1/targets`. #17306
+* [BUGFIX] PromQL: Consistent handling of gauge vs. counter histograms in aggregations. #17312
+* [BUGFIX] TSDB: Allow NHCB with -Inf as the first custom value. #17320
+* [BUGFIX] UI: Fix duplicate loading of data from the API and speed up rendering of some pages. #17357
+* [BUGFIX] Old UI: Fix createExpressionLink to correctly build /graph URLs so links from Alerts/Rules work again. #17365
+* [BUGFIX] PromQL: Avoid panic when parsing malformed `info` call. #17379
+* [BUGFIX] PromQL: Include histograms when enforcing sample_limit. #17390
+* [BUGFIX] Config: Fix panic if TLS CA file is absent. #17418
+* [BUGFIX] PromQL: Fix `histogram_fraction` for classic histograms and NHCB if lower bound is in the first bucket. #17424
## 3.7.3 / 2025-10-29
@@ -201,7 +279,7 @@
## 3.2.1 / 2025-02-25
-* [BUGFIX] Don't send Accept` header `escape=allow-utf-8` when `metric_name_validation_scheme: legacy` is configured. #16061
+* [BUGFIX] Don't send `Accept` header `escape=allow-utf-8` when `metric_name_validation_scheme: legacy` is configured. #16061
## 3.2.0 / 2025-02-17
@@ -212,10 +290,10 @@
* [ENHANCEMENT] scrape: Add metadata for automatic metrics to WAL for `metadata-wal-records` feature. #15837
* [ENHANCEMENT] promtool: Support linting of scrape interval, through lint option `too-long-scrape-interval`. #15719
* [ENHANCEMENT] promtool: Add --ignore-unknown-fields option. #15706
-* [ENHANCEMENT] ui: Make "hide empty rules" and hide empty rules" persistent #15807
+* [ENHANCEMENT] ui: Make "hide empty rules" and "hide empty rules" persistent #15807
* [ENHANCEMENT] web/api: Add a limit parameter to `/query` and `/query_range`. #15552
* [ENHANCEMENT] api: Add fields Node and ServerTime to `/status`. #15784
-* [PERF] Scraping: defer computing labels for dropped targets until they are needed by the UI. #15261
+* [PERF] Scraping: defer computing labels for dropped targets until they are needed by the UI. #15261
* [BUGFIX] remotewrite2: Fix invalid metadata bug for metrics without metadata. #15829
* [BUGFIX] remotewrite2: Fix the unit field propagation. #15825
* [BUGFIX] scrape: Fix WAL metadata for histograms and summaries. #15832
@@ -232,9 +310,9 @@
* [ENHANCEMENT] TSDB: Improve calculation of space used by labels. #13880
* [ENHANCEMENT] Rules: new metric rule_group_last_rule_duration_sum_seconds. #15672
* [ENHANCEMENT] Observability: Export 'go_sync_mutex_wait_total_seconds_total' metric. #15339
- * [ENHANCEMEN] Remote-Write: optionally use a DNS resolver that picks a random IP. #15329
+ * [ENHANCEMENT] Remote-Write: optionally use a DNS resolver that picks a random IP. #15329
* [PERF] Optimize `l=~".+"` matcher. #15474, #15684
- * [PERF] TSDB: Cache all symbols for compaction . #15455
+ * [PERF] TSDB: Cache all symbols for compaction. #15455
* [PERF] TSDB: MemPostings: keep a map of label values slices. #15426
* [PERF] Remote-Write: Remove interning hook. #15456
* [PERF] Scrape: optimize string manipulation for experimental native histograms with custom buckets. #15453
diff --git a/CODEOWNERS b/CODEOWNERS
index c5b7f25349..f28cdbf832 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -1,10 +1,26 @@
-# Prometheus team members are members of the "default maintainers" github team.
-# They are code owners by default for the whole repo.
-* @prometheus/default-maintainers
+#
+# Please keep this file in sync with the MAINTAINERS.md file!
+#
-# Example adding a dedicated maintainer for AWS SD, and also "default
-# maintainers" so that they do not need to bypass codeowners check to merge
-# something.
-# Example comes from
+# Subsystems.
+/Makefile @simonpasquier @SuperQ
+/cmd/promtool @dgl
+/documentation/prometheus-mixin @metalmatze
+/model/histogram @beorn7 @krajorama
+/web/ui @juliusv
+/web/ui/module @juliusv @nexucis
+/promql @roidelapluie
+/storage/remote @cstyan @bwplotka @tomwilkie @npazosmendez @alexgreenbank
+/storage/remote/otlptranslator @aknuds1 @jesusvazquez @ArthurSens
+/tsdb @jesusvazquez @codesome @bwplotka @krajorama
+
+# Service discovery.
+/discovery/kubernetes @brancz
+/discovery/stackit @jkroepke
+# Pending
# https://github.com/prometheus/prometheus/pull/17105#issuecomment-3248209452
-# /discovery/aws/ @matt-gp @prometheus/default-maintainers
+# /discovery/aws/ @matt-gp @sysadmind
+# https://github.com/prometheus/prometheus/pull/15212#issuecomment-3575225179
+# /discovery/aliyun @KeyOfSpectator
+# https://github.com/prometheus/prometheus/pull/14108#issuecomment-2639515421
+# /discovery/nomad @jaloren @jrasell
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 9b1b286ccf..cfb346e4d0 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -14,7 +14,7 @@ Prometheus uses GitHub to manage reviews of pull requests.
of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on.
* Relevant coding style guidelines are the [Go Code Review
- Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ Comments](https://go.dev/wiki/CodeReviewComments)
and the _Formatting and style_ section of Peter Bourgon's [Go: Best
Practices for Production
Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style).
@@ -78,8 +78,7 @@ go get example.com/some/module/pkg@vX.Y.Z
Tidy up the `go.mod` and `go.sum` files:
```bash
-# The GO111MODULE variable can be omitted when the code isn't located in GOPATH.
-GO111MODULE=on go mod tidy
+go mod tidy
```
You have to commit the changes to `go.mod` and `go.sum` before submitting the pull request.
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index d36f82ca61..f23c7fbd63 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -1,5 +1,7 @@
# Maintainers
+## Please keep this file in sync with the CODEOWNERS file!
+
General maintainers:
* Bryan Boreham (bjboreham@gmail.com / @bboreham)
* Ayoub Mrini (ayoubmrini424@gmail.com / @machine424)
@@ -16,12 +18,12 @@ Maintainers for specific parts of the codebase:
* `model/histogram` and other code related to native histograms: Björn Rabenstein ( / @beorn7),
George Krajcsovits ( / @krajorama)
* `storage`
- * `remote`: Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos ( / @npazosmendez), Alex Greenbank ( / @alexgreenbank)
+ * `remote`: Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos ( / @npazosmendez), Alex Greenbank ( / @alexgreenbank)
* `otlptranslator`: Arthur Silva Sens ( / @ArthurSens), Arve Knudsen ( / @aknuds1), Jesús Vázquez ( / @jesusvazquez)
-* `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez)
+* `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez), George Krajcsovits ( / @krajorama)
* `web`
* `ui`: Julius Volz ( / @juliusv)
- * `module`: Augustin Husson ( @nexucis)
+ * `module`: Augustin Husson ( / @nexucis)
* `Makefile` and related build configuration: Simon Pasquier ( / @simonpasquier), Ben Kochie ( / @SuperQ)
For the sake of brevity, not all subtrees are explicitly listed. Due to the
diff --git a/Makefile b/Makefile
index 43020998ef..8c15ceb2e9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-# Copyright 2018 The Prometheus Authors
+# Copyright The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -79,6 +79,20 @@ ui-lint:
# new Mantine-based UI is fully integrated and the old app can be removed.
cd $(UI_PATH)/react-app && npm run lint
+.PHONY: generate-promql-functions
+generate-promql-functions: ui-install
+ @echo ">> generating PromQL function signatures"
+ @cd $(UI_PATH)/mantine-ui/src/promql/tools && $(GO) run ./gen_functions_list > ../functionSignatures.ts
+ @echo ">> generating PromQL function documentation"
+ @cd $(UI_PATH)/mantine-ui/src/promql/tools && $(GO) run ./gen_functions_docs $(CURDIR)/docs/querying/functions.md > ../functionDocs.tsx
+ @echo ">> formatting generated files"
+ @cd $(UI_PATH)/mantine-ui && npx prettier --write --print-width 120 src/promql/functionSignatures.ts src/promql/functionDocs.tsx
+
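+# Fails if the generated PromQL function signature/doc files under $(UI_PATH)/mantine-ui
+# are out of date; run `make generate-promql-functions` and commit the result to fix it.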
+.PHONY: check-generated-promql-functions
+check-generated-promql-functions: generate-promql-functions
+ @echo ">> checking generated PromQL functions"
+ @git diff --exit-code -- $(UI_PATH)/mantine-ui/src/promql/functionSignatures.ts $(UI_PATH)/mantine-ui/src/promql/functionDocs.tsx || (echo "Generated PromQL function files are out of date. Please run 'make generate-promql-functions' and commit the changes." && false)
+
.PHONY: assets
ifndef SKIP_UI_BUILD
assets: check-node-version ui-install ui-build
@@ -152,15 +166,8 @@ tarball: npm_licenses common-tarball
.PHONY: docker
docker: npm_licenses common-docker
-plugins/plugins.go: plugins.yml plugins/generate.go
- @echo ">> creating plugins list"
- $(GO) generate -tags plugins ./plugins
-
-.PHONY: plugins
-plugins: plugins/plugins.go
-
.PHONY: build
-build: assets npm_licenses assets-compress plugins common-build
+build: assets npm_licenses assets-compress common-build
.PHONY: bench_tsdb
bench_tsdb: $(PROMU)
@@ -184,14 +191,26 @@ check-go-mod-version:
@echo ">> checking go.mod version matching"
@./scripts/check-go-mod-version.sh
+.PHONY: update-features-testdata
+update-features-testdata:
+ @echo ">> updating features testdata"
+ @$(GO) test ./cmd/prometheus -run TestFeaturesAPI -update-features
+
+GO_SUBMODULE_DIRS := documentation/examples/remote_storage internal/tools web/ui/mantine-ui/src/promql/tools
+
.PHONY: update-all-go-deps
-update-all-go-deps:
- @$(MAKE) update-go-deps
- @echo ">> updating Go dependencies in ./documentation/examples/remote_storage/"
- @cd ./documentation/examples/remote_storage/ && for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
+update-all-go-deps: update-go-deps
+ $(foreach dir,$(GO_SUBMODULE_DIRS),$(MAKE) update-go-deps-in-dir DIR=$(dir);)
+ @echo ">> syncing Go workspace"
+ @$(GO) work sync
+
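+# Helper used by update-all-go-deps for each entry in GO_SUBMODULE_DIRS; can also be
+# invoked directly, e.g. `make update-go-deps-in-dir DIR=internal/tools`.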
+.PHONY: update-go-deps-in-dir
+update-go-deps-in-dir:
+ @echo ">> updating Go dependencies in ./$(DIR)/"
+ @cd ./$(DIR) && for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
$(GO) get $$m; \
done
- @cd ./documentation/examples/remote_storage/ && $(GO) mod tidy
+ @cd ./$(DIR) && $(GO) mod tidy
.PHONY: check-node-version
check-node-version:
diff --git a/Makefile.common b/Makefile.common
index 3ed717b460..7beae6e58f 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -1,4 +1,4 @@
-# Copyright 2018 The Prometheus Authors
+# Copyright The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v2.6.0
+GOLANGCI_LINT_VERSION ?= v2.7.2
GOLANGCI_FMT_OPTS ?=
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
@@ -129,6 +129,12 @@ common-check_license:
echo "license header checking failed:"; echo "$${licRes}"; \
exit 1; \
fi
+ @echo ">> checking for copyright years 2026 or later"
+ @futureYearRes=$$(git grep -E 'Copyright (202[6-9]|20[3-9][0-9])' -- '*.go' ':!:vendor/*' || true); \
+ if [ -n "$${futureYearRes}" ]; then \
+ echo "Files with copyright year 2026 or later found (should use 'Copyright The Prometheus Authors'):"; echo "$${futureYearRes}"; \
+ exit 1; \
+ fi
.PHONY: common-deps
common-deps:
diff --git a/README.md b/README.md
index 1743c5a4b8..7b04a51cee 100644
--- a/README.md
+++ b/README.md
@@ -69,7 +69,7 @@ To build Prometheus from source code, You need:
* Go: Version specified in [go.mod](./go.mod) or greater.
* NodeJS: Version specified in [.nvmrc](./web/ui/.nvmrc) or greater.
-* npm: Version 8 or greater (check with `npm --version` and [here](https://www.npmjs.com/)).
+* npm: Version 10 or greater (check with `npm --version` and [here](https://www.npmjs.com/)).
Start by cloning the repository:
@@ -82,15 +82,15 @@ You can use the `go` tool to build and install the `prometheus`
and `promtool` binaries into your `GOPATH`:
```bash
-GO111MODULE=on go install github.com/prometheus/prometheus/cmd/...
+go install github.com/prometheus/prometheus/cmd/...
prometheus --config.file=your_config.yml
```
*However*, when using `go install` to build Prometheus, Prometheus will expect to be able to
-read its web assets from local filesystem directories under `web/ui/static` and
-`web/ui/templates`. In order for these assets to be found, you will have to run Prometheus
-from the root of the cloned repository. Note also that these directories do not include the
-React UI unless it has been built explicitly using `make assets` or `make build`.
+read its web assets from local filesystem directories under `web/ui/static`. In order for
+these assets to be found, you will have to run Prometheus from the root of the cloned
+repository. Note also that this directory does not include the React UI unless it has been
+built explicitly using `make assets` or `make build`.
An example of the above configuration file can be found [here.](https://github.com/prometheus/prometheus/blob/main/documentation/examples/prometheus.yml)
@@ -113,16 +113,31 @@ The Makefile provides several targets:
### Service discovery plugins
-Prometheus is bundled with many service discovery plugins.
-When building Prometheus from source, you can edit the [plugins.yml](./plugins.yml)
-file to disable some service discoveries. The file is a yaml-formatted list of go
-import path that will be built into the Prometheus binary.
+Prometheus is bundled with many service discovery plugins. You can customize
+which service discoveries are included in your build using Go build tags.
-After you have changed the file, you
-need to run `make build` again.
+To exclude service discoveries when building with `make build`, add the desired
+tags to the `.promu.yml` file under `build.tags.all`:
-If you are using another method to compile Prometheus, `make plugins` will
-generate the plugins file accordingly.
+```yaml
+build:
+ tags:
+ all:
+ - netgo
+ - builtinassets
+ - remove_all_sd # Exclude all optional SDs
+ - enable_kubernetes_sd # Re-enable only kubernetes
+```
+
+Then run `make build` as usual. Alternatively, when using `go build` directly:
+
+```bash
+go build -tags "remove_all_sd,enable_kubernetes_sd" ./cmd/prometheus
+```
+
+Available build tags:
+* `remove_all_sd` - Exclude all optional service discoveries (keeps file_sd, static_sd, and http_sd)
+* `enable_<name>_sd` - Re-enable a specific SD when using `remove_all_sd`
If you add out-of-tree plugins, which we do not endorse at the moment,
additional steps might be needed to adjust the `go.mod` and `go.sum` files. As
diff --git a/RELEASE.md b/RELEASE.md
index 952f9f010d..c7375b35aa 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -18,7 +18,8 @@ Please see [the v2.55 RELEASE.md](https://github.com/prometheus/prometheus/blob/
| v3.6 | 2025-08-01 | Ayoub Mrini (Github: @machine424) |
| v3.7 | 2025-09-25 | Arthur Sens and George Krajcsovits (Github: @ArthurSens and @krajorama)|
| v3.8 | 2025-11-06 | Jan Fajerski (GitHub: @jan--f) |
-| v3.9 | 2025-12-18 | **volunteer welcome** |
+| v3.9 | 2025-12-18 | Bryan Boreham (GitHub: @bboreham) |
+| v3.10 | 2026-02-05 | **volunteer welcome** |
If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
diff --git a/VERSION b/VERSION
index c1e43e6d45..6bd10744ae 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-3.7.3
+3.9.1
diff --git a/cmd/prometheus/features_test.go b/cmd/prometheus/features_test.go
new file mode 100644
index 0000000000..5907c87247
--- /dev/null
+++ b/cmd/prometheus/features_test.go
@@ -0,0 +1,125 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/util/testutil"
+)
+
+var updateFeatures = flag.Bool("update-features", false, "update features.json golden file")
+
+func TestFeaturesAPI(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+ t.Parallel()
+
+ tmpDir := t.TempDir()
+ configFile := filepath.Join(tmpDir, "prometheus.yml")
+ require.NoError(t, os.WriteFile(configFile, []byte{}, 0o644))
+
+ port := testutil.RandomUnprivilegedPort(t)
+ prom := prometheusCommandWithLogging(
+ t,
+ configFile,
+ port,
+ fmt.Sprintf("--storage.tsdb.path=%s", tmpDir),
+ )
+ require.NoError(t, prom.Start())
+
+ baseURL := fmt.Sprintf("http://127.0.0.1:%d", port)
+
+ // Wait for Prometheus to be ready.
+ require.Eventually(t, func() bool {
+ resp, err := http.Get(baseURL + "/-/ready")
+ if err != nil {
+ return false
+ }
+ defer resp.Body.Close()
+ return resp.StatusCode == http.StatusOK
+ }, 10*time.Second, 100*time.Millisecond, "Prometheus didn't become ready in time")
+
+ // Fetch features from the API.
+ resp, err := http.Get(baseURL + "/api/v1/features")
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+
+ body, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+
+ // Parse API response.
+ var apiResponse struct {
+ Status string `json:"status"`
+ Data map[string]map[string]bool `json:"data"`
+ }
+ require.NoError(t, json.Unmarshal(body, &apiResponse))
+ require.Equal(t, "success", apiResponse.Status)
+
+ goldenPath := filepath.Join("testdata", "features.json")
+
+ // If update flag is set, write the current features to the golden file.
+ if *updateFeatures {
+ var buf bytes.Buffer
+ encoder := json.NewEncoder(&buf)
+ encoder.SetEscapeHTML(false)
+ encoder.SetIndent("", " ")
+ require.NoError(t, encoder.Encode(apiResponse.Data))
+ // Ensure testdata directory exists.
+ require.NoError(t, os.MkdirAll(filepath.Dir(goldenPath), 0o755))
+ require.NoError(t, os.WriteFile(goldenPath, buf.Bytes(), 0o644))
+ t.Logf("Updated golden file: %s", goldenPath)
+ return
+ }
+
+ // Load golden file.
+ goldenData, err := os.ReadFile(goldenPath)
+ require.NoError(t, err, "Failed to read golden file %s. Run 'make update-features-testdata' to generate it.", goldenPath)
+
+ var expectedFeatures map[string]map[string]bool
+ require.NoError(t, json.Unmarshal(goldenData, &expectedFeatures))
+
+ // The labels implementation depends on build tags (stringlabels, slicelabels, or dedupelabels).
+ // We need to update the expected features to match the current build.
+ if prometheusFeatures, ok := expectedFeatures["prometheus"]; ok {
+ // Remove all label implementation features from expected.
+ delete(prometheusFeatures, "stringlabels")
+ delete(prometheusFeatures, "slicelabels")
+ delete(prometheusFeatures, "dedupelabels")
+ // Add the current implementation.
+ if actualPrometheus, ok := apiResponse.Data["prometheus"]; ok {
+ for _, impl := range []string{"stringlabels", "slicelabels", "dedupelabels"} {
+ if actualPrometheus[impl] {
+ prometheusFeatures[impl] = true
+ }
+ }
+ }
+ }
+
+ // Compare the features data with the golden file.
+ require.Equal(t, expectedFeatures, apiResponse.Data, "Features mismatch. Run 'make update-features-testdata' to update the golden file.")
+}
diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index 12905ef92e..0fa48c72b9 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -16,6 +16,7 @@ package main
import (
"context"
+ "encoding/json"
"errors"
"fmt"
"log/slog"
@@ -72,11 +73,13 @@ import (
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/remote"
+ "github.com/prometheus/prometheus/template"
"github.com/prometheus/prometheus/tracing"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/agent"
"github.com/prometheus/prometheus/util/compression"
"github.com/prometheus/prometheus/util/documentcli"
+ "github.com/prometheus/prometheus/util/features"
"github.com/prometheus/prometheus/util/logging"
"github.com/prometheus/prometheus/util/notifications"
prom_runtime "github.com/prometheus/prometheus/util/runtime"
@@ -230,11 +233,14 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
c.tsdb.EnableMemorySnapshotOnShutdown = true
logger.Info("Experimental memory snapshot on shutdown enabled")
case "extra-scrape-metrics":
- c.scrape.ExtraMetrics = true
- logger.Info("Experimental additional scrape metrics enabled")
+ t := true
+ config.DefaultConfig.GlobalConfig.ExtraScrapeMetrics = &t
+ config.DefaultGlobalConfig.ExtraScrapeMetrics = &t
+ logger.Warn("This option for --enable-feature is being phased out. It currently changes the default for the extra_scrape_metrics config setting to true, but will become a no-op in a future version. Stop using this option and set extra_scrape_metrics in the config instead.", "option", o)
case "metadata-wal-records":
c.scrape.AppendMetadata = true
c.web.AppendMetadata = true
+ features.Enable(features.TSDB, "metadata_wal_records")
logger.Info("Experimental metadata records in WAL enabled")
case "promql-per-step-stats":
c.enablePerStepStats = true
@@ -261,6 +267,7 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
case "created-timestamp-zero-ingestion":
c.scrape.EnableStartTimestampZeroIngestion = true
c.web.STZeroIngestionEnabled = true
+ c.agent.EnableSTAsZeroSample = true
// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
@@ -341,10 +348,14 @@ func main() {
Registerer: prometheus.DefaultRegisterer,
},
web: web.Options{
- Registerer: prometheus.DefaultRegisterer,
- Gatherer: prometheus.DefaultGatherer,
+ Registerer: prometheus.DefaultRegisterer,
+ Gatherer: prometheus.DefaultGatherer,
+ FeatureRegistry: features.DefaultRegistry,
},
promslogConfig: promslog.Config{},
+ scrape: scrape.Options{
+ FeatureRegistry: features.DefaultRegistry,
+ },
}
a := kingpin.New(filepath.Base(os.Args[0]), "The Prometheus monitoring server").UsageWriter(os.Stdout)
@@ -456,8 +467,9 @@ func main() {
Default("true").Hidden().BoolVar(&cfg.tsdb.EnableOverlappingCompaction)
var (
- tsdbWALCompression bool
- tsdbWALCompressionType string
+ tsdbWALCompression bool
+ tsdbWALCompressionType string
+ tsdbDelayCompactFilePath string
)
serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL. If false, the --storage.tsdb.wal-compression-type flag is ignored.").
Hidden().Default("true").BoolVar(&tsdbWALCompression)
@@ -474,6 +486,12 @@ func main() {
serverOnlyFlag(a, "storage.tsdb.delayed-compaction.max-percent", "Sets the upper limit for the random compaction delay, specified as a percentage of the head chunk range. 100 means the compaction can be delayed by up to the entire head chunk range. Only effective when the delayed-compaction feature flag is enabled.").
Default("10").Hidden().IntVar(&cfg.tsdb.CompactionDelayMaxPercent)
+ serverOnlyFlag(a, "storage.tsdb.delay-compact-file.path", "Path to a JSON file with uploaded TSDB blocks e.g. Thanos shipper meta file. If set TSDB will only compact 1 level blocks that are marked as uploaded in that file, improving external storage integrations e.g. with Thanos sidecar. 1+ level compactions won't be delayed.").
+ Default("").StringVar(&tsdbDelayCompactFilePath)
+
+ serverOnlyFlag(a, "storage.tsdb.block-reload-interval", "Interval at which to check for new or removed blocks in storage. Users who manually backfill or drop blocks must wait up to this duration before changes become available.").
+ Default("1m").Hidden().SetValue(&cfg.tsdb.BlockReloadInterval)
+
agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage.").
Default("data-agent/").StringVar(&cfg.agentStoragePath)
@@ -665,6 +683,10 @@ func main() {
}
cfg.tsdb.MaxExemplars = cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars
}
+ if cfg.tsdb.BlockReloadInterval < model.Duration(1*time.Second) {
+ logger.Warn("The option --storage.tsdb.block-reload-interval is set to a value less than 1s. Setting it to 1s to avoid overload.")
+ cfg.tsdb.BlockReloadInterval = model.Duration(1 * time.Second)
+ }
if cfgFile.StorageConfig.TSDBConfig != nil {
cfg.tsdb.OutOfOrderTimeWindow = cfgFile.StorageConfig.TSDBConfig.OutOfOrderTimeWindow
cfg.tsdb.StaleSeriesCompactionThreshold = cfgFile.StorageConfig.TSDBConfig.StaleSeriesCompactionThreshold
@@ -704,6 +726,12 @@ func main() {
}
}
+ if tsdbDelayCompactFilePath != "" {
+ logger.Info("Compactions will be delayed for blocks not marked as uploaded in the file tracking uploads", "path", tsdbDelayCompactFilePath)
+ cfg.tsdb.BlockCompactionExcludeFunc = excludeBlocksPendingUpload(
+ logger, tsdbDelayCompactFilePath)
+ }
+
// Now that the validity of the config is established, set the config
// success metrics accordingly, although the config isn't really loaded
// yet. This will happen later (including setting these metrics again),
@@ -787,6 +815,12 @@ func main() {
"vm_limits", prom_runtime.VMLimits(),
)
+ features.Set(features.Prometheus, "agent_mode", agentMode)
+ features.Set(features.Prometheus, "server_mode", !agentMode)
+ features.Set(features.Prometheus, "auto_reload_config", cfg.enableAutoReload)
+ features.Enable(features.Prometheus, labels.ImplementationName)
+ template.RegisterFeatures(features.DefaultRegistry)
+
var (
localStorage = &readyStorage{stats: tsdb.NewDBStats()}
scraper = &readyScrapeManager{}
@@ -823,13 +857,13 @@ func main() {
os.Exit(1)
}
- discoveryManagerScrape = discovery.NewManager(ctxScrape, logger.With("component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"))
+ discoveryManagerScrape = discovery.NewManager(ctxScrape, logger.With("component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"), discovery.FeatureRegistry(features.DefaultRegistry))
if discoveryManagerScrape == nil {
logger.Error("failed to create a discovery manager scrape")
os.Exit(1)
}
- discoveryManagerNotify = discovery.NewManager(ctxNotify, logger.With("component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"))
+ discoveryManagerNotify = discovery.NewManager(ctxNotify, logger.With("component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"), discovery.FeatureRegistry(features.DefaultRegistry))
if discoveryManagerNotify == nil {
logger.Error("failed to create a discovery manager notify")
os.Exit(1)
@@ -870,6 +904,7 @@ func main() {
EnablePerStepStats: cfg.enablePerStepStats,
EnableDelayedNameRemoval: cfg.promqlEnableDelayedNameRemoval,
EnableTypeAndUnitLabels: cfg.scrape.EnableTypeAndUnitLabels,
+ FeatureRegistry: features.DefaultRegistry,
}
queryEngine = promql.NewEngine(opts)
@@ -892,6 +927,7 @@ func main() {
DefaultRuleQueryOffset: func() time.Duration {
return time.Duration(cfgFile.GlobalConfig.RuleQueryOffset)
},
+ FeatureRegistry: features.DefaultRegistry,
})
}
@@ -1328,6 +1364,7 @@ func main() {
"RetentionDuration", cfg.tsdb.RetentionDuration,
"WALSegmentSize", cfg.tsdb.WALSegmentSize,
"WALCompressionType", cfg.tsdb.WALCompressionType,
+ "BlockReloadInterval", cfg.tsdb.BlockReloadInterval,
)
startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000)
@@ -1384,6 +1421,7 @@ func main() {
"MinWALTime", cfg.agent.MinWALTime,
"MaxWALTime", cfg.agent.MaxWALTime,
"OutOfOrderTimeWindow", cfg.agent.OutOfOrderTimeWindow,
+ "EnableSTAsZeroSample", cfg.agent.EnableSTAsZeroSample,
)
localStorage.Set(db, 0)
@@ -1884,6 +1922,8 @@ type tsdbOptions struct {
CompactionDelayMaxPercent int
EnableOverlappingCompaction bool
UseUncachedIO bool
+ BlockCompactionExcludeFunc tsdb.BlockExcludeFilterFunc
+ BlockReloadInterval model.Duration
StaleSeriesCompactionThreshold float64
}
@@ -1908,6 +1948,9 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
CompactionDelayMaxPercent: opts.CompactionDelayMaxPercent,
EnableOverlappingCompaction: opts.EnableOverlappingCompaction,
UseUncachedIO: opts.UseUncachedIO,
+ BlockCompactionExcludeFunc: opts.BlockCompactionExcludeFunc,
+ BlockReloadInterval: time.Duration(opts.BlockReloadInterval),
+ FeatureRegistry: features.DefaultRegistry,
StaleSeriesCompactionThreshold: opts.StaleSeriesCompactionThreshold,
}
}
@@ -1921,7 +1964,8 @@ type agentOptions struct {
TruncateFrequency model.Duration
MinWALTime, MaxWALTime model.Duration
NoLockfile bool
- OutOfOrderTimeWindow int64
+ OutOfOrderTimeWindow int64 // TODO(bwplotka): Unused option, fix it or remove.
+ EnableSTAsZeroSample bool
}
func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Options {
@@ -1937,6 +1981,7 @@ func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Option
MaxWALTime: durationToInt64Millis(time.Duration(opts.MaxWALTime)),
NoLockfile: opts.NoLockfile,
OutOfOrderTimeWindow: outOfOrderTimeWindow,
+ EnableSTAsZeroSample: opts.EnableSTAsZeroSample,
}
}
@@ -1973,3 +2018,48 @@ func (p *rwProtoMsgFlagParser) Set(opt string) error {
*p.msgs = append(*p.msgs, t)
return nil
}
+
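+// UploadMeta holds the fields read from the shipper meta file (see the
+// `storage.tsdb.delay-compact-file.path` flag), for example:
+//
+//	{"uploaded": ["01ARZ3NDEKTSV4RRFFQ69G5FAV"]}
+//
+// Unknown fields in the file are ignored.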
+type UploadMeta struct {
+ Uploaded []string `json:"uploaded"`
+}
+
+// Cache the last read UploadMeta.
+var (
+ tsdbDelayCompactLastMeta *UploadMeta // The content of uploadMetaPath from the last time we opened it.
+ tsdbDelayCompactLastMetaTime time.Time // The time at which tsdbDelayCompactLastMeta was last updated.
+)
+
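+// exludeBlocksPendingUpload returns a tsdb.BlockExcludeFilterFunc that excludes level-1
+// blocks from compaction until they are listed as uploaded in the file at uploadMetaPath.
+// The parsed file content is cached for up to one minute to avoid re-reading it for every block.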
+func exludeBlocksPendingUpload(logger *slog.Logger, uploadMetaPath string) tsdb.BlockExcludeFilterFunc {
+ return func(meta *tsdb.BlockMeta) bool {
+ if meta.Compaction.Level > 1 {
+ // Blocks with compaction level > 1 are assumed not to have been uploaded, so there is no need to delay them.
+ // See the `storage.tsdb.delay-compact-file.path` flag for details.
+ return false
+ }
+
+ // If we have cached uploadMetaPath content that was stored within the last minute, use it.
+ if tsdbDelayCompactLastMeta != nil &&
+ tsdbDelayCompactLastMetaTime.After(time.Now().UTC().Add(time.Minute*-1)) {
+ return !slices.Contains(tsdbDelayCompactLastMeta.Uploaded, meta.ULID.String())
+ }
+
+ // Nothing is cached, or the cache is older than a minute. Try to open and parse the file at uploadMetaPath.
+ data, err := os.ReadFile(uploadMetaPath)
+ if err != nil {
+ logger.Warn("cannot open TSDB upload meta file", slog.String("path", uploadMetaPath), slog.Any("err", err))
+ return false
+ }
+
+ var uploadMeta UploadMeta
+ if err = json.Unmarshal(data, &uploadMeta); err != nil {
+ logger.Warn("cannot parse TSDB upload meta file", slog.String("path", uploadMetaPath), slog.Any("err", err))
+ return false
+ }
+
+ // We have parsed the uploadMetaPath file; cache it.
+ tsdbDelayCompactLastMeta = &uploadMeta
+ tsdbDelayCompactLastMetaTime = time.Now().UTC()
+
+ return !slices.Contains(uploadMeta.Uploaded, meta.ULID.String())
+ }
+}
diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go
index 607e422868..6765bae900 100644
--- a/cmd/prometheus/main_test.go
+++ b/cmd/prometheus/main_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -979,6 +979,7 @@ remote_write:
// | dataPending | 0 | 1228.8 |
// | desiredShards | 0.6 | 369.2 |.
func TestRemoteWrite_ReshardingWithoutDeadlock(t *testing.T) {
+ t.Skip("flaky test, see https://github.com/prometheus/prometheus/issues/17489")
t.Parallel()
tmpDir := t.TempDir()
diff --git a/cmd/prometheus/main_unix_test.go b/cmd/prometheus/main_unix_test.go
index 66bfe9b60a..ea130b3bf9 100644
--- a/cmd/prometheus/main_unix_test.go
+++ b/cmd/prometheus/main_unix_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go
index 645ac31145..5e5a9ac3b7 100644
--- a/cmd/prometheus/query_log_test.go
+++ b/cmd/prometheus/query_log_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/prometheus/reload_test.go b/cmd/prometheus/reload_test.go
index 6feb2bf3a5..bbe108c9a6 100644
--- a/cmd/prometheus/reload_test.go
+++ b/cmd/prometheus/reload_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/prometheus/scrape_failure_log_test.go b/cmd/prometheus/scrape_failure_log_test.go
index f35cb7bee6..c3f459f601 100644
--- a/cmd/prometheus/scrape_failure_log_test.go
+++ b/cmd/prometheus/scrape_failure_log_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/prometheus/testdata/features.json b/cmd/prometheus/testdata/features.json
new file mode 100644
index 0000000000..145bb04d77
--- /dev/null
+++ b/cmd/prometheus/testdata/features.json
@@ -0,0 +1,249 @@
+{
+ "api": {
+ "admin": false,
+ "exclude_alerts": true,
+ "label_values_match": true,
+ "lifecycle": false,
+ "otlp_write_receiver": false,
+ "query_stats": true,
+ "query_warnings": true,
+ "remote_write_receiver": false,
+ "time_range_labels": true,
+ "time_range_series": true
+ },
+ "otlp_receiver": {
+ "delta_conversion": false,
+ "native_delta_ingestion": false
+ },
+ "prometheus": {
+ "agent_mode": false,
+ "auto_reload_config": false,
+ "server_mode": true,
+ "stringlabels": true
+ },
+ "promql": {
+ "anchored": false,
+ "at_modifier": true,
+ "bool": true,
+ "by": true,
+ "delayed_name_removal": false,
+ "duration_expr": false,
+ "group_left": true,
+ "group_right": true,
+ "ignoring": true,
+ "negative_offset": true,
+ "offset": true,
+ "on": true,
+ "per_query_lookback_delta": true,
+ "per_step_stats": false,
+ "smoothed": false,
+ "subqueries": true,
+ "type_and_unit_labels": false,
+ "without": true
+ },
+ "promql_functions": {
+ "abs": true,
+ "absent": true,
+ "absent_over_time": true,
+ "acos": true,
+ "acosh": true,
+ "asin": true,
+ "asinh": true,
+ "atan": true,
+ "atanh": true,
+ "avg_over_time": true,
+ "ceil": true,
+ "changes": true,
+ "clamp": true,
+ "clamp_max": true,
+ "clamp_min": true,
+ "cos": true,
+ "cosh": true,
+ "count_over_time": true,
+ "day_of_month": true,
+ "day_of_week": true,
+ "day_of_year": true,
+ "days_in_month": true,
+ "deg": true,
+ "delta": true,
+ "deriv": true,
+ "double_exponential_smoothing": false,
+ "exp": true,
+ "first_over_time": false,
+ "floor": true,
+ "histogram_avg": true,
+ "histogram_count": true,
+ "histogram_fraction": true,
+ "histogram_quantile": true,
+ "histogram_stddev": true,
+ "histogram_stdvar": true,
+ "histogram_sum": true,
+ "hour": true,
+ "idelta": true,
+ "increase": true,
+ "info": false,
+ "irate": true,
+ "label_join": true,
+ "label_replace": true,
+ "last_over_time": true,
+ "ln": true,
+ "log10": true,
+ "log2": true,
+ "mad_over_time": false,
+ "max_over_time": true,
+ "min_over_time": true,
+ "minute": true,
+ "month": true,
+ "pi": true,
+ "predict_linear": true,
+ "present_over_time": true,
+ "quantile_over_time": true,
+ "rad": true,
+ "rate": true,
+ "resets": true,
+ "round": true,
+ "scalar": true,
+ "sgn": true,
+ "sin": true,
+ "sinh": true,
+ "sort": true,
+ "sort_by_label": false,
+ "sort_by_label_desc": false,
+ "sort_desc": true,
+ "sqrt": true,
+ "stddev_over_time": true,
+ "stdvar_over_time": true,
+ "sum_over_time": true,
+ "tan": true,
+ "tanh": true,
+ "time": true,
+ "timestamp": true,
+ "ts_of_first_over_time": false,
+ "ts_of_last_over_time": false,
+ "ts_of_max_over_time": false,
+ "ts_of_min_over_time": false,
+ "vector": true,
+ "year": true
+ },
+ "promql_operators": {
+ "!=": true,
+ "!~": true,
+ "%": true,
+ "*": true,
+ "+": true,
+ "-": true,
+ "/": true,
+ "<": true,
+ "<=": true,
+ "==": true,
+ "=~": true,
+ ">": true,
+ ">=": true,
+ "@": true,
+ "^": true,
+ "and": true,
+ "atan2": true,
+ "avg": true,
+ "bottomk": true,
+ "count": true,
+ "count_values": true,
+ "group": true,
+ "limit_ratio": false,
+ "limitk": false,
+ "max": true,
+ "min": true,
+ "or": true,
+ "quantile": true,
+ "stddev": true,
+ "stdvar": true,
+ "sum": true,
+ "topk": true,
+ "unless": true
+ },
+ "rules": {
+ "concurrent_rule_eval": false,
+ "keep_firing_for": true,
+ "query_offset": true
+ },
+ "scrape": {
+ "extra_scrape_metrics": true,
+ "start_timestamp_zero_ingestion": false,
+ "type_and_unit_labels": false
+ },
+ "service_discovery_providers": {
+ "aws": true,
+ "azure": true,
+ "consul": true,
+ "digitalocean": true,
+ "dns": true,
+ "docker": true,
+ "dockerswarm": true,
+ "ec2": true,
+ "ecs": true,
+ "eureka": true,
+ "file": true,
+ "gce": true,
+ "hetzner": true,
+ "http": true,
+ "ionos": true,
+ "kubernetes": true,
+ "kuma": true,
+ "lightsail": true,
+ "linode": true,
+ "marathon": true,
+ "nerve": true,
+ "nomad": true,
+ "openstack": true,
+ "ovhcloud": true,
+ "puppetdb": true,
+ "scaleway": true,
+ "serverset": true,
+ "stackit": true,
+ "static": true,
+ "triton": true,
+ "uyuni": true,
+ "vultr": true
+ },
+ "templating_functions": {
+ "args": true,
+ "externalURL": true,
+ "first": true,
+ "graphLink": true,
+ "humanize": true,
+ "humanize1024": true,
+ "humanizeDuration": true,
+ "humanizePercentage": true,
+ "humanizeTimestamp": true,
+ "label": true,
+ "match": true,
+ "now": true,
+ "parseDuration": true,
+ "pathPrefix": true,
+ "query": true,
+ "reReplaceAll": true,
+ "safeHtml": true,
+ "sortByLabel": true,
+ "stripDomain": true,
+ "stripPort": true,
+ "strvalue": true,
+ "tableLink": true,
+ "title": true,
+ "toDuration": true,
+ "toLower": true,
+ "toTime": true,
+ "toUpper": true,
+ "urlQueryEscape": true,
+ "value": true
+ },
+ "tsdb": {
+ "delayed_compaction": false,
+ "exemplar_storage": false,
+ "isolation": true,
+ "native_histograms": true,
+ "use_uncached_io": false
+ },
+ "ui": {
+ "ui_v2": false,
+ "ui_v3": true
+ }
+}
diff --git a/cmd/prometheus/upload_test.go b/cmd/prometheus/upload_test.go
new file mode 100644
index 0000000000..97a98351a7
--- /dev/null
+++ b/cmd/prometheus/upload_test.go
@@ -0,0 +1,144 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "encoding/json"
+ "os"
+ "path"
+ "testing"
+ "time"
+
+ "github.com/oklog/ulid/v2"
+ "github.com/prometheus/common/promslog"
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/tsdb"
+)
+
+func TestBlockExcludeFilter(t *testing.T) {
+ for _, test := range []struct {
+ summary string // Description of the test case.
+ uploaded []ulid.ULID // List of blocks marked as uploaded inside the shipper file.
+ setupFn func(string) // Optional function to run before the test; it receives the path to the shipper file.
+ meta tsdb.BlockMeta // Meta of the block we're checking.
+ isExcluded bool // Whether we expect the block to be excluded.
+ }{
+ {
+ summary: "missing file",
+ setupFn: func(path string) {
+ // Delete shipper file to test error handling.
+ require.NoError(t, os.Remove(path))
+ },
+ meta: tsdb.BlockMeta{ULID: ulid.MustNew(1, nil)},
+ isExcluded: false,
+ },
+ {
+ summary: "corrupt file",
+ setupFn: func(path string) {
+ // Overwrite the shipper file content with invalid JSON.
+ require.NoError(t, os.WriteFile(path, []byte("{["), 0o644))
+ },
+ meta: tsdb.BlockMeta{ULID: ulid.MustNew(1, nil)},
+ isExcluded: false,
+ },
+ {
+ summary: "empty uploaded list",
+ uploaded: []ulid.ULID{},
+ meta: tsdb.BlockMeta{ULID: ulid.MustNew(1, nil)},
+ isExcluded: true,
+ },
+ {
+ summary: "block meta not present in the uploaded list, level=1",
+ uploaded: []ulid.ULID{ulid.MustNew(1, nil), ulid.MustNew(3, nil)},
+ meta: tsdb.BlockMeta{
+ ULID: ulid.MustNew(2, nil),
+ Compaction: tsdb.BlockMetaCompaction{Level: 1},
+ },
+ isExcluded: true,
+ },
+ {
+ summary: "block meta not present in the uploaded list, level=2",
+ uploaded: []ulid.ULID{ulid.MustNew(1, nil), ulid.MustNew(3, nil)},
+ meta: tsdb.BlockMeta{
+ ULID: ulid.MustNew(2, nil),
+ Compaction: tsdb.BlockMetaCompaction{Level: 2},
+ },
+ isExcluded: false,
+ },
+ {
+ summary: "block meta present in the uploaded list",
+ uploaded: []ulid.ULID{ulid.MustNew(1, nil), ulid.MustNew(2, nil), ulid.MustNew(3, nil)},
+ meta: tsdb.BlockMeta{ULID: ulid.MustNew(2, nil)},
+ isExcluded: false,
+ },
+ {
+ summary: "don't read the file if there's valid cache",
+ setupFn: func(path string) {
+ // Remove the shipper file, cache should be used instead.
+ require.NoError(t, os.Remove(path))
+ // Set cached values
+ tsdbDelayCompactLastMeta = &UploadMeta{
+ Uploaded: []string{
+ ulid.MustNew(1, nil).String(),
+ ulid.MustNew(2, nil).String(),
+ ulid.MustNew(3, nil).String(),
+ },
+ }
+ tsdbDelayCompactLastMetaTime = time.Now().UTC().Add(time.Second * -1)
+ },
+ uploaded: []ulid.ULID{},
+ meta: tsdb.BlockMeta{ULID: ulid.MustNew(2, nil)},
+ isExcluded: false,
+ },
+ {
+ summary: "read the file if there's cache but expired",
+ setupFn: func(_ string) {
+ // Set the cache but make it too old
+ tsdbDelayCompactLastMeta = &UploadMeta{
+ Uploaded: []string{},
+ }
+ tsdbDelayCompactLastMetaTime = time.Now().UTC().Add(time.Second * -61)
+ },
+ uploaded: []ulid.ULID{ulid.MustNew(1, nil), ulid.MustNew(2, nil), ulid.MustNew(3, nil)},
+ meta: tsdb.BlockMeta{ULID: ulid.MustNew(2, nil)},
+ isExcluded: false,
+ },
+ } {
+ t.Run(test.summary, func(t *testing.T) {
+ dir := t.TempDir()
+ shipperPath := filepath.Join(dir, "shipper.json")
+
+ uploaded := make([]string, 0, len(test.uploaded))
+ for _, ul := range test.uploaded {
+ uploaded = append(uploaded, ul.String())
+ }
+ ts := UploadMeta{Uploaded: uploaded}
+ data, err := json.Marshal(ts)
+ require.NoError(t, err, "failed to marshal upload meta file")
+ require.NoError(t, os.WriteFile(shipperPath, data, 0o644), "failed to write upload meta file")
+
+ tsdbDelayCompactLastMeta = nil
+ tsdbDelayCompactLastMetaTime = time.Time{}
+
+ if test.setupFn != nil {
+ test.setupFn(shipperPath)
+ }
+
+ fn := exludeBlocksPendingUpload(promslog.NewNopLogger(), shipperPath)
+ isExcluded := fn(&test.meta)
+ require.Equal(t, test.isExcluded, isExcluded)
+ })
+ }
+}
diff --git a/cmd/promtool/analyze.go b/cmd/promtool/analyze.go
index aea72a193b..a725772f5d 100644
--- a/cmd/promtool/analyze.go
+++ b/cmd/promtool/analyze.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/promtool/analyze_test.go b/cmd/promtool/analyze_test.go
index 3de4283a15..d2e81da2c8 100644
--- a/cmd/promtool/analyze_test.go
+++ b/cmd/promtool/analyze_test.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/promtool/archive.go b/cmd/promtool/archive.go
index 7b565c57cc..23baea2700 100644
--- a/cmd/promtool/archive.go
+++ b/cmd/promtool/archive.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go
index 47de3b5c1c..f04a76b0a5 100644
--- a/cmd/promtool/backfill.go
+++ b/cmd/promtool/backfill.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/promtool/backfill_test.go b/cmd/promtool/backfill_test.go
index 8a599510a9..499b90e99a 100644
--- a/cmd/promtool/backfill_test.go
+++ b/cmd/promtool/backfill_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/promtool/debug.go b/cmd/promtool/debug.go
index 6383aaface..b6e82ef981 100644
--- a/cmd/promtool/debug.go
+++ b/cmd/promtool/debug.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go
index 460e47fd25..16cc40233a 100644
--- a/cmd/promtool/main.go
+++ b/cmd/promtool/main.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -162,7 +162,11 @@ func main() {
checkRulesIgnoreUnknownFields := checkRulesCmd.Flag("ignore-unknown-fields", "Ignore unknown fields in the rule files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default.").Default("false").Bool()
checkMetricsCmd := checkCmd.Command("metrics", checkMetricsUsage)
- checkMetricsExtended := checkCmd.Flag("extended", "Print extended information related to the cardinality of the metrics.").Bool()
+ checkMetricsExtended := checkMetricsCmd.Flag("extended", "Print extended information related to the cardinality of the metrics.").Bool()
+ checkMetricsLint := checkMetricsCmd.Flag(
+ "lint",
+ "Linting checks to apply for metrics. Available options are: all, none. Use --lint=none to disable metrics linting.",
+ ).Default(lintOptionAll).String()
agentMode := checkConfigCmd.Flag("agent", "Check config file for Prometheus in Agent mode.").Bool()
queryCmd := app.Command("query", "Run query against a Prometheus server.")
@@ -257,12 +261,13 @@ func main() {
listHumanReadable := tsdbListCmd.Flag("human-readable", "Print human readable values.").Short('r').Bool()
listPath := tsdbListCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
- tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.")
+ tsdbDumpCmd := tsdbCmd.Command("dump", "Dump data (series+samples or optionally just series) from a TSDB.")
dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end.").String()
dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
+ dumpFormat := tsdbDumpCmd.Flag("format", "Output format of the dump: prom (default) or seriesjson.").Default("prom").Enum("prom", "seriesjson")
tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.")
dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
@@ -374,7 +379,7 @@ func main() {
os.Exit(CheckRules(newRulesLintConfig(*checkRulesLint, *checkRulesLintFatal, *checkRulesIgnoreUnknownFields, model.UTF8Validation), *ruleFiles...))
case checkMetricsCmd.FullCommand():
- os.Exit(CheckMetrics(*checkMetricsExtended))
+ os.Exit(CheckMetrics(*checkMetricsExtended, *checkMetricsLint))
case pushMetricsCmd.FullCommand():
os.Exit(PushMetrics(remoteWriteURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *pushMetricsProtoMsg, *pushMetricsLabels, *metricFiles...))
@@ -428,9 +433,14 @@ func main() {
os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))
case tsdbDumpCmd.FullCommand():
- os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpSandboxDirRoot, *dumpMinTime, *dumpMaxTime, *dumpMatch, formatSeriesSet)))
+ format := formatSeriesSet
+ if *dumpFormat == "seriesjson" {
+ format = formatSeriesSetLabelsToJSON
+ }
+ os.Exit(checkErr(dumpTSDBData(ctx, *dumpPath, *dumpSandboxDirRoot, *dumpMinTime, *dumpMaxTime, *dumpMatch, format)))
+
case tsdbDumpOpenMetricsCmd.FullCommand():
- os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
+ os.Exit(checkErr(dumpTSDBData(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
// TODO(aSquare14): Work on adding support for custom block size.
case openMetricsImportCmd.FullCommand():
os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration, *openMetricsLabels))
@@ -1012,36 +1022,53 @@ func ruleMetric(rule rulefmt.Rule) string {
}
var checkMetricsUsage = strings.TrimSpace(`
-Pass Prometheus metrics over stdin to lint them for consistency and correctness.
+Pass Prometheus metrics over stdin to lint them for consistency and correctness, and optionally perform cardinality analysis.
examples:
$ cat metrics.prom | promtool check metrics
-$ curl -s http://localhost:9090/metrics | promtool check metrics
+$ curl -s http://localhost:9090/metrics | promtool check metrics --extended
+
+$ curl -s http://localhost:9100/metrics | promtool check metrics --extended --lint=none
`)
// CheckMetrics performs a linting pass on input metrics.
-func CheckMetrics(extended bool) int {
- var buf bytes.Buffer
- tee := io.TeeReader(os.Stdin, &buf)
- l := promlint.New(tee)
- problems, err := l.Lint()
- if err != nil {
- fmt.Fprintln(os.Stderr, "error while linting:", err)
+func CheckMetrics(extended bool, lint string) int {
+ // Validate that at least one feature is enabled.
+ if !extended && lint == lintOptionNone {
+ fmt.Fprintln(os.Stderr, "error: at least one of --extended or linting must be enabled")
+ fmt.Fprintln(os.Stderr, "Use --extended for cardinality analysis, or remove --lint=none to enable linting")
return failureExitCode
}
- for _, p := range problems {
- fmt.Fprintln(os.Stderr, p.Metric, p.Text)
+ var buf bytes.Buffer
+ var (
+ problems []promlint.Problem
+ reader io.Reader
+ err error
+ )
+
+ if lint != lintOptionNone {
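+ // Tee stdin into buf so the extended cardinality check can re-read the same input after linting.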
+ tee := io.TeeReader(os.Stdin, &buf)
+ l := promlint.New(tee)
+ problems, err = l.Lint()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "error while linting:", err)
+ return failureExitCode
+ }
+ for _, p := range problems {
+ fmt.Fprintln(os.Stderr, p.Metric, p.Text)
+ }
+ reader = &buf
+ } else {
+ reader = os.Stdin
}
- if len(problems) > 0 {
- return lintErrExitCode
- }
+ hasLintProblems := len(problems) > 0
if extended {
- stats, total, err := checkMetricsExtended(&buf)
+ stats, total, err := checkMetricsExtended(reader)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return failureExitCode
@@ -1055,6 +1082,10 @@ func CheckMetrics(extended bool) int {
w.Flush()
}
+ if hasLintProblems {
+ return lintErrExitCode
+ }
+
return successExitCode
}
diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go
index a9a54f6d5f..4f4ca3de71 100644
--- a/cmd/promtool/main_test.go
+++ b/cmd/promtool/main_test.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -18,6 +18,7 @@ import (
"context"
"errors"
"fmt"
+ "io"
"net/http"
"net/http/httptest"
"net/url"
@@ -402,6 +403,99 @@ func TestCheckMetricsExtended(t *testing.T) {
}, stats)
}
+func TestCheckMetricsLintOptions(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("Skipping on windows")
+ }
+
+ const testMetrics = `
+# HELP testMetric_CamelCase A test metric with camelCase
+# TYPE testMetric_CamelCase gauge
+testMetric_CamelCase{label="value1"} 1
+`
+
+ tests := []struct {
+ name string
+ lint string
+ extended bool
+ wantErrCode int
+ wantLint bool
+ wantCard bool
+ }{
+ {
+ name: "default_all_with_extended",
+ lint: lintOptionAll,
+ extended: true,
+ wantErrCode: lintErrExitCode,
+ wantLint: true,
+ wantCard: true,
+ },
+ {
+ name: "lint_none_with_extended",
+ lint: lintOptionNone,
+ extended: true,
+ wantErrCode: successExitCode,
+ wantLint: false,
+ wantCard: true,
+ },
+ {
+ name: "both_disabled_fails",
+ lint: lintOptionNone,
+ extended: false,
+ wantErrCode: failureExitCode,
+ wantLint: false,
+ wantCard: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ r, w, err := os.Pipe()
+ require.NoError(t, err)
+ _, err = w.WriteString(testMetrics)
+ require.NoError(t, err)
+ w.Close()
+
+ oldStdin := os.Stdin
+ os.Stdin = r
+ defer func() { os.Stdin = oldStdin }()
+
+ oldStdout := os.Stdout
+ oldStderr := os.Stderr
+ rOut, wOut, err := os.Pipe()
+ require.NoError(t, err)
+ rErr, wErr, err := os.Pipe()
+ require.NoError(t, err)
+ os.Stdout = wOut
+ os.Stderr = wErr
+
+ code := CheckMetrics(tt.extended, tt.lint)
+
+ wOut.Close()
+ wErr.Close()
+ os.Stdout = oldStdout
+ os.Stderr = oldStderr
+
+ var outBuf, errBuf bytes.Buffer
+ _, _ = io.Copy(&outBuf, rOut)
+ _, _ = io.Copy(&errBuf, rErr)
+
+ require.Equal(t, tt.wantErrCode, code)
+ if tt.wantLint {
+ require.Contains(t, errBuf.String(), "testMetric_CamelCase")
+ } else {
+ require.NotContains(t, errBuf.String(), "testMetric_CamelCase")
+ }
+
+ if tt.wantCard {
+ require.Contains(t, outBuf.String(), "Cardinality")
+ } else {
+ require.NotContains(t, outBuf.String(), "Cardinality")
+ }
+ })
+ }
+}
+
func TestExitCodes(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go
index c21ef15fd8..b1a2beb72e 100644
--- a/cmd/promtool/metrics.go
+++ b/cmd/promtool/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/promtool/metrics_test.go b/cmd/promtool/metrics_test.go
index 938f1cadfd..d5a3bf63cc 100644
--- a/cmd/promtool/metrics_test.go
+++ b/cmd/promtool/metrics_test.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/promtool/query.go b/cmd/promtool/query.go
index 0d7cb12cf4..1342f148f8 100644
--- a/cmd/promtool/query.go
+++ b/cmd/promtool/query.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/promtool/rules.go b/cmd/promtool/rules.go
index 98f2c38b58..3960206f6b 100644
--- a/cmd/promtool/rules.go
+++ b/cmd/promtool/rules.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go
index 6fe7d8c5a1..678e2b4d50 100644
--- a/cmd/promtool/rules_test.go
+++ b/cmd/promtool/rules_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/promtool/sd.go b/cmd/promtool/sd.go
index 884864205c..6b844c699a 100644
--- a/cmd/promtool/sd.go
+++ b/cmd/promtool/sd.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/promtool/sd_test.go b/cmd/promtool/sd_test.go
index e41c9893b2..9f43764f55 100644
--- a/cmd/promtool/sd_test.go
+++ b/cmd/promtool/sd_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/promtool/testdata/dump-series-1.prom b/cmd/promtool/testdata/dump-series-1.prom
new file mode 100644
index 0000000000..5e44c0bf1b
--- /dev/null
+++ b/cmd/promtool/testdata/dump-series-1.prom
@@ -0,0 +1,3 @@
+{"__name__":"heavy_metric","foo":"bar"}
+{"__name__":"heavy_metric","foo":"foo"}
+{"__name__":"metric","baz":"abc","foo":"bar"}
diff --git a/cmd/promtool/testdata/dump-series-2.prom b/cmd/promtool/testdata/dump-series-2.prom
new file mode 100644
index 0000000000..fefefa6d1b
--- /dev/null
+++ b/cmd/promtool/testdata/dump-series-2.prom
@@ -0,0 +1,2 @@
+{"__name__":"heavy_metric","foo":"foo"}
+{"__name__":"metric","baz":"abc","foo":"bar"}
diff --git a/cmd/promtool/testdata/dump-series-3.prom b/cmd/promtool/testdata/dump-series-3.prom
new file mode 100644
index 0000000000..dd98e8707d
--- /dev/null
+++ b/cmd/promtool/testdata/dump-series-3.prom
@@ -0,0 +1 @@
+{"__name__":"metric","baz":"abc","foo":"bar"}
diff --git a/cmd/promtool/testdata/start-time-test.yml b/cmd/promtool/testdata/start-time-test.yml
new file mode 100644
index 0000000000..b7365366f4
--- /dev/null
+++ b/cmd/promtool/testdata/start-time-test.yml
@@ -0,0 +1,76 @@
+rule_files:
+ - rules.yml
+
+evaluation_interval: 1m
+
+tests:
+ # Test with the default start_timestamp (0, the Unix epoch).
+ - name: default_start_time
+ interval: 1m
+ promql_expr_test:
+ - expr: time()
+ eval_time: 0m
+ exp_samples:
+ - value: 0
+ - expr: time()
+ eval_time: 5m
+ exp_samples:
+ - value: 300
+
+ # Test with RFC3339 start_timestamp.
+ - name: rfc3339_start_timestamp
+ interval: 1m
+ start_timestamp: "2024-01-01T00:00:00Z"
+ promql_expr_test:
+ - expr: time()
+ eval_time: 0m
+ exp_samples:
+ - value: 1704067200
+ - expr: time()
+ eval_time: 5m
+ exp_samples:
+ - value: 1704067500
+
+ # Test with Unix timestamp start_timestamp.
+ - name: unix_timestamp_start_timestamp
+ interval: 1m
+ start_timestamp: 1609459200
+ input_series:
+ - series: test_metric
+ values: "1 1 1"
+ promql_expr_test:
+ - expr: time()
+ eval_time: 0m
+ exp_samples:
+ - value: 1609459200
+ - expr: time()
+ eval_time: 10m
+ exp_samples:
+ - value: 1609459800
+
+ # Test that input series samples are correctly timestamped with custom start_timestamp.
+ - name: samples_with_start_timestamp
+ interval: 1m
+ start_timestamp: "2024-01-01T00:00:00Z"
+ input_series:
+ - series: 'my_metric{label="test"}'
+ values: "10+10x15"
+ promql_expr_test:
+ # Query at absolute timestamp (start_timestamp = 1704067200).
+ - expr: my_metric@1704067200
+ eval_time: 5m
+ exp_samples:
+ - labels: 'my_metric{label="test"}'
+ value: 10
+ # Query at 2 minutes after start_timestamp (1704067200 + 120 = 1704067320).
+ - expr: my_metric@1704067320
+ eval_time: 5m
+ exp_samples:
+ - labels: 'my_metric{label="test"}'
+ value: 30
+ # Verify timestamp() function returns the absolute timestamp.
+ - expr: timestamp(my_metric)
+ eval_time: 5m
+ exp_samples:
+ - labels: '{label="test"}'
+ value: 1704067500
diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go
index 1da95e862c..9ccd1da714 100644
--- a/cmd/promtool/tsdb.go
+++ b/cmd/promtool/tsdb.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -17,6 +17,7 @@ import (
"bufio"
"bytes"
"context"
+ "encoding/json"
"errors"
"fmt"
"io"
@@ -706,7 +707,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
type SeriesSetFormatter func(series storage.SeriesSet) error
-func dumpSamples(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
+func dumpTSDBData(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
db, err := tsdb.OpenDBReadOnly(dbDir, sandboxDirRoot, nil)
if err != nil {
return err
@@ -794,6 +795,30 @@ func CondensedString(ls labels.Labels) string {
return b.String()
}
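+// formatSeriesSetLabelsToJSON prints the label set of each series as a JSON object,
+// one per line, skipping duplicate series.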
+func formatSeriesSetLabelsToJSON(ss storage.SeriesSet) error {
+ seriesCache := make(map[string]struct{})
+ for ss.Next() {
+ series := ss.At()
+ lbs := series.Labels()
+
+ b, err := json.Marshal(lbs)
+ if err != nil {
+ return err
+ }
+
+ if len(b) == 0 {
+ continue
+ }
+
+ s := string(b)
+ if _, ok := seriesCache[s]; !ok {
+ fmt.Println(s)
+ seriesCache[s] = struct{}{}
+ }
+ }
+ return nil
+}
+
func formatSeriesSetOpenMetrics(ss storage.SeriesSet) error {
for ss.Next() {
series := ss.At()
diff --git a/cmd/promtool/tsdb_posix_test.go b/cmd/promtool/tsdb_posix_test.go
index 8a83aead70..9d0034844f 100644
--- a/cmd/promtool/tsdb_posix_test.go
+++ b/cmd/promtool/tsdb_posix_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/cmd/promtool/tsdb_test.go b/cmd/promtool/tsdb_test.go
index e745a3fe7a..3a2a5aff72 100644
--- a/cmd/promtool/tsdb_test.go
+++ b/cmd/promtool/tsdb_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -63,7 +63,7 @@ func getDumpedSamples(t *testing.T, databasePath, sandboxDirRoot string, mint, m
r, w, _ := os.Pipe()
os.Stdout = w
- err := dumpSamples(
+ err := dumpTSDBData(
context.Background(),
databasePath,
sandboxDirRoot,
@@ -106,13 +106,15 @@ func TestTSDBDump(t *testing.T) {
sandboxDirRoot string
match []string
expectedDump string
+ expectedSeries string
}{
{
- name: "default match",
- mint: math.MinInt64,
- maxt: math.MaxInt64,
- match: []string{"{__name__=~'(?s:.*)'}"},
- expectedDump: "testdata/dump-test-1.prom",
+ name: "default match",
+ mint: math.MinInt64,
+ maxt: math.MaxInt64,
+ match: []string{"{__name__=~'(?s:.*)'}"},
+ expectedDump: "testdata/dump-test-1.prom",
+ expectedSeries: "testdata/dump-series-1.prom",
},
{
name: "default match with sandbox dir root set",
@@ -121,41 +123,47 @@ func TestTSDBDump(t *testing.T) {
sandboxDirRoot: t.TempDir(),
match: []string{"{__name__=~'(?s:.*)'}"},
expectedDump: "testdata/dump-test-1.prom",
+ expectedSeries: "testdata/dump-series-1.prom",
},
{
- name: "same matcher twice",
- mint: math.MinInt64,
- maxt: math.MaxInt64,
- match: []string{"{foo=~'.+'}", "{foo=~'.+'}"},
- expectedDump: "testdata/dump-test-1.prom",
+ name: "same matcher twice",
+ mint: math.MinInt64,
+ maxt: math.MaxInt64,
+ match: []string{"{foo=~'.+'}", "{foo=~'.+'}"},
+ expectedDump: "testdata/dump-test-1.prom",
+ expectedSeries: "testdata/dump-series-1.prom",
},
{
- name: "no duplication",
- mint: math.MinInt64,
- maxt: math.MaxInt64,
- match: []string{"{__name__=~'(?s:.*)'}", "{baz='abc'}"},
- expectedDump: "testdata/dump-test-1.prom",
+ name: "no duplication",
+ mint: math.MinInt64,
+ maxt: math.MaxInt64,
+ match: []string{"{__name__=~'(?s:.*)'}", "{baz='abc'}"},
+ expectedDump: "testdata/dump-test-1.prom",
+ expectedSeries: "testdata/dump-series-1.prom",
},
{
- name: "well merged",
- mint: math.MinInt64,
- maxt: math.MaxInt64,
- match: []string{"{__name__='heavy_metric'}", "{baz='abc'}"},
- expectedDump: "testdata/dump-test-1.prom",
+ name: "well merged",
+ mint: math.MinInt64,
+ maxt: math.MaxInt64,
+ match: []string{"{__name__='heavy_metric'}", "{baz='abc'}"},
+ expectedDump: "testdata/dump-test-1.prom",
+ expectedSeries: "testdata/dump-series-1.prom",
},
{
- name: "multi matchers",
- mint: math.MinInt64,
- maxt: math.MaxInt64,
- match: []string{"{__name__='heavy_metric',foo='foo'}", "{__name__='metric'}"},
- expectedDump: "testdata/dump-test-2.prom",
+ name: "multi matchers",
+ mint: math.MinInt64,
+ maxt: math.MaxInt64,
+ match: []string{"{__name__='heavy_metric',foo='foo'}", "{__name__='metric'}"},
+ expectedDump: "testdata/dump-test-2.prom",
+ expectedSeries: "testdata/dump-series-2.prom",
},
{
- name: "with reduced mint and maxt",
- mint: int64(60000),
- maxt: int64(120000),
- match: []string{"{__name__='metric'}"},
- expectedDump: "testdata/dump-test-3.prom",
+ name: "with reduced mint and maxt",
+ mint: int64(60000),
+ maxt: int64(120000),
+ match: []string{"{__name__='metric'}"},
+ expectedDump: "testdata/dump-test-3.prom",
+ expectedSeries: "testdata/dump-series-3.prom",
},
}
for _, tt := range tests {
@@ -166,6 +174,12 @@ func TestTSDBDump(t *testing.T) {
expectedMetrics = normalizeNewLine(expectedMetrics)
// Sort both, because Prometheus does not guarantee the output order.
require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics))
+
+ dumpedSeries := getDumpedSamples(t, storage.Dir(), tt.sandboxDirRoot, tt.mint, tt.maxt, tt.match, formatSeriesSetLabelsToJSON)
+ expectedSeries, err := os.ReadFile(tt.expectedSeries)
+ require.NoError(t, err)
+ expectedSeries = normalizeNewLine(expectedSeries)
+ require.Equal(t, sortLines(string(expectedSeries)), sortLines(dumpedSeries))
})
}
}
diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go
index 15b5171645..105e626eba 100644
--- a/cmd/promtool/unittest.go
+++ b/cmd/promtool/unittest.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -188,15 +188,37 @@ func resolveAndGlobFilepaths(baseDir string, utf *unitTestFile) error {
return nil
}
+// testStartTimestamp wraps time.Time to support custom YAML unmarshaling.
+// It can parse both RFC3339 timestamps and Unix timestamps.
+type testStartTimestamp struct {
+ time.Time
+}
+
+// UnmarshalYAML implements custom YAML unmarshaling for testStartTimestamp.
+// It accepts both RFC3339 formatted strings and numeric Unix timestamps.
+func (t *testStartTimestamp) UnmarshalYAML(unmarshal func(any) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ parsed, err := parseTime(s)
+ if err != nil {
+ return err
+ }
+ t.Time = parsed
+ return nil
+}
+
// testGroup is a group of input series and tests associated with it.
type testGroup struct {
- Interval model.Duration `yaml:"interval"`
- InputSeries []series `yaml:"input_series"`
- AlertRuleTests []alertTestCase `yaml:"alert_rule_test,omitempty"`
- PromqlExprTests []promqlTestCase `yaml:"promql_expr_test,omitempty"`
- ExternalLabels labels.Labels `yaml:"external_labels,omitempty"`
- ExternalURL string `yaml:"external_url,omitempty"`
- TestGroupName string `yaml:"name,omitempty"`
+ Interval model.Duration `yaml:"interval"`
+ InputSeries []series `yaml:"input_series"`
+ AlertRuleTests []alertTestCase `yaml:"alert_rule_test,omitempty"`
+ PromqlExprTests []promqlTestCase `yaml:"promql_expr_test,omitempty"`
+ ExternalLabels labels.Labels `yaml:"external_labels,omitempty"`
+ ExternalURL string `yaml:"external_url,omitempty"`
+ TestGroupName string `yaml:"name,omitempty"`
+ StartTimestamp testStartTimestamp `yaml:"start_timestamp,omitempty"`
}
// test performs the unit tests.
@@ -209,6 +231,8 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
}()
}
// Setup testing suite.
+ // Set the start time from the test group.
+ queryOpts.StartTime = tg.StartTimestamp.Time
suite, err := promqltest.NewLazyLoader(tg.seriesLoadingString(), queryOpts)
if err != nil {
return []error{err}
@@ -237,7 +261,12 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
groups := orderedGroups(groupsMap, groupOrderMap)
// Bounds for evaluating the rules.
- mint := time.Unix(0, 0).UTC()
+ var mint time.Time
+ if tg.StartTimestamp.IsZero() {
+ mint = time.Unix(0, 0).UTC()
+ } else {
+ mint = tg.StartTimestamp.Time
+ }
maxt := mint.Add(tg.maxEvalTime())
// Optional floating point compare fuzzing.
@@ -631,13 +660,14 @@ func (la labelsAndAnnotations) String() string {
if len(la) == 0 {
return "[]"
}
- s := "[\n0:" + indentLines("\n"+la[0].String(), " ")
+ var s strings.Builder
+ s.WriteString("[\n0:" + indentLines("\n"+la[0].String(), " "))
for i, l := range la[1:] {
- s += ",\n" + strconv.Itoa(i+1) + ":" + indentLines("\n"+l.String(), " ")
+ s.WriteString(",\n" + strconv.Itoa(i+1) + ":" + indentLines("\n"+l.String(), " "))
}
- s += "\n]"
+ s.WriteString("\n]")
- return s
+ return s.String()
}
type labelAndAnnotation struct {
@@ -688,11 +718,12 @@ func parsedSamplesString(pss []parsedSample) string {
if len(pss) == 0 {
return "nil"
}
- s := pss[0].String()
+ var s strings.Builder
+ s.WriteString(pss[0].String())
for _, ps := range pss[1:] {
- s += ", " + ps.String()
+ s.WriteString(", " + ps.String())
}
- return s
+ return s.String()
}
func (ps *parsedSample) String() string {
diff --git a/cmd/promtool/unittest_test.go b/cmd/promtool/unittest_test.go
index 566e0acbc6..32886fc4df 100644
--- a/cmd/promtool/unittest_test.go
+++ b/cmd/promtool/unittest_test.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -129,6 +129,16 @@ func TestRulesUnitTest(t *testing.T) {
},
want: 0,
},
+ {
+ name: "Start time tests",
+ args: args{
+ files: []string{"./testdata/start-time-test.yml"},
+ },
+ queryOpts: promqltest.LazyLoaderOpts{
+ EnableAtModifier: true,
+ },
+ want: 0,
+ },
}
reuseFiles := []string{}
reuseCount := [2]int{}
diff --git a/config/config.go b/config/config.go
index 5410b7d34f..d721d7fb86 100644
--- a/config/config.go
+++ b/config/config.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -149,6 +149,10 @@ func LoadFile(filename string, agentMode bool, logger *slog.Logger) (*Config, er
return cfg, nil
}
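+// boolPtr returns a pointer to b; it is used for pointer-typed config defaults.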
+func boolPtr(b bool) *bool {
+ return &b
+}
+
// The defaults applied before parsing the respective config sections.
var (
// DefaultConfig is the default top-level configuration.
@@ -158,7 +162,6 @@ var (
OTLPConfig: DefaultOTLPConfig,
}
- f bool
// DefaultGlobalConfig is the default global configuration.
DefaultGlobalConfig = GlobalConfig{
ScrapeInterval: model.Duration(1 * time.Minute),
@@ -173,9 +176,10 @@ var (
ScrapeProtocols: nil,
// When the native histogram feature flag is enabled,
// ScrapeNativeHistograms default changes to true.
- ScrapeNativeHistograms: &f,
+ ScrapeNativeHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: false,
AlwaysScrapeClassicHistograms: false,
+ ExtraScrapeMetrics: boolPtr(false),
MetricNameValidationScheme: model.UTF8Validation,
MetricNameEscapingScheme: model.AllowUTF8,
}
@@ -513,6 +517,10 @@ type GlobalConfig struct {
ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
// Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"`
+ // Whether to enable additional scrape metrics.
+ // When enabled, Prometheus stores samples for scrape_timeout_seconds,
+ // scrape_sample_limit, and scrape_body_size_bytes.
+ ExtraScrapeMetrics *bool `yaml:"extra_scrape_metrics,omitempty"`
}
// ScrapeProtocol represents supported protocol for scraping metrics.
@@ -652,6 +660,9 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(any) error) error {
if gc.ScrapeNativeHistograms == nil {
gc.ScrapeNativeHistograms = DefaultGlobalConfig.ScrapeNativeHistograms
}
+ if gc.ExtraScrapeMetrics == nil {
+ gc.ExtraScrapeMetrics = DefaultGlobalConfig.ExtraScrapeMetrics
+ }
if gc.ScrapeProtocols == nil {
if DefaultGlobalConfig.ScrapeProtocols != nil {
// This is the case where the defaults are set due to a feature flag.
@@ -687,7 +698,17 @@ func (c *GlobalConfig) isZero() bool {
c.ScrapeProtocols == nil &&
c.ScrapeNativeHistograms == nil &&
!c.ConvertClassicHistogramsToNHCB &&
- !c.AlwaysScrapeClassicHistograms
+ !c.AlwaysScrapeClassicHistograms &&
+ c.BodySizeLimit == 0 &&
+ c.SampleLimit == 0 &&
+ c.TargetLimit == 0 &&
+ c.LabelLimit == 0 &&
+ c.LabelNameLengthLimit == 0 &&
+ c.LabelValueLengthLimit == 0 &&
+ c.KeepDroppedTargets == 0 &&
+ c.MetricNameValidationScheme == model.UnsetValidation &&
+ c.MetricNameEscapingScheme == "" &&
+ c.ExtraScrapeMetrics == nil
}
const DefaultGoGCPercentage = 75
@@ -796,6 +817,11 @@ type ScrapeConfig struct {
// blank in config files but must have a value if a ScrapeConfig is created
// programmatically.
MetricNameEscapingScheme string `yaml:"metric_name_escaping_scheme,omitempty"`
+ // Whether to enable additional scrape metrics.
+ // When enabled, Prometheus stores samples for scrape_timeout_seconds,
+ // scrape_sample_limit, and scrape_body_size_bytes.
+ // If not set (nil), inherits the value from the global configuration.
+ ExtraScrapeMetrics *bool `yaml:"extra_scrape_metrics,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
@@ -897,6 +923,9 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
if c.ScrapeNativeHistograms == nil {
c.ScrapeNativeHistograms = globalConfig.ScrapeNativeHistograms
}
+ if c.ExtraScrapeMetrics == nil {
+ c.ExtraScrapeMetrics = globalConfig.ExtraScrapeMetrics
+ }
if c.ScrapeProtocols == nil {
switch {
@@ -1022,7 +1051,7 @@ func ToEscapingScheme(s string, v model.ValidationScheme) (model.EscapingScheme,
case model.LegacyValidation:
return model.UnderscoreEscaping, nil
case model.UnsetValidation:
- return model.NoEscaping, fmt.Errorf("v is unset: %s", v)
+ return model.NoEscaping, fmt.Errorf("ValidationScheme is unset: %s", v)
default:
panic(fmt.Errorf("unhandled validation scheme: %s", v))
}
@@ -1045,6 +1074,11 @@ func (c *ScrapeConfig) AlwaysScrapeClassicHistogramsEnabled() bool {
return c.AlwaysScrapeClassicHistograms != nil && *c.AlwaysScrapeClassicHistograms
}
+// ExtraScrapeMetricsEnabled returns whether to enable extra scrape metrics.
+func (c *ScrapeConfig) ExtraScrapeMetricsEnabled() bool {
+ return c.ExtraScrapeMetrics != nil && *c.ExtraScrapeMetrics
+}
+
// StorageConfig configures runtime reloadable configuration options.
type StorageConfig struct {
TSDBConfig *TSDBConfig `yaml:"tsdb,omitempty"`
diff --git a/config/config_default_test.go b/config/config_default_test.go
index e5f43e1f50..91c290ae4e 100644
--- a/config/config_default_test.go
+++ b/config/config_default_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/config/config_test.go b/config/config_test.go
index 28c8f2196d..08aa0b4f06 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -74,10 +74,6 @@ func mustParseURL(u string) *config.URL {
return &config.URL{URL: parsed}
}
-func boolPtr(b bool) *bool {
- return &b
-}
-
const (
globBodySizeLimit = 15 * units.MiB
globSampleLimit = 1500
@@ -109,6 +105,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: false,
ConvertClassicHistogramsToNHCB: false,
+ ExtraScrapeMetrics: boolPtr(false),
MetricNameValidationScheme: model.UTF8Validation,
},
@@ -236,6 +233,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -360,6 +358,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
HTTPClientConfig: config.HTTPClientConfig{
BasicAuth: &config.BasicAuth{
@@ -470,6 +469,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -532,6 +532,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: "/metrics",
Scheme: "http",
@@ -571,6 +572,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -616,6 +618,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -661,6 +664,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -696,6 +700,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -739,6 +744,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -779,6 +785,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -826,6 +833,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -863,6 +871,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -903,6 +912,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -936,6 +946,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -972,6 +983,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: "/federate",
Scheme: DefaultScrapeConfig.Scheme,
@@ -1008,6 +1020,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1044,6 +1057,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1077,6 +1091,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1118,6 +1133,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1158,6 +1174,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1195,6 +1212,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1231,6 +1249,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1271,6 +1290,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1314,6 +1334,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(true),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1377,6 +1398,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1410,6 +1432,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
HTTPClientConfig: config.DefaultHTTPClientConfig,
MetricsPath: DefaultScrapeConfig.MetricsPath,
@@ -1454,6 +1477,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
HTTPClientConfig: config.DefaultHTTPClientConfig,
MetricsPath: DefaultScrapeConfig.MetricsPath,
@@ -1504,6 +1528,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1544,6 +1569,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1585,6 +1611,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
HTTPClientConfig: config.DefaultHTTPClientConfig,
MetricsPath: DefaultScrapeConfig.MetricsPath,
@@ -1621,6 +1648,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1659,6 +1687,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -2663,12 +2692,87 @@ func TestAgentMode(t *testing.T) {
)
}
-func TestEmptyGlobalBlock(t *testing.T) {
- c, err := Load("global:\n", promslog.NewNopLogger())
- require.NoError(t, err)
- exp := DefaultConfig
- exp.loaded = true
- require.Equal(t, exp, *c)
+func TestGlobalConfig(t *testing.T) {
+ t.Run("empty block restores defaults", func(t *testing.T) {
+ c, err := Load("global:\n", promslog.NewNopLogger())
+ require.NoError(t, err)
+ exp := DefaultConfig
+ exp.loaded = true
+ require.Equal(t, exp, *c)
+ })
+
+ // Verify that isZero() correctly identifies non-zero configurations for all
+ // fields in GlobalConfig. This is important because isZero() is used during
+ // YAML unmarshaling to detect empty global blocks that should be replaced
+ // with defaults.
+ t.Run("isZero", func(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ config GlobalConfig
+ expectZero bool
+ }{
+ {
+ name: "empty GlobalConfig",
+ config: GlobalConfig{},
+ expectZero: true,
+ },
+ {
+ name: "ScrapeInterval set",
+ config: GlobalConfig{ScrapeInterval: model.Duration(30 * time.Second)},
+ expectZero: false,
+ },
+ {
+ name: "BodySizeLimit set",
+ config: GlobalConfig{BodySizeLimit: 1 * units.MiB},
+ expectZero: false,
+ },
+ {
+ name: "SampleLimit set",
+ config: GlobalConfig{SampleLimit: 1000},
+ expectZero: false,
+ },
+ {
+ name: "TargetLimit set",
+ config: GlobalConfig{TargetLimit: 500},
+ expectZero: false,
+ },
+ {
+ name: "LabelLimit set",
+ config: GlobalConfig{LabelLimit: 100},
+ expectZero: false,
+ },
+ {
+ name: "LabelNameLengthLimit set",
+ config: GlobalConfig{LabelNameLengthLimit: 50},
+ expectZero: false,
+ },
+ {
+ name: "LabelValueLengthLimit set",
+ config: GlobalConfig{LabelValueLengthLimit: 200},
+ expectZero: false,
+ },
+ {
+ name: "KeepDroppedTargets set",
+ config: GlobalConfig{KeepDroppedTargets: 10},
+ expectZero: false,
+ },
+ {
+ name: "MetricNameValidationScheme set",
+ config: GlobalConfig{MetricNameValidationScheme: model.LegacyValidation},
+ expectZero: false,
+ },
+ {
+ name: "MetricNameEscapingScheme set",
+ config: GlobalConfig{MetricNameEscapingScheme: model.EscapeUnderscores},
+ expectZero: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ result := tc.config.isZero()
+ require.Equal(t, tc.expectZero, result)
+ })
+ }
+ })
}
// ScrapeConfigOptions contains options for creating a scrape config.
@@ -2680,6 +2784,7 @@ type ScrapeConfigOptions struct {
ScrapeNativeHistograms bool
AlwaysScrapeClassicHistograms bool
ConvertClassicHistToNHCB bool
+ ExtraScrapeMetrics bool
}
func TestGetScrapeConfigs(t *testing.T) {
@@ -2713,6 +2818,7 @@ func TestGetScrapeConfigs(t *testing.T) {
ScrapeNativeHistograms: boolPtr(opts.ScrapeNativeHistograms),
AlwaysScrapeClassicHistograms: boolPtr(opts.AlwaysScrapeClassicHistograms),
ConvertClassicHistogramsToNHCB: boolPtr(opts.ConvertClassicHistToNHCB),
+ ExtraScrapeMetrics: boolPtr(opts.ExtraScrapeMetrics),
}
if opts.ScrapeProtocols == nil {
sc.ScrapeProtocols = DefaultScrapeProtocols
@@ -2796,6 +2902,7 @@ func TestGetScrapeConfigs(t *testing.T) {
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -2834,6 +2941,7 @@ func TestGetScrapeConfigs(t *testing.T) {
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
+ ExtraScrapeMetrics: boolPtr(false),
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
@@ -2946,6 +3054,26 @@ func TestGetScrapeConfigs(t *testing.T) {
configFile: "testdata/global_scrape_protocols_and_local_disable_scrape_native_hist.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ScrapeNativeHistograms: false, ScrapeProtocols: []ScrapeProtocol{PrometheusText0_0_4}})},
},
+ {
+ name: "A global config that enables extra scrape metrics",
+ configFile: "testdata/global_enable_extra_scrape_metrics.good.yml",
+ expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: true})},
+ },
+ {
+ name: "A global config that disables extra scrape metrics",
+ configFile: "testdata/global_disable_extra_scrape_metrics.good.yml",
+ expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: false})},
+ },
+ {
+ name: "A global config that disables extra scrape metrics and scrape config that enables it",
+ configFile: "testdata/local_enable_extra_scrape_metrics.good.yml",
+ expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: true})},
+ },
+ {
+ name: "A global config that enables extra scrape metrics and scrape config that disables it",
+ configFile: "testdata/local_disable_extra_scrape_metrics.good.yml",
+ expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: false})},
+ },
}
for _, tc := range testCases {
@@ -2962,6 +3090,99 @@ func TestGetScrapeConfigs(t *testing.T) {
}
}
+func TestExtraScrapeMetrics(t *testing.T) {
+ tests := []struct {
+ name string
+ config string
+ expectGlobal *bool
+ expectEnabled bool
+ }{
+ {
+ name: "default values (not set)",
+ config: `
+scrape_configs:
+ - job_name: test
+ static_configs:
+ - targets: ['localhost:9090']
+`,
+ expectGlobal: boolPtr(false), // inherits from DefaultGlobalConfig
+ expectEnabled: false,
+ },
+ {
+ name: "global enabled",
+ config: `
+global:
+ extra_scrape_metrics: true
+scrape_configs:
+ - job_name: test
+ static_configs:
+ - targets: ['localhost:9090']
+`,
+ expectGlobal: boolPtr(true),
+ expectEnabled: true,
+ },
+ {
+ name: "global disabled",
+ config: `
+global:
+ extra_scrape_metrics: false
+scrape_configs:
+ - job_name: test
+ static_configs:
+ - targets: ['localhost:9090']
+`,
+ expectGlobal: boolPtr(false),
+ expectEnabled: false,
+ },
+ {
+ name: "scrape override enabled",
+ config: `
+global:
+ extra_scrape_metrics: false
+scrape_configs:
+ - job_name: test
+ extra_scrape_metrics: true
+ static_configs:
+ - targets: ['localhost:9090']
+`,
+ expectGlobal: boolPtr(false),
+ expectEnabled: true,
+ },
+ {
+ name: "scrape override disabled",
+ config: `
+global:
+ extra_scrape_metrics: true
+scrape_configs:
+ - job_name: test
+ extra_scrape_metrics: false
+ static_configs:
+ - targets: ['localhost:9090']
+`,
+ expectGlobal: boolPtr(true),
+ expectEnabled: false,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ cfg, err := Load(tc.config, promslog.NewNopLogger())
+ require.NoError(t, err)
+
+ // Check global config
+ require.Equal(t, tc.expectGlobal, cfg.GlobalConfig.ExtraScrapeMetrics)
+
+ // Check scrape config
+ scfgs, err := cfg.GetScrapeConfigs()
+ require.NoError(t, err)
+ require.Len(t, scfgs, 1)
+
+ // Check the effective value via the helper method
+ require.Equal(t, tc.expectEnabled, scfgs[0].ExtraScrapeMetricsEnabled())
+ })
+ }
+}
+
func kubernetesSDHostURL() config.URL {
tURL, _ := url.Parse("https://localhost:1234")
return config.URL{URL: tURL}
diff --git a/config/config_windows_test.go b/config/config_windows_test.go
index 9d338b99e7..72a56ff41a 100644
--- a/config/config_windows_test.go
+++ b/config/config_windows_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/config/reload.go b/config/reload.go
index 07a077a6a9..a250693169 100644
--- a/config/reload.go
+++ b/config/reload.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/config/reload_test.go b/config/reload_test.go
index 3e77260ab3..cb60d47651 100644
--- a/config/reload_test.go
+++ b/config/reload_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/config/testdata/global_disable_extra_scrape_metrics.good.yml b/config/testdata/global_disable_extra_scrape_metrics.good.yml
new file mode 100644
index 0000000000..26c6e4b8b5
--- /dev/null
+++ b/config/testdata/global_disable_extra_scrape_metrics.good.yml
@@ -0,0 +1,6 @@
+global:
+ extra_scrape_metrics: false
+scrape_configs:
+ - job_name: prometheus
+ static_configs:
+ - targets: ['localhost:8080']
diff --git a/config/testdata/global_enable_extra_scrape_metrics.good.yml b/config/testdata/global_enable_extra_scrape_metrics.good.yml
new file mode 100644
index 0000000000..1d7ea2db1c
--- /dev/null
+++ b/config/testdata/global_enable_extra_scrape_metrics.good.yml
@@ -0,0 +1,6 @@
+global:
+ extra_scrape_metrics: true
+scrape_configs:
+ - job_name: prometheus
+ static_configs:
+ - targets: ['localhost:8080']
diff --git a/config/testdata/local_disable_extra_scrape_metrics.good.yml b/config/testdata/local_disable_extra_scrape_metrics.good.yml
new file mode 100644
index 0000000000..a1b7c646fa
--- /dev/null
+++ b/config/testdata/local_disable_extra_scrape_metrics.good.yml
@@ -0,0 +1,7 @@
+global:
+ extra_scrape_metrics: true
+scrape_configs:
+ - job_name: prometheus
+ static_configs:
+ - targets: ['localhost:8080']
+ extra_scrape_metrics: false
diff --git a/config/testdata/local_enable_extra_scrape_metrics.good.yml b/config/testdata/local_enable_extra_scrape_metrics.good.yml
new file mode 100644
index 0000000000..a1c8b2808e
--- /dev/null
+++ b/config/testdata/local_enable_extra_scrape_metrics.good.yml
@@ -0,0 +1,7 @@
+global:
+ extra_scrape_metrics: false
+scrape_configs:
+ - job_name: prometheus
+ static_configs:
+ - targets: ['localhost:8080']
+ extra_scrape_metrics: true
diff --git a/discovery/README.md b/discovery/README.md
index d5418e7fb1..5d1adcf145 100644
--- a/discovery/README.md
+++ b/discovery/README.md
@@ -50,7 +50,7 @@ file for use with `file_sd`.
The general principle with SD is to extract all the potentially useful
information we can out of the SD, and let the user choose what they need of it
using
-[relabelling](https://prometheus.io/docs/operating/configuration/#).
+[relabelling](https://prometheus.io/docs/operating/configuration/#relabel_config).
This information is generally termed metadata.
Metadata is exposed as a set of key/value pairs (labels) per target. The keys
diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go
index 0aae35d75d..19ecebd491 100644
--- a/discovery/aws/ec2.go
+++ b/discovery/aws/ec2.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/aws/ec2_test.go b/discovery/aws/ec2_test.go
index 46ab8e771d..bd1047ffc0 100644
--- a/discovery/aws/ec2_test.go
+++ b/discovery/aws/ec2_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/aws/ecs.go b/discovery/aws/ecs.go
index 3794ad178d..1d5ff366de 100644
--- a/discovery/aws/ecs.go
+++ b/discovery/aws/ecs.go
@@ -28,6 +28,7 @@ import (
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+ "github.com/aws/aws-sdk-go-v2/service/ec2"
"github.com/aws/aws-sdk-go-v2/service/ecs"
"github.com/aws/aws-sdk-go-v2/service/ecs/types"
"github.com/aws/aws-sdk-go-v2/service/sts"
@@ -44,31 +45,37 @@ import (
)
const (
- ecsLabel = model.MetaLabelPrefix + "ecs_"
- ecsLabelCluster = ecsLabel + "cluster"
- ecsLabelClusterARN = ecsLabel + "cluster_arn"
- ecsLabelService = ecsLabel + "service"
- ecsLabelServiceARN = ecsLabel + "service_arn"
- ecsLabelServiceStatus = ecsLabel + "service_status"
- ecsLabelTaskGroup = ecsLabel + "task_group"
- ecsLabelTaskARN = ecsLabel + "task_arn"
- ecsLabelTaskDefinition = ecsLabel + "task_definition"
- ecsLabelRegion = ecsLabel + "region"
- ecsLabelAvailabilityZone = ecsLabel + "availability_zone"
- ecsLabelAZID = ecsLabel + "availability_zone_id"
- ecsLabelSubnetID = ecsLabel + "subnet_id"
- ecsLabelIPAddress = ecsLabel + "ip_address"
- ecsLabelLaunchType = ecsLabel + "launch_type"
- ecsLabelDesiredStatus = ecsLabel + "desired_status"
- ecsLabelLastStatus = ecsLabel + "last_status"
- ecsLabelHealthStatus = ecsLabel + "health_status"
- ecsLabelPlatformFamily = ecsLabel + "platform_family"
- ecsLabelPlatformVersion = ecsLabel + "platform_version"
- ecsLabelTag = ecsLabel + "tag_"
- ecsLabelTagCluster = ecsLabelTag + "cluster_"
- ecsLabelTagService = ecsLabelTag + "service_"
- ecsLabelTagTask = ecsLabelTag + "task_"
- ecsLabelSeparator = ","
+ ecsLabel = model.MetaLabelPrefix + "ecs_"
+ ecsLabelCluster = ecsLabel + "cluster"
+ ecsLabelClusterARN = ecsLabel + "cluster_arn"
+ ecsLabelService = ecsLabel + "service"
+ ecsLabelServiceARN = ecsLabel + "service_arn"
+ ecsLabelServiceStatus = ecsLabel + "service_status"
+ ecsLabelTaskGroup = ecsLabel + "task_group"
+ ecsLabelTaskARN = ecsLabel + "task_arn"
+ ecsLabelTaskDefinition = ecsLabel + "task_definition"
+ ecsLabelRegion = ecsLabel + "region"
+ ecsLabelAvailabilityZone = ecsLabel + "availability_zone"
+ ecsLabelSubnetID = ecsLabel + "subnet_id"
+ ecsLabelIPAddress = ecsLabel + "ip_address"
+ ecsLabelLaunchType = ecsLabel + "launch_type"
+ ecsLabelDesiredStatus = ecsLabel + "desired_status"
+ ecsLabelLastStatus = ecsLabel + "last_status"
+ ecsLabelHealthStatus = ecsLabel + "health_status"
+ ecsLabelPlatformFamily = ecsLabel + "platform_family"
+ ecsLabelPlatformVersion = ecsLabel + "platform_version"
+ ecsLabelTag = ecsLabel + "tag_"
+ ecsLabelTagCluster = ecsLabelTag + "cluster_"
+ ecsLabelTagService = ecsLabelTag + "service_"
+ ecsLabelTagTask = ecsLabelTag + "task_"
+ ecsLabelTagEC2 = ecsLabelTag + "ec2_"
+ ecsLabelNetworkMode = ecsLabel + "network_mode"
+ ecsLabelContainerInstanceARN = ecsLabel + "container_instance_arn"
+ ecsLabelEC2InstanceID = ecsLabel + "ec2_instance_id"
+ ecsLabelEC2InstanceType = ecsLabel + "ec2_instance_type"
+ ecsLabelEC2InstancePrivateIP = ecsLabel + "ec2_instance_private_ip"
+ ecsLabelEC2InstancePublicIP = ecsLabel + "ec2_instance_public_ip"
+ ecsLabelPublicIP = ecsLabel + "public_ip"
)
// DefaultECSSDConfig is the default ECS SD configuration.
@@ -122,7 +129,7 @@ func (c *ECSSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery
}
// UnmarshalYAML implements the yaml.Unmarshaler interface for the ECS Config.
-func (c *ECSSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *ECSSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultECSSDConfig
type plain ECSSDConfig
err := unmarshal((*plain)(c))
@@ -153,6 +160,12 @@ type ecsClient interface {
DescribeServices(context.Context, *ecs.DescribeServicesInput, ...func(*ecs.Options)) (*ecs.DescribeServicesOutput, error)
ListTasks(context.Context, *ecs.ListTasksInput, ...func(*ecs.Options)) (*ecs.ListTasksOutput, error)
DescribeTasks(context.Context, *ecs.DescribeTasksInput, ...func(*ecs.Options)) (*ecs.DescribeTasksOutput, error)
+ DescribeContainerInstances(context.Context, *ecs.DescribeContainerInstancesInput, ...func(*ecs.Options)) (*ecs.DescribeContainerInstancesOutput, error)
+}
+
+type ecsEC2Client interface {
+ DescribeInstances(context.Context, *ec2.DescribeInstancesInput, ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error)
+ DescribeNetworkInterfaces(context.Context, *ec2.DescribeNetworkInterfacesInput, ...func(*ec2.Options)) (*ec2.DescribeNetworkInterfacesOutput, error)
}
// ECSDiscovery periodically performs ECS-SD requests. It implements
@@ -162,6 +175,7 @@ type ECSDiscovery struct {
logger *slog.Logger
cfg *ECSSDConfig
ecs ecsClient
+ ec2 ecsEC2Client
}
// NewECSDiscovery returns a new ECSDiscovery which periodically refreshes its targets.
@@ -191,7 +205,7 @@ func NewECSDiscovery(conf *ECSSDConfig, opts discovery.DiscovererOptions) (*ECSD
}
func (d *ECSDiscovery) initEcsClient(ctx context.Context) error {
- if d.ecs != nil {
+ if d.ecs != nil && d.ec2 != nil {
return nil
}
@@ -240,6 +254,10 @@ func (d *ECSDiscovery) initEcsClient(ctx context.Context) error {
options.HTTPClient = client
})
+ d.ec2 = ec2.NewFromConfig(cfg, func(options *ec2.Options) {
+ options.HTTPClient = client
+ })
+
// Test credentials by making a simple API call
testCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
@@ -458,13 +476,117 @@ func (d *ECSDiscovery) describeTasks(ctx context.Context, clusterARN string, tas
return tasks, errg.Wait()
}
+// describeContainerInstances returns a map of container instance ARNs to EC2 instance IDs.
+// It batches requests to respect the AWS API limit of 100 container instances per request.
+func (d *ECSDiscovery) describeContainerInstances(ctx context.Context, clusterARN string, containerInstanceARNs []string) (map[string]string, error) {
+ if len(containerInstanceARNs) == 0 {
+ return make(map[string]string), nil
+ }
+
+ containerInstToEC2 := make(map[string]string)
+ batchSize := 100 // AWS API limit
+
+ for _, batch := range batchSlice(containerInstanceARNs, batchSize) {
+ resp, err := d.ecs.DescribeContainerInstances(ctx, &ecs.DescribeContainerInstancesInput{
+ Cluster: aws.String(clusterARN),
+ ContainerInstances: batch,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not describe container instances: %w", err)
+ }
+
+ for _, ci := range resp.ContainerInstances {
+ if ci.ContainerInstanceArn != nil && ci.Ec2InstanceId != nil {
+ containerInstToEC2[*ci.ContainerInstanceArn] = *ci.Ec2InstanceId
+ }
+ }
+ }
+
+ return containerInstToEC2, nil
+}
+
+// ec2InstanceInfo holds information retrieved from EC2 DescribeInstances.
+type ec2InstanceInfo struct {
+ privateIP string
+ publicIP string
+ subnetID string
+ instanceType string
+ tags map[string]string
+}
+
+// describeEC2Instances returns a map of EC2 instance IDs to instance information.
+func (d *ECSDiscovery) describeEC2Instances(ctx context.Context, instanceIDs []string) (map[string]ec2InstanceInfo, error) {
+ if len(instanceIDs) == 0 {
+ return make(map[string]ec2InstanceInfo), nil
+ }
+
+ instanceInfo := make(map[string]ec2InstanceInfo)
+
+ resp, err := d.ec2.DescribeInstances(ctx, &ec2.DescribeInstancesInput{
+ InstanceIds: instanceIDs,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not describe EC2 instances: %w", err)
+ }
+
+ for _, reservation := range resp.Reservations {
+ for _, instance := range reservation.Instances {
+ if instance.InstanceId != nil && instance.PrivateIpAddress != nil {
+ info := ec2InstanceInfo{
+ privateIP: *instance.PrivateIpAddress,
+ tags: make(map[string]string),
+ }
+ if instance.PublicIpAddress != nil {
+ info.publicIP = *instance.PublicIpAddress
+ }
+ if instance.SubnetId != nil {
+ info.subnetID = *instance.SubnetId
+ }
+ if instance.InstanceType != "" {
+ info.instanceType = string(instance.InstanceType)
+ }
+ // Collect EC2 instance tags
+ for _, tag := range instance.Tags {
+ if tag.Key != nil && tag.Value != nil {
+ info.tags[*tag.Key] = *tag.Value
+ }
+ }
+ instanceInfo[*instance.InstanceId] = info
+ }
+ }
+ }
+
+ return instanceInfo, nil
+}
+
+// describeNetworkInterfaces returns a map of ENI IDs to public IP addresses.
+func (d *ECSDiscovery) describeNetworkInterfaces(ctx context.Context, eniIDs []string) (map[string]string, error) {
+ if len(eniIDs) == 0 {
+ return make(map[string]string), nil
+ }
+
+ eniToPublicIP := make(map[string]string)
+
+ resp, err := d.ec2.DescribeNetworkInterfaces(ctx, &ec2.DescribeNetworkInterfacesInput{
+ NetworkInterfaceIds: eniIDs,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not describe network interfaces: %w", err)
+ }
+
+ for _, eni := range resp.NetworkInterfaces {
+ if eni.NetworkInterfaceId != nil && eni.Association != nil && eni.Association.PublicIp != nil {
+ eniToPublicIP[*eni.NetworkInterfaceId] = *eni.Association.PublicIp
+ }
+ }
+
+ return eniToPublicIP, nil
+}
+
func batchSlice[T any](a []T, size int) [][]T {
batches := make([][]T, 0, len(a)/size+1)
for i := 0; i < len(a); i += size {
- end := i + size
- if end > len(a) {
- end = len(a)
- }
+ end := min(i+size, len(a))
batches = append(batches, a[i:end])
}
return batches
@@ -557,8 +679,76 @@ func (d *ECSDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
if tasks, exists := serviceTaskMap[serviceArn]; exists {
var serviceTargets []model.LabelSet
+ // Collect container instance ARNs for all EC2 tasks to get instance type
+ var containerInstanceARNs []string
+ taskToContainerInstance := make(map[string]string)
+ // Collect ENI IDs for awsvpc tasks to get public IPs
+ var eniIDs []string
+ taskToENI := make(map[string]string)
+
for _, task := range tasks {
- // Find the ENI attachment to get the private IP address
+ // Collect container instance ARN for any task running on EC2
+ if task.ContainerInstanceArn != nil {
+ containerInstanceARNs = append(containerInstanceARNs, *task.ContainerInstanceArn)
+ taskToContainerInstance[*task.TaskArn] = *task.ContainerInstanceArn
+ }
+
+ // Collect ENI IDs from awsvpc tasks
+ for _, attachment := range task.Attachments {
+ if attachment.Type != nil && *attachment.Type == "ElasticNetworkInterface" {
+ for _, detail := range attachment.Details {
+ if detail.Name != nil && *detail.Name == "networkInterfaceId" && detail.Value != nil {
+ eniIDs = append(eniIDs, *detail.Value)
+ taskToENI[*task.TaskArn] = *detail.Value
+ break
+ }
+ }
+ break
+ }
+ }
+ }
+
+ // Batch describe container instances and EC2 instances to get instance type and other metadata
+ var containerInstToEC2 map[string]string
+ var ec2InstInfo map[string]ec2InstanceInfo
+ if len(containerInstanceARNs) > 0 {
+ var err error
+ containerInstToEC2, err = d.describeContainerInstances(ctx, clusterArn, containerInstanceARNs)
+ if err != nil {
+ d.logger.Error("Failed to describe container instances", "cluster", clusterArn, "error", err)
+ // Continue processing tasks
+ } else {
+ // Collect unique EC2 instance IDs
+ ec2InstanceIDs := make([]string, 0, len(containerInstToEC2))
+ for _, ec2ID := range containerInstToEC2 {
+ ec2InstanceIDs = append(ec2InstanceIDs, ec2ID)
+ }
+
+ // Batch describe EC2 instances
+ ec2InstInfo, err = d.describeEC2Instances(ctx, ec2InstanceIDs)
+ if err != nil {
+ d.logger.Error("Failed to describe EC2 instances", "cluster", clusterArn, "error", err)
+ }
+ }
+ }
+
+ // Batch describe ENIs to get public IPs for awsvpc tasks
+ var eniToPublicIP map[string]string
+ if len(eniIDs) > 0 {
+ var err error
+ eniToPublicIP, err = d.describeNetworkInterfaces(ctx, eniIDs)
+ if err != nil {
+ d.logger.Error("Failed to describe network interfaces", "cluster", clusterArn, "error", err)
+ // Continue processing without ENI public IPs
+ }
+ }
+
+ for _, task := range tasks {
+ var ipAddress, subnetID, publicIP string
+ var networkMode string
+ var ec2InstanceID, ec2InstanceType, ec2InstancePrivateIP, ec2InstancePublicIP string
+
+ // Try to get IP from ENI attachment (awsvpc mode)
var eniAttachment *types.Attachment
for _, attachment := range task.Attachments {
if attachment.Type != nil && *attachment.Type == "ElasticNetworkInterface" {
@@ -566,19 +756,65 @@ func (d *ECSDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
break
}
}
- if eniAttachment == nil {
- continue
- }
- var ipAddress, subnetID string
- for _, detail := range eniAttachment.Details {
- switch *detail.Name {
- case "privateIPv4Address":
- ipAddress = *detail.Value
- case "subnetId":
- subnetID = *detail.Value
+ if eniAttachment != nil {
+ // awsvpc networking mode - get IP from ENI
+ networkMode = "awsvpc"
+ for _, detail := range eniAttachment.Details {
+ switch *detail.Name {
+ case "privateIPv4Address":
+ ipAddress = *detail.Value
+ case "subnetId":
+ subnetID = *detail.Value
+ }
+ }
+ // Get public IP from ENI if available
+ if eniID, ok := taskToENI[*task.TaskArn]; ok {
+ if eniPublicIP, ok := eniToPublicIP[eniID]; ok {
+ publicIP = eniPublicIP
+ }
+ }
+ } else if task.ContainerInstanceArn != nil {
+ // bridge/host networking mode - need to get EC2 instance IP and subnet
+ networkMode = "bridge"
+ containerInstARN, ok := taskToContainerInstance[*task.TaskArn]
+ if ok {
+ ec2InstanceID, ok = containerInstToEC2[containerInstARN]
+ if ok {
+ info, ok := ec2InstInfo[ec2InstanceID]
+ if ok {
+ ipAddress = info.privateIP
+ publicIP = info.publicIP
+ subnetID = info.subnetID
+ ec2InstanceType = info.instanceType
+ ec2InstancePrivateIP = info.privateIP
+ ec2InstancePublicIP = info.publicIP
+ } else {
+ d.logger.Debug("EC2 instance info not found", "instance", ec2InstanceID, "task", *task.TaskArn)
+ }
+ } else {
+ d.logger.Debug("Container instance not found in map", "arn", containerInstARN, "task", *task.TaskArn)
+ }
}
}
+
+ // Get EC2 instance metadata for awsvpc tasks running on EC2
+ // We want the instance type and the host IPs for advanced use cases.
+ if networkMode == "awsvpc" && task.ContainerInstanceArn != nil {
+ containerInstARN, ok := taskToContainerInstance[*task.TaskArn]
+ if ok {
+ ec2InstanceID, ok = containerInstToEC2[containerInstARN]
+ if ok {
+ info, ok := ec2InstInfo[ec2InstanceID]
+ if ok {
+ ec2InstanceType = info.instanceType
+ ec2InstancePrivateIP = info.privateIP
+ ec2InstancePublicIP = info.publicIP
+ }
+ }
+ }
+ }
+
if ipAddress == "" {
continue
}
@@ -592,13 +828,38 @@ func (d *ECSDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
ecsLabelTaskARN: model.LabelValue(*task.TaskArn),
ecsLabelTaskDefinition: model.LabelValue(*task.TaskDefinitionArn),
ecsLabelIPAddress: model.LabelValue(ipAddress),
- ecsLabelSubnetID: model.LabelValue(subnetID),
ecsLabelRegion: model.LabelValue(d.cfg.Region),
ecsLabelLaunchType: model.LabelValue(task.LaunchType),
ecsLabelAvailabilityZone: model.LabelValue(*task.AvailabilityZone),
ecsLabelDesiredStatus: model.LabelValue(*task.DesiredStatus),
ecsLabelLastStatus: model.LabelValue(*task.LastStatus),
ecsLabelHealthStatus: model.LabelValue(task.HealthStatus),
+ ecsLabelNetworkMode: model.LabelValue(networkMode),
+ }
+
+ // Add subnet ID when available (awsvpc mode from ENI, bridge/host from EC2 instance)
+ if subnetID != "" {
+ labels[ecsLabelSubnetID] = model.LabelValue(subnetID)
+ }
+
+ // Add container instance and EC2 instance info for EC2 launch type
+ if task.ContainerInstanceArn != nil {
+ labels[ecsLabelContainerInstanceARN] = model.LabelValue(*task.ContainerInstanceArn)
+ }
+ if ec2InstanceID != "" {
+ labels[ecsLabelEC2InstanceID] = model.LabelValue(ec2InstanceID)
+ }
+ if ec2InstanceType != "" {
+ labels[ecsLabelEC2InstanceType] = model.LabelValue(ec2InstanceType)
+ }
+ if ec2InstancePrivateIP != "" {
+ labels[ecsLabelEC2InstancePrivateIP] = model.LabelValue(ec2InstancePrivateIP)
+ }
+ if ec2InstancePublicIP != "" {
+ labels[ecsLabelEC2InstancePublicIP] = model.LabelValue(ec2InstancePublicIP)
+ }
+ if publicIP != "" {
+ labels[ecsLabelPublicIP] = model.LabelValue(publicIP)
}
if task.PlatformFamily != nil {
@@ -637,6 +898,15 @@ func (d *ECSDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
}
}
+ // Add EC2 instance tags (if running on EC2)
+ if ec2InstanceID != "" {
+ if info, ok := ec2InstInfo[ec2InstanceID]; ok {
+ for tagKey, tagValue := range info.tags {
+ labels[model.LabelName(ecsLabelTagEC2+strutil.SanitizeLabelName(tagKey))] = model.LabelValue(tagValue)
+ }
+ }
+ }
+
serviceTargets = append(serviceTargets, labels)
}
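
A minimal relabelling sketch for the new metadata labels; the label names come from the constants above, while the job name, port, and the elided ECS service discovery block are assumptions for illustration only.

scrape_configs:
  - job_name: ecs-tasks
    # ... ECS service discovery configuration omitted ...
    relabel_configs:
      # Prefer the task's public IP for the scrape address when one is discovered
      # (awsvpc ENI association, or the EC2 host's public address in bridge/host mode).
      - source_labels: [__meta_ecs_public_ip]
        regex: (.+)
        target_label: __address__
        replacement: '${1}:80'
      # Keep the EC2 instance type as a regular label on the scraped series.
      - source_labels: [__meta_ecs_ec2_instance_type]
        target_label: instance_type
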
diff --git a/discovery/aws/ecs_test.go b/discovery/aws/ecs_test.go
index 60138a01c7..1cb48b27fa 100644
--- a/discovery/aws/ecs_test.go
+++ b/discovery/aws/ecs_test.go
@@ -17,6 +17,8 @@ import (
"context"
"testing"
+ "github.com/aws/aws-sdk-go-v2/service/ec2"
+ ec2Types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
"github.com/aws/aws-sdk-go-v2/service/ecs"
ecsTypes "github.com/aws/aws-sdk-go-v2/service/ecs/types"
"github.com/prometheus/common/model"
@@ -29,9 +31,12 @@ import (
type ecsDataStore struct {
region string
- clusters []ecsTypes.Cluster
- services []ecsTypes.Service
- tasks []ecsTypes.Task
+ clusters []ecsTypes.Cluster
+ services []ecsTypes.Service
+ tasks []ecsTypes.Task
+ containerInstances []ecsTypes.ContainerInstance
+ ec2Instances map[string]ec2InstanceInfo // EC2 instance ID to instance info
+ eniPublicIPs map[string]string // ENI ID to public IP
}
func TestECSDiscoveryListClusterARNs(t *testing.T) {
@@ -716,6 +721,7 @@ func TestECSDiscoveryRefresh(t *testing.T) {
Details: []ecsTypes.KeyValuePair{
{Name: strptr("subnetId"), Value: strptr("subnet-12345")},
{Name: strptr("privateIPv4Address"), Value: strptr("10.0.1.100")},
+ {Name: strptr("networkInterfaceId"), Value: strptr("eni-fargate-123")},
},
},
},
@@ -724,6 +730,9 @@ func TestECSDiscoveryRefresh(t *testing.T) {
},
},
},
+ eniPublicIPs: map[string]string{
+ "eni-fargate-123": "52.1.2.3",
+ },
},
expected: []*targetgroup.Group{
{
@@ -749,6 +758,8 @@ func TestECSDiscoveryRefresh(t *testing.T) {
"__meta_ecs_health_status": model.LabelValue("HEALTHY"),
"__meta_ecs_platform_family": model.LabelValue("Linux"),
"__meta_ecs_platform_version": model.LabelValue("1.4.0"),
+ "__meta_ecs_network_mode": model.LabelValue("awsvpc"),
+ "__meta_ecs_public_ip": model.LabelValue("52.1.2.3"),
"__meta_ecs_tag_cluster_Environment": model.LabelValue("test"),
"__meta_ecs_tag_service_App": model.LabelValue("web"),
"__meta_ecs_tag_task_Version": model.LabelValue("v1.0"),
@@ -825,14 +836,345 @@ func TestECSDiscoveryRefresh(t *testing.T) {
},
},
},
+ {
+ name: "TaskWithBridgeNetworking",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ clusters: []ecsTypes.Cluster{
+ {
+ ClusterName: strptr("test-cluster"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+ Status: strptr("ACTIVE"),
+ },
+ },
+ services: []ecsTypes.Service{
+ {
+ ServiceName: strptr("bridge-service"),
+ ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/bridge-service"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+ Status: strptr("ACTIVE"),
+ },
+ },
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-bridge"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+ TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/bridge-task:1"),
+ Group: strptr("service:bridge-service"),
+ LaunchType: ecsTypes.LaunchTypeEc2,
+ LastStatus: strptr("RUNNING"),
+ DesiredStatus: strptr("RUNNING"),
+ HealthStatus: ecsTypes.HealthStatusHealthy,
+ AvailabilityZone: strptr("us-west-2a"),
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"),
+ Attachments: []ecsTypes.Attachment{},
+ },
+ },
+ containerInstances: []ecsTypes.ContainerInstance{
+ {
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"),
+ Ec2InstanceId: strptr("i-1234567890abcdef0"),
+ Status: strptr("ACTIVE"),
+ },
+ },
+ ec2Instances: map[string]ec2InstanceInfo{
+ "i-1234567890abcdef0": {
+ privateIP: "10.0.1.50",
+ publicIP: "54.1.2.3",
+ subnetID: "subnet-bridge-1",
+ instanceType: "t3.medium",
+ tags: map[string]string{
+ "Name": "ecs-host-1",
+ "Environment": "production",
+ },
+ },
+ },
+ },
+ expected: []*targetgroup.Group{
+ {
+ Source: "us-west-2",
+ Targets: []model.LabelSet{
+ {
+ model.AddressLabel: model.LabelValue("10.0.1.50:80"),
+ "__meta_ecs_cluster": model.LabelValue("test-cluster"),
+ "__meta_ecs_cluster_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+ "__meta_ecs_service": model.LabelValue("bridge-service"),
+ "__meta_ecs_service_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/bridge-service"),
+ "__meta_ecs_service_status": model.LabelValue("ACTIVE"),
+ "__meta_ecs_task_group": model.LabelValue("service:bridge-service"),
+ "__meta_ecs_task_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-bridge"),
+ "__meta_ecs_task_definition": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task-definition/bridge-task:1"),
+ "__meta_ecs_region": model.LabelValue("us-west-2"),
+ "__meta_ecs_availability_zone": model.LabelValue("us-west-2a"),
+ "__meta_ecs_ip_address": model.LabelValue("10.0.1.50"),
+ "__meta_ecs_subnet_id": model.LabelValue("subnet-bridge-1"),
+ "__meta_ecs_launch_type": model.LabelValue("EC2"),
+ "__meta_ecs_desired_status": model.LabelValue("RUNNING"),
+ "__meta_ecs_last_status": model.LabelValue("RUNNING"),
+ "__meta_ecs_health_status": model.LabelValue("HEALTHY"),
+ "__meta_ecs_network_mode": model.LabelValue("bridge"),
+ "__meta_ecs_container_instance_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"),
+ "__meta_ecs_ec2_instance_id": model.LabelValue("i-1234567890abcdef0"),
+ "__meta_ecs_ec2_instance_type": model.LabelValue("t3.medium"),
+ "__meta_ecs_ec2_instance_private_ip": model.LabelValue("10.0.1.50"),
+ "__meta_ecs_ec2_instance_public_ip": model.LabelValue("54.1.2.3"),
+ "__meta_ecs_public_ip": model.LabelValue("54.1.2.3"),
+ "__meta_ecs_tag_ec2_Name": model.LabelValue("ecs-host-1"),
+ "__meta_ecs_tag_ec2_Environment": model.LabelValue("production"),
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "MixedNetworkingModes",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ clusters: []ecsTypes.Cluster{
+ {
+ ClusterName: strptr("mixed-cluster"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
+ Status: strptr("ACTIVE"),
+ },
+ },
+ services: []ecsTypes.Service{
+ {
+ ServiceName: strptr("mixed-service"),
+ ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/mixed-cluster/mixed-service"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
+ Status: strptr("ACTIVE"),
+ },
+ },
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/mixed-cluster/task-awsvpc"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
+ TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/awsvpc-task:1"),
+ Group: strptr("service:mixed-service"),
+ LaunchType: ecsTypes.LaunchTypeFargate,
+ LastStatus: strptr("RUNNING"),
+ DesiredStatus: strptr("RUNNING"),
+ HealthStatus: ecsTypes.HealthStatusHealthy,
+ AvailabilityZone: strptr("us-west-2a"),
+ Attachments: []ecsTypes.Attachment{
+ {
+ Type: strptr("ElasticNetworkInterface"),
+ Details: []ecsTypes.KeyValuePair{
+ {Name: strptr("subnetId"), Value: strptr("subnet-12345")},
+ {Name: strptr("privateIPv4Address"), Value: strptr("10.0.2.100")},
+ {Name: strptr("networkInterfaceId"), Value: strptr("eni-mixed-awsvpc")},
+ },
+ },
+ },
+ },
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/mixed-cluster/task-bridge"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
+ TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/bridge-task:1"),
+ Group: strptr("service:mixed-service"),
+ LaunchType: ecsTypes.LaunchTypeEc2,
+ LastStatus: strptr("RUNNING"),
+ DesiredStatus: strptr("RUNNING"),
+ HealthStatus: ecsTypes.HealthStatusHealthy,
+ AvailabilityZone: strptr("us-west-2b"),
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/mixed-cluster/xyz789"),
+ Attachments: []ecsTypes.Attachment{},
+ },
+ },
+ containerInstances: []ecsTypes.ContainerInstance{
+ {
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/mixed-cluster/xyz789"),
+ Ec2InstanceId: strptr("i-0987654321fedcba0"),
+ Status: strptr("ACTIVE"),
+ },
+ },
+ ec2Instances: map[string]ec2InstanceInfo{
+ "i-0987654321fedcba0": {
+ privateIP: "10.0.1.75",
+ publicIP: "54.2.3.4",
+ subnetID: "subnet-bridge-2",
+ instanceType: "t3.large",
+ tags: map[string]string{
+ "Name": "mixed-host",
+ "Team": "platform",
+ },
+ },
+ },
+ eniPublicIPs: map[string]string{
+ "eni-mixed-awsvpc": "52.2.3.4",
+ },
+ },
+ expected: []*targetgroup.Group{
+ {
+ Source: "us-west-2",
+ Targets: []model.LabelSet{
+ {
+ model.AddressLabel: model.LabelValue("10.0.2.100:80"),
+ "__meta_ecs_cluster": model.LabelValue("mixed-cluster"),
+ "__meta_ecs_cluster_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
+ "__meta_ecs_service": model.LabelValue("mixed-service"),
+ "__meta_ecs_service_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:service/mixed-cluster/mixed-service"),
+ "__meta_ecs_service_status": model.LabelValue("ACTIVE"),
+ "__meta_ecs_task_group": model.LabelValue("service:mixed-service"),
+ "__meta_ecs_task_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task/mixed-cluster/task-awsvpc"),
+ "__meta_ecs_task_definition": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task-definition/awsvpc-task:1"),
+ "__meta_ecs_region": model.LabelValue("us-west-2"),
+ "__meta_ecs_availability_zone": model.LabelValue("us-west-2a"),
+ "__meta_ecs_ip_address": model.LabelValue("10.0.2.100"),
+ "__meta_ecs_subnet_id": model.LabelValue("subnet-12345"),
+ "__meta_ecs_launch_type": model.LabelValue("FARGATE"),
+ "__meta_ecs_desired_status": model.LabelValue("RUNNING"),
+ "__meta_ecs_last_status": model.LabelValue("RUNNING"),
+ "__meta_ecs_health_status": model.LabelValue("HEALTHY"),
+ "__meta_ecs_network_mode": model.LabelValue("awsvpc"),
+ "__meta_ecs_public_ip": model.LabelValue("52.2.3.4"),
+ },
+ {
+ model.AddressLabel: model.LabelValue("10.0.1.75:80"),
+ "__meta_ecs_cluster": model.LabelValue("mixed-cluster"),
+ "__meta_ecs_cluster_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
+ "__meta_ecs_service": model.LabelValue("mixed-service"),
+ "__meta_ecs_service_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:service/mixed-cluster/mixed-service"),
+ "__meta_ecs_service_status": model.LabelValue("ACTIVE"),
+ "__meta_ecs_task_group": model.LabelValue("service:mixed-service"),
+ "__meta_ecs_task_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task/mixed-cluster/task-bridge"),
+ "__meta_ecs_task_definition": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task-definition/bridge-task:1"),
+ "__meta_ecs_region": model.LabelValue("us-west-2"),
+ "__meta_ecs_availability_zone": model.LabelValue("us-west-2b"),
+ "__meta_ecs_ip_address": model.LabelValue("10.0.1.75"),
+ "__meta_ecs_subnet_id": model.LabelValue("subnet-bridge-2"),
+ "__meta_ecs_launch_type": model.LabelValue("EC2"),
+ "__meta_ecs_desired_status": model.LabelValue("RUNNING"),
+ "__meta_ecs_last_status": model.LabelValue("RUNNING"),
+ "__meta_ecs_health_status": model.LabelValue("HEALTHY"),
+ "__meta_ecs_network_mode": model.LabelValue("bridge"),
+ "__meta_ecs_container_instance_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:container-instance/mixed-cluster/xyz789"),
+ "__meta_ecs_ec2_instance_id": model.LabelValue("i-0987654321fedcba0"),
+ "__meta_ecs_ec2_instance_type": model.LabelValue("t3.large"),
+ "__meta_ecs_ec2_instance_private_ip": model.LabelValue("10.0.1.75"),
+ "__meta_ecs_ec2_instance_public_ip": model.LabelValue("54.2.3.4"),
+ "__meta_ecs_public_ip": model.LabelValue("54.2.3.4"),
+ "__meta_ecs_tag_ec2_Name": model.LabelValue("mixed-host"),
+ "__meta_ecs_tag_ec2_Team": model.LabelValue("platform"),
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "EC2WithAwsvpcNetworking",
+ ecsData: &ecsDataStore{
+ region: "us-west-2",
+ clusters: []ecsTypes.Cluster{
+ {
+ ClusterName: strptr("ec2-awsvpc-cluster"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/ec2-awsvpc-cluster"),
+ Status: strptr("ACTIVE"),
+ },
+ },
+ services: []ecsTypes.Service{
+ {
+ ServiceName: strptr("ec2-awsvpc-service"),
+ ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/ec2-awsvpc-cluster/ec2-awsvpc-service"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/ec2-awsvpc-cluster"),
+ Status: strptr("ACTIVE"),
+ },
+ },
+ tasks: []ecsTypes.Task{
+ {
+ TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/ec2-awsvpc-cluster/task-ec2-awsvpc"),
+ ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/ec2-awsvpc-cluster"),
+ TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/ec2-awsvpc-task:1"),
+ Group: strptr("service:ec2-awsvpc-service"),
+ LaunchType: ecsTypes.LaunchTypeEc2,
+ LastStatus: strptr("RUNNING"),
+ DesiredStatus: strptr("RUNNING"),
+ HealthStatus: ecsTypes.HealthStatusHealthy,
+ AvailabilityZone: strptr("us-west-2c"),
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/ec2-awsvpc-cluster/def456"),
+ // Has BOTH ENI attachment AND container instance ARN - should use ENI
+ Attachments: []ecsTypes.Attachment{
+ {
+ Type: strptr("ElasticNetworkInterface"),
+ Details: []ecsTypes.KeyValuePair{
+ {Name: strptr("subnetId"), Value: strptr("subnet-99999")},
+ {Name: strptr("privateIPv4Address"), Value: strptr("10.0.3.200")},
+ {Name: strptr("networkInterfaceId"), Value: strptr("eni-ec2-awsvpc")},
+ },
+ },
+ },
+ },
+ },
+ eniPublicIPs: map[string]string{
+ "eni-ec2-awsvpc": "52.3.4.5",
+ },
+ // Container instance data - IP should NOT be used, but instance type SHOULD be used
+ containerInstances: []ecsTypes.ContainerInstance{
+ {
+ ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/ec2-awsvpc-cluster/def456"),
+ Ec2InstanceId: strptr("i-ec2awsvpcinstance"),
+ Status: strptr("ACTIVE"),
+ },
+ },
+ ec2Instances: map[string]ec2InstanceInfo{
+ "i-ec2awsvpcinstance": {
+ privateIP: "10.0.9.99", // This IP should NOT be used (ENI IP is used instead)
+ publicIP: "54.3.4.5", // This public IP SHOULD be exposed
+ subnetID: "subnet-wrong", // This subnet should NOT be used (ENI subnet is used instead)
+ instanceType: "c5.2xlarge", // This instance type SHOULD be used
+ tags: map[string]string{
+ "Name": "ec2-awsvpc-host",
+ "Owner": "team-a",
+ },
+ },
+ },
+ },
+ expected: []*targetgroup.Group{
+ {
+ Source: "us-west-2",
+ Targets: []model.LabelSet{
+ {
+ model.AddressLabel: model.LabelValue("10.0.3.200:80"),
+ "__meta_ecs_cluster": model.LabelValue("ec2-awsvpc-cluster"),
+ "__meta_ecs_cluster_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:cluster/ec2-awsvpc-cluster"),
+ "__meta_ecs_service": model.LabelValue("ec2-awsvpc-service"),
+ "__meta_ecs_service_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:service/ec2-awsvpc-cluster/ec2-awsvpc-service"),
+ "__meta_ecs_service_status": model.LabelValue("ACTIVE"),
+ "__meta_ecs_task_group": model.LabelValue("service:ec2-awsvpc-service"),
+ "__meta_ecs_task_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task/ec2-awsvpc-cluster/task-ec2-awsvpc"),
+ "__meta_ecs_task_definition": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task-definition/ec2-awsvpc-task:1"),
+ "__meta_ecs_region": model.LabelValue("us-west-2"),
+ "__meta_ecs_availability_zone": model.LabelValue("us-west-2c"),
+ "__meta_ecs_ip_address": model.LabelValue("10.0.3.200"),
+ "__meta_ecs_subnet_id": model.LabelValue("subnet-99999"),
+ "__meta_ecs_launch_type": model.LabelValue("EC2"),
+ "__meta_ecs_desired_status": model.LabelValue("RUNNING"),
+ "__meta_ecs_last_status": model.LabelValue("RUNNING"),
+ "__meta_ecs_health_status": model.LabelValue("HEALTHY"),
+ "__meta_ecs_network_mode": model.LabelValue("awsvpc"),
+ "__meta_ecs_container_instance_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:container-instance/ec2-awsvpc-cluster/def456"),
+ "__meta_ecs_ec2_instance_id": model.LabelValue("i-ec2awsvpcinstance"),
+ "__meta_ecs_ec2_instance_type": model.LabelValue("c5.2xlarge"),
+ "__meta_ecs_ec2_instance_private_ip": model.LabelValue("10.0.9.99"),
+ "__meta_ecs_ec2_instance_public_ip": model.LabelValue("54.3.4.5"),
+ "__meta_ecs_public_ip": model.LabelValue("52.3.4.5"),
+ "__meta_ecs_tag_ec2_Name": model.LabelValue("ec2-awsvpc-host"),
+ "__meta_ecs_tag_ec2_Owner": model.LabelValue("team-a"),
+ },
+ },
+ },
+ },
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- client := newMockECSClient(tt.ecsData)
+ ecsClient := newMockECSClient(tt.ecsData)
+ ec2Client := newMockECSEC2Client(tt.ecsData.ec2Instances, tt.ecsData.eniPublicIPs)
d := &ECSDiscovery{
- ecs: client,
+ ecs: ecsClient,
+ ec2: ec2Client,
cfg: &ECSSDConfig{
Region: tt.ecsData.region,
Port: 80,
@@ -951,3 +1293,91 @@ func (m *mockECSClient) DescribeTasks(_ context.Context, input *ecs.DescribeTask
Tasks: tasks,
}, nil
}
+
+func (m *mockECSClient) DescribeContainerInstances(_ context.Context, input *ecs.DescribeContainerInstancesInput, _ ...func(*ecs.Options)) (*ecs.DescribeContainerInstancesOutput, error) {
+ var containerInstances []ecsTypes.ContainerInstance
+ for _, ciArn := range input.ContainerInstances {
+ for _, ci := range m.ecsData.containerInstances {
+ if *ci.ContainerInstanceArn == ciArn {
+ containerInstances = append(containerInstances, ci)
+ break
+ }
+ }
+ }
+
+ return &ecs.DescribeContainerInstancesOutput{
+ ContainerInstances: containerInstances,
+ }, nil
+}
+
+// Mock EC2 client wrapper for ECS tests.
+type mockECSEC2Client struct {
+ ec2Instances map[string]ec2InstanceInfo
+ eniPublicIPs map[string]string
+}
+
+func newMockECSEC2Client(ec2Instances map[string]ec2InstanceInfo, eniPublicIPs map[string]string) *mockECSEC2Client {
+ return &mockECSEC2Client{
+ ec2Instances: ec2Instances,
+ eniPublicIPs: eniPublicIPs,
+ }
+}
+
+func (m *mockECSEC2Client) DescribeInstances(_ context.Context, input *ec2.DescribeInstancesInput, _ ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error) {
+ var reservations []ec2Types.Reservation
+
+ for _, instanceID := range input.InstanceIds {
+ if info, ok := m.ec2Instances[instanceID]; ok {
+ instance := ec2Types.Instance{
+ InstanceId: &instanceID,
+ PrivateIpAddress: &info.privateIP,
+ }
+ if info.publicIP != "" {
+ instance.PublicIpAddress = &info.publicIP
+ }
+ if info.subnetID != "" {
+ instance.SubnetId = &info.subnetID
+ }
+ if info.instanceType != "" {
+ instance.InstanceType = ec2Types.InstanceType(info.instanceType)
+ }
+ // Add tags
+ for tagKey, tagValue := range info.tags {
+ instance.Tags = append(instance.Tags, ec2Types.Tag{
+ Key: &tagKey,
+ Value: &tagValue,
+ })
+ }
+ reservation := ec2Types.Reservation{
+ Instances: []ec2Types.Instance{instance},
+ }
+ reservations = append(reservations, reservation)
+ }
+ }
+
+ return &ec2.DescribeInstancesOutput{
+ Reservations: reservations,
+ }, nil
+}
+
+func (m *mockECSEC2Client) DescribeNetworkInterfaces(_ context.Context, input *ec2.DescribeNetworkInterfacesInput, _ ...func(*ec2.Options)) (*ec2.DescribeNetworkInterfacesOutput, error) {
+ var networkInterfaces []ec2Types.NetworkInterface
+
+ for _, eniID := range input.NetworkInterfaceIds {
+ if publicIP, ok := m.eniPublicIPs[eniID]; ok {
+ eni := ec2Types.NetworkInterface{
+ NetworkInterfaceId: &eniID,
+ }
+ if publicIP != "" {
+ eni.Association = &ec2Types.NetworkInterfaceAssociation{
+ PublicIp: &publicIP,
+ }
+ }
+ networkInterfaces = append(networkInterfaces, eni)
+ }
+ }
+
+ return &ec2.DescribeNetworkInterfacesOutput{
+ NetworkInterfaces: networkInterfaces,
+ }, nil
+}
diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go
index c9ca3eaee9..b13f26cc5f 100644
--- a/discovery/aws/lightsail.go
+++ b/discovery/aws/lightsail.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/aws/metrics_ec2.go b/discovery/aws/metrics_ec2.go
index 45227c3534..1a37347b40 100644
--- a/discovery/aws/metrics_ec2.go
+++ b/discovery/aws/metrics_ec2.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/aws/metrics_lightsail.go b/discovery/aws/metrics_lightsail.go
index 4dfe14c60c..40f7639459 100644
--- a/discovery/aws/metrics_lightsail.go
+++ b/discovery/aws/metrics_lightsail.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go
index 3c38bbf3e6..32fc97fdfa 100644
--- a/discovery/azure/azure.go
+++ b/discovery/azure/azure.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/azure/azure_test.go b/discovery/azure/azure_test.go
index a6e3a6713b..23c120ac6b 100644
--- a/discovery/azure/azure_test.go
+++ b/discovery/azure/azure_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/azure/metrics.go b/discovery/azure/metrics.go
index 3e3dbdbfbb..dc0291cdb8 100644
--- a/discovery/azure/metrics.go
+++ b/discovery/azure/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go
index 74b5d0724e..1004d0941a 100644
--- a/discovery/consul/consul.go
+++ b/discovery/consul/consul.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go
index a6ff4a625e..feec5d4747 100644
--- a/discovery/consul/consul_test.go
+++ b/discovery/consul/consul_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/consul/metrics.go b/discovery/consul/metrics.go
index b49509bd8f..903fba5cef 100644
--- a/discovery/consul/metrics.go
+++ b/discovery/consul/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/digitalocean/digitalocean.go b/discovery/digitalocean/digitalocean.go
index d2fbee1d94..0a185c2915 100644
--- a/discovery/digitalocean/digitalocean.go
+++ b/discovery/digitalocean/digitalocean.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/digitalocean/digitalocean_test.go b/discovery/digitalocean/digitalocean_test.go
index ca99e83b20..560d8d533a 100644
--- a/discovery/digitalocean/digitalocean_test.go
+++ b/discovery/digitalocean/digitalocean_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/digitalocean/metrics.go b/discovery/digitalocean/metrics.go
index 7f68b39e56..4b11b825e5 100644
--- a/discovery/digitalocean/metrics.go
+++ b/discovery/digitalocean/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/digitalocean/mock_test.go b/discovery/digitalocean/mock_test.go
index 62d963c3b3..d5703d7702 100644
--- a/discovery/digitalocean/mock_test.go
+++ b/discovery/digitalocean/mock_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/discoverer_metrics_noop.go b/discovery/discoverer_metrics_noop.go
index 4321204b6c..b75474dfec 100644
--- a/discovery/discoverer_metrics_noop.go
+++ b/discovery/discoverer_metrics_noop.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/discovery.go b/discovery/discovery.go
index 70cd856bb2..c4f8c8d458 100644
--- a/discovery/discovery.go
+++ b/discovery/discovery.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -63,8 +63,9 @@ type DiscovererOptions struct {
// We define them here in the "discovery" package in order to avoid a cyclic dependency between
// "discovery" and "refresh".
type RefreshMetrics struct {
- Failures prometheus.Counter
- Duration prometheus.Observer
+ Failures prometheus.Counter
+ Duration prometheus.Observer
+ DurationHistogram prometheus.Observer
}
// RefreshMetricsInstantiator instantiates the metrics used by the "refresh" package.
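Note (not part of the patch): both `Duration` and the new `DurationHistogram` field are plain `prometheus.Observer` values, so a caller can feed the same elapsed time into both. The sketch below is illustrative only; the helper name `timeRefresh` is hypothetical, and it simply mirrors the pattern the `refresh` package change further down in this diff uses.

```go
package example

import (
	"time"

	"github.com/prometheus/prometheus/discovery"
)

// timeRefresh is an illustrative helper showing how one elapsed measurement
// can be recorded into both observers on the extended RefreshMetrics struct.
func timeRefresh(m *discovery.RefreshMetrics, refresh func() error) error {
	start := time.Now()
	defer func() {
		elapsed := time.Since(start).Seconds()
		m.Duration.Observe(elapsed)          // existing summary observer
		m.DurationHistogram.Observe(elapsed) // new histogram observer
	}()
	return refresh()
}
```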
diff --git a/discovery/discovery_test.go b/discovery/discovery_test.go
index 116095fd62..53539b6d40 100644
--- a/discovery/discovery_test.go
+++ b/discovery/discovery_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/dns/dns.go b/discovery/dns/dns.go
index 1e0a78698b..4d9200d734 100644
--- a/discovery/dns/dns.go
+++ b/discovery/dns/dns.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/dns/dns_test.go b/discovery/dns/dns_test.go
index 4a7170cc7d..eeb1137878 100644
--- a/discovery/dns/dns_test.go
+++ b/discovery/dns/dns_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/dns/metrics.go b/discovery/dns/metrics.go
index 27c96b53e0..b65db5e6c0 100644
--- a/discovery/dns/metrics.go
+++ b/discovery/dns/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/eureka/client.go b/discovery/eureka/client.go
index e4b54faae6..252b152637 100644
--- a/discovery/eureka/client.go
+++ b/discovery/eureka/client.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/eureka/client_test.go b/discovery/eureka/client_test.go
index f85409a11e..19812b1f5d 100644
--- a/discovery/eureka/client_test.go
+++ b/discovery/eureka/client_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/eureka/eureka.go b/discovery/eureka/eureka.go
index 6d726966bc..0d46667437 100644
--- a/discovery/eureka/eureka.go
+++ b/discovery/eureka/eureka.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/eureka/eureka_test.go b/discovery/eureka/eureka_test.go
index def6126e86..69612fedb7 100644
--- a/discovery/eureka/eureka_test.go
+++ b/discovery/eureka/eureka_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/eureka/metrics.go b/discovery/eureka/metrics.go
index 72cfe47096..5a0720a8d5 100644
--- a/discovery/eureka/metrics.go
+++ b/discovery/eureka/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/file/file.go b/discovery/file/file.go
index e0225891ce..c654297e0a 100644
--- a/discovery/file/file.go
+++ b/discovery/file/file.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/file/file_test.go b/discovery/file/file_test.go
index c80744f8c3..d8a36df399 100644
--- a/discovery/file/file_test.go
+++ b/discovery/file/file_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/file/metrics.go b/discovery/file/metrics.go
index 3e3df7bbf6..0371338d46 100644
--- a/discovery/file/metrics.go
+++ b/discovery/file/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/gce/gce.go b/discovery/gce/gce.go
index 106028ff93..96eed2b27b 100644
--- a/discovery/gce/gce.go
+++ b/discovery/gce/gce.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/gce/metrics.go b/discovery/gce/metrics.go
index 7ea69b1a89..c4020f0a53 100644
--- a/discovery/gce/metrics.go
+++ b/discovery/gce/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go
index 88fe09bd3e..7fe55ffded 100644
--- a/discovery/hetzner/hcloud.go
+++ b/discovery/hetzner/hcloud.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -98,13 +98,13 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
hetznerLabelRole: model.LabelValue(HetznerRoleHcloud),
hetznerLabelServerID: model.LabelValue(strconv.FormatInt(server.ID, 10)),
hetznerLabelServerName: model.LabelValue(server.Name),
- hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name),
+ hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name), //nolint:staticcheck // server.Datacenter is deprecated but kept for backwards compatibility until the next minor release
hetznerLabelPublicIPv4: model.LabelValue(server.PublicNet.IPv4.IP.String()),
hetznerLabelPublicIPv6Network: model.LabelValue(server.PublicNet.IPv6.Network.String()),
hetznerLabelServerStatus: model.LabelValue(server.Status),
- hetznerLabelHcloudDatacenterLocation: model.LabelValue(server.Datacenter.Location.Name),
- hetznerLabelHcloudDatacenterLocationNetworkZone: model.LabelValue(server.Datacenter.Location.NetworkZone),
+ hetznerLabelHcloudDatacenterLocation: model.LabelValue(server.Datacenter.Location.Name), //nolint:staticcheck // server.Datacenter is deprecated but kept for backwards compatibility until the next minor release
+ hetznerLabelHcloudDatacenterLocationNetworkZone: model.LabelValue(server.Datacenter.Location.NetworkZone), //nolint:staticcheck // server.Datacenter is deprecated but kept for backwards compatibility until the next minor release
hetznerLabelHcloudType: model.LabelValue(server.ServerType.Name),
hetznerLabelHcloudCPUCores: model.LabelValue(strconv.Itoa(server.ServerType.Cores)),
hetznerLabelHcloudCPUType: model.LabelValue(server.ServerType.CPUType),
diff --git a/discovery/hetzner/hcloud_test.go b/discovery/hetzner/hcloud_test.go
index fa8291625a..3f20bcb86c 100644
--- a/discovery/hetzner/hcloud_test.go
+++ b/discovery/hetzner/hcloud_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/hetzner/hetzner.go b/discovery/hetzner/hetzner.go
index 8e52d21e39..932cfc8c93 100644
--- a/discovery/hetzner/hetzner.go
+++ b/discovery/hetzner/hetzner.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/hetzner/metrics.go b/discovery/hetzner/metrics.go
index 0023018194..cab1d66a3e 100644
--- a/discovery/hetzner/metrics.go
+++ b/discovery/hetzner/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/hetzner/mock_test.go b/discovery/hetzner/mock_test.go
index d192a4eae9..5f1e9c036b 100644
--- a/discovery/hetzner/mock_test.go
+++ b/discovery/hetzner/mock_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go
index 33aa2abcd8..ef5de1a30c 100644
--- a/discovery/hetzner/robot.go
+++ b/discovery/hetzner/robot.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/hetzner/robot_test.go b/discovery/hetzner/robot_test.go
index 2618bd097c..0e8b7954cc 100644
--- a/discovery/hetzner/robot_test.go
+++ b/discovery/hetzner/robot_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/http/http.go b/discovery/http/http.go
index d792bdacd7..fa9c7208fa 100644
--- a/discovery/http/http.go
+++ b/discovery/http/http.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/http/http_test.go b/discovery/http/http_test.go
index c553c21504..50a5800fc6 100644
--- a/discovery/http/http_test.go
+++ b/discovery/http/http_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/http/metrics.go b/discovery/http/metrics.go
index b1f8b84433..57fbcac15a 100644
--- a/discovery/http/metrics.go
+++ b/discovery/http/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/install/install.go b/discovery/install/install.go
index 9c397f9d36..05598347c1 100644
--- a/discovery/install/install.go
+++ b/discovery/install/install.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/ionos/ionos.go b/discovery/ionos/ionos.go
index c74013d109..93d57654e8 100644
--- a/discovery/ionos/ionos.go
+++ b/discovery/ionos/ionos.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/ionos/metrics.go b/discovery/ionos/metrics.go
index e79bded695..7fc78fdfa5 100644
--- a/discovery/ionos/metrics.go
+++ b/discovery/ionos/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/ionos/server.go b/discovery/ionos/server.go
index 81bb497277..bd351625db 100644
--- a/discovery/ionos/server.go
+++ b/discovery/ionos/server.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/ionos/server_test.go b/discovery/ionos/server_test.go
index 30f358e325..28fd285f67 100644
--- a/discovery/ionos/server_test.go
+++ b/discovery/ionos/server_test.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go
index 21c401da2c..4edcf9d4fa 100644
--- a/discovery/kubernetes/endpoints.go
+++ b/discovery/kubernetes/endpoints.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go
index aa0e432bfd..0ac472324d 100644
--- a/discovery/kubernetes/endpoints_test.go
+++ b/discovery/kubernetes/endpoints_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go
index 85b579438f..a6cfb0706a 100644
--- a/discovery/kubernetes/endpointslice.go
+++ b/discovery/kubernetes/endpointslice.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go
index cfd6be709e..b4dc0c36ce 100644
--- a/discovery/kubernetes/endpointslice_test.go
+++ b/discovery/kubernetes/endpointslice_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/kubernetes/ingress.go b/discovery/kubernetes/ingress.go
index 551453e513..985cc8f138 100644
--- a/discovery/kubernetes/ingress.go
+++ b/discovery/kubernetes/ingress.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/kubernetes/ingress_test.go b/discovery/kubernetes/ingress_test.go
index 76c9ff9036..15fa28002a 100644
--- a/discovery/kubernetes/ingress_test.go
+++ b/discovery/kubernetes/ingress_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go
index 1a6f965ecd..678f287ef5 100644
--- a/discovery/kubernetes/kubernetes.go
+++ b/discovery/kubernetes/kubernetes.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go
index f8edec23cb..a68a7c9a43 100644
--- a/discovery/kubernetes/kubernetes_test.go
+++ b/discovery/kubernetes/kubernetes_test.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/kubernetes/metrics.go b/discovery/kubernetes/metrics.go
index ba3cb1d32a..cdf158a032 100644
--- a/discovery/kubernetes/metrics.go
+++ b/discovery/kubernetes/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go
index 131cdcc9e7..cbc69dd0ca 100644
--- a/discovery/kubernetes/node.go
+++ b/discovery/kubernetes/node.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/kubernetes/node_test.go b/discovery/kubernetes/node_test.go
index bc17efdc01..9e56b95bb9 100644
--- a/discovery/kubernetes/node_test.go
+++ b/discovery/kubernetes/node_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go
index 03089e39d4..1fed78b3a7 100644
--- a/discovery/kubernetes/pod.go
+++ b/discovery/kubernetes/pod.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/kubernetes/pod_test.go b/discovery/kubernetes/pod_test.go
index 2cf336774a..db5db546d0 100644
--- a/discovery/kubernetes/pod_test.go
+++ b/discovery/kubernetes/pod_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go
index d676490d6c..ac2d42fc7c 100644
--- a/discovery/kubernetes/service.go
+++ b/discovery/kubernetes/service.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/kubernetes/service_test.go b/discovery/kubernetes/service_test.go
index 43c2b7922d..56a785d9c2 100644
--- a/discovery/kubernetes/service_test.go
+++ b/discovery/kubernetes/service_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go
index 2dc4d5f796..a5f05600c1 100644
--- a/discovery/linode/linode.go
+++ b/discovery/linode/linode.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/linode/linode_test.go b/discovery/linode/linode_test.go
index 533bc0fb62..d795d29698 100644
--- a/discovery/linode/linode_test.go
+++ b/discovery/linode/linode_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/linode/metrics.go b/discovery/linode/metrics.go
index 8f81389226..5bc805a60e 100644
--- a/discovery/linode/metrics.go
+++ b/discovery/linode/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/linode/mock_test.go b/discovery/linode/mock_test.go
index 50f0572ecd..b8094ec211 100644
--- a/discovery/linode/mock_test.go
+++ b/discovery/linode/mock_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/manager.go b/discovery/manager.go
index 878bc5f6d4..3f2b2db652 100644
--- a/discovery/manager.go
+++ b/discovery/manager.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -27,6 +27,7 @@ import (
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/discovery/targetgroup"
+ "github.com/prometheus/prometheus/util/features"
)
type poolKey struct {
@@ -111,6 +112,13 @@ func NewManager(ctx context.Context, logger *slog.Logger, registerer prometheus.
}
mgr.metrics = metrics
+ // Register all available service discovery providers with the feature registry.
+ if mgr.featureRegistry != nil {
+ for _, sdName := range RegisteredConfigNames() {
+ mgr.featureRegistry.Enable(features.ServiceDiscoveryProviders, sdName)
+ }
+ }
+
return mgr
}
@@ -141,6 +149,15 @@ func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) {
}
}
+// FeatureRegistry sets the feature registry for the manager.
+func FeatureRegistry(fr features.Collector) func(*Manager) {
+ return func(m *Manager) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ m.featureRegistry = fr
+ }
+}
+
// Manager maintains a set of discovery providers and sends each update to a map channel.
// Targets are grouped by the target set name.
type Manager struct {
@@ -175,6 +192,9 @@ type Manager struct {
metrics *Metrics
sdMetrics map[string]DiscovererMetrics
+
+ // featureRegistry is used to report which service discovery providers are available.
+ featureRegistry features.Collector
}
// Providers returns the currently configured SD providers.
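Sketch only (not part of the patch): how a caller might thread a feature registry into the manager through the new `FeatureRegistry` functional option. The `NewManager` parameter list shown here (registerer, per-provider metrics map, variadic options) is assumed from the surrounding code rather than visible in this hunk, and the concrete `features.Collector` implementation is left abstract.

```go
package example

import (
	"context"
	"log/slog"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/util/features"
)

// newManagerWithFeatures is an illustrative wrapper that passes a
// features.Collector to the discovery manager via the new option.
func newManagerWithFeatures(
	ctx context.Context,
	logger *slog.Logger,
	reg prometheus.Registerer,
	sdMetrics map[string]discovery.DiscovererMetrics,
	fr features.Collector,
) *discovery.Manager {
	return discovery.NewManager(ctx, logger, reg, sdMetrics,
		discovery.FeatureRegistry(fr))
}
```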
diff --git a/discovery/manager_test.go b/discovery/manager_test.go
index 5d34cb7ac0..162730d9aa 100644
--- a/discovery/manager_test.go
+++ b/discovery/manager_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go
index 438b8915df..878d404373 100644
--- a/discovery/marathon/marathon.go
+++ b/discovery/marathon/marathon.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/marathon/marathon_test.go b/discovery/marathon/marathon_test.go
index 53f7d3a1f9..71c7d73d7e 100644
--- a/discovery/marathon/marathon_test.go
+++ b/discovery/marathon/marathon_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/marathon/metrics.go b/discovery/marathon/metrics.go
index 40e2ade558..3d3d57d9ae 100644
--- a/discovery/marathon/metrics.go
+++ b/discovery/marathon/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/metrics.go b/discovery/metrics.go
index 356be1ddcb..2a3734fb2d 100644
--- a/discovery/metrics.go
+++ b/discovery/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/metrics_k8s_client.go b/discovery/metrics_k8s_client.go
index 19dfd4e247..3642eac568 100644
--- a/discovery/metrics_k8s_client.go
+++ b/discovery/metrics_k8s_client.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/metrics_refresh.go b/discovery/metrics_refresh.go
index 8a8bf221b8..11092d9f96 100644
--- a/discovery/metrics_refresh.go
+++ b/discovery/metrics_refresh.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -14,6 +14,8 @@
package discovery
import (
+ "time"
+
"github.com/prometheus/client_golang/prometheus"
)
@@ -21,8 +23,9 @@ import (
// We define them here in the "discovery" package in order to avoid a cyclic dependency between
// "discovery" and "refresh".
type RefreshMetricsVecs struct {
- failuresVec *prometheus.CounterVec
- durationVec *prometheus.SummaryVec
+ failuresVec *prometheus.CounterVec
+ durationVec *prometheus.SummaryVec
+ durationHistVec *prometheus.HistogramVec
metricRegisterer MetricRegisterer
}
@@ -44,6 +47,16 @@ func NewRefreshMetrics(reg prometheus.Registerer) RefreshMetricsManager {
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"mechanism", "config"}),
+ durationHistVec: prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "prometheus_sd_refresh_duration_histogram_seconds",
+ Help: "The duration of a refresh for the given SD mechanism.",
+ Buckets: []float64{.01, .1, 1, 10},
+ NativeHistogramBucketFactor: 1.1,
+ NativeHistogramMaxBucketNumber: 100,
+ NativeHistogramMinResetDuration: 1 * time.Hour,
+ },
+ []string{"mechanism"}),
}
// The reason we register metric vectors instead of metrics is so that
@@ -51,6 +64,7 @@ func NewRefreshMetrics(reg prometheus.Registerer) RefreshMetricsManager {
m.metricRegisterer = NewMetricRegisterer(reg, []prometheus.Collector{
m.failuresVec,
m.durationVec,
+ m.durationHistVec,
})
return m
@@ -59,8 +73,9 @@ func NewRefreshMetrics(reg prometheus.Registerer) RefreshMetricsManager {
// Instantiate returns metrics out of metric vectors for a given mechanism and config.
func (m *RefreshMetricsVecs) Instantiate(mech, config string) *RefreshMetrics {
return &RefreshMetrics{
- Failures: m.failuresVec.WithLabelValues(mech, config),
- Duration: m.durationVec.WithLabelValues(mech, config),
+ Failures: m.failuresVec.WithLabelValues(mech, config),
+ Duration: m.durationVec.WithLabelValues(mech, config),
+ DurationHistogram: m.durationHistVec.WithLabelValues(mech),
}
}
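Illustrative usage, assuming the `RefreshMetricsManager` returned by `NewRefreshMetrics` exposes `Instantiate` as the surrounding code suggests: the summary keeps the `{mechanism, config}` labels, while the new histogram is labelled by mechanism only, so every config of a given mechanism shares one histogram series. The mechanism and config names below are hypothetical.

```go
package example

import (
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/discovery"
)

// instantiateExample shows one mechanism/config pair being instantiated and
// the same observation landing in both the summary and the histogram.
func instantiateExample() *discovery.RefreshMetrics {
	rmm := discovery.NewRefreshMetrics(prometheus.NewRegistry())
	m := rmm.Instantiate("dns", "my-dns-sd") // hypothetical mechanism and config
	m.Duration.Observe(0.25)
	m.DurationHistogram.Observe(0.25)
	return m
}
```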
diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go
index ec1187278b..aa1cd2eb42 100644
--- a/discovery/moby/docker.go
+++ b/discovery/moby/docker.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/moby/docker_test.go b/discovery/moby/docker_test.go
index 88c832db1b..effdf90b36 100644
--- a/discovery/moby/docker_test.go
+++ b/discovery/moby/docker_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/moby/dockerswarm.go b/discovery/moby/dockerswarm.go
index 2761e891b5..5cb12279d8 100644
--- a/discovery/moby/dockerswarm.go
+++ b/discovery/moby/dockerswarm.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/moby/metrics_docker.go b/discovery/moby/metrics_docker.go
index 716f52b60a..8c2518a75e 100644
--- a/discovery/moby/metrics_docker.go
+++ b/discovery/moby/metrics_docker.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/moby/metrics_dockerswarm.go b/discovery/moby/metrics_dockerswarm.go
index 17dd30d1b3..e4682b032a 100644
--- a/discovery/moby/metrics_dockerswarm.go
+++ b/discovery/moby/metrics_dockerswarm.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/moby/mock_test.go b/discovery/moby/mock_test.go
index 2450ca4436..e43319494d 100644
--- a/discovery/moby/mock_test.go
+++ b/discovery/moby/mock_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/moby/network.go b/discovery/moby/network.go
index ea1ca66bc7..02db2b8a12 100644
--- a/discovery/moby/network.go
+++ b/discovery/moby/network.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/moby/nodes.go b/discovery/moby/nodes.go
index a11afeee25..76e090c803 100644
--- a/discovery/moby/nodes.go
+++ b/discovery/moby/nodes.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/moby/nodes_test.go b/discovery/moby/nodes_test.go
index c65b9411ed..1f97016297 100644
--- a/discovery/moby/nodes_test.go
+++ b/discovery/moby/nodes_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/moby/services.go b/discovery/moby/services.go
index 0698c01e6a..558d544e25 100644
--- a/discovery/moby/services.go
+++ b/discovery/moby/services.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/moby/services_test.go b/discovery/moby/services_test.go
index 95702ced9b..eb5c75c71e 100644
--- a/discovery/moby/services_test.go
+++ b/discovery/moby/services_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/moby/tasks.go b/discovery/moby/tasks.go
index 8a3dbe8101..d4e3678ee5 100644
--- a/discovery/moby/tasks.go
+++ b/discovery/moby/tasks.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/moby/tasks_test.go b/discovery/moby/tasks_test.go
index 3f38135096..60453990c4 100644
--- a/discovery/moby/tasks_test.go
+++ b/discovery/moby/tasks_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/nomad/metrics.go b/discovery/nomad/metrics.go
index 9707153d91..0e5dca4723 100644
--- a/discovery/nomad/metrics.go
+++ b/discovery/nomad/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/nomad/nomad.go b/discovery/nomad/nomad.go
index f2971fb01b..da558f54d9 100644
--- a/discovery/nomad/nomad.go
+++ b/discovery/nomad/nomad.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/nomad/nomad_test.go b/discovery/nomad/nomad_test.go
index 099a347cbf..3a4963e24b 100644
--- a/discovery/nomad/nomad_test.go
+++ b/discovery/nomad/nomad_test.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/openstack/hypervisor.go b/discovery/openstack/hypervisor.go
index e7a6362052..141b77c706 100644
--- a/discovery/openstack/hypervisor.go
+++ b/discovery/openstack/hypervisor.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/openstack/hypervisor_test.go b/discovery/openstack/hypervisor_test.go
index e4a97f32cf..afba84af2d 100644
--- a/discovery/openstack/hypervisor_test.go
+++ b/discovery/openstack/hypervisor_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go
index 58bf154555..2a6a777e9a 100644
--- a/discovery/openstack/instance.go
+++ b/discovery/openstack/instance.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/openstack/instance_test.go b/discovery/openstack/instance_test.go
index 0933b57067..aa202cddff 100644
--- a/discovery/openstack/instance_test.go
+++ b/discovery/openstack/instance_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/openstack/loadbalancer.go b/discovery/openstack/loadbalancer.go
index 254b713cdd..3b2def0d6a 100644
--- a/discovery/openstack/loadbalancer.go
+++ b/discovery/openstack/loadbalancer.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/openstack/loadbalancer_test.go b/discovery/openstack/loadbalancer_test.go
index eee21b9831..68be323a5a 100644
--- a/discovery/openstack/loadbalancer_test.go
+++ b/discovery/openstack/loadbalancer_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/openstack/metrics.go b/discovery/openstack/metrics.go
index 664f5ea6bc..01e7ab3add 100644
--- a/discovery/openstack/metrics.go
+++ b/discovery/openstack/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/openstack/mock_test.go b/discovery/openstack/mock_test.go
index 34e09c710f..c44dadfbc0 100644
--- a/discovery/openstack/mock_test.go
+++ b/discovery/openstack/mock_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/openstack/openstack.go b/discovery/openstack/openstack.go
index 61dff847cf..ce365e6cd0 100644
--- a/discovery/openstack/openstack.go
+++ b/discovery/openstack/openstack.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/ovhcloud/dedicated_server.go b/discovery/ovhcloud/dedicated_server.go
index 2035e92c91..e892607c34 100644
--- a/discovery/ovhcloud/dedicated_server.go
+++ b/discovery/ovhcloud/dedicated_server.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/ovhcloud/dedicated_server_test.go b/discovery/ovhcloud/dedicated_server_test.go
index 686fa7ef3f..84fa2c4c12 100644
--- a/discovery/ovhcloud/dedicated_server_test.go
+++ b/discovery/ovhcloud/dedicated_server_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/ovhcloud/metrics.go b/discovery/ovhcloud/metrics.go
index 18492c0ab4..dbcfe130e9 100644
--- a/discovery/ovhcloud/metrics.go
+++ b/discovery/ovhcloud/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/ovhcloud/ovhcloud.go b/discovery/ovhcloud/ovhcloud.go
index df150b8ce4..863fcfeaf9 100644
--- a/discovery/ovhcloud/ovhcloud.go
+++ b/discovery/ovhcloud/ovhcloud.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/ovhcloud/ovhcloud_test.go b/discovery/ovhcloud/ovhcloud_test.go
index 8f2272b746..acb1c43fad 100644
--- a/discovery/ovhcloud/ovhcloud_test.go
+++ b/discovery/ovhcloud/ovhcloud_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/ovhcloud/vps.go b/discovery/ovhcloud/vps.go
index 4e71a877bc..4023c4ff49 100644
--- a/discovery/ovhcloud/vps.go
+++ b/discovery/ovhcloud/vps.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/ovhcloud/vps_test.go b/discovery/ovhcloud/vps_test.go
index 051d52e85e..d997f2bb0e 100644
--- a/discovery/ovhcloud/vps_test.go
+++ b/discovery/ovhcloud/vps_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/puppetdb/metrics.go b/discovery/puppetdb/metrics.go
index 83e7975ed5..5a8e9736c2 100644
--- a/discovery/puppetdb/metrics.go
+++ b/discovery/puppetdb/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/puppetdb/puppetdb.go b/discovery/puppetdb/puppetdb.go
index db5fc2e2fb..52a1cf73c6 100644
--- a/discovery/puppetdb/puppetdb.go
+++ b/discovery/puppetdb/puppetdb.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/puppetdb/puppetdb_test.go b/discovery/puppetdb/puppetdb_test.go
index a96310553b..b12835b47c 100644
--- a/discovery/puppetdb/puppetdb_test.go
+++ b/discovery/puppetdb/puppetdb_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/puppetdb/resources.go b/discovery/puppetdb/resources.go
index 487c471c1b..09aa43a776 100644
--- a/discovery/puppetdb/resources.go
+++ b/discovery/puppetdb/resources.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/refresh/refresh.go b/discovery/refresh/refresh.go
index e0bac2af5e..3e766d1c84 100644
--- a/discovery/refresh/refresh.go
+++ b/discovery/refresh/refresh.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -108,6 +108,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
now := time.Now()
defer func() {
d.metrics.Duration.Observe(time.Since(now).Seconds())
+ d.metrics.DurationHistogram.Observe(time.Since(now).Seconds())
}()
tgs, err := d.refreshf(ctx)
diff --git a/discovery/refresh/refresh_test.go b/discovery/refresh/refresh_test.go
index 385c256932..e227d0abc9 100644
--- a/discovery/refresh/refresh_test.go
+++ b/discovery/refresh/refresh_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/registry.go b/discovery/registry.go
index 33938cef3e..04145e72e4 100644
--- a/discovery/registry.go
+++ b/discovery/registry.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -280,3 +280,13 @@ func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManag
}
return metrics, nil
}
+
+// RegisteredConfigNames returns the names of all registered service discovery providers.
+func RegisteredConfigNames() []string {
+ names := make([]string, 0, len(configNames))
+ for name := range configNames {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ return names
+}
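Sketch only (not part of the patch): enumerating the compiled-in SD providers with the new helper. Which names come back depends on which provider packages have registered themselves, for example via a blank import of the install package, and the list is returned sorted.

```go
package example

import (
	"fmt"

	"github.com/prometheus/prometheus/discovery"
	_ "github.com/prometheus/prometheus/discovery/install" // registers all built-in providers
)

// listProviders prints every service discovery provider name known to the
// registry, in sorted order.
func listProviders() {
	for _, name := range discovery.RegisteredConfigNames() {
		fmt.Println("available service discovery provider:", name)
	}
}
```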
diff --git a/discovery/scaleway/baremetal.go b/discovery/scaleway/baremetal.go
index 06f13532df..347ed40bab 100644
--- a/discovery/scaleway/baremetal.go
+++ b/discovery/scaleway/baremetal.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/scaleway/instance.go b/discovery/scaleway/instance.go
index 162a75e407..c0ed5853b3 100644
--- a/discovery/scaleway/instance.go
+++ b/discovery/scaleway/instance.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/scaleway/instance_test.go b/discovery/scaleway/instance_test.go
index b67b858ae0..2d0f7a67ff 100644
--- a/discovery/scaleway/instance_test.go
+++ b/discovery/scaleway/instance_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/scaleway/metrics.go b/discovery/scaleway/metrics.go
index d7a4e78556..5871f7e31b 100644
--- a/discovery/scaleway/metrics.go
+++ b/discovery/scaleway/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/scaleway/scaleway.go b/discovery/scaleway/scaleway.go
index 16a9835848..f8ef6c706c 100644
--- a/discovery/scaleway/scaleway.go
+++ b/discovery/scaleway/scaleway.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/stackit/metrics.go b/discovery/stackit/metrics.go
index 5ba565eb9c..a44d0728e3 100644
--- a/discovery/stackit/metrics.go
+++ b/discovery/stackit/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/stackit/mock_test.go b/discovery/stackit/mock_test.go
index 59641ce2bc..d1366508a3 100644
--- a/discovery/stackit/mock_test.go
+++ b/discovery/stackit/mock_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/stackit/server.go b/discovery/stackit/server.go
index 1be834a689..c553d9b3f3 100644
--- a/discovery/stackit/server.go
+++ b/discovery/stackit/server.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/stackit/server_test.go b/discovery/stackit/server_test.go
index 117fbdd66d..afb9460851 100644
--- a/discovery/stackit/server_test.go
+++ b/discovery/stackit/server_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/stackit/stackit.go b/discovery/stackit/stackit.go
index 1f9bd22469..bae76c8897 100644
--- a/discovery/stackit/stackit.go
+++ b/discovery/stackit/stackit.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/stackit/types.go b/discovery/stackit/types.go
index 84b7d0266c..575acbbe56 100644
--- a/discovery/stackit/types.go
+++ b/discovery/stackit/types.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/targetgroup/targetgroup.go b/discovery/targetgroup/targetgroup.go
index 5c3b67d6e8..4b1670ae1b 100644
--- a/discovery/targetgroup/targetgroup.go
+++ b/discovery/targetgroup/targetgroup.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/targetgroup/targetgroup_test.go b/discovery/targetgroup/targetgroup_test.go
index d68e29644a..1c1583d33d 100644
--- a/discovery/targetgroup/targetgroup_test.go
+++ b/discovery/targetgroup/targetgroup_test.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/triton/metrics.go b/discovery/triton/metrics.go
index ea98eae452..2d4193ee1f 100644
--- a/discovery/triton/metrics.go
+++ b/discovery/triton/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/triton/triton.go b/discovery/triton/triton.go
index 209e1c4deb..b21beef9d0 100644
--- a/discovery/triton/triton.go
+++ b/discovery/triton/triton.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/triton/triton_test.go b/discovery/triton/triton_test.go
index 731303677d..f2b6398bc8 100644
--- a/discovery/triton/triton_test.go
+++ b/discovery/triton/triton_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -83,7 +83,6 @@ var (
func newTritonDiscovery(c SDConfig) (*Discovery, discovery.DiscovererMetrics, error) {
reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg)
- // TODO(ptodev): Add the ability to unregister refresh metrics.
metrics := c.NewDiscovererMetrics(reg, refreshMetrics)
err := metrics.Register()
if err != nil {
diff --git a/discovery/util.go b/discovery/util.go
index 4e2a088518..064a5312a7 100644
--- a/discovery/util.go
+++ b/discovery/util.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/uyuni/metrics.go b/discovery/uyuni/metrics.go
index 85ea9d73d2..e1a9fd4db0 100644
--- a/discovery/uyuni/metrics.go
+++ b/discovery/uyuni/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go
index 0320a0490d..6f29fa130c 100644
--- a/discovery/uyuni/uyuni.go
+++ b/discovery/uyuni/uyuni.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/uyuni/uyuni_test.go b/discovery/uyuni/uyuni_test.go
index 4a73fa9ada..71f1c5afb1 100644
--- a/discovery/uyuni/uyuni_test.go
+++ b/discovery/uyuni/uyuni_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/vultr/metrics.go b/discovery/vultr/metrics.go
index 65b15eae2f..823fe4bdc0 100644
--- a/discovery/vultr/metrics.go
+++ b/discovery/vultr/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/vultr/mock_test.go b/discovery/vultr/mock_test.go
index bfc24d06fb..03e5952dd0 100644
--- a/discovery/vultr/mock_test.go
+++ b/discovery/vultr/mock_test.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/vultr/vultr.go b/discovery/vultr/vultr.go
index 27f3e11064..b2f6bde52a 100644
--- a/discovery/vultr/vultr.go
+++ b/discovery/vultr/vultr.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/vultr/vultr_test.go b/discovery/vultr/vultr_test.go
index 8975cfb455..d116c419b7 100644
--- a/discovery/vultr/vultr_test.go
+++ b/discovery/vultr/vultr_test.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/xds/client.go b/discovery/xds/client.go
index a27e060fbd..59485ffcba 100644
--- a/discovery/xds/client.go
+++ b/discovery/xds/client.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/xds/client_test.go b/discovery/xds/client_test.go
index 7e3cd85b6c..e663902161 100644
--- a/discovery/xds/client_test.go
+++ b/discovery/xds/client_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/xds/kuma.go b/discovery/xds/kuma.go
index 82ca8f2c9a..34bebe7765 100644
--- a/discovery/xds/kuma.go
+++ b/discovery/xds/kuma.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/xds/kuma_mads.pb.go b/discovery/xds/kuma_mads.pb.go
index 210a5343a4..d234241453 100644
--- a/discovery/xds/kuma_mads.pb.go
+++ b/discovery/xds/kuma_mads.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/xds/kuma_test.go b/discovery/xds/kuma_test.go
index 533a31dcf3..6620f9fac6 100644
--- a/discovery/xds/kuma_test.go
+++ b/discovery/xds/kuma_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -111,7 +111,6 @@ func getKumaMadsV1DiscoveryResponse(resources ...*MonitoringAssignment) (*v3.Dis
func newKumaTestHTTPDiscovery(c KumaSDConfig) (*fetchDiscovery, error) {
reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg)
- // TODO(ptodev): Add the ability to unregister refresh metrics.
metrics := c.NewDiscovererMetrics(reg, refreshMetrics)
err := metrics.Register()
if err != nil {
diff --git a/discovery/xds/metrics.go b/discovery/xds/metrics.go
index bdc9598f2c..7e5be89bd3 100644
--- a/discovery/xds/metrics.go
+++ b/discovery/xds/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/xds/xds.go b/discovery/xds/xds.go
index db55a2b6f7..29da7b7c89 100644
--- a/discovery/xds/xds.go
+++ b/discovery/xds/xds.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/xds/xds_test.go b/discovery/xds/xds_test.go
index 5a2e9d737b..c11cdd2c05 100644
--- a/discovery/xds/xds_test.go
+++ b/discovery/xds/xds_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/zookeeper/zookeeper.go b/discovery/zookeeper/zookeeper.go
index d5239324cb..6ac9b25cd6 100644
--- a/discovery/zookeeper/zookeeper.go
+++ b/discovery/zookeeper/zookeeper.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/discovery/zookeeper/zookeeper_test.go b/discovery/zookeeper/zookeeper_test.go
index de0d1f4924..ae2d23e607 100644
--- a/discovery/zookeeper/zookeeper_test.go
+++ b/discovery/zookeeper/zookeeper_test.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md
index c79dad40a2..d4a8cd4f20 100644
--- a/docs/command-line/prometheus.md
+++ b/docs/command-line/prometheus.md
@@ -38,6 +38,7 @@ The Prometheus monitoring server
| --storage.tsdb.retention.size | [DEPRECATED] Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. This flag has been deprecated, use the storage.tsdb.retention.size field in the config file instead. Use with server mode only. | |
| --storage.tsdb.no-lockfile | Do not create lockfile in data directory. Use with server mode only. | `false` |
| --storage.tsdb.head-chunks-write-queue-size | Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental. Use with server mode only. | `0` |
+| --storage.tsdb.delay-compact-file.path | Path to a JSON file with uploaded TSDB blocks, e.g. a Thanos shipper meta file. If set, TSDB will only compact level-1 blocks that are marked as uploaded in that file, improving external storage integrations, e.g. with the Thanos sidecar. 1+ level compactions won't be delayed. Use with server mode only. | |
| --storage.agent.path | Base path for metrics storage. Use with agent mode only. | `data-agent/` |
| --storage.agent.wal-compression | Compress the agent WAL. If false, the --storage.agent.wal-compression-type flag is ignored. Use with agent mode only. | `true` |
| --storage.agent.retention.min-time | Minimum age samples may be before being considered for deletion when the WAL is truncated Use with agent mode only. | |
diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md
index 69fcd265f2..f6737bc37f 100644
--- a/docs/command-line/promtool.md
+++ b/docs/command-line/promtool.md
@@ -59,7 +59,6 @@ Check the resources for validity.
| Flag | Description | Default |
| --- | --- | --- |
| --query.lookback-delta | The server's maximum query lookback duration. | `5m` |
-| --extended | Print extended information related to the cardinality of the metrics. | |
@@ -192,13 +191,25 @@ Check if the rule files are valid or not.
##### `promtool check metrics`
-Pass Prometheus metrics over stdin to lint them for consistency and correctness.
+Pass Prometheus metrics over stdin to lint them for consistency and correctness, and optionally perform cardinality analysis.
examples:
$ cat metrics.prom | promtool check metrics
-$ curl -s http://localhost:9090/metrics | promtool check metrics
+$ curl -s http://localhost:9090/metrics | promtool check metrics --extended
+
+$ curl -s http://localhost:9100/metrics | promtool check metrics --extended --lint=none
+
+
+
+###### Flags
+
+| Flag | Description | Default |
+| --- | --- | --- |
+| --extended | Print extended information related to the cardinality of the metrics. | |
+| --lint | Linting checks to apply for metrics. Available options are: all, none. Use --lint=none to disable metrics linting. | `all` |
+
@@ -570,7 +581,7 @@ List tsdb blocks.
##### `promtool tsdb dump`
-Dump samples from a TSDB.
+Dump data (series+samples or optionally just series) from a TSDB.
@@ -582,6 +593,7 @@ Dump samples from a TSDB.
| --min-time | Minimum timestamp to dump, in milliseconds since the Unix epoch. | `-9223372036854775808` |
| --max-time | Maximum timestamp to dump, in milliseconds since the Unix epoch. | `9223372036854775807` |
| --match ... | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |
+| --format | Output format of the dump (prom (default) or seriesjson). | `prom` |
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index c31d70389b..4079daae02 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -159,6 +159,12 @@ global:
# native histogram with custom buckets.
[ always_scrape_classic_histograms: <boolean> | default = false ]
+ # When enabled, Prometheus stores additional time series for each scrape:
+ # scrape_timeout_seconds, scrape_sample_limit, and scrape_body_size_bytes.
+ # These metrics help monitor how close targets are to their configured limits.
+ # This option can be overridden per scrape config.
+ [ extra_scrape_metrics: <boolean> | default = false ]
+
# The following explains the various combinations of the last three options
# in various exposition cases.
#
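A minimal sketch of how the new option is meant to be used, enabling it globally and opting a single job back out (job names and targets below are illustrative only, not part of this change):

```yaml
global:
  scrape_interval: 15s
  # Record scrape_timeout_seconds, scrape_sample_limit and
  # scrape_body_size_bytes for every target by default.
  extra_scrape_metrics: true

scrape_configs:
  - job_name: "node"                # illustrative job
    static_configs:
      - targets: ["localhost:9100"]
  - job_name: "noisy-exporter"      # illustrative job that opts out again
    extra_scrape_metrics: false
    static_configs:
      - targets: ["localhost:9091"]
```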
@@ -647,6 +653,12 @@ metric_relabel_configs:
# native histogram with custom buckets.
[ always_scrape_classic_histograms: | default = ]
+# When enabled, Prometheus stores additional time series for this scrape job:
+# scrape_timeout_seconds, scrape_sample_limit, and scrape_body_size_bytes.
+# These metrics help monitor how close targets are to their configured limits.
+# If not set, inherits the value from the global configuration.
+[ extra_scrape_metrics: | default = ]
+
# See global configuration above for further explanations of how the last three
# options combine their effects.
@@ -761,16 +773,56 @@ A `tls_config` allows configuring TLS connections.
OAuth 2.0 authentication using the client credentials or password grant type.
Prometheus fetches an access token from the specified endpoint with
-the given client access and secret keys.
+the configured client credentials.
```yaml
client_id: <string>
+
+# OAuth2 grant type to use. It can be one of
+# "client_credentials" or "urn:ietf:params:oauth:grant-type:jwt-bearer" (RFC 7523).
+# Default value is "client_credentials"
+[ grant_type: <string> ]
+
+# Client secret to provide to authorization server. Only used if
+# GrantType is set empty or set to "client_credentials".
[ client_secret: <secret> ]
# Read the client secret from a file.
# It is mutually exclusive with `client_secret`.
[ client_secret_file: <filename> ]
+# Secret key to sign JWT with. Only used if
+# GrantType is set to "urn:ietf:params:oauth:grant-type:jwt-bearer".
+[ client_certificate_key: <secret> ]
+
+# Read the secret key from a file.
+# It is mutually exclusive with `client_certificate_key`.
+[ client_certificate_key_file: <filename> ]
+
+# JWT kid value to include in the JWT header. Only used if
+# GrantType is set to "urn:ietf:params:oauth:grant-type:jwt-bearer".
+[ client_certificate_key_id: <string> ]
+
+# Signature algorithm used to sign JWT token. Only used if
+# GrantType is set to "urn:ietf:params:oauth:grant-type:jwt-bearer".
+# Default value is RS256 and valid values RS256, RS384, RS512
+[ signature_algorithm: <string> ]
+
+# OAuth client identifier used when communicating with
+# the configured OAuth provider. Default value is client_id. Only used if
+# GrantType is set to "urn:ietf:params:oauth:grant-type:jwt-bearer".
+[ iss: <string> ]
+
+# Intended audience of the request. If empty, the value
+# of TokenURL is used as the intended audience. Only used if
+# GrantType is set to "urn:ietf:params:oauth:grant-type:jwt-bearer".
+[ audience: <string> ]
+
+# Map of claims to be added to the JWT token. Only used if
+# GrantType is set to "urn:ietf:params:oauth:grant-type:jwt-bearer".
+claims:
+ [ <string>: <string> ... ]
+
# Scopes for the token request.
scopes:
[ - <string> ... ]
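A minimal sketch of an `oauth2` block using the new JWT-bearer grant (all endpoint URLs, identifiers and file paths are placeholders, and reading the key from a file is just one of the two supported options):

```yaml
oauth2:
  grant_type: "urn:ietf:params:oauth:grant-type:jwt-bearer"
  client_id: "sa-example@auth.example.org"                  # placeholder identifier
  client_certificate_key_file: /etc/prometheus/sa-key.pem   # placeholder key file
  client_certificate_key_id: "6a7c3b36-xxxx"                # placeholder kid
  signature_algorithm: RS256
  iss: "sa-example@auth.example.org"                        # placeholder issuer
  audience: "https://auth.example.org"                      # placeholder audience
  token_url: "https://auth.example.org/token"               # placeholder token endpoint
```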
@@ -879,11 +931,16 @@ The following meta labels are available on targets during [relabeling](#relabel_
#### `ecs`
-The `ecs` role discovers targets from AWS ECS containers. The private IP address is used by default, but may be changed to
-the public IP address with relabeling.
+The `ecs` role discovers targets from AWS ECS containers.
-The IAM credentials used must have the following permissions to discover
-scrape targets:
+ECS service discovery supports all ECS networking modes:
+- **awsvpc mode** (Fargate and EC2 with ENI): Uses the task's private IP address from its elastic network interface
+- **bridge mode** (EC2): Uses the EC2 host instance's private IP address
+- **host mode** (EC2): Uses the EC2 host instance's private IP address
+
+The private IP address is used by default, but may be changed to the public IP address with relabeling.
+
+The IAM credentials used must have the following permissions to discover scrape targets:
- `ecs:ListClusters`
- `ecs:DescribeClusters`
@@ -891,6 +948,9 @@ scrape targets:
- `ecs:DescribeServices`
- `ecs:ListTasks`
- `ecs:DescribeTasks`
+- `ecs:DescribeContainerInstances` (required for EC2 launch type tasks)
+- `ec2:DescribeInstances` (required for EC2 launch type tasks)
+- `ec2:DescribeNetworkInterfaces` (required to get public IP for awsvpc mode tasks)
The following meta labels are available on targets during [relabeling](#relabel_config):
@@ -912,9 +972,17 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_ecs_subnet_id`: the subnet ID where the task is running
* `__meta_ecs_availability_zone`: the availability zone where the task is running
* `__meta_ecs_region`: the AWS region
+* `__meta_ecs_public_ip`: the public IP address (from ENI for awsvpc mode, from EC2 instance for bridge/host mode), if available
+* `__meta_ecs_network_mode`: the network mode of the task (awsvpc or bridge)
+* `__meta_ecs_container_instance_arn`: the ARN of the container instance (EC2 launch type only)
+* `__meta_ecs_ec2_instance_id`: the EC2 instance ID (EC2 launch type only)
+* `__meta_ecs_ec2_instance_type`: the EC2 instance type (EC2 launch type only)
+* `__meta_ecs_ec2_instance_private_ip`: the private IP address of the EC2 instance (EC2 launch type only)
+* `__meta_ecs_ec2_instance_public_ip`: the public IP address of the EC2 instance, if available (EC2 launch type only)
* `__meta_ecs_tag_cluster_`: each cluster tag value, keyed by tag name
* `__meta_ecs_tag_service_`: each service tag value, keyed by tag name
* `__meta_ecs_tag_task_`: each task tag value, keyed by tag name
+* `__meta_ecs_tag_ec2_`: each EC2 instance tag value, keyed by tag name (EC2 launch type only)
See below for the configuration options for AWS discovery:
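As a sketch only, the new meta labels could be wired into relabeling like this, mirroring the relabel examples used elsewhere in this documentation (the scrape port is illustrative):

```yaml
relabel_configs:
  # Scrape via the public IP when the task or its EC2 host exposes one;
  # targets without the label keep the default private IP address.
  - source_labels: [__meta_ecs_public_ip]
    regex: (.+)
    target_label: __address__
    replacement: "$1:9100"          # illustrative exporter port
  # Keep the task's network mode as a regular label for later inspection.
  - source_labels: [__meta_ecs_network_mode]
    target_label: network_mode
```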
@@ -2554,12 +2622,35 @@ project:
[ ]
```
-A Service Account Token can be set through `http_config`.
+A [Service Account Key](https://docs.stackit.cloud/platform/access-and-identity/service-accounts/how-tos/manage-service-account-keys/) can be set through `http_config`. This is done by mapping values from the STACKIT Service Account JSON into the oauth2 configuration.
+
+Given a Service Account JSON such as
+```json
+{
+ //....
+ "credentials": {
+ "kid": "6a7c3b36-xxxxxxxx",
+ "iss": "xxxx@sa.stackit.cloud",
+ "sub": "af2c2336-xxxxxxxx",
+ "aud": "https://stackit-service-account-prod.apps.01.cf.eu01.stackit.cloud",
+ "privateKey": "-----BEGIN PRIVATE KEY-----xxxx"
+ }
+}
+```
+
+the properties can be mapped as follows:
```yaml
stackit_sd_config:
-- authorization:
- credentials:
+- oauth2:
+ client_id: <credentials.sub>
+ client_certificate_key: <credentials.privateKey>
+ client_certificate_key_id: <credentials.kid>
+ iss: <credentials.iss>
+ audience: <credentials.aud>
+ grant_type: "urn:ietf:params:oauth:grant-type:jwt-bearer"
+ token_url: "https://service-account.api.stackit.cloud/token"
+ signature_algorithm: RS512
```
### ``
@@ -3277,6 +3368,14 @@ azuread:
[ sdk:
[ tenant_id: <string> ] ]
+ # Optional custom OAuth 2.0 scope to request when acquiring tokens.
+ # If not specified, defaults to the appropriate monitoring scope for the cloud:
+ # - AzurePublic: https://monitor.azure.com//.default
+ # - AzureGovernment: https://monitor.azure.us//.default
+ # - AzureChina: https://monitor.azure.cn//.default
+ # Use this to authenticate against custom Azure applications or non-standard endpoints.
+ [ scope: <string> ]
+
# WARNING: Remote write is NOT SUPPORTED by Google Cloud. This configuration is reserved for future use.
# Optional Google Cloud Monitoring configuration.
# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or azuread.
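A minimal sketch of a `remote_write` entry exercising the new `scope` field (the endpoint, client ID and scope value are placeholders; `managed_identity` is only one of the available azuread credential options):

```yaml
remote_write:
  - url: "https://ingest.example.com/api/v1/write"          # placeholder endpoint
    azuread:
      cloud: AzurePublic
      managed_identity:
        client_id: "00000000-0000-0000-0000-000000000000"   # placeholder identity
      # Override the default monitoring scope for the cloud.
      scope: "https://monitor.example.com/.default"         # placeholder scope
```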
diff --git a/docs/configuration/unit_testing_rules.md b/docs/configuration/unit_testing_rules.md
index d237c8cf88..af94c414f0 100644
--- a/docs/configuration/unit_testing_rules.md
+++ b/docs/configuration/unit_testing_rules.md
@@ -48,6 +48,18 @@ input_series:
# Name of the test group
[ name: <string> ]
+# Start timestamp for the test group. This sets the base time for all samples
+# and evaluations in this test group.
+# Accepts either a Unix timestamp (e.g., 1609459200) or an RFC3339 formatted
+# timestamp (e.g., "2021-01-01T00:00:00Z").
+# Default: 0 (Unix epoch: 1970-01-01 00:00:00 UTC)
+#
+# When set:
+# - All input_series samples are timestamped starting from start_timestamp
+# - The eval_time in test cases is relative to start_timestamp
+# - The time() function returns start_timestamp + eval_time
+[ start_timestamp: <unix_timestamp> | <rfc3339_timestamp> | default = 0 ]
+
# Unit tests for the above data.
# Unit tests for alerting rules. We consider the alerting rules from the input file.
@@ -137,7 +149,8 @@ values:
Prometheus allows you to have the same alertname for different alerting rules. Hence in this unit testing, you have to list the union of all the firing alerts for the alertname under a single `<alert_test_case>`.
``` yaml
-# The time elapsed from time=0s when the alerts have to be checked.
+# The time elapsed from start_timestamp when the alerts have to be checked.
+# This is a duration relative to start_timestamp (which defaults to 0).
eval_time: <duration>
# Name of the alert to be tested.
@@ -168,7 +181,8 @@ exp_annotations:
# Expression to evaluate
expr: <string>
-# The time elapsed from time=0s when the expression has to be evaluated.
+# The time elapsed from start_timestamp when the expression has to be evaluated.
+# This is a duration relative to start_timestamp (which defaults to 0).
eval_time: <duration>
# Expected samples at the given evaluation time.
@@ -275,3 +289,24 @@ groups:
summary: "Instance {{ $labels.instance }} down"
description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes."
```
+
+### Time within tests
+
+It should be noted that in all tests, whether in `alert_test_case` or
+`promql_test_case`, all functions related to the current time, for example
+`time()` and the `day_of_*()` functions, return a consistent value
+throughout the test.
+
+By default, at the start of the test evaluation, `time()` returns 0 (Unix epoch:
+January 1, 1970 00:00:00 UTC). The `eval_time` field specifies a duration relative
+to `start_timestamp`, so by default `time()` will return a value of `0 + eval_time`.
+
+You can configure a custom start timestamp for your tests by setting the `start_timestamp`
+field in your test group. This field accepts either:
+- A Unix timestamp (e.g., `1609459200` for January 1, 2021 00:00:00 UTC)
+- An RFC3339 formatted timestamp (e.g., `"2021-01-01T00:00:00Z"`)
+
+When you set `start_timestamp`:
+- All `input_series` samples will be timestamped starting from `start_timestamp`
+- The `eval_time` field in test cases is interpreted as a duration relative to `start_timestamp`
+- The `time()` function will return `start_timestamp + eval_time`
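A minimal sketch of a test file using `start_timestamp` (the metric name and values are illustrative; no rule files are needed for an expression-only test):

```yaml
rule_files: []          # expression-only sketch, no rules loaded

tests:
  - name: start-timestamp-example
    # Base time for all samples and eval_times in this group.
    start_timestamp: "2021-01-01T00:00:00Z"
    interval: 1m
    input_series:
      - series: 'example_metric{job="demo"}'   # illustrative series
        values: "0+1x10"                       # 0,1,2,... one sample per minute
    promql_expr_test:
      # eval_time is relative to start_timestamp, so this evaluates at
      # 2021-01-01T00:05:00Z and sees the sample written five minutes in.
      - expr: example_metric{job="demo"}
        eval_time: 5m
        exp_samples:
          - labels: 'example_metric{job="demo"}'
            value: 5
```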
diff --git a/docs/feature_flags.md b/docs/feature_flags.md
index 0051859d66..af08eebb45 100644
--- a/docs/feature_flags.md
+++ b/docs/feature_flags.md
@@ -28,6 +28,8 @@ and m-mapped chunks, while a WAL replay from disk is only needed for the parts o
`--enable-feature=extra-scrape-metrics`
+> **Note:** This feature flag is deprecated. Please use the `extra_scrape_metrics` configuration option instead (available at both global and scrape-config level). The feature flag will be removed in a future major version. See the [configuration documentation](configuration/configuration.md) for more details.
+
When enabled, for each instance scrape, Prometheus stores a sample in the following additional time series:
- `scrape_timeout_seconds`. The configured `scrape_timeout` for a target. This allows you to measure each target to find out how close they are to timing out with `scrape_duration_seconds / scrape_timeout_seconds`.
@@ -197,7 +199,12 @@ the offset calculation.
`step()` can be used in duration expressions.
For a **range query**, it resolves to the step width of the range query.
-For an **instant query**, it resolves to `0s`.
+For an **instant query**, it resolves to `0s`.
+
+`range()` can be used in duration expressions.
+For a **range query**, it resolves to the full range of the query (end time - start time).
+For an **instant query**, it resolves to `0s`.
+This is particularly useful in combination with `@end()` to look back over the entire query range, e.g., `max_over_time(metric[range()] @ end())`.
`min(<duration>, <duration>)` and `max(<duration>, <duration>)` can be used to find the minimum or maximum of two duration expressions.
diff --git a/docs/querying/api.md b/docs/querying/api.md
index b377c6174e..4891db8980 100644
--- a/docs/querying/api.md
+++ b/docs/querying/api.md
@@ -84,8 +84,9 @@ URL query parameters:
- `time=<rfc3339 | unix_timestamp>`: Evaluation timestamp. Optional.
- `timeout=<duration>`: Evaluation timeout. Optional. Defaults to and
is capped by the value of the `-query.timeout` flag.
-- `limit=<number>`: Maximum number of returned series. Doesn’t affect scalars or strings but truncates the number of series for matrices and vectors. Optional. 0 means disabled.
+- `limit=<number>`: Maximum number of returned series. Doesn't affect scalars or strings but truncates the number of series for matrices and vectors. Optional. 0 means disabled.
- `lookback_delta=<duration>`: Override the [lookback period](#staleness) just for this query. Optional.
+- `stats=<string>`: Include query statistics in the response. If set to `all`, includes detailed statistics. Optional.
The current server time is used if the `time` parameter is omitted.
@@ -159,6 +160,7 @@ URL query parameters:
is capped by the value of the `-query.timeout` flag.
- `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.
- `lookback_delta=<duration>`: Override the [lookback period](#staleness) just for this query. Optional.
+- `stats=<string>`: Include query statistics in the response. If set to `all`, includes detailed statistics. Optional.
You can URL-encode these parameters directly in the request body by using the `POST` method and
`Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large
@@ -670,6 +672,35 @@ Note that with the currently implemented bucket schemas, positive buckets are
“open left”, negative buckets are “open right”, and the zero bucket (with a
negative left boundary and a positive right boundary) is “closed both”.
+## Scrape pools
+
+The following endpoint returns a list of all configured scrape pools:
+
+```
+GET /api/v1/scrape_pools
+```
+
+The `data` section of the JSON response is a list of string scrape pool names.
+
+```bash
+curl http://localhost:9090/api/v1/scrape_pools
+```
+
+```json
+{
+ "status": "success",
+ "data": {
+ "scrapePools": [
+ "prometheus",
+ "node_exporter",
+ "blackbox"
+ ]
+ }
+}
+```
+
+*New in v2.42*
+
## Targets
The following endpoint returns an overview of the current state of the
@@ -1346,7 +1377,7 @@ GET /api/v1/status/tsdb
```
URL query parameters:
-- `limit=<number>`: Limit the number of returned items to a given number for each set of statistics. By default, 10 items are returned.
+- `limit=<number>`: Limit the number of returned items to a given number for each set of statistics. By default, 10 items are returned. The maximum allowed limit is 10000.
The `data` section of the query result consists of:
@@ -1700,3 +1731,80 @@ GET /api/v1/notifications/live
```
*New in v3.0*
+
+### Features
+
+The following endpoint returns a list of enabled features in the Prometheus server:
+
+```
+GET /api/v1/features
+```
+
+This endpoint provides information about which features are currently enabled or disabled in the Prometheus instance. Features are organized into categories such as `api`, `promql`, `promql_functions`, etc.
+
+The `data` section contains a map where each key is a feature category, and each value is a map of feature names to their enabled status (boolean).
+
+```bash
+curl http://localhost:9090/api/v1/features
+```
+
+```json
+{
+ "status": "success",
+ "data": {
+ "api": {
+ "admin": false,
+ "exclude_alerts": true
+ },
+ "otlp_receiver": {
+ "delta_conversion": false,
+ "native_delta_ingestion": false
+ },
+ "prometheus": {
+ "agent_mode": false,
+ "auto_reload_config": false
+ },
+ "promql": {
+ "anchored": false,
+ "at_modifier": true
+ },
+ "promql_functions": {
+ "abs": true,
+ "absent": true
+ },
+ "promql_operators": {
+ "!=": true,
+ "!~": true
+ },
+ "rules": {
+ "concurrent_rule_eval": false,
+ "keep_firing_for": true
+ },
+ "scrape": {
+ "start_timestamp_zero_ingestion": false,
+ "extra_metrics": false
+ },
+ "service_discovery": {
+ "azure": true,
+ "consul": true
+ },
+ "templating": {
+ "args": true,
+ "externalURL": true
+ },
+ "tsdb": {
+ "delayed_compaction": false,
+ "exemplar_storage": false
+ }
+ }
+}
+```
+
+**Notes:**
+
+- All feature names use the `snake_case` naming convention
+- Features set to `false` may be omitted from the response
+- Clients should treat absent features as equivalent to `false`
+- Clients must ignore unknown feature names and categories for forward compatibility
+
+*New in v3.8*
diff --git a/docs/storage.md b/docs/storage.md
index 1feb0a4940..e9c81a5036 100644
--- a/docs/storage.md
+++ b/docs/storage.md
@@ -59,12 +59,16 @@ A Prometheus server's data directory looks something like this:
Note that a limitation of local storage is that it is not clustered or
replicated. Thus, it is not arbitrarily scalable or durable in the face of
drive or node outages and should be managed like any other single node
-database.
+database. With proper architecture, it is possible to retain years of
+data in local storage.
[Snapshots](querying/api.md#snapshot) are recommended for backups. Backups
made without snapshots run the risk of losing data that was recorded since
-the last WAL sync, which typically happens every two hours. With proper
-architecture, it is possible to retain years of data in local storage.
+the last TSDB block was created, which typically happens every two hours,
+covering the last three hours of samples. Excluding the WAL files (the
+`chunks_head/`, `wal/`, and `wbl/` directories in `storage.tsdb.path`)
+from a backup or restore will, in any case, ensure a coherent backup, at the
+cost of losing the time range covered by the WAL files.
Alternatively, external storage may be used via the
[remote read/write APIs](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
diff --git a/documentation/examples/Makefile b/documentation/examples/Makefile
index 4085155f80..8ed308899b 100644
--- a/documentation/examples/Makefile
+++ b/documentation/examples/Makefile
@@ -1,4 +1,4 @@
-# Copyright 2022 The Prometheus Authors
+# Copyright The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
diff --git a/documentation/examples/custom-sd/adapter-usage/main.go b/documentation/examples/custom-sd/adapter-usage/main.go
index e7f7a69b5d..c0ce03cd0f 100644
--- a/documentation/examples/custom-sd/adapter-usage/main.go
+++ b/documentation/examples/custom-sd/adapter-usage/main.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/documentation/examples/custom-sd/adapter/adapter.go b/documentation/examples/custom-sd/adapter/adapter.go
index b242c4eaa0..83f0e80c49 100644
--- a/documentation/examples/custom-sd/adapter/adapter.go
+++ b/documentation/examples/custom-sd/adapter/adapter.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/documentation/examples/custom-sd/adapter/adapter_test.go b/documentation/examples/custom-sd/adapter/adapter_test.go
index 329ca8c29a..0ec69348d8 100644
--- a/documentation/examples/custom-sd/adapter/adapter_test.go
+++ b/documentation/examples/custom-sd/adapter/adapter_test.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/documentation/examples/prometheus-stackit.yml b/documentation/examples/prometheus-stackit.yml
index 623cb231ff..9be3f9c53a 100644
--- a/documentation/examples/prometheus-stackit.yml
+++ b/documentation/examples/prometheus-stackit.yml
@@ -12,8 +12,15 @@ scrape_configs:
stackit_sd_configs:
- project: 11111111-1111-1111-1111-111111111111
- authorization:
- credentials: ""
+ oauth2:
+ client_id: <credentials.sub>
+ client_certificate_key: <credentials.privateKey>
+ client_certificate_key_id: <credentials.kid>
+ iss: <credentials.iss>
+ audience: <credentials.aud>
+ grant_type: "urn:ietf:params:oauth:grant-type:jwt-bearer"
+ token_url: "https://service-account.api.stackit.cloud/token"
+ signature_algorithm: RS512
relabel_configs:
# Use the public IPv4 and port 9100 to scrape the target.
- source_labels: [__meta_stackit_public_ipv4]
@@ -25,8 +32,15 @@ scrape_configs:
stackit_sd_configs:
- project: 11111111-1111-1111-1111-111111111111
- authorization:
- credentials: ""
+ oauth2:
+ client_id: <credentials.sub>
+ client_certificate_key: <credentials.privateKey>
+ client_certificate_key_id: <credentials.kid>
+ iss: <credentials.iss>
+ audience: <credentials.aud>
+ grant_type: "urn:ietf:params:oauth:grant-type:jwt-bearer"
+ token_url: "https://service-account.api.stackit.cloud/token"
+ signature_algorithm: RS512
relabel_configs:
# Use the private IPv4 within the STACKIT Subnet and port 9100 to scrape the target.
- source_labels: [__meta_stackit_private_ipv4_mynet]
diff --git a/documentation/examples/remote_storage/Makefile b/documentation/examples/remote_storage/Makefile
index e0dfd4d647..a6c8e48c45 100644
--- a/documentation/examples/remote_storage/Makefile
+++ b/documentation/examples/remote_storage/Makefile
@@ -1,4 +1,4 @@
-# Copyright 2022 The Prometheus Authors
+# Copyright The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
diff --git a/documentation/examples/remote_storage/example_write_adapter/server.go b/documentation/examples/remote_storage/example_write_adapter/server.go
index 727a3056d3..c2ec7184e3 100644
--- a/documentation/examples/remote_storage/example_write_adapter/server.go
+++ b/documentation/examples/remote_storage/example_write_adapter/server.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -59,7 +59,11 @@ func main() {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
- printV2(req)
+ err = printV2(req)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
default:
msg := fmt.Sprintf("Unknown remote write content type: %s", contentType)
fmt.Println(msg)
@@ -93,10 +97,13 @@ func printV1(req *prompb.WriteRequest) {
}
}
-func printV2(req *writev2.Request) {
+func printV2(req *writev2.Request) error {
b := labels.NewScratchBuilder(0)
for _, ts := range req.Timeseries {
- l := ts.ToLabels(&b, req.Symbols)
+ l, err := ts.ToLabels(&b, req.Symbols)
+ if err != nil {
+ return err
+ }
m := ts.ToMetadata(req.Symbols)
fmt.Println(l, m)
@@ -104,7 +111,10 @@ func printV2(req *writev2.Request) {
fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
}
for _, ep := range ts.Exemplars {
- e := ep.ToExemplar(&b, req.Symbols)
+ e, err := ep.ToExemplar(&b, req.Symbols)
+ if err != nil {
+ return err
+ }
fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp)
}
for _, hp := range ts.Histograms {
@@ -117,4 +127,5 @@ func printV2(req *writev2.Request) {
fmt.Printf("\tHistogram: %s\n", h.String())
}
}
+ return nil
}
diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod
index a97ad32a6a..b77f248bf5 100644
--- a/documentation/examples/remote_storage/go.mod
+++ b/documentation/examples/remote_storage/go.mod
@@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/documentation/examples/remote_storage
-go 1.24.0
+go 1.24.9
require (
github.com/alecthomas/kingpin/v2 v2.4.0
@@ -8,34 +8,35 @@ require (
github.com/golang/snappy v1.0.0
github.com/influxdata/influxdb-client-go/v2 v2.14.0
github.com/prometheus/client_golang v1.23.2
- github.com/prometheus/common v0.66.1
- github.com/prometheus/prometheus v0.305.1-0.20250905124657-5c2e43f09c03
+ github.com/prometheus/common v0.67.4
+ github.com/prometheus/prometheus v0.308.1
github.com/stretchr/testify v1.11.1
)
require (
- cloud.google.com/go/auth v0.16.2 // indirect
+ cloud.google.com/go/auth v0.17.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
- cloud.google.com/go/compute/metadata v0.7.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
- github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
+ cloud.google.com/go/compute/metadata v0.9.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
+ github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
- github.com/aws/aws-sdk-go-v2 v1.37.0 // indirect
- github.com/aws/aws-sdk-go-v2/config v1.29.14 // indirect
- github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 // indirect
- github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect
- github.com/aws/smithy-go v1.22.5 // indirect
+ github.com/aws/aws-sdk-go-v2 v1.41.0 // indirect
+ github.com/aws/aws-sdk-go-v2/config v1.32.6 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.19.6 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect
+ github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 // indirect
+ github.com/aws/smithy-go v1.24.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -43,22 +44,22 @@ require (
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
- github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
+ github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
- github.com/googleapis/gax-go/v2 v2.14.2 // indirect
- github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
+ github.com/googleapis/gax-go/v2 v2.15.0 // indirect
+ github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.18.0 // indirect
+ github.com/klauspost/compress v1.18.2 // indirect
github.com/knadh/koanf/maps v0.1.2 // indirect
github.com/knadh/koanf/providers/confmap v1.0.0 // indirect
- github.com/knadh/koanf/v2 v2.2.1 // indirect
+ github.com/knadh/koanf/v2 v2.3.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
@@ -67,21 +68,22 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/oapi-codegen/runtime v1.0.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.139.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.139.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.139.0 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+ github.com/prometheus/client_golang/exp v0.0.0-20251212205219-7ba246a648ca // indirect
github.com/prometheus/client_model v0.6.2 // indirect
- github.com/prometheus/otlptranslator v0.0.2 // indirect
+ github.com/prometheus/otlptranslator v1.0.0 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
- github.com/prometheus/sigv4 v0.2.0 // indirect
+ github.com/prometheus/sigv4 v0.3.0 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/collector/component v1.45.0 // indirect
- go.opentelemetry.io/collector/confmap v1.35.0 // indirect
- go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 // indirect
+ go.opentelemetry.io/collector/confmap v1.45.0 // indirect
+ go.opentelemetry.io/collector/confmap/xconfmap v0.139.0 // indirect
go.opentelemetry.io/collector/consumer v1.45.0 // indirect
go.opentelemetry.io/collector/featuregate v1.45.0 // indirect
go.opentelemetry.io/collector/pdata v1.45.0 // indirect
@@ -96,24 +98,23 @@ require (
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
- go.yaml.in/yaml/v2 v2.4.2 // indirect
- golang.org/x/crypto v0.41.0 // indirect
- golang.org/x/net v0.43.0 // indirect
- golang.org/x/oauth2 v0.30.0 // indirect
- golang.org/x/sys v0.35.0 // indirect
- golang.org/x/text v0.28.0 // indirect
- golang.org/x/time v0.12.0 // indirect
- google.golang.org/api v0.239.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b // indirect
+ go.yaml.in/yaml/v2 v2.4.3 // indirect
+ go.yaml.in/yaml/v3 v3.0.4 // indirect
+ golang.org/x/crypto v0.43.0 // indirect
+ golang.org/x/net v0.46.0 // indirect
+ golang.org/x/oauth2 v0.32.0 // indirect
+ golang.org/x/sys v0.37.0 // indirect
+ golang.org/x/text v0.30.0 // indirect
+ golang.org/x/time v0.13.0 // indirect
+ google.golang.org/api v0.252.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 // indirect
google.golang.org/grpc v1.76.0 // indirect
google.golang.org/protobuf v1.36.10 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/apimachinery v0.33.5 // indirect
- k8s.io/client-go v0.33.5 // indirect
+ k8s.io/apimachinery v0.34.1 // indirect
+ k8s.io/client-go v0.34.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
- k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
- sigs.k8s.io/yaml v1.4.0 // indirect
+ k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
)
exclude (
diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum
index b7c633982b..1a3e86ff22 100644
--- a/documentation/examples/remote_storage/go.sum
+++ b/documentation/examples/remote_storage/go.sum
@@ -1,25 +1,25 @@
-cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4=
-cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA=
+cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4=
+cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ=
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
-cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU=
-cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4=
+cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
+cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 h1:wL5IEG5zb7BVv1Kv0Xm92orq+5hB5Nipn3B5tn4Rqfk=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
@@ -33,36 +33,40 @@ github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7D
github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
-github.com/aws/aws-sdk-go-v2 v1.37.0 h1:YtCOESR/pN4j5oA7cVHSfOwIcuh/KwHC4DOSXFbv5F0=
-github.com/aws/aws-sdk-go-v2 v1.37.0/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg=
-github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM=
-github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 h1:H2iZoqW/v2Jnrh1FnU725Bq6KJ0k2uP63yH+DcY+HUI=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0/go.mod h1:L0FqLbwMXHvNC/7crWV1iIxUlOKYZUE8KuTIA+TozAI=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 h1:EDped/rNzAhFPhVY0sDGbtD16OKqksfA8OjF/kLEgw8=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0/go.mod h1:uUI335jvzpZRPpjYx6ODc/wg1qH+NnoSTK/FwVeK0C0=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
-github.com/aws/aws-sdk-go-v2/service/ec2 v1.237.0 h1:XHE2G+yaDQql32FZt19QmQt4WuisqQJIkMUSCxeCUl8=
-github.com/aws/aws-sdk-go-v2/service/ec2 v1.237.0/go.mod h1:t11/j/nH9i6bbsPH9xc04BJOsV2nVPUqrB67/TLDsyM=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 h1:eRhU3Sh8dGbaniI6B+I48XJMrTPRkK4DKo+vqIxziOU=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0/go.mod h1:paNLV18DZ6FnWE/bd06RIKPDIFpjuvCkGKWTG/GDBeM=
-github.com/aws/aws-sdk-go-v2/service/lightsail v1.44.0 h1:QiiCqpKy0prxq+92uWfESzcb7/8Y9JAamcMOzVYLEoM=
-github.com/aws/aws-sdk-go-v2/service/lightsail v1.44.0/go.mod h1:ESppxYqXQCpCY+KWl3BdkQjmsQX6zxKP39SnDtRDoU0=
-github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8=
-github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
-github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY=
-github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
-github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw=
-github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
+github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4=
+github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
+github.com/aws/aws-sdk-go-v2/config v1.32.6 h1:hFLBGUKjmLAekvi1evLi5hVvFQtSo3GYwi+Bx4lpJf8=
+github.com/aws/aws-sdk-go-v2/config v1.32.6/go.mod h1:lcUL/gcd8WyjCrMnxez5OXkO3/rwcNmvfno62tnXNcI=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.6 h1:F9vWao2TwjV2MyiyVS+duza0NIRtAslgLUM0vTA1ZaE=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.6/go.mod h1:SgHzKjEVsdQr6Opor0ihgWtkWdfRAIwxYzSJ8O85VHY=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 h1:80+uETIWS1BqjnN9uJ0dBUaETh+P1XwFy5vwHwK5r9k=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16/go.mod h1:wOOsYuxYuB/7FlnVtzeBYRcjSRtQpAW0hCP7tIULMwo=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.262.0 h1:5qBb1XV/D18qtCHd3bmmxoVglI+fZ4QWuS/EB8kIXYQ=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.262.0/go.mod h1:NDdDLLW5PtLLXN661gKcvJvqAH5OBXsfhMlmKVu1/pY=
+github.com/aws/aws-sdk-go-v2/service/ecs v1.67.2 h1:oeICOX/+D0XXV1aMYJPXVe3CO37zYr7fB6HFgxchleU=
+github.com/aws/aws-sdk-go-v2/service/ecs v1.67.2/go.mod h1:rrhqfkXfa2DSNq0RyFhnnFEAyI+yJB4+2QlZKeJvMjs=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM=
+github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.4 h1:/1o2AYwHJojUDeMvQNyJiKZwcWCc3e4kQuTXqRLuThc=
+github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.4/go.mod h1:Nn2xx6HojGuNMtUFxxz/nyNLSS+tHMRsMhe3+W3wB5k=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 h1:HpI7aMmJ+mm1wkSHIA2t5EaFFv5EFYXePW30p1EIrbQ=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.4/go.mod h1:C5RdGMYGlfM0gYq/tifqgn4EbyX99V15P2V3R+VHbQU=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 h1:aM/Q24rIlS3bRAhTyFurowU8A0SMyGDtEOY/l/s/1Uw=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.8/go.mod h1:+fWt2UHSb4kS7Pu8y+BMBvJF0EWx+4H0hzNwtDNRTrg=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 h1:AHDr0DaHIAo8c9t1emrzAlVDFp+iMMKnPdYy6XO4MCE=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12/go.mod h1:GQ73XawFFiWxyWXMHWfhiomvP3tXtdNar/fi8z18sx0=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk=
+github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
+github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -82,25 +86,23 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
-github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
-github.com/digitalocean/godo v1.157.0 h1:ReELaS6FxXNf8gryUiVH0wmyUmZN8/NCmBX4gXd3F0o=
-github.com/digitalocean/godo v1.157.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM=
+github.com/digitalocean/godo v1.168.0 h1:mlORtUcPD91LQeJoznrH3XvfvgK3t8Wvrpph9giUT/Q=
+github.com/digitalocean/godo v1.168.0/go.mod h1:xQsWpVCCbkDrWisHA72hPzPlnC+4W5w/McZY5ij9uvU=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/docker v28.3.0+incompatible h1:ffS62aKWupCWdvcee7nBU9fhnmknOqDPaJAMtfK0ImQ=
-github.com/docker/docker v28.3.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
+github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84=
github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
-github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
-github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
+github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
-github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
-github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
+github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo=
+github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs=
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
@@ -109,10 +111,10 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
-github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
-github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
-github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -126,23 +128,22 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM=
github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
-github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk=
-github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
+github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I=
github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
-github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
+github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
-github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
+github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
@@ -154,18 +155,18 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
-github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=
-github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
-github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E=
-github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
+github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
+github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
+github.com/gophercloud/gophercloud/v2 v2.8.0 h1:of2+8tT6+FbEYHfYC8GBu8TXJNsXYSNm9KuvpX7Neqo=
+github.com/gophercloud/gophercloud/v2 v2.8.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
-github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
-github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
+github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 h1:cLN4IBkmkYZNnk7EAJ0BHIethd+J6LqxFNw5mSiI2bM=
+github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/hashicorp/consul/api v1.32.0 h1:5wp5u780Gri7c4OedGEPzmlUEzi0g2KyiPphSr6zjVg=
github.com/hashicorp/consul/api v1.32.0/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40=
-github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
-github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
+github.com/hashicorp/cronexpr v1.1.3 h1:rl5IkxXN2m681EfivTlccqIryzYJSXRGRNa0xeG7NA4=
+github.com/hashicorp/cronexpr v1.1.3/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
@@ -184,12 +185,12 @@ github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKe
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977VrmffaCX/OBm17dEVJUcWn5dW+eqs3aIJ/A=
-github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
+github.com/hashicorp/nomad/api v0.0.0-20250930071859-eaa0fe0e27af h1:ScAYf8O+9xTqTJPZH8MIlUfO+ak8cb31rW1aYJgS+jE=
+github.com/hashicorp/nomad/api v0.0.0-20250930071859-eaa0fe0e27af/go.mod h1:sldFTIgs+FsUeKU3LwVjviAIuksxD8TzDOn02MYwslE=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.21.1 h1:IH3liW8/cCRjfJ4cyqYvw3s1ek+KWP8dl1roa0lD8JM=
-github.com/hetznercloud/hcloud-go/v2 v2.21.1/go.mod h1:XOaYycZJ3XKMVWzmqQ24/+1V7ormJHmPdck/kxrNnQA=
+github.com/hetznercloud/hcloud-go/v2 v2.29.0 h1:LzNFw5XLBfftyu3WM1sdSLjOZBlWORtz2hgGydHaYV8=
+github.com/hetznercloud/hcloud-go/v2 v2.29.0/go.mod h1:XBU4+EDH2KVqu2KU7Ws0+ciZcX4ygukQl/J0L5GS8P8=
github.com/influxdata/influxdb-client-go/v2 v2.14.0 h1:AjbBfJuq+QoaXNcrova8smSjwJdUHnwvfjMF71M1iI4=
github.com/influxdata/influxdb-client-go/v2 v2.14.0/go.mod h1:Ahpm3QXKMJslpXl3IftVLVezreAUtBOTZssDrjZEFHI=
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU=
@@ -207,14 +208,14 @@ github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRt
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
-github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
+github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo=
github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v1.0.0 h1:mHKLJTE7iXEys6deO5p6olAiZdG5zwp8Aebir+/EaRE=
github.com/knadh/koanf/providers/confmap v1.0.0/go.mod h1:txHYHiI2hAtF0/0sCmcuol4IDcuQbKTybiB1nOcUo1A=
-github.com/knadh/koanf/v2 v2.2.1 h1:jaleChtw85y3UdBnI0wCqcg1sj1gPoz6D3caGNHtrNE=
-github.com/knadh/koanf/v2 v2.2.1/go.mod h1:PSFru3ufQgTsI7IF+95rf9s8XA1+aHxKuO/W+dPoHEY=
+github.com/knadh/koanf/v2 v2.3.0 h1:Qg076dDRFHvqnKG97ZEsi9TAg2/nFTa9hCdcSa1lvlM=
+github.com/knadh/koanf/v2 v2.3.0/go.mod h1:gRb40VRAbd4iJMYYD5IxZ6hfuopFcXBpc9bbQpZwo28=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -223,16 +224,16 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/linode/linodego v1.52.2 h1:N9ozU27To1LMSrDd8WvJZ5STSz1eGYdyLnxhAR/dIZg=
-github.com/linode/linodego v1.52.2/go.mod h1:bI949fZaVchjWyKIA08hNyvAcV6BAS+PM2op3p7PAWA=
+github.com/linode/linodego v1.60.0 h1:SgsebJFRCi+lSmYy+C40wmKZeJllGGm+W12Qw4+yVdI=
+github.com/linode/linodego v1.60.0/go.mod h1:1+Bt0oTz5rBnDOJbGhccxn7LYVytXTIIfAy7QYmijDs=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
-github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
+github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA=
+github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
@@ -258,14 +259,12 @@ github.com/oapi-codegen/runtime v1.0.0/go.mod h1:LmCUMQuPB4M/nLXilQXhHw+BLZdDb18
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0 h1:2pzb6bC/AAfciC9DN+8d7Y8Rsk8ZPCfp/ACTfZu87FQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0/go.mod h1:tIE4dzdxuM7HnFeYA6sj5zfLuUA/JxzQ+UDl1YrHvQw=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.129.0 h1:ydkfqpZ5BWZfEJEs7OUhTHW59og5aZspbUYxoGcAEok=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.129.0/go.mod h1:oA+49dkzmhUx0YFC9JXGuPPSBL0TOTp6jkv7qSr2n0Q=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0 h1:AOVxBvCZfTPj0GLGqBVHpAnlC9t9pl1JXUQXymHliiY=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0/go.mod h1:0CAJ32V/bCUBhNTEvnN9wlOG5IsyZ+Bmhe9e3Eri7CU=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0 h1:yDLSAoIi3jNt4R/5xN4IJ9YAg1rhOShgchlO/ESv8EY=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0/go.mod h1:IXQHbTPxqNcuu44FvkyvpYJ6Qy4wh4YsCVkKsp0Flzo=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.139.0 h1:D5aGQCErSCb4sKIHoZhgR4El6AzgviTRYlHUpbSFqDo=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.139.0/go.mod h1:ZjeRsA5oaVk89fg5D+iXStx2QncmhAvtGbdSumT07H4=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.139.0 h1:6/j0Ta8ZJnmAFVEoC3aZ1Hs19RB4fHzlN6kOZhsBJqM=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.139.0/go.mod h1:VfA8xHz4xg7Fyj5bBsCDbOO3iVYzDn9wP/QFsjcAE5c=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.139.0 h1:iRNX/ueuad1psOVgnNkxuQmXxvF3ze5ZZCP66xKFk/w=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.139.0/go.mod h1:bW09lo3WgHsPsZ1mgsJvby9wCefT5o13patM5phdfIU=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
@@ -283,31 +282,31 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
+github.com/prometheus/client_golang/exp v0.0.0-20251212205219-7ba246a648ca h1:BOxmsLoL2ymn8lXJtorca7N/m+2vDQUDoEtPjf0iAxA=
+github.com/prometheus/client_golang/exp v0.0.0-20251212205219-7ba246a648ca/go.mod h1:gndBHh3ZdjBozGcGrjUYjN3UJLRS3l2drALtu4lUt+k=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
-github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
-github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
-github.com/prometheus/otlptranslator v0.0.2 h1:+1CdeLVrRQ6Psmhnobldo0kTp96Rj80DRXRd5OSnMEQ=
-github.com/prometheus/otlptranslator v0.0.2/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI=
+github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
+github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
+github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos=
+github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
-github.com/prometheus/prometheus v0.305.1-0.20250905124657-5c2e43f09c03 h1:NIVtqQm7NTsUcxfjdHuVE7pw3GVjEgwL6a9ADLSj+Wg=
-github.com/prometheus/prometheus v0.305.1-0.20250905124657-5c2e43f09c03/go.mod h1:9D9CfSEbKg087QXXz2ev+G1SoB6MqQE0ll4jCmrgCe0=
-github.com/prometheus/sigv4 v0.2.0 h1:qDFKnHYFswJxdzGeRP63c4HlH3Vbn1Yf/Ao2zabtVXk=
-github.com/prometheus/sigv4 v0.2.0/go.mod h1:D04rqmAaPPEUkjRQxGqjoxdyJuyCh6E0M18fZr0zBiE=
+github.com/prometheus/prometheus v0.308.1 h1:ApMNI/3/es3Ze90Z7CMb+wwU2BsSYur0m5VKeqHj7h4=
+github.com/prometheus/prometheus v0.308.1/go.mod h1:aHjYCDz9zKRyoUXvMWvu13K9XHOkBB12XrEqibs3e0A=
+github.com/prometheus/sigv4 v0.3.0 h1:QIG7nTbu0JTnNidGI1Uwl5AGVIChWUACxn2B/BQ1kms=
+github.com/prometheus/sigv4 v0.3.0/go.mod h1:fKtFYDus2M43CWKMNtGvFNHGXnAJJEGZbiYCmVp/F8I=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
-github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
-github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33 h1:KhF0WejiUTDbL5X55nXowP7zNopwpowa6qaMAWyIE+0=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33/go.mod h1:792k1RTU+5JeMXm35/e2Wgp71qPH/DmDoZrRc+EFZDk=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.35 h1:8xfn1RzeI9yoCUuEwDy08F+No6PcKZGEDOQ6hrRyLts=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.35/go.mod h1:47B1d/YXmSAxlJxUJxClzHR6b3T4M1WyCvwENPQNBWc=
+github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
-github.com/stackitcloud/stackit-sdk-go/core v0.17.2 h1:jPyn+i8rkp2hM80+hOg0B/1EVRbMt778Tr5RWyK1m2E=
-github.com/stackitcloud/stackit-sdk-go/core v0.17.2/go.mod h1:8KIw3czdNJ9sdil9QQimxjR6vHjeINFrRv0iZ67wfn0=
+github.com/stackitcloud/stackit-sdk-go/core v0.17.3 h1:GsZGmRRc/3GJLmCUnsZswirr5wfLRrwavbnL/renOqg=
+github.com/stackitcloud/stackit-sdk-go/core v0.17.3/go.mod h1:HBCXJGPgdRulplDzhrmwC+Dak9B/x0nzNtmOpu+1Ahg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -332,14 +331,14 @@ go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJyS
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/collector/component v1.45.0 h1:gGFfVdbQ+1YuyUkJjWo85I7euu3H/CiupuzCHv8OgHA=
go.opentelemetry.io/collector/component v1.45.0/go.mod h1:xoNFnRKE8Iv6gmlqAKgjayWraRnDcYLLgrPt9VgyO2g=
-go.opentelemetry.io/collector/component/componentstatus v0.129.0 h1:ejpBAt7hXAAZiQKcSxLvcy8sj8SjY4HOLdoXIlW6ybw=
-go.opentelemetry.io/collector/component/componentstatus v0.129.0/go.mod h1:/dLPIxn/tRMWmGi+DPtuFoBsffOLqPpSZ2IpEQzYtwI=
-go.opentelemetry.io/collector/component/componenttest v0.129.0 h1:gpKkZGCRPu3Yn0U2co09bMvhs17yLFb59oV8Gl9mmRI=
-go.opentelemetry.io/collector/component/componenttest v0.129.0/go.mod h1:JR9k34Qvd/pap6sYkPr5QqdHpTn66A5lYeYwhenKBAM=
-go.opentelemetry.io/collector/confmap v1.35.0 h1:U4JDATAl4PrKWe9bGHbZkoQXmJXefWgR2DIkFvw8ULQ=
-go.opentelemetry.io/collector/confmap v1.35.0/go.mod h1:qX37ExVBa+WU4jWWJCZc7IJ+uBjb58/9oL+/ctF1Bt0=
-go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 h1:Q/+pJKrkCaMPSoSAH2BpC3UZCh+5hTiFkh/bdy5yChk=
-go.opentelemetry.io/collector/confmap/xconfmap v0.129.0/go.mod h1:RNMnlay2meJDXcKjxiLbST9/YAhKLJlj0kZCrJrLGgw=
+go.opentelemetry.io/collector/component/componentstatus v0.139.0 h1:bQmkv1t7xW7uIDireE0a2Am4IMOprXm6zQr/qDtGCIA=
+go.opentelemetry.io/collector/component/componentstatus v0.139.0/go.mod h1:ibZOohpG0u081/NaT/jMCTsKwRbbwwxWrjZml+owpyM=
+go.opentelemetry.io/collector/component/componenttest v0.139.0 h1:x9Yu2eYhrHxdZ7sFXWtAWVjQ3UIraje557LgNurDC2I=
+go.opentelemetry.io/collector/component/componenttest v0.139.0/go.mod h1:S9cj+qkf9FgHMzjvlYsLwQKd9BiS7B7oLZvxvlENM/c=
+go.opentelemetry.io/collector/confmap v1.45.0 h1:7M7TTlpzX4r+mIzP/ARdxZBAvI4N+1V96phDane+akU=
+go.opentelemetry.io/collector/confmap v1.45.0/go.mod h1:AE1dnkjv0T9gptsh5+mTX0XFGdXx0n7JS4b7CcPfJ6Q=
+go.opentelemetry.io/collector/confmap/xconfmap v0.139.0 h1:uQGpFuWnTCXqdMbI3gDSvkwU66/kF/aoC0kVMrit1EM=
+go.opentelemetry.io/collector/confmap/xconfmap v0.139.0/go.mod h1:d0ucaeNq2rojFRSQsCHF/gkT3cgBx5H2bVkPQMj57ck=
go.opentelemetry.io/collector/consumer v1.45.0 h1:TtqXxgW+1GSCwdoohq0fzqnfqrZBKbfo++1XRj8mrEA=
go.opentelemetry.io/collector/consumer v1.45.0/go.mod h1:pJzqTWBubwLt8mVou+G4/Hs23b3m425rVmld3LqOYpY=
go.opentelemetry.io/collector/consumer/consumertest v0.139.0 h1:06mu43mMO7l49ASJ/GEbKgTWcV3py5zE/pKhNBZ1b3k=
@@ -352,16 +351,16 @@ go.opentelemetry.io/collector/pdata v1.45.0 h1:q4XaISpeX640BcwXwb2mKOVw/gb67r22H
go.opentelemetry.io/collector/pdata v1.45.0/go.mod h1:5q2f001YhwMQO8QvpFhCOa4Cq/vtwX9W4HRMsXkU/nE=
go.opentelemetry.io/collector/pdata/pprofile v0.139.0 h1:UA5TgFzYmRuJN3Wz0GR1efLUfjbs5rH0HTaxfASpTR8=
go.opentelemetry.io/collector/pdata/pprofile v0.139.0/go.mod h1:sI5qHt+zzE2fhOWFdJIaiDBR0yGGjD4A4ZvDFU0tiHk=
-go.opentelemetry.io/collector/pdata/testdata v0.129.0 h1:n1QLnLOtrcAR57oMSVzmtPsQEpCc/nE5Avk1xfuAkjY=
-go.opentelemetry.io/collector/pdata/testdata v0.129.0/go.mod h1:RfY5IKpmcvkS2IGVjl9jG9fcT7xpQEBWpg9sQOn/7mY=
+go.opentelemetry.io/collector/pdata/testdata v0.139.0 h1:n7O5bmLLhc3T6PePV4447fFcI/6QWcMhBsLtfCaD0do=
+go.opentelemetry.io/collector/pdata/testdata v0.139.0/go.mod h1:fxZ2VrhYLYBLHYBHC1XQRKZ6IJXwy0I2rPaaRlebYaY=
go.opentelemetry.io/collector/pipeline v1.45.0 h1:sn9JJAEBe3XABTkWechMk0eH60QMBjjNe5V+ccBl+Uo=
go.opentelemetry.io/collector/pipeline v1.45.0/go.mod h1:xUrAqiebzYbrgxyoXSkk6/Y3oi5Sy3im2iCA51LwUAI=
go.opentelemetry.io/collector/processor v1.45.0 h1:GH5km9BkDQOoz7MR0jzTnzB1Kb5vtKzPwa/wDmRg2dQ=
go.opentelemetry.io/collector/processor v1.45.0/go.mod h1:wdlaTTC3wqlZIJP9R9/SLc2q7h+MFGARsxfjgPtwbes=
-go.opentelemetry.io/collector/processor/processortest v0.129.0 h1:r5iJHdS7Ffdb2zmMVYx4ahe92PLrce5cas/AJEXivkY=
-go.opentelemetry.io/collector/processor/processortest v0.129.0/go.mod h1:gdf8GzyzjGoDTA11+CPwC4jfXphtC+B7MWbWn+LIWXc=
-go.opentelemetry.io/collector/processor/xprocessor v0.129.0 h1:V3Zgd+YIeu3Ij3DPlGtzdcTwpqOQIqQVcL5jdHHS7sc=
-go.opentelemetry.io/collector/processor/xprocessor v0.129.0/go.mod h1:78T+AP5NO137W/E+SibQhaqOyS67fR+IN697b4JFh00=
+go.opentelemetry.io/collector/processor/processortest v0.139.0 h1:30akUdruFNG7EDpayuBhXoX2lV+hcfxW9Gl3Z6MYHb0=
+go.opentelemetry.io/collector/processor/processortest v0.139.0/go.mod h1:RTll3UKHrqj/VS6RGjTHtuGIJzyLEwFhbw8KuCL3pjo=
+go.opentelemetry.io/collector/processor/xprocessor v0.139.0 h1:O9x9RF/OG8gZ+HrOcB4f6F1fjniby484xf2D8GBxgqU=
+go.opentelemetry.io/collector/processor/xprocessor v0.139.0/go.mod h1:hqGhEZ1/PftD/QHaYna0o1xAqZUsb7GhqpOiaTTDJnQ=
go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCuT10bIXb/Cc+k4=
go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0 h1:2pn7OzMewmYRiNtv1doZnLo3gONcnMHlFnmOR8Vgt+8=
@@ -392,65 +391,67 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
-go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
-go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
+go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
+go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
-golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
-golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA=
-golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
+golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
+golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
+golang.org/x/exp v0.0.0-20250808145144-a408d31f581a h1:Y+7uR/b1Mw2iSXZ3G//1haIiSElDQZ8KWh0h+sZPG90=
+golang.org/x/exp v0.0.0-20250808145144-a408d31f581a/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
-golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
+golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
+golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
-golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
-golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
-golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
+golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
+golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
+golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
+golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
-golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
+golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
-golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
-golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
-golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
+golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
+golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
+golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
-golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
-golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
-golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
+golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
+golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
+golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
+golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
-golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
+golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
+golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
-google.golang.org/api v0.239.0 h1:2hZKUnFZEy81eugPs4e2XzIJ5SOwQg0G82bpXD65Puo=
-google.golang.org/api v0.239.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50=
-google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78=
-google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b h1:ULiyYQ0FdsJhwwZUwbaXpZF5yUE3h+RA+gxvBu37ucc=
-google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:oDOGiMSXHL4sDTJvFvIB9nRQCGdLP1o/iVaqQK8zB+M=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b h1:zPKJod4w6F1+nRGDI9ubnXYhU9NSWoFAijkHkUXeTK8=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
+google.golang.org/api v0.252.0 h1:xfKJeAJaMwb8OC9fesr369rjciQ704AjU/psjkKURSI=
+google.golang.org/api v0.252.0/go.mod h1:dnHOv81x5RAmumZ7BWLShB/u7JZNeyalImxHmtTHxqw=
+google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4=
+google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU=
+google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 h1:CirRxTOwnRWVLKzDNrs0CXAaVozJoR4G9xvdRecrdpk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ=
google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A=
google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
@@ -469,23 +470,23 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.33.5 h1:YR+uhYj05jdRpcksv8kjSliW+v9hwXxn6Cv10aR8Juw=
-k8s.io/api v0.33.5/go.mod h1:2gzShdwXKT5yPGiqrTrn/U/nLZ7ZyT4WuAj3XGDVgVs=
-k8s.io/apimachinery v0.33.5 h1:NiT64hln4TQXeYR18/ES39OrNsjGz8NguxsBgp+6QIo=
-k8s.io/apimachinery v0.33.5/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
-k8s.io/client-go v0.33.5 h1:I8BdmQGxInpkMEnJvV6iG7dqzP3JRlpZZlib3OMFc3o=
-k8s.io/client-go v0.33.5/go.mod h1:W8PQP4MxbM4ypgagVE65mUUqK1/ByQkSALF9tzuQ6u0=
+k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM=
+k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk=
+k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
+k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
+k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
+k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
-k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
-sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
-sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
-sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
-sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go
index d04355a712..2e78354bd2 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go
index 535027e076..8a96413443 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/graphite/escape.go b/documentation/examples/remote_storage/remote_storage_adapter/graphite/escape.go
index 1386f46761..e7357c001a 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/graphite/escape.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/graphite/escape.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -82,7 +82,7 @@ const (
func escape(tv model.LabelValue) string {
length := len(tv)
result := bytes.NewBuffer(make([]byte, 0, length))
- for i := 0; i < length; i++ {
+ for i := range length {
b := tv[i]
switch {
// . is reserved by graphite, % is used to escape other bytes.
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go
index 005f8d534d..ddf78283e7 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -96,7 +96,7 @@ func (c *Client) Write(samples model.Samples) error {
p := influx.NewPoint(
string(s.Metric[model.MetricNameLabel]),
tagsFromMetric(s.Metric),
- map[string]interface{}{"value": v},
+ map[string]any{"value": v},
s.Timestamp.Time(),
)
points = append(points, p)
@@ -158,16 +158,17 @@ func (c *Client) buildCommand(q *prompb.Query) (string, error) {
// If we don't find a metric name matcher, query all metrics
// (InfluxDB measurements) by default.
- measurement := `r._measurement`
+ var measurement strings.Builder
+ measurement.WriteString(`r._measurement`)
matchers := make([]string, 0, len(q.Matchers))
var joinedMatchers string
for _, m := range q.Matchers {
if m.Name == model.MetricNameLabel {
switch m.Type {
case prompb.LabelMatcher_EQ:
- measurement += fmt.Sprintf(" == \"%s\"", m.Value)
+ measurement.WriteString(fmt.Sprintf(" == \"%s\"", m.Value))
case prompb.LabelMatcher_RE:
- measurement += fmt.Sprintf(" =~ /%s/", escapeSlashes(m.Value))
+ measurement.WriteString(fmt.Sprintf(" =~ /%s/", escapeSlashes(m.Value)))
default:
// TODO: Figure out how to support these efficiently.
return "", errors.New("non-equal or regex-non-equal matchers are not supported on the metric name yet")
@@ -195,7 +196,7 @@ func (c *Client) buildCommand(q *prompb.Query) (string, error) {
// _measurement must be retained, otherwise "invalid metric name" shall be thrown
command := fmt.Sprintf(
"from(bucket: \"%s\") |> range(%s) |> filter(fn: (r) => %s%s)",
- c.bucket, rangeInNs, measurement, joinedMatchers,
+ c.bucket, rangeInNs, measurement.String(), joinedMatchers,
)
return command, nil
@@ -237,7 +238,7 @@ func mergeResult(labelsToSeries map[string]*prompb.TimeSeries, record *query.Flu
return nil
}
-func filterOutBuiltInLabels(labels map[string]interface{}) {
+func filterOutBuiltInLabels(labels map[string]any) {
delete(labels, "table")
delete(labels, "_start")
delete(labels, "_stop")
@@ -248,7 +249,7 @@ func filterOutBuiltInLabels(labels map[string]interface{}) {
delete(labels, "_measurement")
}
-func concatLabels(labels map[string]interface{}) string {
+func concatLabels(labels map[string]any) string {
// 0xff cannot occur in valid UTF-8 sequences, so use it
// as a separator here.
separator := "\xff"
@@ -259,7 +260,7 @@ func concatLabels(labels map[string]interface{}) string {
return strings.Join(pairs, separator)
}
-func tagsToLabelPairs(name string, tags map[string]interface{}) []prompb.Label {
+func tagsToLabelPairs(name string, tags map[string]any) []prompb.Label {
pairs := make([]prompb.Label, 0, len(tags))
for k, v := range tags {
if v == nil {
@@ -283,7 +284,7 @@ func tagsToLabelPairs(name string, tags map[string]interface{}) []prompb.Label {
return pairs
}
-func valuesToSamples(timestamp time.Time, value interface{}) (prompb.Sample, error) {
+func valuesToSamples(timestamp time.Time, value any) (prompb.Sample, error) {
var valueFloat64 float64
var valueInt64 int64
var ok bool
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go
index f78d4db794..faf48045cb 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/main.go b/documentation/examples/remote_storage/remote_storage_adapter/main.go
index ffcbb5385a..ac891cca50 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/main.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/main.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go
index ffc6c58b88..e2f64be5d8 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go
index bc9703c88c..fa76cc334d 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go
index 6a691778af..f822e37808 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -66,7 +66,7 @@ func (tv TagValue) MarshalJSON() ([]byte, error) {
// Need at least two more bytes than in tv.
result := bytes.NewBuffer(make([]byte, 0, length+2))
result.WriteByte('"')
- for i := 0; i < length; i++ {
+ for i := range length {
b := tv[i]
switch {
case (b >= '-' && b <= '9') || // '-', '.', '/', 0-9
diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue_test.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue_test.go
index 5adedb3248..071fd5a85a 100644
--- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue_test.go
+++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/go.mod b/go.mod
index 55b8d2ce1f..61c555abc2 100644
--- a/go.mod
+++ b/go.mod
@@ -1,110 +1,121 @@
module github.com/prometheus/prometheus
-go 1.24.0
+go 1.24.9
require (
- github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1
- github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0
github.com/Code-Hex/go-generics-cache v1.5.1
- github.com/KimMachineGun/automemlimit v0.7.4
+ github.com/KimMachineGun/automemlimit v0.7.5
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b
- github.com/aws/aws-sdk-go-v2 v1.39.6
- github.com/aws/aws-sdk-go-v2/config v1.31.17
- github.com/aws/aws-sdk-go-v2/credentials v1.18.21
- github.com/aws/aws-sdk-go-v2/service/ec2 v1.262.0
- github.com/aws/aws-sdk-go-v2/service/ecs v1.67.2
- github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.4
- github.com/aws/aws-sdk-go-v2/service/sts v1.39.1
- github.com/aws/smithy-go v1.23.2
+ github.com/aws/aws-sdk-go-v2 v1.41.0
+ github.com/aws/aws-sdk-go-v2/config v1.32.6
+ github.com/aws/aws-sdk-go-v2/credentials v1.19.6
+ github.com/aws/aws-sdk-go-v2/service/ec2 v1.279.0
+ github.com/aws/aws-sdk-go-v2/service/ecs v1.70.0
+ github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.10
+ github.com/aws/aws-sdk-go-v2/service/sts v1.41.5
+ github.com/aws/smithy-go v1.24.0
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0
- github.com/digitalocean/godo v1.168.0
+ github.com/digitalocean/godo v1.171.0
github.com/docker/docker v28.5.2+incompatible
github.com/edsrzf/mmap-go v1.2.0
- github.com/envoyproxy/go-control-plane/envoy v1.35.0
- github.com/envoyproxy/protoc-gen-validate v1.2.1
+ github.com/envoyproxy/go-control-plane/envoy v1.36.0
+ github.com/envoyproxy/protoc-gen-validate v1.3.0
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
github.com/fsnotify/fsnotify v1.9.0
- github.com/go-openapi/strfmt v0.24.0
+ github.com/go-openapi/strfmt v0.25.0
github.com/go-zookeeper/zk v1.0.4
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v1.0.0
github.com/google/go-cmp v0.7.0
- github.com/google/pprof v0.0.0-20250923004556-9e5a51aed1e8
+ github.com/google/pprof v0.0.0-20251213031049-b05bdaca462f
github.com/google/uuid v1.6.0
- github.com/gophercloud/gophercloud/v2 v2.8.0
+ github.com/gophercloud/gophercloud/v2 v2.9.0
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853
- github.com/hashicorp/consul/api v1.32.0
- github.com/hashicorp/nomad/api v0.0.0-20250930071859-eaa0fe0e27af
- github.com/hetznercloud/hcloud-go/v2 v2.29.0
- github.com/ionos-cloud/sdk-go/v6 v6.3.4
+ github.com/hashicorp/consul/api v1.32.1
+ github.com/hashicorp/nomad/api v0.0.0-20260106084653-e8f2200c7039
+ github.com/hetznercloud/hcloud-go/v2 v2.33.0
+ github.com/ionos-cloud/sdk-go/v6 v6.3.6
github.com/json-iterator/go v1.1.12
- github.com/klauspost/compress v1.18.1
+ github.com/klauspost/compress v1.18.2
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
- github.com/linode/linodego v1.60.0
- github.com/miekg/dns v1.1.68
+ github.com/linode/linodego v1.63.0
+ github.com/miekg/dns v1.1.69
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
github.com/oklog/run v1.2.0
github.com/oklog/ulid/v2 v2.1.1
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.139.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.142.0
github.com/ovh/go-ovh v1.9.0
- github.com/prometheus/alertmanager v0.28.1
+ github.com/prometheus/alertmanager v0.30.0
github.com/prometheus/client_golang v1.23.2
- github.com/prometheus/client_golang/exp v0.0.0-20250914183048-a974e0d45e0a
+ github.com/prometheus/client_golang/exp v0.0.0-20260101091701-2cd067eb23c9
github.com/prometheus/client_model v0.6.2
- github.com/prometheus/common v0.67.2
+ github.com/prometheus/common v0.67.4
github.com/prometheus/common/assets v0.2.0
github.com/prometheus/exporter-toolkit v0.15.0
github.com/prometheus/sigv4 v0.3.0
- github.com/scaleway/scaleway-sdk-go v1.0.0-beta.35
+ github.com/scaleway/scaleway-sdk-go v1.0.0-beta.36
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
- github.com/stackitcloud/stackit-sdk-go/core v0.17.3
+ github.com/stackitcloud/stackit-sdk-go/core v0.20.1
github.com/stretchr/testify v1.11.1
github.com/vultr/govultr/v2 v2.17.2
- go.opentelemetry.io/collector/component v1.45.0
- go.opentelemetry.io/collector/consumer v1.45.0
- go.opentelemetry.io/collector/pdata v1.45.0
- go.opentelemetry.io/collector/processor v1.45.0
+ go.opentelemetry.io/collector/component v1.48.0
+ go.opentelemetry.io/collector/consumer v1.48.0
+ go.opentelemetry.io/collector/pdata v1.48.0
+ go.opentelemetry.io/collector/processor v1.48.0
go.opentelemetry.io/collector/semconv v0.128.0
- go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0
- go.opentelemetry.io/otel v1.38.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0
- go.opentelemetry.io/otel/metric v1.38.0
- go.opentelemetry.io/otel/sdk v1.38.0
- go.opentelemetry.io/otel/trace v1.38.0
+ go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.64.0
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0
+ go.opentelemetry.io/otel v1.39.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0
+ go.opentelemetry.io/otel/metric v1.39.0
+ go.opentelemetry.io/otel/sdk v1.39.0
+ go.opentelemetry.io/otel/trace v1.39.0
go.uber.org/atomic v1.11.0
go.uber.org/automaxprocs v1.6.0
go.uber.org/goleak v1.3.0
go.uber.org/multierr v1.11.0
go.yaml.in/yaml/v2 v2.4.3
- golang.org/x/oauth2 v0.32.0
- golang.org/x/sync v0.17.0
- golang.org/x/sys v0.37.0
- golang.org/x/text v0.30.0
- google.golang.org/api v0.252.0
- google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4
- google.golang.org/grpc v1.76.0
- google.golang.org/protobuf v1.36.10
+ golang.org/x/oauth2 v0.34.0
+ golang.org/x/sync v0.19.0
+ golang.org/x/sys v0.39.0
+ golang.org/x/text v0.32.0
+ google.golang.org/api v0.258.0
+ google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b
+ google.golang.org/grpc v1.78.0
+ google.golang.org/protobuf v1.36.11
gopkg.in/yaml.v3 v3.0.1
- k8s.io/api v0.34.1
- k8s.io/apimachinery v0.34.1
- k8s.io/client-go v0.34.1
+ k8s.io/api v0.34.3
+ k8s.io/apimachinery v0.34.3
+ k8s.io/client-go v0.34.3
k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.130.1
)
require (
+ github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 // indirect
+ github.com/go-openapi/swag/cmdutils v0.25.4 // indirect
+ github.com/go-openapi/swag/conv v0.25.4 // indirect
+ github.com/go-openapi/swag/fileutils v0.25.4 // indirect
+ github.com/go-openapi/swag/jsonname v0.25.4 // indirect
+ github.com/go-openapi/swag/jsonutils v0.25.4 // indirect
+ github.com/go-openapi/swag/loading v0.25.4 // indirect
+ github.com/go-openapi/swag/mangling v0.25.4 // indirect
+ github.com/go-openapi/swag/netutils v0.25.4 // indirect
+ github.com/go-openapi/swag/stringutils v0.25.4 // indirect
+ github.com/go-openapi/swag/typeutils v0.25.4 // indirect
+ github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
- golang.org/x/tools/godoc v0.1.0-deprecated // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
)
@@ -113,21 +124,20 @@ require (
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.9.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
- github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
+ github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
- github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
- github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
+ github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
@@ -142,25 +152,25 @@ require (
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-openapi/analysis v0.23.0 // indirect
- github.com/go-openapi/errors v0.22.3 // indirect
- github.com/go-openapi/jsonpointer v0.21.0 // indirect
- github.com/go-openapi/jsonreference v0.21.0 // indirect
- github.com/go-openapi/loads v0.22.0 // indirect
- github.com/go-openapi/spec v0.21.0 // indirect
- github.com/go-openapi/swag v0.23.0 // indirect
- github.com/go-openapi/validate v0.24.0 // indirect
- github.com/go-resty/resty/v2 v2.16.5 // indirect
+ github.com/go-openapi/analysis v0.24.1 // indirect
+ github.com/go-openapi/errors v0.22.4 // indirect
+ github.com/go-openapi/jsonpointer v0.22.1 // indirect
+ github.com/go-openapi/jsonreference v0.21.3 // indirect
+ github.com/go-openapi/loads v0.23.2 // indirect
+ github.com/go-openapi/spec v0.22.1 // indirect
+ github.com/go-openapi/swag v0.25.4 // indirect
+ github.com/go-openapi/validate v0.25.1 // indirect
+ github.com/go-resty/resty/v2 v2.17.1 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/s2a-go v0.1.9 // indirect
- github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect
github.com/googleapis/gax-go/v2 v2.15.0 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect
github.com/hashicorp/cronexpr v1.1.3 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -169,17 +179,15 @@ require (
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
- github.com/hashicorp/go-version v1.7.0 // indirect
+ github.com/hashicorp/go-version v1.8.0 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/hashicorp/serf v0.10.1 // indirect
- github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/knadh/koanf/maps v0.1.2 // indirect
github.com/knadh/koanf/providers/confmap v1.0.0 // indirect
github.com/knadh/koanf/v2 v2.3.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
- github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mdlayher/socket v0.4.1 // indirect
@@ -195,8 +203,8 @@ require (
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/oklog/ulid v1.3.1 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.139.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.139.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.142.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.142.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
@@ -211,22 +219,22 @@ require (
github.com/stretchr/objx v0.5.2 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
- go.mongodb.org/mongo-driver v1.17.4 // indirect
- go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/collector/confmap v1.45.0 // indirect
- go.opentelemetry.io/collector/confmap/xconfmap v0.139.0 // indirect
- go.opentelemetry.io/collector/featuregate v1.45.0 // indirect
- go.opentelemetry.io/collector/pipeline v1.45.0 // indirect
- go.opentelemetry.io/proto/otlp v1.7.1 // indirect
- go.uber.org/zap v1.27.0 // indirect
- golang.org/x/crypto v0.43.0 // indirect
+ go.mongodb.org/mongo-driver v1.17.6 // indirect
+ go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+ go.opentelemetry.io/collector/confmap v1.48.0 // indirect
+ go.opentelemetry.io/collector/confmap/xconfmap v0.142.0 // indirect
+ go.opentelemetry.io/collector/featuregate v1.48.0 // indirect
+ go.opentelemetry.io/collector/pipeline v1.48.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.9.0 // indirect
+ go.uber.org/zap v1.27.1 // indirect
+ golang.org/x/crypto v0.46.0 // indirect
golang.org/x/exp v0.0.0-20250808145144-a408d31f581a // indirect
- golang.org/x/mod v0.28.0 // indirect
- golang.org/x/net v0.46.0 // indirect
- golang.org/x/term v0.36.0 // indirect
- golang.org/x/time v0.13.0 // indirect
- golang.org/x/tools v0.37.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 // indirect
+ golang.org/x/mod v0.30.0 // indirect
+ golang.org/x/net v0.48.0 // indirect
+ golang.org/x/term v0.38.0 // indirect
+ golang.org/x/time v0.14.0 // indirect
+ golang.org/x/tools v0.39.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
diff --git a/go.sum b/go.sum
index 2c0042edbb..b3333208dd 100644
--- a/go.sum
+++ b/go.sum
@@ -4,10 +4,10 @@ cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIi
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 h1:wL5IEG5zb7BVv1Kv0Xm92orq+5hB5Nipn3B5tn4Rqfk=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
@@ -24,13 +24,13 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOEl
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/KimMachineGun/automemlimit v0.7.4 h1:UY7QYOIfrr3wjjOAqahFmC3IaQCLWvur9nmfIn6LnWk=
-github.com/KimMachineGun/automemlimit v0.7.4/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
+github.com/KimMachineGun/automemlimit v0.7.5 h1:RkbaC0MwhjL1ZuBKunGDjE/ggwAX43DwZrJqVwyveTk=
+github.com/KimMachineGun/automemlimit v0.7.5/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=
@@ -47,40 +47,40 @@ github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJ
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
-github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/aws/aws-sdk-go-v2 v1.39.6 h1:2JrPCVgWJm7bm83BDwY5z8ietmeJUbh3O2ACnn+Xsqk=
-github.com/aws/aws-sdk-go-v2 v1.39.6/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
-github.com/aws/aws-sdk-go-v2/config v1.31.17 h1:QFl8lL6RgakNK86vusim14P2k8BFSxjvUkcWLDjgz9Y=
-github.com/aws/aws-sdk-go-v2/config v1.31.17/go.mod h1:V8P7ILjp/Uef/aX8TjGk6OHZN6IKPM5YW6S78QnRD5c=
-github.com/aws/aws-sdk-go-v2/credentials v1.18.21 h1:56HGpsgnmD+2/KpG0ikvvR8+3v3COCwaF4r+oWwOeNA=
-github.com/aws/aws-sdk-go-v2/credentials v1.18.21/go.mod h1:3YELwedmQbw7cXNaII2Wywd+YY58AmLPwX4LzARgmmA=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 h1:T1brd5dR3/fzNFAQch/iBKeX07/ffu/cLu+q+RuzEWk=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13/go.mod h1:Peg/GBAQ6JDt+RoBf4meB1wylmAipb7Kg2ZFakZTlwk=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 h1:a+8/MLcWlIxo1lF9xaGt3J/u3yOZx+CdSveSNwjhD40=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13/go.mod h1:oGnKwIYZ4XttyU2JWxFrwvhF6YKiK/9/wmE3v3Iu9K8=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 h1:HBSI2kDkMdWz4ZM7FjwE7e/pWDEZ+nR95x8Ztet1ooY=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13/go.mod h1:YE94ZoDArI7awZqJzBAZ3PDD2zSfuP7w6P2knOzIn8M=
+github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4=
+github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
+github.com/aws/aws-sdk-go-v2/config v1.32.6 h1:hFLBGUKjmLAekvi1evLi5hVvFQtSo3GYwi+Bx4lpJf8=
+github.com/aws/aws-sdk-go-v2/config v1.32.6/go.mod h1:lcUL/gcd8WyjCrMnxez5OXkO3/rwcNmvfno62tnXNcI=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.6 h1:F9vWao2TwjV2MyiyVS+duza0NIRtAslgLUM0vTA1ZaE=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.6/go.mod h1:SgHzKjEVsdQr6Opor0ihgWtkWdfRAIwxYzSJ8O85VHY=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 h1:80+uETIWS1BqjnN9uJ0dBUaETh+P1XwFy5vwHwK5r9k=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16/go.mod h1:wOOsYuxYuB/7FlnVtzeBYRcjSRtQpAW0hCP7tIULMwo=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
-github.com/aws/aws-sdk-go-v2/service/ec2 v1.262.0 h1:5qBb1XV/D18qtCHd3bmmxoVglI+fZ4QWuS/EB8kIXYQ=
-github.com/aws/aws-sdk-go-v2/service/ec2 v1.262.0/go.mod h1:NDdDLLW5PtLLXN661gKcvJvqAH5OBXsfhMlmKVu1/pY=
-github.com/aws/aws-sdk-go-v2/service/ecs v1.67.2 h1:oeICOX/+D0XXV1aMYJPXVe3CO37zYr7fB6HFgxchleU=
-github.com/aws/aws-sdk-go-v2/service/ecs v1.67.2/go.mod h1:rrhqfkXfa2DSNq0RyFhnnFEAyI+yJB4+2QlZKeJvMjs=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 h1:kDqdFvMY4AtKoACfzIGD8A0+hbT41KTKF//gq7jITfM=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13/go.mod h1:lmKuogqSU3HzQCwZ9ZtcqOc5XGMqtDK7OIc2+DxiUEg=
-github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.4 h1:/1o2AYwHJojUDeMvQNyJiKZwcWCc3e4kQuTXqRLuThc=
-github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.4/go.mod h1:Nn2xx6HojGuNMtUFxxz/nyNLSS+tHMRsMhe3+W3wB5k=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 h1:0JPwLz1J+5lEOfy/g0SURC9cxhbQ1lIMHMa+AHZSzz0=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.1/go.mod h1:fKvyjJcz63iL/ftA6RaM8sRCtN4r4zl4tjL3qw5ec7k=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 h1:OWs0/j2UYR5LOGi88sD5/lhN6TDLG6SfA7CqsQO9zF0=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5/go.mod h1:klO+ejMvYsB4QATfEOIXk8WAEwN4N0aBfJpvC+5SZBo=
-github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 h1:mLlUgHn02ue8whiR4BmxxGJLR2gwU6s6ZzJ5wDamBUs=
-github.com/aws/aws-sdk-go-v2/service/sts v1.39.1/go.mod h1:E19xDjpzPZC7LS2knI9E6BaRFDK43Eul7vd6rSq2HWk=
-github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
-github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.279.0 h1:o7eJKe6VYAnqERPlLAvDW5VKXV6eTKv1oxTpMoDP378=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.279.0/go.mod h1:Wg68QRgy2gEGGdmTPU/UbVpdv8sM14bUZmF64KFwAsY=
+github.com/aws/aws-sdk-go-v2/service/ecs v1.70.0 h1:IZpZatHsscdOKjwmDXC6idsCXmm3F/obutAUNjnX+OM=
+github.com/aws/aws-sdk-go-v2/service/ecs v1.70.0/go.mod h1:LQMlcWBoiFVD3vUVEz42ST0yTiaDujv2dRE6sXt1yPE=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM=
+github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.10 h1:MQuZZ6Tq1qQabPlkVxrCMdyVl70Ogl4AERZKo+y9Wzo=
+github.com/aws/aws-sdk-go-v2/service/lightsail v1.50.10/go.mod h1:U5C3JME1ibKESmpzBAqlRpTYZfVbTqrb5ICJm+sVVd8=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 h1:HpI7aMmJ+mm1wkSHIA2t5EaFFv5EFYXePW30p1EIrbQ=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.4/go.mod h1:C5RdGMYGlfM0gYq/tifqgn4EbyX99V15P2V3R+VHbQU=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 h1:aM/Q24rIlS3bRAhTyFurowU8A0SMyGDtEOY/l/s/1Uw=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.8/go.mod h1:+fWt2UHSb4kS7Pu8y+BMBvJF0EWx+4H0hzNwtDNRTrg=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 h1:AHDr0DaHIAo8c9t1emrzAlVDFp+iMMKnPdYy6XO4MCE=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12/go.mod h1:GQ73XawFFiWxyWXMHWfhiomvP3tXtdNar/fi8z18sx0=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk=
+github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
+github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -95,8 +95,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
-github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls=
-github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0=
+github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
@@ -112,12 +112,10 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/digitalocean/godo v1.168.0 h1:mlORtUcPD91LQeJoznrH3XvfvgK3t8Wvrpph9giUT/Q=
-github.com/digitalocean/godo v1.168.0/go.mod h1:xQsWpVCCbkDrWisHA72hPzPlnC+4W5w/McZY5ij9uvU=
+github.com/digitalocean/godo v1.171.0 h1:QwpkwWKr3v7yxc8D4NQG973NoR9APCEWjYnLOQeXVpQ=
+github.com/digitalocean/godo v1.171.0/go.mod h1:xQsWpVCCbkDrWisHA72hPzPlnC+4W5w/McZY5ij9uvU=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
-github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -128,10 +126,10 @@ github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84=
github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo=
-github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs=
-github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
-github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
+github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g=
+github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98=
+github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4=
+github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -155,26 +153,54 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
-github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
-github.com/go-openapi/errors v0.22.3 h1:k6Hxa5Jg1TUyZnOwV2Lh81j8ayNw5VVYLvKrp4zFKFs=
-github.com/go-openapi/errors v0.22.3/go.mod h1:+WvbaBBULWCOna//9B9TbLNGSFOfF8lY9dw4hGiEiKQ=
-github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
-github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
-github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
-github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
-github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
-github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs=
-github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
-github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
-github.com/go-openapi/strfmt v0.24.0 h1:dDsopqbI3wrrlIzeXRbqMihRNnjzGC+ez4NQaAAJLuc=
-github.com/go-openapi/strfmt v0.24.0/go.mod h1:Lnn1Bk9rZjXxU9VMADbEEOo7D7CDyKGLsSKekhFr7s4=
-github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
-github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
-github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
-github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
-github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM=
-github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
+github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM=
+github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84=
+github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM=
+github.com/go-openapi/errors v0.22.4/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk=
+github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk=
+github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM=
+github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc=
+github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4=
+github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4=
+github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY=
+github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k=
+github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA=
+github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ=
+github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8=
+github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
+github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=
+github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=
+github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
+github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4=
+github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU=
+github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y=
+github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk=
+github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
+github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
+github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA=
+github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY=
+github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo=
+github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM=
+github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s=
+github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE=
+github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48=
+github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg=
+github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0=
+github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg=
+github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8=
+github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=
+github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw=
+github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=
+github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw=
+github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc=
+github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4=
+github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg=
+github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
+github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
+github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw=
+github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc=
+github.com/go-resty/resty/v2 v2.17.1 h1:x3aMpHK1YM9e4va/TMDRlusDDoZiQ+ViDu/WpA6xTM4=
+github.com/go-resty/resty/v2 v2.17.1/go.mod h1:kCKZ3wWmwJaNc7S29BRtUhJwy7iqmn+2mLtQrOyQlVA=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
@@ -210,26 +236,26 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20250923004556-9e5a51aed1e8 h1:ZI8gCoCjGzPsum4L21jHdQs8shFBIQih1TM9Rd/c+EQ=
-github.com/google/pprof v0.0.0-20250923004556-9e5a51aed1e8/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
+github.com/google/pprof v0.0.0-20251213031049-b05bdaca462f h1:HU1RgM6NALf/KW9HEY6zry3ADbDKcmpQ+hJedoNGQYQ=
+github.com/google/pprof v0.0.0-20251213031049-b05bdaca462f/go.mod h1:67FPmZWbr+KDT/VlpWtw6sO9XSjpJmLuHpoLmWiTGgY=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
-github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
+github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ=
+github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
-github.com/gophercloud/gophercloud/v2 v2.8.0 h1:of2+8tT6+FbEYHfYC8GBu8TXJNsXYSNm9KuvpX7Neqo=
-github.com/gophercloud/gophercloud/v2 v2.8.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
+github.com/gophercloud/gophercloud/v2 v2.9.0 h1:Y9OMrwKF9EDERcHFSOTpf/6XGoAI0yOxmsLmQki4LPM=
+github.com/gophercloud/gophercloud/v2 v2.9.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 h1:cLN4IBkmkYZNnk7EAJ0BHIethd+J6LqxFNw5mSiI2bM=
github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
-github.com/hashicorp/consul/api v1.32.0 h1:5wp5u780Gri7c4OedGEPzmlUEzi0g2KyiPphSr6zjVg=
-github.com/hashicorp/consul/api v1.32.0/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
+github.com/hashicorp/consul/api v1.32.1 h1:0+osr/3t/aZNAdJX558crU3PEjVrG4x6715aZHRgceE=
+github.com/hashicorp/consul/api v1.32.1/go.mod h1:mXUWLnxftwTmDv4W3lzxYCPD199iNLLUyLfLGFJbtl4=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
github.com/hashicorp/cronexpr v1.1.3 h1:rl5IkxXN2m681EfivTlccqIryzYJSXRGRNa0xeG7NA4=
@@ -245,6 +271,8 @@ github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVH
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY=
+github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
@@ -267,28 +295,26 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
-github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
+github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
-github.com/hashicorp/memberlist v0.5.1 h1:mk5dRuzeDNis2bi6LLoQIXfMH7JQvAzt3mQD0vNZZUo=
-github.com/hashicorp/memberlist v0.5.1/go.mod h1:zGDXV6AqbDTKTM6yxW0I4+JtFzZAJVoIPvss4hV8F24=
-github.com/hashicorp/nomad/api v0.0.0-20250930071859-eaa0fe0e27af h1:ScAYf8O+9xTqTJPZH8MIlUfO+ak8cb31rW1aYJgS+jE=
-github.com/hashicorp/nomad/api v0.0.0-20250930071859-eaa0fe0e27af/go.mod h1:sldFTIgs+FsUeKU3LwVjviAIuksxD8TzDOn02MYwslE=
+github.com/hashicorp/memberlist v0.5.3 h1:tQ1jOCypD0WvMemw/ZhhtH+PWpzcftQvgCorLu0hndk=
+github.com/hashicorp/memberlist v0.5.3/go.mod h1:h60o12SZn/ua/j0B6iKAZezA4eDaGsIuPO70eOaJ6WE=
+github.com/hashicorp/nomad/api v0.0.0-20260106084653-e8f2200c7039 h1:77URO0yPjlPjRc00KbjoBTG2dqHXFKA7Fv3s98w16kM=
+github.com/hashicorp/nomad/api v0.0.0-20260106084653-e8f2200c7039/go.mod h1:sldFTIgs+FsUeKU3LwVjviAIuksxD8TzDOn02MYwslE=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.29.0 h1:LzNFw5XLBfftyu3WM1sdSLjOZBlWORtz2hgGydHaYV8=
-github.com/hetznercloud/hcloud-go/v2 v2.29.0/go.mod h1:XBU4+EDH2KVqu2KU7Ws0+ciZcX4ygukQl/J0L5GS8P8=
-github.com/ionos-cloud/sdk-go/v6 v6.3.4 h1:jTvGl4LOF8v8OYoEIBNVwbFoqSGAFqn6vGE7sp7/BqQ=
-github.com/ionos-cloud/sdk-go/v6 v6.3.4/go.mod h1:wCVwNJ/21W29FWFUv+fNawOTMlFoP1dS3L+ZuztFW48=
+github.com/hetznercloud/hcloud-go/v2 v2.33.0 h1:g9hwuo60IXbupXJCYMlO4xDXgxxMPuFk31iOpLXDCV4=
+github.com/hetznercloud/hcloud-go/v2 v2.33.0/go.mod h1:GzYEl7slIGKc6Ttt08hjiJvGj8/PbWzcQf6IUi02dIs=
+github.com/ionos-cloud/sdk-go/v6 v6.3.6 h1:l/TtKgdQ1wUH3DDe2SfFD78AW+TJWdEbDpQhHkWd6CM=
+github.com/ionos-cloud/sdk-go/v6 v6.3.6/go.mod h1:nUGHP4kZHAZngCVr4v6C8nuargFrtvt7GrzH/hqn7c4=
github.com/jarcoal/httpmock v1.4.1 h1:0Ju+VCFuARfFlhVXFc2HxlcQkfB+Xq12/EotHko+x2A=
github.com/jarcoal/httpmock v1.4.1/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0=
-github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
-github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
@@ -302,8 +328,8 @@ github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRt
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
-github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
+github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
+github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo=
github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v1.0.0 h1:mHKLJTE7iXEys6deO5p6olAiZdG5zwp8Aebir+/EaRE=
@@ -323,10 +349,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/linode/linodego v1.60.0 h1:SgsebJFRCi+lSmYy+C40wmKZeJllGGm+W12Qw4+yVdI=
-github.com/linode/linodego v1.60.0/go.mod h1:1+Bt0oTz5rBnDOJbGhccxn7LYVytXTIIfAy7QYmijDs=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/linode/linodego v1.63.0 h1:MdjizfXNJDVJU6ggoJmMO5O9h4KGPGivNX0fzrAnstk=
+github.com/linode/linodego v1.63.0/go.mod h1:GoiwLVuLdBQcAebxAVKVL3mMYUgJZR/puOUSla04xBE=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -351,8 +375,8 @@ github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ=
github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA=
-github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps=
+github.com/miekg/dns v1.1.69 h1:Kb7Y/1Jo+SG+a2GtfoFUfDkG//csdRPwRLkCsxDG9Sc=
+github.com/miekg/dns v1.1.69/go.mod h1:7OyjD9nEba5OkqQ/hB4fy3PIoxafSZJtducccIelz3g=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
@@ -398,12 +422,12 @@ github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.139.0 h1:D5aGQCErSCb4sKIHoZhgR4El6AzgviTRYlHUpbSFqDo=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.139.0/go.mod h1:ZjeRsA5oaVk89fg5D+iXStx2QncmhAvtGbdSumT07H4=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.139.0 h1:6/j0Ta8ZJnmAFVEoC3aZ1Hs19RB4fHzlN6kOZhsBJqM=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.139.0/go.mod h1:VfA8xHz4xg7Fyj5bBsCDbOO3iVYzDn9wP/QFsjcAE5c=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.139.0 h1:iRNX/ueuad1psOVgnNkxuQmXxvF3ze5ZZCP66xKFk/w=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.139.0/go.mod h1:bW09lo3WgHsPsZ1mgsJvby9wCefT5o13patM5phdfIU=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.142.0 h1:agYk41V3eIfV6aIMxIeRQ7SFhfaW5k2O96HEebpmPwM=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.142.0/go.mod h1:ZmMdcBia20ih8NYia5b4dNhfNLT68xHgaqF+fNW+TLM=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.142.0 h1:bLp+Ii1UQ9cNr+Dm1jKzbcklhd0eBnPuIFQY6NPzkZ0=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.142.0/go.mod h1:6N36UrFd9Yiz2aYpXM5xiK7Eqp2RyAr3O8lUE+wK2Y8=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.142.0 h1:fL8LBVeje+nbts2VIInvRa4T5LlsC0BZCI60wNGoS+Y=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.142.0/go.mod h1:fSnKuTN91I68Ou1Lgfwe3Mt6BGl9kcA8PYCpnGkPnsY=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
@@ -431,15 +455,15 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
-github.com/prometheus/alertmanager v0.28.1 h1:BK5pCoAtaKg01BYRUJhEDV1tqJMEtYBGzPw8QdvnnvA=
-github.com/prometheus/alertmanager v0.28.1/go.mod h1:0StpPUDDHi1VXeM7p2yYfeZgLVi/PPlt39vo9LQUHxM=
+github.com/prometheus/alertmanager v0.30.0 h1:E4dnxSFXK8V2Bb8iqudlisTmaIrF3hRJSWnliG08tBM=
+github.com/prometheus/alertmanager v0.30.0/go.mod h1:93PBumcTLr/gNtNtM0m7BcCffbvYP5bKuLBWiOnISaA=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
-github.com/prometheus/client_golang/exp v0.0.0-20250914183048-a974e0d45e0a h1:RF1vfKM34/3DbGNis22BGd6sDDY3XBi0eM7pYqmOEO0=
-github.com/prometheus/client_golang/exp v0.0.0-20250914183048-a974e0d45e0a/go.mod h1:FGJuwvfcPY0V5enm+w8zF1RNS062yugQtPPQp1c4Io4=
+github.com/prometheus/client_golang/exp v0.0.0-20260101091701-2cd067eb23c9 h1:al1B/YzHmaXhacIFkrZSDSUpnPHV4ZPMfENQpvk3PZQ=
+github.com/prometheus/client_golang/exp v0.0.0-20260101091701-2cd067eb23c9/go.mod h1:PmAYDB13uBFBG9qE1qxZZgZWhg7Rg6SfKM5DMK7hjyI=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -447,8 +471,8 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.67.2 h1:PcBAckGFTIHt2+L3I33uNRTlKTplNzFctXcWhPyAEN8=
-github.com/prometheus/common v0.67.2/go.mod h1:63W3KZb1JOKgcjlIr64WW/LvFGAqKPj0atm+knVGEko=
+github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
+github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/exporter-toolkit v0.15.0 h1:Pcle5sSViwR1x0gdPd0wtYrPQENBieQAM7TmT0qtb2U=
@@ -464,11 +488,11 @@ github.com/prometheus/sigv4 v0.3.0 h1:QIG7nTbu0JTnNidGI1Uwl5AGVIChWUACxn2B/BQ1km
github.com/prometheus/sigv4 v0.3.0/go.mod h1:fKtFYDus2M43CWKMNtGvFNHGXnAJJEGZbiYCmVp/F8I=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.35 h1:8xfn1RzeI9yoCUuEwDy08F+No6PcKZGEDOQ6hrRyLts=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.35/go.mod h1:47B1d/YXmSAxlJxUJxClzHR6b3T4M1WyCvwENPQNBWc=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.36 h1:ObX9hZmK+VmijreZO/8x9pQ8/P/ToHD/bdSb4Eg4tUo=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.36/go.mod h1:LEsDu4BubxK7/cWhtlQWfuxwL4rf/2UEpxXz1o1EMtM=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shoenig/test v1.12.2 h1:ZVT8NeIUwGWpZcKaepPmFMoNQ3sVpxvqUh/MAqwFiJI=
@@ -482,8 +506,8 @@ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVs
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/stackitcloud/stackit-sdk-go/core v0.17.3 h1:GsZGmRRc/3GJLmCUnsZswirr5wfLRrwavbnL/renOqg=
-github.com/stackitcloud/stackit-sdk-go/core v0.17.3/go.mod h1:HBCXJGPgdRulplDzhrmwC+Dak9B/x0nzNtmOpu+1Ahg=
+github.com/stackitcloud/stackit-sdk-go/core v0.20.1 h1:odiuhhRXmxvEvnVTeZSN9u98edvw2Cd3DcnkepncP3M=
+github.com/stackitcloud/stackit-sdk-go/core v0.20.1/go.mod h1:fqto7M82ynGhEnpZU6VkQKYWYoFG5goC076JWXTUPRQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -509,72 +533,74 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
-go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
-go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/collector/component v1.45.0 h1:gGFfVdbQ+1YuyUkJjWo85I7euu3H/CiupuzCHv8OgHA=
-go.opentelemetry.io/collector/component v1.45.0/go.mod h1:xoNFnRKE8Iv6gmlqAKgjayWraRnDcYLLgrPt9VgyO2g=
-go.opentelemetry.io/collector/component/componentstatus v0.139.0 h1:bQmkv1t7xW7uIDireE0a2Am4IMOprXm6zQr/qDtGCIA=
-go.opentelemetry.io/collector/component/componentstatus v0.139.0/go.mod h1:ibZOohpG0u081/NaT/jMCTsKwRbbwwxWrjZml+owpyM=
-go.opentelemetry.io/collector/component/componenttest v0.139.0 h1:x9Yu2eYhrHxdZ7sFXWtAWVjQ3UIraje557LgNurDC2I=
-go.opentelemetry.io/collector/component/componenttest v0.139.0/go.mod h1:S9cj+qkf9FgHMzjvlYsLwQKd9BiS7B7oLZvxvlENM/c=
-go.opentelemetry.io/collector/confmap v1.45.0 h1:7M7TTlpzX4r+mIzP/ARdxZBAvI4N+1V96phDane+akU=
-go.opentelemetry.io/collector/confmap v1.45.0/go.mod h1:AE1dnkjv0T9gptsh5+mTX0XFGdXx0n7JS4b7CcPfJ6Q=
-go.opentelemetry.io/collector/confmap/xconfmap v0.139.0 h1:uQGpFuWnTCXqdMbI3gDSvkwU66/kF/aoC0kVMrit1EM=
-go.opentelemetry.io/collector/confmap/xconfmap v0.139.0/go.mod h1:d0ucaeNq2rojFRSQsCHF/gkT3cgBx5H2bVkPQMj57ck=
-go.opentelemetry.io/collector/consumer v1.45.0 h1:TtqXxgW+1GSCwdoohq0fzqnfqrZBKbfo++1XRj8mrEA=
-go.opentelemetry.io/collector/consumer v1.45.0/go.mod h1:pJzqTWBubwLt8mVou+G4/Hs23b3m425rVmld3LqOYpY=
-go.opentelemetry.io/collector/consumer/consumertest v0.139.0 h1:06mu43mMO7l49ASJ/GEbKgTWcV3py5zE/pKhNBZ1b3k=
-go.opentelemetry.io/collector/consumer/consumertest v0.139.0/go.mod h1:gaeCpRQGbCFYTeLzi+Z2cTDt40GiIa3hgIEgLEmiC78=
-go.opentelemetry.io/collector/consumer/xconsumer v0.139.0 h1:FhzDv+idglnrfjqPvnUw3YAEOkXSNv/FuNsuMiXQwcY=
-go.opentelemetry.io/collector/consumer/xconsumer v0.139.0/go.mod h1:yWrg/6FE/A4Q7eo/Mg++CzkBoSILHdeMnTlxV3serI0=
-go.opentelemetry.io/collector/featuregate v1.45.0 h1:D06hpf1F2KzKC+qXLmVv5e8IZpgCyZVeVVC8iOQxVmw=
-go.opentelemetry.io/collector/featuregate v1.45.0/go.mod h1:d0tiRzVYrytB6LkcYgz2ESFTv7OktRPQe0QEQcPt1L4=
-go.opentelemetry.io/collector/pdata v1.45.0 h1:q4XaISpeX640BcwXwb2mKOVw/gb67r22HjGWl8sbWsk=
-go.opentelemetry.io/collector/pdata v1.45.0/go.mod h1:5q2f001YhwMQO8QvpFhCOa4Cq/vtwX9W4HRMsXkU/nE=
-go.opentelemetry.io/collector/pdata/pprofile v0.139.0 h1:UA5TgFzYmRuJN3Wz0GR1efLUfjbs5rH0HTaxfASpTR8=
-go.opentelemetry.io/collector/pdata/pprofile v0.139.0/go.mod h1:sI5qHt+zzE2fhOWFdJIaiDBR0yGGjD4A4ZvDFU0tiHk=
-go.opentelemetry.io/collector/pdata/testdata v0.139.0 h1:n7O5bmLLhc3T6PePV4447fFcI/6QWcMhBsLtfCaD0do=
-go.opentelemetry.io/collector/pdata/testdata v0.139.0/go.mod h1:fxZ2VrhYLYBLHYBHC1XQRKZ6IJXwy0I2rPaaRlebYaY=
-go.opentelemetry.io/collector/pipeline v1.45.0 h1:sn9JJAEBe3XABTkWechMk0eH60QMBjjNe5V+ccBl+Uo=
-go.opentelemetry.io/collector/pipeline v1.45.0/go.mod h1:xUrAqiebzYbrgxyoXSkk6/Y3oi5Sy3im2iCA51LwUAI=
-go.opentelemetry.io/collector/processor v1.45.0 h1:GH5km9BkDQOoz7MR0jzTnzB1Kb5vtKzPwa/wDmRg2dQ=
-go.opentelemetry.io/collector/processor v1.45.0/go.mod h1:wdlaTTC3wqlZIJP9R9/SLc2q7h+MFGARsxfjgPtwbes=
-go.opentelemetry.io/collector/processor/processortest v0.139.0 h1:30akUdruFNG7EDpayuBhXoX2lV+hcfxW9Gl3Z6MYHb0=
-go.opentelemetry.io/collector/processor/processortest v0.139.0/go.mod h1:RTll3UKHrqj/VS6RGjTHtuGIJzyLEwFhbw8KuCL3pjo=
-go.opentelemetry.io/collector/processor/xprocessor v0.139.0 h1:O9x9RF/OG8gZ+HrOcB4f6F1fjniby484xf2D8GBxgqU=
-go.opentelemetry.io/collector/processor/xprocessor v0.139.0/go.mod h1:hqGhEZ1/PftD/QHaYna0o1xAqZUsb7GhqpOiaTTDJnQ=
+go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss=
+go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/collector/component v1.48.0 h1:0hZKOvT6fIlXoE+6t40UXbXOH7r/h9jyE3eIt0W19Qg=
+go.opentelemetry.io/collector/component v1.48.0/go.mod h1:Kmc9Z2CT53M2oRRf+WXHUHHgjCC+ADbiqfPO5mgZe3g=
+go.opentelemetry.io/collector/component/componentstatus v0.142.0 h1:a1KkLCtShI5SfhO2ga75VqWjjBRGgrerelt/2JXWLBI=
+go.opentelemetry.io/collector/component/componentstatus v0.142.0/go.mod h1:IRWKvFcUrFrkz1gJEV+cKAdE2ZBT128gk1sHt0OzKI4=
+go.opentelemetry.io/collector/component/componenttest v0.142.0 h1:a8XclEutO5dv4AnzThHK8dfqR4lDWjJKLtRNM2aVUFM=
+go.opentelemetry.io/collector/component/componenttest v0.142.0/go.mod h1:JhX/zKaEbjhFcsiV2ha2spzo24A6RL/jqNBS0svURD0=
+go.opentelemetry.io/collector/confmap v1.48.0 h1:vGhg25NEUX5DiYziJEw2siwdzsvtXBRZVuYyLVinFR8=
+go.opentelemetry.io/collector/confmap v1.48.0/go.mod h1:8tJHJowmvUkJ8AHzZ6SaH61dcWbdfRE9Sd/hwsKLgRE=
+go.opentelemetry.io/collector/confmap/xconfmap v0.142.0 h1:SNfuFP8TA0PmUkx6ryY63uNjLN2HMh5VeGO++IYdPgA=
+go.opentelemetry.io/collector/confmap/xconfmap v0.142.0/go.mod h1:FXuX6B8b7Ub7qkLqloWKanmPhADL18EEkaFptcd4eDQ=
+go.opentelemetry.io/collector/consumer v1.48.0 h1:g1uroz2AA0cqnEsjqFTSZG+y8uH1gQBqqyzk8kd3QiM=
+go.opentelemetry.io/collector/consumer v1.48.0/go.mod h1:lC6PnVXBwI456SV5WtvJqE7vjCNN6DAUc8xjFQ9wUV4=
+go.opentelemetry.io/collector/consumer/consumertest v0.142.0 h1:TRt8zR57Vk1PTjtqjHOwOAMbIl+IeloHxWAuF8sWdRw=
+go.opentelemetry.io/collector/consumer/consumertest v0.142.0/go.mod h1:yq2dhMxFUlCFkRN7LES3fzsTmUDw9VaunyRAka2TEaY=
+go.opentelemetry.io/collector/consumer/xconsumer v0.142.0 h1:qOoQnLZXQ9sRLexTkkmBx3qfaOmEgco9VBPmryg5UhA=
+go.opentelemetry.io/collector/consumer/xconsumer v0.142.0/go.mod h1:oPN0yJzEpovwlWvmSaiYgtDqGuOmMMLmmg352sqZdsE=
+go.opentelemetry.io/collector/featuregate v1.48.0 h1:jiGRcl93yzUFgZVDuskMAftFraE21jANdxXTQfSQScc=
+go.opentelemetry.io/collector/featuregate v1.48.0/go.mod h1:/1bclXgP91pISaEeNulRxzzmzMTm4I5Xih2SnI4HRSo=
+go.opentelemetry.io/collector/internal/testutil v0.142.0 h1:MHnAVRimQdsfYqYHC3YuJRkIUap4VmSpJkkIT2N7jJA=
+go.opentelemetry.io/collector/internal/testutil v0.142.0/go.mod h1:YAD9EAkwh/l5asZNbEBEUCqEjoL1OKMjAMoPjPqH76c=
+go.opentelemetry.io/collector/pdata v1.48.0 h1:CKZ+9v/lGTX/cTGx2XVp8kp0E8R//60kHFCBdZudrTg=
+go.opentelemetry.io/collector/pdata v1.48.0/go.mod h1:jaf2JQGpfUreD1TOtGBPsq00ecOqM66NG15wALmdxKA=
+go.opentelemetry.io/collector/pdata/pprofile v0.142.0 h1:Ivyw7WY8SIIWqzXsnNmjEgz3ysVs/OkIf0KIpJUnuuo=
+go.opentelemetry.io/collector/pdata/pprofile v0.142.0/go.mod h1:94GAph54K4WDpYz9xirhroHB3ptNLuPiY02k8fyoNUI=
+go.opentelemetry.io/collector/pdata/testdata v0.142.0 h1:+jf9RyLWl8WyhIVjpg7yuH+bRdQH4mW20cPtCMlY1cI=
+go.opentelemetry.io/collector/pdata/testdata v0.142.0/go.mod h1:kgAu5ZLEcVuPH3RFiHDg23RGitgm1M0cUAVwiGX4SB8=
+go.opentelemetry.io/collector/pipeline v1.48.0 h1:E4zyQ7+4FTGvdGS4pruUnItuyRTGhN0Qqk1CN71lfW0=
+go.opentelemetry.io/collector/pipeline v1.48.0/go.mod h1:xUrAqiebzYbrgxyoXSkk6/Y3oi5Sy3im2iCA51LwUAI=
+go.opentelemetry.io/collector/processor v1.48.0 h1:3Kttw79mnrf463QKJGoGZzFfiNzQuMWK0p2nHuvOhaQ=
+go.opentelemetry.io/collector/processor v1.48.0/go.mod h1:A3OsW6ga+a48J1mrnVNH5L5kB0v+n9nVFlmOQB5/Jwk=
+go.opentelemetry.io/collector/processor/processortest v0.142.0 h1:wQnJeXDejBL6r8ov66AYAGf8Q0/JspjuqAjPVBdCUoI=
+go.opentelemetry.io/collector/processor/processortest v0.142.0/go.mod h1:QU5SWj0L+92MSvQxZDjwWCsKssNDm+nD6SHn7IvviUE=
+go.opentelemetry.io/collector/processor/xprocessor v0.142.0 h1:7a1Crxrd5iBMVnebTxkcqxVkRHAlOBUUmNTUVUTnlCU=
+go.opentelemetry.io/collector/processor/xprocessor v0.142.0/go.mod h1:LY/GS2DiJILJKS3ynU3eOLLWSP8CmN1FtdpAMsVV8AU=
go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCuT10bIXb/Cc+k4=
go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns=
-go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0 h1:2pn7OzMewmYRiNtv1doZnLo3gONcnMHlFnmOR8Vgt+8=
-go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0/go.mod h1:rjbQTDEPQymPE0YnRQp9/NuPwwtL0sesz/fnqRW/v84=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
-go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
-go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
-go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
-go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
-go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
-go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
-go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
-go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
-go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
-go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
-go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
-go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
-go.opentelemetry.io/proto/slim/otlp v1.8.0 h1:afcLwp2XOeCbGrjufT1qWyruFt+6C9g5SOuymrSPUXQ=
-go.opentelemetry.io/proto/slim/otlp v1.8.0/go.mod h1:Yaa5fjYm1SMCq0hG0x/87wV1MP9H5xDuG/1+AhvBcsI=
-go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.1.0 h1:Uc+elixz922LHx5colXGi1ORbsW8DTIGM+gg+D9V7HE=
-go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.1.0/go.mod h1:VyU6dTWBWv6h9w/+DYgSZAPMabWbPTFTuxp25sM8+s0=
-go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.1.0 h1:i8YpvWGm/Uq1koL//bnbJ/26eV3OrKWm09+rDYo7keU=
-go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.1.0/go.mod h1:pQ70xHY/ZVxNUBPn+qUWPl8nwai87eWdqL3M37lNi9A=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.64.0 h1:OXSUzgmIFkcC4An+mv+lqqZSndTffXpjAyoR+1f8k/A=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.64.0/go.mod h1:1A4GVLFIm54HFqVdOpWmukap7rgb0frrE3zWXohLPdM=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ=
+go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
+go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 h1:f0cb2XPmrqn4XMy9PNliTgRKJgS5WcL/u0/WRYGz4t0=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0/go.mod h1:vnakAaFckOMiMtOIhFI2MNH4FYrZzXCYxmb1LlhoGz8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 h1:in9O8ESIOlwJAEGTkkf34DesGRAc/Pn8qJ7k3r/42LM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0/go.mod h1:Rp0EXBm5tfnv0WL+ARyO/PHBEaEAT8UUHQ6AGJcSq6c=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0 h1:Ckwye2FpXkYgiHX7fyVrN1uA/UYd9ounqqTuSNAv0k4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0/go.mod h1:teIFJh5pW2y+AN7riv6IBPX2DuesS3HgP39mwOspKwU=
+go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
+go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
+go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18=
+go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE=
+go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8=
+go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew=
+go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
+go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
+go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
+go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
+go.opentelemetry.io/proto/slim/otlp v1.9.0 h1:fPVMv8tP3TrsqlkH1HWYUpbCY9cAIemx184VGkS6vlE=
+go.opentelemetry.io/proto/slim/otlp v1.9.0/go.mod h1:xXdeJJ90Gqyll+orzUkY4bOd2HECo5JofeoLpymVqdI=
+go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.2.0 h1:o13nadWDNkH/quoDomDUClnQBpdQQ2Qqv0lQBjIXjE8=
+go.opentelemetry.io/proto/slim/otlp/collector/profiles/v1development v0.2.0/go.mod h1:Gyb6Xe7FTi/6xBHwMmngGoHqL0w29Y4eW8TGFzpefGA=
+go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.2.0 h1:EiUYvtwu6PMrMHVjcPfnsG3v+ajPkbUeH+IL93+QYyk=
+go.opentelemetry.io/proto/slim/otlp/profiles/v1development v0.2.0/go.mod h1:mUUHKFiN2SST3AhJ8XhJxEoeVW12oqfXog0Bo8W3Ec4=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
@@ -583,25 +609,27 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
-go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
+go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
+go.yaml.in/yaml/v4 v4.0.0-rc.3 h1:3h1fjsh1CTAPjW7q/EMe+C8shx5d8ctzZTrLcs/j8Go=
+go.yaml.in/yaml/v4 v4.0.0-rc.3/go.mod h1:aZqd9kCMsGL7AuUv/m/PvWLdg5sjJsZ4oHDEnfPPfY0=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
-golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
+golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
+golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/exp v0.0.0-20250808145144-a408d31f581a h1:Y+7uR/b1Mw2iSXZ3G//1haIiSElDQZ8KWh0h+sZPG90=
golang.org/x/exp v0.0.0-20250808145144-a408d31f581a/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
-golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
+golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
+golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -612,18 +640,18 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
-golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
-golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
-golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
-golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
+golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
+golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
+golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
+golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
-golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
+golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -649,27 +677,27 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
-golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
+golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
-golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
+golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
+golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
-golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
-golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
-golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
+golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
+golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
+golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
+golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
-golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
+golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
+golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/tools/godoc v0.1.0-deprecated h1:o+aZ1BOj6Hsx/GBdJO/s815sqftjSnrZZwyYTHODvtk=
golang.org/x/tools/godoc v0.1.0-deprecated/go.mod h1:qM63CriJ961IHWmnWa9CjZnBndniPt4a3CK0PVB9bIg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -678,23 +706,25 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
-google.golang.org/api v0.252.0 h1:xfKJeAJaMwb8OC9fesr369rjciQ704AjU/psjkKURSI=
-google.golang.org/api v0.252.0/go.mod h1:dnHOv81x5RAmumZ7BWLShB/u7JZNeyalImxHmtTHxqw=
+google.golang.org/api v0.258.0 h1:IKo1j5FBlN74fe5isA2PVozN3Y5pwNKriEgAXPOkDAc=
+google.golang.org/api v0.258.0/go.mod h1:qhOMTQEZ6lUps63ZNq9jhODswwjkjYYguA7fA3TBFww=
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4=
google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s=
-google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU=
-google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 h1:CirRxTOwnRWVLKzDNrs0CXAaVozJoR4G9xvdRecrdpk=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ=
-google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A=
-google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c=
-google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
-google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
+google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b h1:uA40e2M6fYRBf0+8uN5mLlqUtV192iiksiICIBkYJ1E=
+google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:Xa7le7qx2vmqB/SzWUBa7KdMjpdpAHlh5QCSnjessQk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2 h1:2I6GHUeJ/4shcDpoUlLs/2WPnhg7yJwvXtqcMJt9liA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
+google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
+google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
+google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
+google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/dnaeon/go-vcr.v4 v4.0.6 h1:PiJkrakkmzc5s7EfBnZOnyiLwi7o7A9fwPzN0X2uwe0=
+gopkg.in/dnaeon/go-vcr.v4 v4.0.6/go.mod h1:sbq5oMEcM4PXngbcNbHhzfCP9OdZodLhrbRYoyg09HY=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
@@ -713,12 +743,12 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
-k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM=
-k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk=
-k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
-k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
-k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
-k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
+k8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4=
+k8s.io/api v0.34.3/go.mod h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk=
+k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE=
+k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
+k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A=
+k8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
diff --git a/go.work b/go.work
new file mode 100644
index 0000000000..5ec4aeab50
--- /dev/null
+++ b/go.work
@@ -0,0 +1,8 @@
+go 1.24.9
+
+use (
+ .
+ ./documentation/examples/remote_storage
+ ./internal/tools
+ ./web/ui/mantine-ui/src/promql/tools
+)
diff --git a/internal/tools/go.mod b/internal/tools/go.mod
index a343a56834..a7a1ebec54 100644
--- a/internal/tools/go.mod
+++ b/internal/tools/go.mod
@@ -1,107 +1,112 @@
module github.com/prometheus/prometheus/internal/tools
-go 1.24.0
+go 1.24.9
require (
- github.com/bufbuild/buf v1.57.2
+ github.com/bufbuild/buf v1.62.1
github.com/daixiang0/gci v0.13.7
github.com/gogo/protobuf v1.3.2
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4
)
require (
- buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.9-20250718181942-e35f9b667443.1 // indirect
- buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.9-20250912141014-52f32327d4b0.1 // indirect
- buf.build/gen/go/bufbuild/registry/connectrpc/go v1.18.1-20250903170917-c4be0f57e197.1 // indirect
- buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.9-20250903170917-c4be0f57e197.1 // indirect
- buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.8-20241007202033-cf42259fcbfc.1 // indirect
- buf.build/go/app v0.1.0 // indirect
+ buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.11-20250718181942-e35f9b667443.1 // indirect
+ buf.build/gen/go/bufbuild/protodescriptor/protocolbuffers/go v1.36.11-20250109164928-1da0de137947.1 // indirect
+ buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20251209175733-2a1774d88802.1 // indirect
+ buf.build/gen/go/bufbuild/registry/connectrpc/go v1.19.1-20251202164234-62b14f0b533c.2 // indirect
+ buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.11-20251202164234-62b14f0b533c.1 // indirect
+ buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.11-20241007202033-cf42259fcbfc.1 // indirect
+ buf.build/go/app v0.2.0 // indirect
buf.build/go/bufplugin v0.9.0 // indirect
+ buf.build/go/bufprivateusage v0.1.0 // indirect
buf.build/go/interrupt v1.1.0 // indirect
- buf.build/go/protovalidate v1.0.0 // indirect
+ buf.build/go/protovalidate v1.1.0 // indirect
buf.build/go/protoyaml v0.6.0 // indirect
buf.build/go/spdx v0.2.0 // indirect
buf.build/go/standard v0.1.0 // indirect
- cel.dev/expr v0.24.0 // indirect
- connectrpc.com/connect v1.18.1 // indirect
+ cel.dev/expr v0.25.1 // indirect
+ connectrpc.com/connect v1.19.1 // indirect
connectrpc.com/otelconnect v0.8.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
- github.com/bufbuild/protocompile v0.14.1 // indirect
+ github.com/bufbuild/protocompile v0.14.2-0.20251223142729-db46c1b9d34e // indirect
github.com/bufbuild/protoplugin v0.0.0-20250218205857-750e09ce93e1 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/cli/browser v1.3.0 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
- github.com/containerd/stargz-snapshotter/estargz v0.17.0 // indirect
+ github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
github.com/distribution/reference v0.6.0 // indirect
- github.com/docker/cli v28.4.0+incompatible // indirect
+ github.com/docker/cli v29.1.3+incompatible // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
- github.com/docker/docker v28.4.0+incompatible // indirect
- github.com/docker/docker-credential-helpers v0.9.3 // indirect
+ github.com/docker/docker v28.5.2+incompatible // indirect
+ github.com/docker/docker-credential-helpers v0.9.4 // indirect
github.com/docker/go-connections v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-chi/chi/v5 v5.2.3 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/gofrs/flock v0.12.1 // indirect
+ github.com/gofrs/flock v0.13.0 // indirect
github.com/google/cel-go v0.26.1 // indirect
- github.com/google/go-containerregistry v0.20.6 // indirect
+ github.com/google/go-containerregistry v0.20.7 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/hexops/gotextdiff v1.0.3 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jdx/go-netrc v1.0.0 // indirect
- github.com/klauspost/compress v1.18.0 // indirect
+ github.com/klauspost/compress v1.18.2 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/term v0.5.2 // indirect
- github.com/morikuni/aec v1.0.0 // indirect
+ github.com/morikuni/aec v1.1.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
- github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
+ github.com/petermattis/goid v0.0.0-20251121121749-a11dd1a45f9a // indirect
github.com/pkg/errors v0.9.1 // indirect
- github.com/quic-go/qpack v0.5.1 // indirect
- github.com/quic-go/quic-go v0.54.0 // indirect
+ github.com/quic-go/qpack v0.6.0 // indirect
+ github.com/quic-go/quic-go v0.58.0 // indirect
+ github.com/rivo/uniseg v0.4.7 // indirect
github.com/rs/cors v1.11.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
- github.com/segmentio/asm v1.2.0 // indirect
+ github.com/segmentio/asm v1.2.1 // indirect
github.com/segmentio/encoding v0.5.3 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
- github.com/spf13/cobra v1.10.1 // indirect
+ github.com/spf13/cobra v1.10.2 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/stoewer/go-strcase v1.3.1 // indirect
- github.com/tetratelabs/wazero v1.9.0 // indirect
- github.com/vbatts/tar-split v0.12.1 // indirect
+ github.com/tetratelabs/wazero v1.11.0 // indirect
+ github.com/tidwall/btree v1.8.1 // indirect
+ github.com/vbatts/tar-split v0.12.2 // indirect
go.lsp.dev/jsonrpc2 v0.10.0 // indirect
go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2 // indirect
go.lsp.dev/protocol v0.12.0 // indirect
go.lsp.dev/uri v0.3.0 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
- go.opentelemetry.io/otel v1.38.0 // indirect
- go.opentelemetry.io/otel/metric v1.38.0 // indirect
- go.opentelemetry.io/otel/trace v1.38.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect
+ go.opentelemetry.io/otel v1.39.0 // indirect
+ go.opentelemetry.io/otel/metric v1.39.0 // indirect
+ go.opentelemetry.io/otel/trace v1.39.0 // indirect
go.uber.org/mock v0.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
- go.uber.org/zap v1.27.0 // indirect
+ go.uber.org/zap v1.27.1 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
- golang.org/x/crypto v0.42.0 // indirect
- golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect
- golang.org/x/mod v0.28.0 // indirect
- golang.org/x/net v0.44.0 // indirect
- golang.org/x/sync v0.17.0 // indirect
- golang.org/x/sys v0.36.0 // indirect
- golang.org/x/term v0.35.0 // indirect
- golang.org/x/text v0.29.0 // indirect
- golang.org/x/tools v0.37.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect
- google.golang.org/grpc v1.75.1 // indirect
- google.golang.org/protobuf v1.36.10 // indirect
+ golang.org/x/crypto v0.46.0 // indirect
+ golang.org/x/exp v0.0.0-20251219203646-944ab1f22d93 // indirect
+ golang.org/x/mod v0.31.0 // indirect
+ golang.org/x/net v0.48.0 // indirect
+ golang.org/x/sync v0.19.0 // indirect
+ golang.org/x/sys v0.39.0 // indirect
+ golang.org/x/term v0.38.0 // indirect
+ golang.org/x/text v0.32.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect
+ google.golang.org/grpc v1.78.0 // indirect
+ google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
pluginrpc.com/pluginrpc v0.5.0 // indirect
)
diff --git a/internal/tools/go.sum b/internal/tools/go.sum
index 3a2788f200..df735a5536 100644
--- a/internal/tools/go.sum
+++ b/internal/tools/go.sum
@@ -1,31 +1,35 @@
-buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.9-20250718181942-e35f9b667443.1 h1:HiLfreYRsqycF5QDlsnvSQOnl4tvhBoROl8+DkbaphI=
-buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.9-20250718181942-e35f9b667443.1/go.mod h1:WSxC6zKCpqVRcGZCpOgVwkATp9XBIleoAdSAnkq7dhw=
-buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.9-20250912141014-52f32327d4b0.1 h1:DQLS/rRxLHuugVzjJU5AvOwD57pdFl9he/0O7e5P294=
-buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.9-20250912141014-52f32327d4b0.1/go.mod h1:aY3zbkNan5F+cGm9lITDP6oxJIwu0dn9KjJuJjWaHkg=
-buf.build/gen/go/bufbuild/registry/connectrpc/go v1.18.1-20250903170917-c4be0f57e197.1 h1:isqFuFhL6JRd7+KF/vivWqZGJMCaTuAccZIWwneCcqE=
-buf.build/gen/go/bufbuild/registry/connectrpc/go v1.18.1-20250903170917-c4be0f57e197.1/go.mod h1:eGjb9P6sl1irS46NKyXnxkyozT2aWs3BF4tbYWQuCsw=
-buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.9-20250903170917-c4be0f57e197.1 h1:q+tABqEH2Cpcp8fO9TBZlvKok7zorHGy+/UyywXaAKo=
-buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.9-20250903170917-c4be0f57e197.1/go.mod h1:Y3m+VD8IH6JTgnFYggPHvFul/ry6dL3QDliy8xH7610=
-buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.8-20241007202033-cf42259fcbfc.1 h1:KuP+b+in6LGh2ukof5KgDCD8hPXotEq6EVOo13Wg1pE=
-buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.8-20241007202033-cf42259fcbfc.1/go.mod h1:dV1Kz6zdmyXt7QWm5OXby44OFpyLemllUDBUG5HMLio=
-buf.build/go/app v0.1.0 h1:nlqD/h0rhIN73ZoiDElprrPiO2N6JV+RmNK34K29Ihg=
-buf.build/go/app v0.1.0/go.mod h1:0XVOYemubVbxNXVY0DnsVgWeGkcbbAvjDa1fmhBC+Wo=
+buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.11-20250718181942-e35f9b667443.1 h1:zQ9C3e6FtwSZUFuKAQfpIKGFk5ZuRoGt5g35Bix55sI=
+buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.11-20250718181942-e35f9b667443.1/go.mod h1:1Znr6gmYBhbxWUPRrrVnSLXQsz8bvFVw1HHJq2bI3VQ=
+buf.build/gen/go/bufbuild/protodescriptor/protocolbuffers/go v1.36.11-20250109164928-1da0de137947.1 h1:HwzzCRS4ZrEm1++rzSDxHnO0DOjiT1b8I/24e8a4exY=
+buf.build/gen/go/bufbuild/protodescriptor/protocolbuffers/go v1.36.11-20250109164928-1da0de137947.1/go.mod h1:8PRKXhgNes29Tjrnv8KdZzg3I1QceOkzibW1QK7EXv0=
+buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20251209175733-2a1774d88802.1 h1:j9yeqTWEFrtimt8Nng2MIeRrpoCvQzM9/g25XTvqUGg=
+buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20251209175733-2a1774d88802.1/go.mod h1:tvtbpgaVXZX4g6Pn+AnzFycuRK3MOz5HJfEGeEllXYM=
+buf.build/gen/go/bufbuild/registry/connectrpc/go v1.19.1-20251202164234-62b14f0b533c.2 h1:eQ6XRVUaYYZFOZvBsyrOYLWbw6464s5dVnHscxa0b8w=
+buf.build/gen/go/bufbuild/registry/connectrpc/go v1.19.1-20251202164234-62b14f0b533c.2/go.mod h1:omxVRch3jEPMINnUipLsuRWoEhND6LPXELKBG7xzyDw=
+buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.11-20251202164234-62b14f0b533c.1 h1:PdfIJUbUVKdajMVYuMdvr2Wvo+wmzGnlPEYA4bhFaWI=
+buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.11-20251202164234-62b14f0b533c.1/go.mod h1:1JJi9jvOqRxSMa+JxiZSm57doB+db/1WYCIa2lHfc40=
+buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.11-20241007202033-cf42259fcbfc.1 h1:iGPvEJltOXUMANWf0zajcRcbiOXLD90ZwPUFvbcuv6Q=
+buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.11-20241007202033-cf42259fcbfc.1/go.mod h1:nWVKKRA29zdt4uvkjka3i/y4mkrswyWwiu0TbdX0zts=
+buf.build/go/app v0.2.0 h1:NYaH13A+RzPb7M5vO8uZYZ2maBZI5+MS9A9tQm66fy8=
+buf.build/go/app v0.2.0/go.mod h1:0XVOYemubVbxNXVY0DnsVgWeGkcbbAvjDa1fmhBC+Wo=
buf.build/go/bufplugin v0.9.0 h1:ktZJNP3If7ldcWVqh46XKeiYJVPxHQxCfjzVQDzZ/lo=
buf.build/go/bufplugin v0.9.0/go.mod h1:Z0CxA3sKQ6EPz/Os4kJJneeRO6CjPeidtP1ABh5jPPY=
+buf.build/go/bufprivateusage v0.1.0 h1:SzCoCcmzS3zyXHEXHeSQhGI7OTkgtljoknLzsUz9Gg4=
+buf.build/go/bufprivateusage v0.1.0/go.mod h1:GlCCJ3VVF7EqqU0CoRmo1FzAwwaKymEWSr+ty69xU5w=
buf.build/go/interrupt v1.1.0 h1:olBuhgv9Sav4/9pkSLoxgiOsZDgM5VhRhvRpn3DL0lE=
buf.build/go/interrupt v1.1.0/go.mod h1:ql56nXPG1oHlvZa6efNC7SKAQ/tUjS6z0mhJl0gyeRM=
-buf.build/go/protovalidate v1.0.0 h1:IAG1etULddAy93fiBsFVhpj7es5zL53AfB/79CVGtyY=
-buf.build/go/protovalidate v1.0.0/go.mod h1:KQmEUrcQuC99hAw+juzOEAmILScQiKBP1Oc36vvCLW8=
+buf.build/go/protovalidate v1.1.0 h1:pQqEQRpOo4SqS60qkvmhLTTQU9JwzEvdyiqAtXa5SeY=
+buf.build/go/protovalidate v1.1.0/go.mod h1:bGZcPiAQDC3ErCHK3t74jSoJDFOs2JH3d7LWuTEIdss=
buf.build/go/protoyaml v0.6.0 h1:Nzz1lvcXF8YgNZXk+voPPwdU8FjDPTUV4ndNTXN0n2w=
buf.build/go/protoyaml v0.6.0/go.mod h1:RgUOsBu/GYKLDSIRgQXniXbNgFlGEZnQpRAUdLAFV2Q=
buf.build/go/spdx v0.2.0 h1:IItqM0/cMxvFJJumcBuP8NrsIzMs/UYjp/6WSpq8LTw=
buf.build/go/spdx v0.2.0/go.mod h1:bXdwQFem9Si3nsbNy8aJKGPoaPi5DKwdeEp5/ArZ6w8=
buf.build/go/standard v0.1.0 h1:g98T9IyvAl0vS3Pq8iVk6Cvj2ZiFvoUJRtfyGa0120U=
buf.build/go/standard v0.1.0/go.mod h1:PiqpHz/7ZFq+kqvYhc/SK3lxFIB9N/aiH2CFC2JHIQg=
-cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
-cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
-connectrpc.com/connect v1.18.1 h1:PAg7CjSAGvscaf6YZKUefjoih5Z/qYkyaTrBW8xvYPw=
-connectrpc.com/connect v1.18.1/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8=
+cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4=
+cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4=
+connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14=
+connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
connectrpc.com/otelconnect v0.8.0 h1:a4qrN4H8aEE2jAoCxheZYYfEjXMgVPyL9OzPQLBEFXU=
connectrpc.com/otelconnect v0.8.0/go.mod h1:AEkVLjCPXra+ObGFCOClcJkNjS7zPaQSqvO0lCyjfZc=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
@@ -34,22 +38,30 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
-github.com/bufbuild/buf v1.57.2 h1:2vxP0giB8DVo0Lkem9T8WDUYIEC3zqY98+NHqAlP4ig=
-github.com/bufbuild/buf v1.57.2/go.mod h1:8cygE3L/J84dtgQAaquZKpXLo9MjAn+dSdFuXvbUNYg=
-github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw=
-github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c=
+github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE=
+github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
+github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4=
+github.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs=
+github.com/bufbuild/buf v1.62.1 h1:QdYB6JDW7dP+5H7sKx0lN1raxnuUJDDlEJtPHDYKB0g=
+github.com/bufbuild/buf v1.62.1/go.mod h1:igMN/6U32/GDzyfkmn0VfIaKoeOnWTTizEf5CG0/87k=
+github.com/bufbuild/protocompile v0.14.2-0.20251223142729-db46c1b9d34e h1:LQA+1MyiPkolGHJGC2GMDC5Xu+0RDVH6jGMKech7Exs=
+github.com/bufbuild/protocompile v0.14.2-0.20251223142729-db46c1b9d34e/go.mod h1:5UUj46Eu+U+C59C5N6YilaMI7WWfP2bW9xGcOkme2DI=
github.com/bufbuild/protoplugin v0.0.0-20250218205857-750e09ce93e1 h1:V1xulAoqLqVg44rY97xOR+mQpD2N+GzhMHVwJ030WEU=
github.com/bufbuild/protoplugin v0.0.0-20250218205857-750e09ce93e1/go.mod h1:c5D8gWRIZ2HLWO3gXYTtUfw/hbJyD8xikv2ooPxnklQ=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cli/browser v1.3.0 h1:LejqCrpWr+1pRqmEPDGnTZOjsMe7sehifLynZJuqJpo=
+github.com/cli/browser v1.3.0/go.mod h1:HH8s+fOAxjhQoBUAsKuPCbqUuxZDhQ2/aD+SzsEfBTk=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
-github.com/containerd/stargz-snapshotter/estargz v0.17.0 h1:+TyQIsR/zSFI1Rm31EQBwpAA1ovYgIKHy7kctL3sLcE=
-github.com/containerd/stargz-snapshotter/estargz v0.17.0/go.mod h1:s06tWAiJcXQo9/8AReBCIo/QxcXFZ2n4qfsRnpl71SM=
+github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8=
+github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
@@ -62,14 +74,14 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v28.4.0+incompatible h1:RBcf3Kjw2pMtwui5V0DIMdyeab8glEw5QY0UUU4C9kY=
-github.com/docker/cli v28.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v29.1.3+incompatible h1:+kz9uDWgs+mAaIZojWfFt4d53/jv0ZUOOoSh5ZnH36c=
+github.com/docker/cli v29.1.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v28.4.0+incompatible h1:KVC7bz5zJY/4AZe/78BIvCnPsLaC9T/zh72xnlrTTOk=
-github.com/docker/docker v28.4.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
-github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
+github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
+github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI=
+github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -83,8 +95,8 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
-github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
+github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw=
+github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/google/cel-go v0.26.1 h1:iPbVVEdkhTX++hpe3lzSk7D3G3QSYqLGoHOcEio+UXQ=
@@ -92,12 +104,12 @@ github.com/google/cel-go v0.26.1/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PU
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
-github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU=
-github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y=
+github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I=
+github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 h1:kEISI/Gx67NzH3nJxAmY/dGac80kKZgZt134u7Y/k1s=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4/go.mod h1:6Nz966r3vQYCqIzWsuEl9d7cf7mRhtDmm++sOxlnfxI=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
@@ -108,8 +120,8 @@ github.com/jhump/protoreflect/v2 v2.0.0-beta.2 h1:qZU+rEZUOYTz1Bnhi3xbwn+VxdXkLV
github.com/jhump/protoreflect/v2 v2.0.0-beta.2/go.mod h1:4tnOYkB/mq7QTyS3YKtVtNrJv4Psqout8HA1U+hZtgM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
-github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
+github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -130,36 +142,42 @@ github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7z
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
-github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
-github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/morikuni/aec v1.1.0 h1:vBBl0pUnvi/Je71dsRrhMBtreIqNMYErSAbEeb8jrXQ=
+github.com/morikuni/aec v1.1.0/go.mod h1:xDRgiq/iw5l+zkao76YTKzKttOp2cwPEne25HDkJnBw=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
-github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
-github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
+github.com/petermattis/goid v0.0.0-20251121121749-a11dd1a45f9a h1:VweslR2akb/ARhXfqSfRbj1vpWwYXf3eeAUyw/ndms0=
+github.com/petermattis/goid v0.0.0-20251121121749-a11dd1a45f9a/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
-github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
-github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg=
-github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
+github.com/protocolbuffers/protoscope v0.0.0-20221109213918-8e7a6aafa2c9 h1:arwj11zP0yJIxIRiDn22E0H8PxfF7TsTrc2wIPFIsf4=
+github.com/protocolbuffers/protoscope v0.0.0-20221109213918-8e7a6aafa2c9/go.mod h1:SKZx6stCn03JN3BOWTwvVIO2ajMkb/zQdTceXYhKw/4=
+github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
+github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
+github.com/quic-go/quic-go v0.58.0 h1:ggY2pvZaVdB9EyojxL1p+5mptkuHyX5MOSv4dgWF4Ug=
+github.com/quic-go/quic-go v0.58.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rodaine/protogofakeit v0.1.1 h1:ZKouljuRM3A+TArppfBqnH8tGZHOwM/pjvtXe9DaXH8=
+github.com/rodaine/protogofakeit v0.1.1/go.mod h1:pXn/AstBYMaSfc1/RqH3N82pBuxtWgejz1AlYpY1mI0=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
-github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
+github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0=
+github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/segmentio/encoding v0.5.3 h1:OjMgICtcSFuNvQCdwqMCv9Tg7lEOXGwm1J5RPQccx6w=
github.com/segmentio/encoding v0.5.3/go.mod h1:HS1ZKa3kSN32ZHVZ7ZLPLXWvOVIiZtyJnO1gPH1sKt0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
-github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
+github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
+github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -174,10 +192,12 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
-github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I=
-github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM=
-github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo=
-github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
+github.com/tetratelabs/wazero v1.11.0 h1:+gKemEuKCTevU4d7ZTzlsvgd1uaToIDtlQlmNbwqYhA=
+github.com/tetratelabs/wazero v1.11.0/go.mod h1:eV28rsN8Q+xwjogd7f4/Pp4xFxO7uOGbLcD/LzB1wiU=
+github.com/tidwall/btree v1.8.1 h1:27ehoXvm5AG/g+1VxLS1SD3vRhp/H7LuEfwNvddEdmA=
+github.com/tidwall/btree v1.8.1/go.mod h1:jBbTdUWhSZClZWoDg54VnvV7/54modSOzDN7VXftj1A=
+github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4=
+github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.lsp.dev/jsonrpc2 v0.10.0 h1:Pr/YcXJoEOTMc/b6OTmcR1DPJ3mSWl/SWiU1Cct6VmI=
@@ -190,91 +210,90 @@ go.lsp.dev/uri v0.3.0 h1:KcZJmh6nFIBeJzTugn5JTU6OOyG0lDOo3R9KwTxTYbo=
go.lsp.dev/uri v0.3.0/go.mod h1:P5sbO1IQR+qySTWOCnhnK7phBx+W3zbLqSMDJNTw88I=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
-go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
-go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ=
+go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
+go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 h1:f0cb2XPmrqn4XMy9PNliTgRKJgS5WcL/u0/WRYGz4t0=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0/go.mod h1:vnakAaFckOMiMtOIhFI2MNH4FYrZzXCYxmb1LlhoGz8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0=
-go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
-go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
-go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
-go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
-go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
-go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
-go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
-go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
-go.opentelemetry.io/proto/otlp v1.8.0 h1:fRAZQDcAFHySxpJ1TwlA1cJ4tvcrw7nXl9xWWC8N5CE=
-go.opentelemetry.io/proto/otlp v1.8.0/go.mod h1:tIeYOeNBU4cvmPqpaji1P+KbB4Oloai8wN4rWzRrFF0=
+go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
+go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
+go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18=
+go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE=
+go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8=
+go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew=
+go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
+go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
+go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
+go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
-go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
+go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
-golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
-golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0=
-golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
+golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
+golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
+golang.org/x/exp v0.0.0-20251219203646-944ab1f22d93 h1:fQsdNF2N+/YewlRZiricy4P1iimyPKZ/xwniHj8Q2a0=
+golang.org/x/exp v0.0.0-20251219203646-944ab1f22d93/go.mod h1:EPRbTFwzwjXj9NpYyyrvenVh9Y+GFeEvMNh7Xuz7xgU=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
-golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
+golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
+golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
-golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
+golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
+golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
-golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
+golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
-golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
-golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
+golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
+golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
+golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
-golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
+golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
+golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
-golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
+golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
+golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU=
-google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 h1:i8QOKZfYg6AbGVZzUAY3LrNWCKF8O6zFisU9Wl9RER4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ=
-google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI=
-google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
-google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
-google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
+google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b h1:uA40e2M6fYRBf0+8uN5mLlqUtV192iiksiICIBkYJ1E=
+google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:Xa7le7qx2vmqB/SzWUBa7KdMjpdpAHlh5QCSnjessQk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
+google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
+google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
+google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
+google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/internal/tools/tools.go b/internal/tools/tools.go
index e57e37186f..22e79a56f7 100644
--- a/internal/tools/tools.go
+++ b/internal/tools/tools.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/exemplar/exemplar.go b/model/exemplar/exemplar.go
index d03940f1b2..5db7c46a68 100644
--- a/model/exemplar/exemplar.go
+++ b/model/exemplar/exemplar.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go
index 91fcac1cfb..75021d2c62 100644
--- a/model/histogram/float_histogram.go
+++ b/model/histogram/float_histogram.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -484,7 +484,7 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) (res *FloatHistogram, counte
// supposed to be used according to the schema.
func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool {
if h2 == nil {
- return false
+ return h == nil
}
if h.Schema != h2.Schema ||
diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go
index e79f5a0f49..5c29544c8f 100644
--- a/model/histogram/float_histogram_test.go
+++ b/model/histogram/float_histogram_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/histogram/generic.go b/model/histogram/generic.go
index 649db769c7..61fc5067f2 100644
--- a/model/histogram/generic.go
+++ b/model/histogram/generic.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/histogram/generic_test.go b/model/histogram/generic_test.go
index 54324beaff..525c731571 100644
--- a/model/histogram/generic_test.go
+++ b/model/histogram/generic_test.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go
index 5fc68ef9d0..5be60174fc 100644
--- a/model/histogram/histogram.go
+++ b/model/histogram/histogram.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -247,7 +247,7 @@ func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] {
// supposed to be used according to the schema.
func (h *Histogram) Equals(h2 *Histogram) bool {
if h2 == nil {
- return false
+ return h == nil
}
if h.Schema != h2.Schema || h.Count != h2.Count ||
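With this change, Equals on both Histogram and FloatHistogram is nil-symmetric: two nil pointers now compare as equal, while a non-nil histogram compared against nil still reports false. A minimal sketch of the new semantics (illustrative usage, not part of the patch):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/model/histogram"
    )

    func main() {
        var a, b *histogram.Histogram
        fmt.Println(a.Equals(b)) // true: both sides are nil
        h := &histogram.Histogram{Count: 1}
        fmt.Println(h.Equals(nil)) // false: only the argument is nil
    }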
diff --git a/model/histogram/histogram_test.go b/model/histogram/histogram_test.go
index ae17f9be37..a2b4c7c0a8 100644
--- a/model/histogram/histogram_test.go
+++ b/model/histogram/histogram_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/histogram/test_utils.go b/model/histogram/test_utils.go
index a4871ada31..c86becdcf9 100644
--- a/model/histogram/test_utils.go
+++ b/model/histogram/test_utils.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/labels/labels_common.go b/model/labels/labels_common.go
index ab82ae6a8f..571064d6c4 100644
--- a/model/labels/labels_common.go
+++ b/model/labels/labels_common.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/labels/labels_dedupelabels.go b/model/labels/labels_dedupelabels.go
index 1e736c832e..ae751fe34a 100644
--- a/model/labels/labels_dedupelabels.go
+++ b/model/labels/labels_dedupelabels.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -24,6 +24,9 @@ import (
"github.com/cespare/xxhash/v2"
)
+// ImplementationName is the name of the labels implementation.
+const ImplementationName = "dedupelabels"
+
// Labels is implemented by a SymbolTable and string holding name/value
// pairs encoded as indexes into the table in varint encoding.
// Names are in alphabetical order.
diff --git a/model/labels/labels_dedupelabels_test.go b/model/labels/labels_dedupelabels_test.go
index 229bb45a8e..b05d18e4cc 100644
--- a/model/labels/labels_dedupelabels_test.go
+++ b/model/labels/labels_dedupelabels_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/labels/labels_slicelabels.go b/model/labels/labels_slicelabels.go
index 21ad145c1c..2a9056e68f 100644
--- a/model/labels/labels_slicelabels.go
+++ b/model/labels/labels_slicelabels.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -25,6 +25,9 @@ import (
"github.com/cespare/xxhash/v2"
)
+// ImplementationName is the name of the labels implementation.
+const ImplementationName = "slicelabels"
+
// Labels is a sorted set of labels. Order has to be guaranteed upon
// instantiation.
type Labels []Label
@@ -297,12 +300,9 @@ func FromStrings(ss ...string) Labels {
// Compare compares the two label sets.
// The result will be 0 if a==b, <0 if a < b, and >0 if a > b.
func Compare(a, b Labels) int {
- l := len(a)
- if len(b) < l {
- l = len(b)
- }
+ l := min(len(b), len(a))
- for i := 0; i < l; i++ {
+ for i := range l {
if a[i].Name != b[i].Name {
if a[i].Name < b[i].Name {
return -1
@@ -419,10 +419,7 @@ func (b *Builder) Labels() Labels {
return b.base
}
- expectedSize := len(b.base) + len(b.add) - len(b.del)
- if expectedSize < 1 {
- expectedSize = 1
- }
+ expectedSize := max(len(b.base)+len(b.add)-len(b.del), 1)
res := make(Labels, 0, expectedSize)
for _, l := range b.base {
if slices.Contains(b.del, l.Name) || contains(b.add, l.Name) {
diff --git a/model/labels/labels_slicelabels_test.go b/model/labels/labels_slicelabels_test.go
index 7961828378..700e88fd13 100644
--- a/model/labels/labels_slicelabels_test.go
+++ b/model/labels/labels_slicelabels_test.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -77,8 +77,8 @@ func BenchmarkScratchBuilderUnsafeAdd(b *testing.B) {
l.SetUnsafeAdd(true)
b.ReportAllocs()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
+
+ for b.Loop() {
l.Add("__name__", "metric1")
l.add = l.add[:0] // Reset slice so add can be repeated without side effects.
}
diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go
index f087223802..c9be42bf74 100644
--- a/model/labels/labels_stringlabels.go
+++ b/model/labels/labels_stringlabels.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -23,6 +23,9 @@ import (
"github.com/cespare/xxhash/v2"
)
+// ImplementationName is the name of the labels implementation.
+const ImplementationName = "stringlabels"
+
// Labels is implemented by a single flat string holding name/value pairs.
// Each name and value is preceded by its length, encoded as a single byte
// for size 0-254, or the following 3 bytes little-endian, if the first byte is 255.
diff --git a/model/labels/labels_stringlabels_test.go b/model/labels/labels_stringlabels_test.go
index 0704a2ff36..45b5a19f40 100644
--- a/model/labels/labels_stringlabels_test.go
+++ b/model/labels/labels_stringlabels_test.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go
index 4be2eeb0b7..67614daf92 100644
--- a/model/labels/labels_test.go
+++ b/model/labels/labels_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/labels/matcher.go b/model/labels/matcher.go
index a09c838e3f..6d22b1bf64 100644
--- a/model/labels/matcher.go
+++ b/model/labels/matcher.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/labels/matcher_test.go b/model/labels/matcher_test.go
index 214bb37eff..11ed6dd29c 100644
--- a/model/labels/matcher_test.go
+++ b/model/labels/matcher_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/labels/regexp.go b/model/labels/regexp.go
index 47b50e703a..a4bdf885ee 100644
--- a/model/labels/regexp.go
+++ b/model/labels/regexp.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -77,7 +77,18 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
if matches, caseSensitive := findSetMatches(parsed); caseSensitive {
m.setMatches = matches
}
- m.stringMatcher = stringMatcherFromRegexp(parsed)
+
+ // Check if we have a pattern like .*-.*-.*.
+ // If so, then we can rely on the containsInOrder check in compileMatchStringFunction,
+ // so no further inspection of the string is required.
+ // We can't do this in stringMatcherFromRegexpInternal as we only want to apply this
+ // if the top-level pattern satisfies this requirement.
+ if isSimpleConcatenationPattern(parsed) {
+ m.stringMatcher = trueMatcher{}
+ } else {
+ m.stringMatcher = stringMatcherFromRegexp(parsed)
+ }
+
m.matchString = m.compileMatchStringFunction()
}
@@ -566,6 +577,40 @@ func stringMatcherFromRegexpInternal(re *syntax.Regexp) StringMatcher {
return nil
}
+// isSimpleConcatenationPattern returns true if re contains only literals or wildcard matchers,
+// and starts and ends with a wildcard matcher (e.g. .*-.*-.*).
+func isSimpleConcatenationPattern(re *syntax.Regexp) bool {
+ if re.Op != syntax.OpConcat {
+ return false
+ }
+
+ if len(re.Sub) < 2 {
+ return false
+ }
+
+ first := re.Sub[0]
+ last := re.Sub[len(re.Sub)-1]
+ if !isMatchAny(first) || !isMatchAny(last) {
+ return false
+ }
+
+ for _, re := range re.Sub[1 : len(re.Sub)-1] {
+ if !isMatchAny(re) && !isCaseSensitiveLiteral(re) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func isMatchAny(re *syntax.Regexp) bool {
+ return re.Op == syntax.OpStar && re.Sub[0].Op == syntax.OpAnyChar
+}
+
+func isCaseSensitiveLiteral(re *syntax.Regexp) bool {
+ return re.Op == syntax.OpLiteral && isCaseSensitive(re)
+}
+
// containsStringMatcher matches a string if it contains any of the substrings.
// If left and right are not nil, it's a contains operation where left and right must match.
// If left is nil, it's a hasPrefix operation and right must match.
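The new branch leans on the containsInOrder check performed by the function returned from compileMatchStringFunction: once the top-level pattern is known to be wildcards separated by case-sensitive literals, it is enough to verify that those literals occur in the input in order. A rough, independent sketch of that idea (not the actual implementation):

    package main

    import (
        "fmt"
        "strings"
    )

    // containsInOrderSketch reports whether every literal appears in s in the given
    // order, with arbitrary (possibly empty) text in between. That is the property a
    // pattern such as .*-.*-.*-.*-.* needs once the surrounding wildcards are known.
    func containsInOrderSketch(s string, literals []string) bool {
        for _, lit := range literals {
            i := strings.Index(s, lit)
            if i < 0 {
                return false
            }
            s = s[i+len(lit):]
        }
        return true
    }

    func main() {
        fmt.Println(containsInOrderSketch("a-b-c-d-e", []string{"-", "-", "-", "-"})) // true
        fmt.Println(containsInOrderSketch("abcd", []string{"-", "-", "-", "-"}))      // false
    }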
diff --git a/model/labels/regexp_test.go b/model/labels/regexp_test.go
index 94ef14028b..85cbe02a1f 100644
--- a/model/labels/regexp_test.go
+++ b/model/labels/regexp_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -87,6 +87,9 @@ var (
"ſſs",
// Concat of literals and wildcards.
".*-.*-.*-.*-.*",
+ ".+-.*-.*-.*-.+",
+ "-.*-.*-.*-.*",
+ ".*-.*-.*-.*-",
"(.+)-(.+)-(.+)-(.+)-(.+)",
"((.*))(?i:f)((.*))o((.*))o((.*))",
"((.*))f((.*))(?i:o)((.*))o((.*))",
@@ -96,6 +99,11 @@ var (
"FOO", "Foo", "fOo", "foO", "OO", "Oo", "\nfoo\n", strings.Repeat("f", 20), "prometheus", "prometheus_api_v1", "prometheus_api_v1_foo",
"10.0.1.20", "10.0.2.10", "10.0.3.30", "10.0.4.40",
"foofoo0", "foofoo", "😀foo0", "ſſs", "ſſS", "AAAAAAAAAAAAAAAAAAAAAAAA", "BBBBBBBBBBBBBBBBBBBBBBBB", "cccccccccccccccccccccccC", "ſſſſſſſſſſſſſſſſſſſſſſſſS", "SSSSSSSSSSSSSSSSSSSSSSSSſ",
+ "a-b-c-d-e",
+ "aaaaaa-bbbbbb-cccccc-dddddd-eeeeee",
+ "aaaaaa----eeeeee",
+ "----",
+ "-a-a-a-",
// Values matching / not matching the test regexps on long alternations.
"zQPbMkNO", "zQPbMkNo", "jyyfj00j0061", "jyyfj00j006", "jyyfj00j00612", "NNSPdvMi", "NNSPdvMiXXX", "NNSPdvMixxx", "nnSPdvMi", "nnSPdvMiXXX",
@@ -162,6 +170,7 @@ func TestOptimizeConcatRegex(t *testing.T) {
{regex: "^5..$", prefix: "5", suffix: "", contains: nil},
{regex: "^release.*", prefix: "release", suffix: "", contains: nil},
{regex: "^env-[0-9]+laio[1]?[^0-9].*", prefix: "env-", suffix: "", contains: []string{"laio"}},
+ {regex: ".*-.*-.*-.*-.*", prefix: "", suffix: "", contains: []string{"-", "-", "-", "-"}},
}
for _, c := range cases {
@@ -341,7 +350,7 @@ func BenchmarkToNormalizedLower(b *testing.B) {
}
}
-func TestStringMatcherFromRegexp(t *testing.T) {
+func TestNewFastRegexMatcher(t *testing.T) {
for _, c := range []struct {
pattern string
exp StringMatcher
@@ -364,12 +373,12 @@ func TestStringMatcherFromRegexp(t *testing.T) {
{`(?i:((foo1|foo2|bar)))`, orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO1", caseSensitive: false}, &equalStringMatcher{s: "FOO2", caseSensitive: false}}), &equalStringMatcher{s: "BAR", caseSensitive: false}})},
{"^((?i:foo|oo)|(bar))$", orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO", caseSensitive: false}, &equalStringMatcher{s: "OO", caseSensitive: false}, &equalStringMatcher{s: "bar", caseSensitive: true}})},
{"(?i:(foo1|foo2|bar))", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO1", caseSensitive: false}, &equalStringMatcher{s: "FOO2", caseSensitive: false}}), &equalStringMatcher{s: "BAR", caseSensitive: false}})},
- {".*foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}},
- {"(.*)foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}},
- {"(.*)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}},
+ {".*foo.*", trueMatcher{}}, // The containsInOrder check done in the function returned by compileMatchStringFunction is sufficient.
+ {"(.*)foo.*", trueMatcher{}}, // The containsInOrder check done in the function returned by compileMatchStringFunction is sufficient.
+ {"(.*)foo(.*)", trueMatcher{}}, // The containsInOrder check done in the function returned by compileMatchStringFunction is sufficient.
{"(.+)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: trueMatcher{}}},
{"^.+foo.+", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: &anyNonEmptyStringMatcher{matchNL: true}}},
- {"^(.*)(foo)(.*)$", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}},
+ {"^(.*)(foo)(.*)$", trueMatcher{}}, // The containsInOrder check done in the function returned by compileMatchStringFunction is sufficient.
{"^(.*)(foo|foobar)(.*)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: trueMatcher{}, right: trueMatcher{}}},
{"^(.*)(foo|foobar)(.+)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}},
{"^(.*)(bar|b|buzz)(.+)$", &containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}},
@@ -388,7 +397,7 @@ func TestStringMatcherFromRegexp(t *testing.T) {
{"(api|rpc)_(v1|prom)_((?i)push|query)", nil},
{"[a-z][a-z]", nil},
{"[1^3]", nil},
- {".*foo.*bar.*", nil},
+ {".*foo.*bar.*", trueMatcher{}}, // The containsInOrder check done in the function returned by compileMatchStringFunction is sufficient.
{`\d*`, nil},
{".", nil},
{"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: trueMatcher{}}}}},
@@ -415,10 +424,9 @@ func TestStringMatcherFromRegexp(t *testing.T) {
} {
t.Run(c.pattern, func(t *testing.T) {
t.Parallel()
- parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL)
+ matcher, err := NewFastRegexMatcher(c.pattern)
require.NoError(t, err)
- matches := stringMatcherFromRegexp(parsed)
- require.Equal(t, c.exp, matches)
+ require.Equal(t, c.exp, matcher.stringMatcher)
})
}
}
@@ -1389,3 +1397,42 @@ func TestToNormalisedLower(t *testing.T) {
require.Equal(t, expectedOutput, toNormalisedLower(input, nil))
}
}
+
+func TestIsSimpleConcatenationPattern(t *testing.T) {
+ testCases := map[string]bool{
+ ".*-.*-.*-.*-.*": true,
+ ".+-.*-.*-.*-.+": false,
+ "-.*-.*-.*-.*": false,
+ ".*-.*-.*-.*-": false,
+ "-": false,
+ ".*": false,
+ }
+
+ for testCase, expected := range testCases {
+ t.Run(testCase, func(t *testing.T) {
+ re, err := syntax.Parse(testCase, syntax.Perl|syntax.DotNL)
+ require.NoError(t, err)
+ require.Equal(t, expected, isSimpleConcatenationPattern(re))
+ })
+ }
+}
+
+func BenchmarkFastRegexMatcher_ConcatenatedPattern(b *testing.B) {
+ pattern, err := NewFastRegexMatcher(".*-.*-.*-.*-.*")
+ require.NoError(b, err)
+
+ testCases := []string{
+ "a-b-c-d-e",
+ "aaaaaa-bbbbbb-cccccc-dddddd-eeeeee",
+ "aaaaaa----eeeeee",
+ "----",
+ "-a-a-a-",
+ "abcd",
+ }
+
+ for b.Loop() {
+ for _, s := range testCases {
+ pattern.MatchString(s)
+ }
+ }
+}
diff --git a/model/labels/sharding.go b/model/labels/sharding.go
index ed05da675f..6394d0a01e 100644
--- a/model/labels/sharding.go
+++ b/model/labels/sharding.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/labels/sharding_dedupelabels.go b/model/labels/sharding_dedupelabels.go
index 5bf41b05d6..11342146a8 100644
--- a/model/labels/sharding_dedupelabels.go
+++ b/model/labels/sharding_dedupelabels.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/labels/sharding_stringlabels.go b/model/labels/sharding_stringlabels.go
index 4dcbaa21d1..776a58bb5e 100644
--- a/model/labels/sharding_stringlabels.go
+++ b/model/labels/sharding_stringlabels.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/labels/sharding_test.go b/model/labels/sharding_test.go
index 78e3047509..8d094d780e 100644
--- a/model/labels/sharding_test.go
+++ b/model/labels/sharding_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/labels/test_utils.go b/model/labels/test_utils.go
index 66020799e9..21d1d71296 100644
--- a/model/labels/test_utils.go
+++ b/model/labels/test_utils.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/metadata/metadata.go b/model/metadata/metadata.go
index 1b7e63e0f3..d2a91bb560 100644
--- a/model/metadata/metadata.go
+++ b/model/metadata/metadata.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -13,7 +13,11 @@
package metadata
-import "github.com/prometheus/common/model"
+import (
+ "strings"
+
+ "github.com/prometheus/common/model"
+)
// Metadata stores a series' metadata information.
type Metadata struct {
@@ -21,3 +25,21 @@ type Metadata struct {
Unit string `json:"unit"`
Help string `json:"help"`
}
+
+// IsEmpty returns true if the metadata structure is empty; an unknown type counts as empty.
+func (m Metadata) IsEmpty() bool {
+ return (m.Type == "" || m.Type == model.MetricTypeUnknown) && m.Unit == "" && m.Help == ""
+}
+
+// Equals returns true if m is semantically the same as other metadata.
+func (m Metadata) Equals(other Metadata) bool {
+ if strings.Compare(m.Unit, other.Unit) != 0 || strings.Compare(m.Help, other.Help) != 0 {
+ return false
+ }
+
+ // Unknown means the same as empty string.
+ if m.Type == "" || m.Type == model.MetricTypeUnknown {
+ return other.Type == "" || other.Type == model.MetricTypeUnknown
+ }
+ return m.Type == other.Type
+}
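The helpers treat an empty type and model.MetricTypeUnknown as equivalent, as the tests below exercise; a short usage sketch (assumed call sites, not part of the patch):

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
        "github.com/prometheus/prometheus/model/metadata"
    )

    func main() {
        fmt.Println(metadata.Metadata{}.IsEmpty()) // true: the zero value counts as empty
        a := metadata.Metadata{Type: model.MetricTypeUnknown, Unit: "s", Help: "doc"}
        b := metadata.Metadata{Type: "", Unit: "s", Help: "doc"}
        fmt.Println(a.Equals(b)) // true: "" and unknown type are treated the same
    }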
diff --git a/model/metadata/metadata_test.go b/model/metadata/metadata_test.go
new file mode 100644
index 0000000000..169cd60c2e
--- /dev/null
+++ b/model/metadata/metadata_test.go
@@ -0,0 +1,116 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metadata
+
+import (
+ "testing"
+
+ "github.com/prometheus/common/model"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMetadata_IsEmpty(t *testing.T) {
+ for _, tt := range []struct {
+ name string
+ m Metadata
+ expected bool
+ }{
+ {
+ name: "empty struct", expected: true,
+ },
+ {
+ name: "unknown type with empty fields", expected: true,
+ m: Metadata{Type: model.MetricTypeUnknown},
+ },
+ {
+ name: "type", expected: false,
+ m: Metadata{Type: model.MetricTypeCounter},
+ },
+ {
+ name: "unit", expected: false,
+ m: Metadata{Unit: "seconds"},
+ },
+ {
+ name: "help", expected: false,
+ m: Metadata{Help: "help text"},
+ },
+ {
+ name: "unknown type with help", expected: false,
+ m: Metadata{Type: model.MetricTypeUnknown, Help: "help text"},
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ require.Equal(t, tt.expected, tt.m.IsEmpty())
+ })
+ }
+}
+
+func TestMetadata_Equals(t *testing.T) {
+ for _, tt := range []struct {
+ name string
+ m Metadata
+ other Metadata
+ expected bool
+ }{
+ {
+ name: "same empty", expected: true,
+ },
+ {
+ name: "same", expected: true,
+ m: Metadata{Type: model.MetricTypeCounter, Unit: "s", Help: "doc"},
+ other: Metadata{Type: model.MetricTypeCounter, Unit: "s", Help: "doc"},
+ },
+ {
+ name: "same unknown type", expected: true,
+ m: Metadata{Type: model.MetricTypeUnknown, Unit: "s", Help: "doc"},
+ other: Metadata{Type: model.MetricTypeUnknown, Unit: "s", Help: "doc"},
+ },
+ {
+ name: "same mixed unknown type", expected: true,
+ m: Metadata{Type: "", Unit: "s", Help: "doc"},
+ other: Metadata{Type: model.MetricTypeUnknown, Unit: "s", Help: "doc"},
+ },
+ {
+ name: "different unit", expected: false,
+ m: Metadata{Type: model.MetricTypeCounter, Unit: "s", Help: "doc"},
+ other: Metadata{Type: model.MetricTypeCounter, Unit: "bytes", Help: "doc"},
+ },
+ {
+ name: "different help", expected: false,
+ m: Metadata{Type: model.MetricTypeCounter, Unit: "s", Help: "doc"},
+ other: Metadata{Type: model.MetricTypeCounter, Unit: "s", Help: "other doc"},
+ },
+ {
+ name: "different type", expected: false,
+ m: Metadata{Type: model.MetricTypeCounter, Unit: "s", Help: "doc"},
+ other: Metadata{Type: model.MetricTypeGauge, Unit: "s", Help: "doc"},
+ },
+ {
+ name: "different type with unknown", expected: false,
+ m: Metadata{Type: model.MetricTypeUnknown, Unit: "s", Help: "doc"},
+ other: Metadata{Type: model.MetricTypeCounter, Unit: "s", Help: "doc"},
+ },
+ {
+ name: "different type with empty", expected: false,
+ m: Metadata{Type: "", Unit: "s", Help: "doc"},
+ other: Metadata{Type: model.MetricTypeCounter, Unit: "s", Help: "doc"},
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := tt.m.Equals(tt.other); got != tt.expected {
+ t.Errorf("Metadata.Equals() = %v, expected %v", got, tt.expected)
+ }
+ })
+ }
+}
diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go
index f7085037fd..6087253d11 100644
--- a/model/relabel/relabel.go
+++ b/model/relabel/relabel.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go
index 7ce3c86549..a3eb925995 100644
--- a/model/relabel/relabel_test.go
+++ b/model/relabel/relabel_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/rulefmt/rulefmt.go b/model/rulefmt/rulefmt.go
index 83203ba769..70541eb0d3 100644
--- a/model/rulefmt/rulefmt.go
+++ b/model/rulefmt/rulefmt.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/rulefmt/rulefmt_test.go b/model/rulefmt/rulefmt_test.go
index 45fc0f8227..ec16052bc0 100644
--- a/model/rulefmt/rulefmt_test.go
+++ b/model/rulefmt/rulefmt_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/textparse/benchmark_test.go b/model/textparse/benchmark_test.go
index 510da72c6c..cf63dad260 100644
--- a/model/textparse/benchmark_test.go
+++ b/model/textparse/benchmark_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/textparse/interface.go b/model/textparse/interface.go
index bbc52290ad..08d9a080a7 100644
--- a/model/textparse/interface.go
+++ b/model/textparse/interface.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/textparse/interface_test.go b/model/textparse/interface_test.go
index 7030544793..d0b6b293a9 100644
--- a/model/textparse/interface_test.go
+++ b/model/textparse/interface_test.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/textparse/nhcbparse.go b/model/textparse/nhcbparse.go
index 79441e1f75..13ce3ca988 100644
--- a/model/textparse/nhcbparse.go
+++ b/model/textparse/nhcbparse.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/textparse/nhcbparse_test.go b/model/textparse/nhcbparse_test.go
index 7e2f75ae63..9a27c16ea8 100644
--- a/model/textparse/nhcbparse_test.go
+++ b/model/textparse/nhcbparse_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go
index 207ceb4573..724c340546 100644
--- a/model/textparse/openmetricsparse.go
+++ b/model/textparse/openmetricsparse.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go
index f0bbab309e..8f6393cd53 100644
--- a/model/textparse/openmetricsparse_test.go
+++ b/model/textparse/openmetricsparse_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/textparse/promparse.go b/model/textparse/promparse.go
index 4a75bcd8d8..ada1b29013 100644
--- a/model/textparse/promparse.go
+++ b/model/textparse/promparse.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go
index 4e9406808f..a398067efe 100644
--- a/model/textparse/promparse_test.go
+++ b/model/textparse/promparse_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go
index a48aa4af69..637ae7b747 100644
--- a/model/textparse/protobufparse.go
+++ b/model/textparse/protobufparse.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go
index 6a16258f00..3a4f4abdda 100644
--- a/model/textparse/protobufparse_test.go
+++ b/model/textparse/protobufparse_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/timestamp/timestamp.go b/model/timestamp/timestamp.go
index 93458f644d..0f27314e57 100644
--- a/model/timestamp/timestamp.go
+++ b/model/timestamp/timestamp.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/model/value/value.go b/model/value/value.go
index 655ce852d5..fe8f50e002 100644
--- a/model/value/value.go
+++ b/model/value/value.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/notifier/alert.go b/notifier/alert.go
index 83e7a97fe0..5e6df2097b 100644
--- a/notifier/alert.go
+++ b/notifier/alert.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/notifier/alertmanager.go b/notifier/alertmanager.go
index 8bcf7954ec..a9c1e8669f 100644
--- a/notifier/alertmanager.go
+++ b/notifier/alertmanager.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/notifier/alertmanager_test.go b/notifier/alertmanager_test.go
index ea27f37be7..668271d267 100644
--- a/notifier/alertmanager_test.go
+++ b/notifier/alertmanager_test.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/notifier/alertmanagerset.go b/notifier/alertmanagerset.go
index b6d1b8c4aa..eca798e6f5 100644
--- a/notifier/alertmanagerset.go
+++ b/notifier/alertmanagerset.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/notifier/manager.go b/notifier/manager.go
index e37f59a250..a835cccffd 100644
--- a/notifier/manager.go
+++ b/notifier/manager.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/notifier/manager_test.go b/notifier/manager_test.go
index 64de020338..21ab0b28a1 100644
--- a/notifier/manager_test.go
+++ b/notifier/manager_test.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/notifier/metric.go b/notifier/metric.go
index 3f4abdda93..d10a02614c 100644
--- a/notifier/metric.go
+++ b/notifier/metric.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/notifier/util.go b/notifier/util.go
index c21c33a57b..cf9a53eda0 100644
--- a/notifier/util.go
+++ b/notifier/util.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/notifier/util_test.go b/notifier/util_test.go
index 2c1c7d241b..a9f0509ba1 100644
--- a/notifier/util_test.go
+++ b/notifier/util_test.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/plugins.yml b/plugins.yml
deleted file mode 100644
index 0541fe4852..0000000000
--- a/plugins.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-- github.com/prometheus/prometheus/discovery/aws
-- github.com/prometheus/prometheus/discovery/azure
-- github.com/prometheus/prometheus/discovery/consul
-- github.com/prometheus/prometheus/discovery/digitalocean
-- github.com/prometheus/prometheus/discovery/dns
-- github.com/prometheus/prometheus/discovery/eureka
-- github.com/prometheus/prometheus/discovery/gce
-- github.com/prometheus/prometheus/discovery/hetzner
-- github.com/prometheus/prometheus/discovery/ionos
-- github.com/prometheus/prometheus/discovery/kubernetes
-- github.com/prometheus/prometheus/discovery/linode
-- github.com/prometheus/prometheus/discovery/marathon
-- github.com/prometheus/prometheus/discovery/moby
-- github.com/prometheus/prometheus/discovery/nomad
-- github.com/prometheus/prometheus/discovery/openstack
-- github.com/prometheus/prometheus/discovery/ovhcloud
-- github.com/prometheus/prometheus/discovery/puppetdb
-- github.com/prometheus/prometheus/discovery/scaleway
-- github.com/prometheus/prometheus/discovery/stackit
-- github.com/prometheus/prometheus/discovery/triton
-- github.com/prometheus/prometheus/discovery/uyuni
-- github.com/prometheus/prometheus/discovery/vultr
-- github.com/prometheus/prometheus/discovery/xds
-- github.com/prometheus/prometheus/discovery/zookeeper
diff --git a/plugins/generate.go b/plugins/generate.go
deleted file mode 100644
index 2c4ba410f2..0000000000
--- a/plugins/generate.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build plugins
-
-package main
-
-import (
- "fmt"
- "log"
- "os"
- "path"
- "path/filepath"
-
- "go.yaml.in/yaml/v2"
-)
-
-//go:generate go run generate.go
-
-func main() {
- data, err := os.ReadFile(filepath.Join("..", "plugins.yml"))
- if err != nil {
- log.Fatal(err)
- }
-
- var plugins []string
- err = yaml.Unmarshal(data, &plugins)
- if err != nil {
- log.Fatal(err)
- }
-
- f, err := os.Create("plugins.go")
- if err != nil {
- log.Fatal(err)
- }
- defer f.Close()
- _, err = f.WriteString(`// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by "make plugins". DO NOT EDIT.
-
-package plugins
-
-`)
- if err != nil {
- log.Fatal(err)
- }
-
- if len(plugins) == 0 {
- return
- }
-
- _, err = f.WriteString("import (\n")
- if err != nil {
- log.Fatal(err)
- }
-
- for _, plugin := range plugins {
- _, err = f.WriteString(fmt.Sprintf("\t// Register %s plugin.\n", path.Base(plugin)))
- if err != nil {
- log.Fatal(err)
- }
- _, err = f.WriteString(fmt.Sprintf("\t_ \"%s\"\n", plugin))
- if err != nil {
- log.Fatal(err)
- }
- }
-
- _, err = f.WriteString(")\n")
- if err != nil {
- log.Fatal(err)
- }
-}
diff --git a/plugins/minimum.go b/plugins/minimum.go
index 8541de922f..9797c2dbe2 100644
--- a/plugins/minimum.go
+++ b/plugins/minimum.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/plugins/plugin_aws.go b/plugins/plugin_aws.go
new file mode 100644
index 0000000000..711ef38c3e
--- /dev/null
+++ b/plugins/plugin_aws.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_aws_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/aws" // Register aws plugin.
+)
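Each discovery integration is now compiled in through its own file behind a build constraint of the form !remove_all_sd || enable_<name>_sd, replacing the plugins.yml/generate.go mechanism removed above. A hedged sketch of how the tags are expected to combine (the ./cmd/prometheus path and the behaviour of the minimum set are assumptions, not taken from this patch):

    go build ./cmd/prometheus                                     # default: all SD plugins compiled in
    go build -tags remove_all_sd ./cmd/prometheus                 # drop the optional SD plugins
    go build -tags remove_all_sd,enable_aws_sd ./cmd/prometheus   # drop them all, then re-enable only AWS SD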
diff --git a/plugins/plugin_azure.go b/plugins/plugin_azure.go
new file mode 100644
index 0000000000..1f72812b8a
--- /dev/null
+++ b/plugins/plugin_azure.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_azure_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/azure" // Register azure plugin.
+)
diff --git a/plugins/plugin_consul.go b/plugins/plugin_consul.go
new file mode 100644
index 0000000000..6ff5003041
--- /dev/null
+++ b/plugins/plugin_consul.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_consul_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/consul" // Register consul plugin.
+)
diff --git a/plugins/plugin_digitalocean.go b/plugins/plugin_digitalocean.go
new file mode 100644
index 0000000000..927180e90b
--- /dev/null
+++ b/plugins/plugin_digitalocean.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_digitalocean_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/digitalocean" // Register digitalocean plugin.
+)
diff --git a/plugins/plugin_dns.go b/plugins/plugin_dns.go
new file mode 100644
index 0000000000..7bec66371e
--- /dev/null
+++ b/plugins/plugin_dns.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_dns_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/dns" // Register dns plugin.
+)
diff --git a/plugins/plugin_eureka.go b/plugins/plugin_eureka.go
new file mode 100644
index 0000000000..e4011da02a
--- /dev/null
+++ b/plugins/plugin_eureka.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_eureka_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/eureka" // Register eureka plugin.
+)
diff --git a/plugins/plugin_gce.go b/plugins/plugin_gce.go
new file mode 100644
index 0000000000..1c67657260
--- /dev/null
+++ b/plugins/plugin_gce.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_gce_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/gce" // Register gce plugin.
+)
diff --git a/plugins/plugin_hetzner.go b/plugins/plugin_hetzner.go
new file mode 100644
index 0000000000..f6b7db4563
--- /dev/null
+++ b/plugins/plugin_hetzner.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_hetzner_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/hetzner" // Register hetzner plugin.
+)
diff --git a/plugins/plugin_ionos.go b/plugins/plugin_ionos.go
new file mode 100644
index 0000000000..bf53b73053
--- /dev/null
+++ b/plugins/plugin_ionos.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_ionos_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/ionos" // Register ionos plugin.
+)
diff --git a/plugins/plugin_kubernetes.go b/plugins/plugin_kubernetes.go
new file mode 100644
index 0000000000..7145cedb2e
--- /dev/null
+++ b/plugins/plugin_kubernetes.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_kubernetes_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/kubernetes" // Register kubernetes plugin.
+)
diff --git a/plugins/plugin_linode.go b/plugins/plugin_linode.go
new file mode 100644
index 0000000000..4eb24b409c
--- /dev/null
+++ b/plugins/plugin_linode.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_linode_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/linode" // Register linode plugin.
+)
diff --git a/plugins/plugin_marathon.go b/plugins/plugin_marathon.go
new file mode 100644
index 0000000000..c26219a37a
--- /dev/null
+++ b/plugins/plugin_marathon.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_marathon_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/marathon" // Register marathon plugin.
+)
diff --git a/plugins/plugin_moby.go b/plugins/plugin_moby.go
new file mode 100644
index 0000000000..2c7c8e158b
--- /dev/null
+++ b/plugins/plugin_moby.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_moby_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/moby" // Register moby plugin.
+)
diff --git a/plugins/plugin_nomad.go b/plugins/plugin_nomad.go
new file mode 100644
index 0000000000..7251e507a2
--- /dev/null
+++ b/plugins/plugin_nomad.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_nomad_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/nomad" // Register nomad plugin.
+)
diff --git a/plugins/plugin_openstack.go b/plugins/plugin_openstack.go
new file mode 100644
index 0000000000..0dd227e8ac
--- /dev/null
+++ b/plugins/plugin_openstack.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_openstack_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/openstack" // Register openstack plugin.
+)
diff --git a/plugins/plugin_ovhcloud.go b/plugins/plugin_ovhcloud.go
new file mode 100644
index 0000000000..e3c372db8c
--- /dev/null
+++ b/plugins/plugin_ovhcloud.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_ovhcloud_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/ovhcloud" // Register ovhcloud plugin.
+)
diff --git a/plugins/plugin_puppetdb.go b/plugins/plugin_puppetdb.go
new file mode 100644
index 0000000000..33e82b6eac
--- /dev/null
+++ b/plugins/plugin_puppetdb.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_puppetdb_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/puppetdb" // Register puppetdb plugin.
+)
diff --git a/plugins/plugin_scaleway.go b/plugins/plugin_scaleway.go
new file mode 100644
index 0000000000..88e58ac646
--- /dev/null
+++ b/plugins/plugin_scaleway.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_scaleway_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/scaleway" // Register scaleway plugin.
+)
diff --git a/plugins/plugin_stackit.go b/plugins/plugin_stackit.go
new file mode 100644
index 0000000000..ac19419c27
--- /dev/null
+++ b/plugins/plugin_stackit.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_stackit_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/stackit" // Register stackit plugin.
+)
diff --git a/plugins/plugin_triton.go b/plugins/plugin_triton.go
new file mode 100644
index 0000000000..48989df8dd
--- /dev/null
+++ b/plugins/plugin_triton.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_triton_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/triton" // Register triton plugin.
+)
diff --git a/plugins/plugin_uyuni.go b/plugins/plugin_uyuni.go
new file mode 100644
index 0000000000..09f9ff033d
--- /dev/null
+++ b/plugins/plugin_uyuni.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_uyuni_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/uyuni" // Register uyuni plugin.
+)
diff --git a/plugins/plugin_vultr.go b/plugins/plugin_vultr.go
new file mode 100644
index 0000000000..5de4747cc7
--- /dev/null
+++ b/plugins/plugin_vultr.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_vultr_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/vultr" // Register vultr plugin.
+)
diff --git a/plugins/plugin_xds.go b/plugins/plugin_xds.go
new file mode 100644
index 0000000000..e0b0f048d2
--- /dev/null
+++ b/plugins/plugin_xds.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_xds_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/xds" // Register xds plugin.
+)
diff --git a/plugins/plugin_zookeeper.go b/plugins/plugin_zookeeper.go
new file mode 100644
index 0000000000..0852432920
--- /dev/null
+++ b/plugins/plugin_zookeeper.go
@@ -0,0 +1,20 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !remove_all_sd || enable_zookeeper_sd
+
+package plugins
+
+import (
+ _ "github.com/prometheus/prometheus/discovery/zookeeper" // Register zookeeper plugin.
+)
diff --git a/plugins/plugins.go b/plugins/plugins.go
deleted file mode 100644
index 90b1407281..0000000000
--- a/plugins/plugins.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2022 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by "make plugins". DO NOT EDIT.
-
-package plugins
-
-import (
- // Register aws plugin.
- _ "github.com/prometheus/prometheus/discovery/aws"
- // Register azure plugin.
- _ "github.com/prometheus/prometheus/discovery/azure"
- // Register consul plugin.
- _ "github.com/prometheus/prometheus/discovery/consul"
- // Register digitalocean plugin.
- _ "github.com/prometheus/prometheus/discovery/digitalocean"
- // Register dns plugin.
- _ "github.com/prometheus/prometheus/discovery/dns"
- // Register eureka plugin.
- _ "github.com/prometheus/prometheus/discovery/eureka"
- // Register gce plugin.
- _ "github.com/prometheus/prometheus/discovery/gce"
- // Register hetzner plugin.
- _ "github.com/prometheus/prometheus/discovery/hetzner"
- // Register ionos plugin.
- _ "github.com/prometheus/prometheus/discovery/ionos"
- // Register kubernetes plugin.
- _ "github.com/prometheus/prometheus/discovery/kubernetes"
- // Register linode plugin.
- _ "github.com/prometheus/prometheus/discovery/linode"
- // Register marathon plugin.
- _ "github.com/prometheus/prometheus/discovery/marathon"
- // Register moby plugin.
- _ "github.com/prometheus/prometheus/discovery/moby"
- // Register nomad plugin.
- _ "github.com/prometheus/prometheus/discovery/nomad"
- // Register openstack plugin.
- _ "github.com/prometheus/prometheus/discovery/openstack"
- // Register ovhcloud plugin.
- _ "github.com/prometheus/prometheus/discovery/ovhcloud"
- // Register puppetdb plugin.
- _ "github.com/prometheus/prometheus/discovery/puppetdb"
- // Register scaleway plugin.
- _ "github.com/prometheus/prometheus/discovery/scaleway"
- // Register stackit plugin.
- _ "github.com/prometheus/prometheus/discovery/stackit"
- // Register triton plugin.
- _ "github.com/prometheus/prometheus/discovery/triton"
- // Register uyuni plugin.
- _ "github.com/prometheus/prometheus/discovery/uyuni"
- // Register vultr plugin.
- _ "github.com/prometheus/prometheus/discovery/vultr"
- // Register xds plugin.
- _ "github.com/prometheus/prometheus/discovery/xds"
- // Register zookeeper plugin.
- _ "github.com/prometheus/prometheus/discovery/zookeeper"
-)
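
Note on the plugin split above: the generated plugins.go that registered every service-discovery provider is replaced by one file per provider, each guarded by the build constraint `!remove_all_sd || enable_<provider>_sd`. With no tags every provider is still compiled in; with the `remove_all_sd` tag only providers whose `enable_<provider>_sd` tag is also set remain. As a hedged sketch of how a downstream build might use this (the build invocation and package path below are assumptions, not part of this change):

```go
// Hypothetical sketch, not part of this diff: keeping only the Kubernetes and
// DNS discovery providers. Each generated file above compiles when
// "!remove_all_sd || enable_<provider>_sd" holds, so a build such as
//
//	go build -tags "remove_all_sd,enable_kubernetes_sd,enable_dns_sd" ./cmd/prometheus
//
// selects exactly those two providers. The effect is equivalent to a
// registration file with just these blank imports:

package plugins

import (
	_ "github.com/prometheus/prometheus/discovery/dns"        // Register dns plugin.
	_ "github.com/prometheus/prometheus/discovery/kubernetes" // Register kubernetes plugin.
)
```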
diff --git a/prompb/codec.go b/prompb/codec.go
index 6cc0cdc861..36490984a0 100644
--- a/prompb/codec.go
+++ b/prompb/codec.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Prometheus Team
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -110,7 +110,7 @@ func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
PositiveBuckets: h.GetPositiveCounts(),
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
NegativeBuckets: h.GetNegativeCounts(),
- CustomValues: h.CustomValues,
+ CustomValues: h.CustomValues, // CustomValues are immutable.
}
}
// Conversion from integer histogram.
@@ -125,6 +125,7 @@ func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()),
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()),
+ CustomValues: h.CustomValues, // CustomValues are immutable.
}
}
@@ -161,6 +162,7 @@ func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram {
PositiveDeltas: h.PositiveBuckets,
ResetHint: Histogram_ResetHint(h.CounterResetHint),
Timestamp: timestamp,
+ CustomValues: h.CustomValues, // CustomValues are immutable.
}
}
@@ -178,6 +180,7 @@ func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram
PositiveCounts: fh.PositiveBuckets,
ResetHint: Histogram_ResetHint(fh.CounterResetHint),
Timestamp: timestamp,
+ CustomValues: fh.CustomValues, // CustomValues are immutable.
}
}
diff --git a/prompb/custom.go b/prompb/custom.go
index f73ddd446b..65f856a755 100644
--- a/prompb/custom.go
+++ b/prompb/custom.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/prompb/io/prometheus/client/decoder.go b/prompb/io/prometheus/client/decoder.go
index 6bc9600ab6..de7184c4b5 100644
--- a/prompb/io/prometheus/client/decoder.go
+++ b/prompb/io/prometheus/client/decoder.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/prompb/io/prometheus/client/decoder_test.go b/prompb/io/prometheus/client/decoder_test.go
index b28fe43db9..0b210c7c0f 100644
--- a/prompb/io/prometheus/client/decoder_test.go
+++ b/prompb/io/prometheus/client/decoder_test.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/prompb/io/prometheus/write/v2/codec.go b/prompb/io/prometheus/write/v2/codec.go
index 71196edb88..ae4d0f635a 100644
--- a/prompb/io/prometheus/write/v2/codec.go
+++ b/prompb/io/prometheus/write/v2/codec.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Prometheus Team
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/prompb/io/prometheus/write/v2/custom.go b/prompb/io/prometheus/write/v2/custom.go
index 5721aec532..4063cf32ed 100644
--- a/prompb/io/prometheus/write/v2/custom.go
+++ b/prompb/io/prometheus/write/v2/custom.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/prompb/io/prometheus/write/v2/custom_test.go b/prompb/io/prometheus/write/v2/custom_test.go
index 139cbfb225..30715477cb 100644
--- a/prompb/io/prometheus/write/v2/custom_test.go
+++ b/prompb/io/prometheus/write/v2/custom_test.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/prompb/io/prometheus/write/v2/symbols.go b/prompb/io/prometheus/write/v2/symbols.go
index 7c7feca239..292801a185 100644
--- a/prompb/io/prometheus/write/v2/symbols.go
+++ b/prompb/io/prometheus/write/v2/symbols.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Prometheus Team
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/prompb/io/prometheus/write/v2/symbols_test.go b/prompb/io/prometheus/write/v2/symbols_test.go
index 7e7c7cb0bd..d0f335665a 100644
--- a/prompb/io/prometheus/write/v2/symbols_test.go
+++ b/prompb/io/prometheus/write/v2/symbols_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Prometheus Team
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/prompb/io/prometheus/write/v2/types_test.go b/prompb/io/prometheus/write/v2/types_test.go
index 5b7622fc2f..12528943a1 100644
--- a/prompb/io/prometheus/write/v2/types_test.go
+++ b/prompb/io/prometheus/write/v2/types_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Prometheus Team
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/prompb/rwcommon/codec_test.go b/prompb/rwcommon/codec_test.go
index 73a8196fa8..ee92581f59 100644
--- a/prompb/rwcommon/codec_test.go
+++ b/prompb/rwcommon/codec_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Prometheus Team
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -198,17 +198,14 @@ func testFloatHistogram() histogram.FloatHistogram {
func TestFromIntToFloatOrIntHistogram(t *testing.T) {
t.Run("v1", func(t *testing.T) {
- // v1 does not support nhcb.
- testIntHistWithoutNHCB := testIntHistogram()
- testIntHistWithoutNHCB.CustomValues = nil
- testFloatHistWithoutNHCB := testFloatHistogram()
- testFloatHistWithoutNHCB.CustomValues = nil
+ testIntHist := testIntHistogram()
+ testFloatHist := testFloatHistogram()
- h := prompb.FromIntHistogram(123, &testIntHistWithoutNHCB)
+ h := prompb.FromIntHistogram(123, &testIntHist)
require.False(t, h.IsFloatHistogram())
require.Equal(t, int64(123), h.Timestamp)
- require.Equal(t, testIntHistWithoutNHCB, *h.ToIntHistogram())
- require.Equal(t, testFloatHistWithoutNHCB, *h.ToFloatHistogram())
+ require.Equal(t, testIntHist, *h.ToIntHistogram())
+ require.Equal(t, testFloatHist, *h.ToFloatHistogram())
})
t.Run("v2", func(t *testing.T) {
testIntHist := testIntHistogram()
@@ -224,15 +221,13 @@ func TestFromIntToFloatOrIntHistogram(t *testing.T) {
func TestFromFloatToFloatHistogram(t *testing.T) {
t.Run("v1", func(t *testing.T) {
- // v1 does not support nhcb.
- testFloatHistWithoutNHCB := testFloatHistogram()
- testFloatHistWithoutNHCB.CustomValues = nil
+ testFloatHist := testFloatHistogram()
- h := prompb.FromFloatHistogram(123, &testFloatHistWithoutNHCB)
+ h := prompb.FromFloatHistogram(123, &testFloatHist)
require.True(t, h.IsFloatHistogram())
require.Equal(t, int64(123), h.Timestamp)
require.Nil(t, h.ToIntHistogram())
- require.Equal(t, testFloatHistWithoutNHCB, *h.ToFloatHistogram())
+ require.Equal(t, testFloatHist, *h.ToFloatHistogram())
})
t.Run("v2", func(t *testing.T) {
testFloatHist := testFloatHistogram()
diff --git a/promql/bench_test.go b/promql/bench_test.go
index 37c8311305..f647b03600 100644
--- a/promql/bench_test.go
+++ b/promql/bench_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/durations.go b/promql/durations.go
index c882adfbb6..c660dbf464 100644
--- a/promql/durations.go
+++ b/promql/durations.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -28,7 +28,8 @@ import (
// in OriginalOffsetExpr representing (1h / 2). This visitor evaluates
// such a duration expression, setting OriginalOffset to 30m.
type durationVisitor struct {
- step time.Duration
+ step time.Duration
+ queryRange time.Duration
}
// Visit finds any duration expressions in AST Nodes and modifies the Node to
@@ -121,6 +122,8 @@ func (v *durationVisitor) evaluateDurationExpr(expr parser.Expr) (float64, error
switch n.Op {
case parser.STEP:
return float64(v.step.Seconds()), nil
+ case parser.RANGE:
+ return float64(v.queryRange.Seconds()), nil
case parser.MIN:
return math.Min(lhs, rhs), nil
case parser.MAX:
diff --git a/promql/durations_test.go b/promql/durations_test.go
index 18592a0d0a..e9759af0dd 100644
--- a/promql/durations_test.go
+++ b/promql/durations_test.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -213,6 +213,37 @@ func TestCalculateDuration(t *testing.T) {
},
expected: 3 * time.Second,
},
+ {
+ name: "range",
+ expr: &parser.DurationExpr{
+ Op: parser.RANGE,
+ },
+ expected: 5 * time.Minute,
+ },
+ {
+ name: "range division",
+ expr: &parser.DurationExpr{
+ LHS: &parser.DurationExpr{
+ Op: parser.RANGE,
+ },
+ RHS: &parser.NumberLiteral{Val: 2},
+ Op: parser.DIV,
+ },
+ expected: 150 * time.Second,
+ },
+ {
+ name: "max of step and range",
+ expr: &parser.DurationExpr{
+ LHS: &parser.DurationExpr{
+ Op: parser.STEP,
+ },
+ RHS: &parser.DurationExpr{
+ Op: parser.RANGE,
+ },
+ Op: parser.MAX,
+ },
+ expected: 5 * time.Minute,
+ },
{
name: "division by zero",
expr: &parser.DurationExpr{
@@ -243,7 +274,7 @@ func TestCalculateDuration(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- v := &durationVisitor{step: 1 * time.Second}
+ v := &durationVisitor{step: 1 * time.Second, queryRange: 5 * time.Minute}
result, err := v.calculateDuration(tt.expr, tt.allowedNegative)
if tt.errorMessage != "" {
require.Error(t, err)
diff --git a/promql/engine.go b/promql/engine.go
index a5b66052f3..11a7ad22ec 100644
--- a/promql/engine.go
+++ b/promql/engine.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -49,6 +49,7 @@ import (
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/util/annotations"
+ "github.com/prometheus/prometheus/util/features"
"github.com/prometheus/prometheus/util/logging"
"github.com/prometheus/prometheus/util/stats"
"github.com/prometheus/prometheus/util/zeropool"
@@ -76,15 +77,19 @@ const (
)
type engineMetrics struct {
- currentQueries prometheus.Gauge
- maxConcurrentQueries prometheus.Gauge
- queryLogEnabled prometheus.Gauge
- queryLogFailures prometheus.Counter
- queryQueueTime prometheus.Observer
- queryPrepareTime prometheus.Observer
- queryInnerEval prometheus.Observer
- queryResultSort prometheus.Observer
- querySamples prometheus.Counter
+ currentQueries prometheus.Gauge
+ maxConcurrentQueries prometheus.Gauge
+ queryLogEnabled prometheus.Gauge
+ queryLogFailures prometheus.Counter
+ queryQueueTime prometheus.Observer
+ queryQueueTimeHistogram prometheus.Observer
+ queryPrepareTime prometheus.Observer
+ queryPrepareTimeHistogram prometheus.Observer
+ queryInnerEval prometheus.Observer
+ queryInnerEvalHistogram prometheus.Observer
+ queryResultSort prometheus.Observer
+ queryResultSortHistogram prometheus.Observer
+ querySamples prometheus.Counter
}
type (
@@ -326,6 +331,9 @@ type EngineOpts struct {
EnableDelayedNameRemoval bool
// EnableTypeAndUnitLabels will allow PromQL Engine to make decisions based on the type and unit labels.
EnableTypeAndUnitLabels bool
+
+ // FeatureRegistry is the registry for tracking enabled/disabled features.
+ FeatureRegistry features.Collector
}
// Engine handles the lifetime of queries from beginning to end.
@@ -363,6 +371,19 @@ func NewEngine(opts EngineOpts) *Engine {
[]string{"slice"},
)
+ queryResultHistogram := prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "query_duration_histogram_seconds",
+ Help: "The duration of various parts of PromQL query execution.",
+ Buckets: []float64{.01, .1, 1, 10},
+ NativeHistogramBucketFactor: 1.1,
+ NativeHistogramMaxBucketNumber: 100,
+ NativeHistogramMinResetDuration: 1 * time.Hour,
+ },
+ []string{"slice"},
+ )
+
metrics := &engineMetrics{
currentQueries: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
@@ -394,10 +415,14 @@ func NewEngine(opts EngineOpts) *Engine {
Name: "query_samples_total",
Help: "The total number of samples loaded by all queries.",
}),
- queryQueueTime: queryResultSummary.WithLabelValues("queue_time"),
- queryPrepareTime: queryResultSummary.WithLabelValues("prepare_time"),
- queryInnerEval: queryResultSummary.WithLabelValues("inner_eval"),
- queryResultSort: queryResultSummary.WithLabelValues("result_sort"),
+ queryQueueTime: queryResultSummary.WithLabelValues("queue_time"),
+ queryQueueTimeHistogram: queryResultHistogram.WithLabelValues("queue_time"),
+ queryPrepareTime: queryResultSummary.WithLabelValues("prepare_time"),
+ queryPrepareTimeHistogram: queryResultHistogram.WithLabelValues("prepare_time"),
+ queryInnerEval: queryResultSummary.WithLabelValues("inner_eval"),
+ queryInnerEvalHistogram: queryResultHistogram.WithLabelValues("inner_eval"),
+ queryResultSort: queryResultSummary.WithLabelValues("result_sort"),
+ queryResultSortHistogram: queryResultHistogram.WithLabelValues("result_sort"),
}
if t := opts.ActiveQueryTracker; t != nil {
@@ -421,9 +446,22 @@ func NewEngine(opts EngineOpts) *Engine {
metrics.queryLogFailures,
metrics.querySamples,
queryResultSummary,
+ queryResultHistogram,
)
}
+ if r := opts.FeatureRegistry; r != nil {
+ r.Set(features.PromQL, "at_modifier", opts.EnableAtModifier)
+ r.Set(features.PromQL, "negative_offset", opts.EnableNegativeOffset)
+ r.Set(features.PromQL, "per_step_stats", opts.EnablePerStepStats)
+ r.Set(features.PromQL, "delayed_name_removal", opts.EnableDelayedNameRemoval)
+ r.Set(features.PromQL, "type_and_unit_labels", opts.EnableTypeAndUnitLabels)
+ r.Enable(features.PromQL, "per_query_lookback_delta")
+ r.Enable(features.PromQL, "subqueries")
+
+ parser.RegisterFeatures(r)
+ }
+
return &Engine{
timeout: opts.Timeout,
logger: opts.Logger,
@@ -701,7 +739,7 @@ func (ng *Engine) queueActive(ctx context.Context, q *query) (func(), error) {
if ng.activeQueryTracker == nil {
return func() {}, nil
}
- queueSpanTimer, _ := q.stats.GetSpanTimer(ctx, stats.ExecQueueTime, ng.metrics.queryQueueTime)
+ queueSpanTimer, _ := q.stats.GetSpanTimer(ctx, stats.ExecQueueTime, ng.metrics.queryQueueTime, ng.metrics.queryQueueTimeHistogram)
queryIndex, err := ng.activeQueryTracker.Insert(ctx, q.q)
queueSpanTimer.Finish()
return func() { ng.activeQueryTracker.Delete(queryIndex) }, err
@@ -717,7 +755,7 @@ func durationMilliseconds(d time.Duration) int64 {
// execEvalStmt evaluates the expression of an evaluation statement for the given time range.
func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, annotations.Annotations, error) {
- prepareSpanTimer, ctxPrepare := query.stats.GetSpanTimer(ctx, stats.QueryPreparationTime, ng.metrics.queryPrepareTime)
+ prepareSpanTimer, ctxPrepare := query.stats.GetSpanTimer(ctx, stats.QueryPreparationTime, ng.metrics.queryPrepareTime, ng.metrics.queryPrepareTimeHistogram)
mint, maxt := FindMinMaxTime(s)
querier, err := query.queryable.Querier(mint, maxt)
if err != nil {
@@ -732,7 +770,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
// Modify the offset of vector and matrix selectors for the @ modifier
// w.r.t. the start time since only 1 evaluation will be done on them.
setOffsetForAtModifier(timeMilliseconds(s.Start), s.Expr)
- evalSpanTimer, ctxInnerEval := query.stats.GetSpanTimer(ctx, stats.InnerEvalTime, ng.metrics.queryInnerEval)
+ evalSpanTimer, ctxInnerEval := query.stats.GetSpanTimer(ctx, stats.InnerEvalTime, ng.metrics.queryInnerEval, ng.metrics.queryInnerEvalHistogram)
// Instant evaluation. This is executed as a range evaluation with one step.
if s.Start.Equal(s.End) && s.Interval == 0 {
start := timeMilliseconds(s.Start)
@@ -835,7 +873,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
}
func (ng *Engine) sortMatrixResult(ctx context.Context, query *query, mat Matrix) {
- sortSpanTimer, _ := query.stats.GetSpanTimer(ctx, stats.ResultSortTime, ng.metrics.queryResultSort)
+ sortSpanTimer, _ := query.stats.GetSpanTimer(ctx, stats.ResultSortTime, ng.metrics.queryResultSort, ng.metrics.queryResultSortHistogram)
sort.Sort(mat)
sortSpanTimer.Finish()
}
@@ -1137,7 +1175,7 @@ func (ev *evaluator) Eval(ctx context.Context, expr parser.Expr) (v parser.Value
v, ws = ev.eval(ctx, expr)
if ev.enableDelayedNameRemoval {
- ev.cleanupMetricLabels(v)
+ v = ev.cleanupMetricLabels(v)
}
return v, ws, nil
}
@@ -2153,8 +2191,8 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
mat[i].Histograms[j].H = mat[i].Histograms[j].H.Copy().Mul(-1)
}
}
- if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() {
- ev.errorf("vector cannot contain metrics with the same labelset")
+ if !ev.enableDelayedNameRemoval {
+ mat = ev.mergeSeriesWithSameLabelset(mat)
}
}
return mat, ws
@@ -2898,17 +2936,15 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
if info != nil {
lastErr = info
}
- switch {
- case returnBool:
+ if returnBool {
histogramValue = nil
if keep {
floatValue = 1.0
} else {
floatValue = 0.0
}
- case !keep:
- continue
}
+
metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh)
if !ev.enableDelayedNameRemoval && returnBool {
metric = metric.DropReserved(schema.IsMetadataLabel)
@@ -2934,6 +2970,10 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
insertedSigs[insertSig] = struct{}{}
}
+ if !keep && !returnBool {
+ continue
+ }
+
enh.Out = append(enh.Out, Sample{
Metric: metric,
F: floatValue,
@@ -3792,7 +3832,7 @@ func (*evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []str
return enh.Out, nil
}
-func (ev *evaluator) cleanupMetricLabels(v parser.Value) {
+func (ev *evaluator) cleanupMetricLabels(v parser.Value) parser.Value {
if v.Type() == parser.ValueTypeMatrix {
mat := v.(Matrix)
for i := range mat {
@@ -3800,9 +3840,7 @@ func (ev *evaluator) cleanupMetricLabels(v parser.Value) {
mat[i].Metric = mat[i].Metric.DropReserved(schema.IsMetadataLabel)
}
}
- if mat.ContainsSameLabelset() {
- ev.errorf("vector cannot contain metrics with the same labelset")
- }
+ return ev.mergeSeriesWithSameLabelset(mat)
} else if v.Type() == parser.ValueTypeVector {
vec := v.(Vector)
for i := range vec {
@@ -3813,7 +3851,75 @@ func (ev *evaluator) cleanupMetricLabels(v parser.Value) {
if vec.ContainsSameLabelset() {
ev.errorf("vector cannot contain metrics with the same labelset")
}
+ return vec
}
+ return v
+}
+
+// mergeSeriesWithSameLabelset merges series in a matrix that have the same labelset
+// after __name__ label removal. This happens when delayed name removal is enabled and
+// operations like OR combine series that originally had different names but end up
+// with the same labelset after dropping the name. If series with the same labelset
+// have overlapping timestamps, an error is returned.
+func (ev *evaluator) mergeSeriesWithSameLabelset(mat Matrix) Matrix {
+ if len(mat) <= 1 {
+ return mat
+ }
+
+ // Fast path: check if there are any duplicate labelsets without allocating.
+ // This is the common case and we want to avoid allocations.
+ if !mat.ContainsSameLabelset() {
+ return mat
+ }
+
+ // Slow path: there are duplicates, so we need to merge series with non-overlapping timestamps.
+ // Group series by their labelset hash.
+ seriesByHash := make(map[uint64][]int)
+ for i := range mat {
+ hash := mat[i].Metric.Hash()
+ seriesByHash[hash] = append(seriesByHash[hash], i)
+ }
+
+ // Merge series with the same labelset.
+ merged := make(Matrix, 0, len(seriesByHash))
+ for _, indices := range seriesByHash {
+ if len(indices) == 1 {
+ // No collision, add as-is.
+ merged = append(merged, mat[indices[0]])
+ continue
+ }
+
+ // Multiple series with the same labelset - merge all samples.
+ base := mat[indices[0]]
+ for _, idx := range indices[1:] {
+ base.Floats = append(base.Floats, mat[idx].Floats...)
+ base.Histograms = append(base.Histograms, mat[idx].Histograms...)
+ }
+
+ // Sort merged samples by timestamp.
+ sort.Slice(base.Floats, func(i, j int) bool {
+ return base.Floats[i].T < base.Floats[j].T
+ })
+ sort.Slice(base.Histograms, func(i, j int) bool {
+ return base.Histograms[i].T < base.Histograms[j].T
+ })
+
+ // Check for duplicate timestamps in sorted samples.
+ for i := 1; i < len(base.Floats); i++ {
+ if base.Floats[i].T == base.Floats[i-1].T {
+ ev.errorf("vector cannot contain metrics with the same labelset")
+ }
+ }
+ for i := 1; i < len(base.Histograms); i++ {
+ if base.Histograms[i].T == base.Histograms[i-1].T {
+ ev.errorf("vector cannot contain metrics with the same labelset")
+ }
+ }
+
+ merged = append(merged, base)
+ }
+
+ return merged
}
func addToSeries(ss *Series, ts int64, f float64, h *histogram.FloatHistogram, numSteps int) {
@@ -3951,7 +4057,7 @@ func unwrapStepInvariantExpr(e parser.Expr) parser.Expr {
func PreprocessExpr(expr parser.Expr, start, end time.Time, step time.Duration) (parser.Expr, error) {
detectHistogramStatsDecoding(expr)
- if err := parser.Walk(&durationVisitor{step: step}, expr, nil); err != nil {
+ if err := parser.Walk(&durationVisitor{step: step, queryRange: end.Sub(start)}, expr, nil); err != nil {
return nil, err
}
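
The new mergeSeriesWithSameLabelset above changes what used to be a hard failure: series that collapse onto the same labelset after __name__ removal are now merged when their samples do not collide in time, and only an actual timestamp conflict still raises the "same labelset" error. A minimal, self-contained sketch of the same idea on plain sample slices (the types and helper names here are illustrative, not Prometheus APIs):

```go
package main

import (
	"fmt"
	"sort"
)

// point is a simplified (timestamp, value) sample.
type point struct {
	t int64
	v float64
}

// mergeSameLabelset concatenates samples from series that share a labelset,
// sorts them by timestamp, and reports a conflict if two samples land on the
// same timestamp, mirroring the merge-or-error behavior added above.
func mergeSameLabelset(series ...[]point) ([]point, error) {
	var merged []point
	for _, s := range series {
		merged = append(merged, s...)
	}
	sort.Slice(merged, func(i, j int) bool { return merged[i].t < merged[j].t })
	for i := 1; i < len(merged); i++ {
		if merged[i].t == merged[i-1].t {
			return nil, fmt.Errorf("vector cannot contain metrics with the same labelset (t=%d)", merged[i].t)
		}
	}
	return merged, nil
}

func main() {
	// e.g. -http_requests{job="api"} and -http_errors{job="api"} after name
	// removal: non-overlapping timestamps merge into one series.
	a := []point{{t: 0, v: -2}}
	b := []point{{t: 1200000, v: -4}}
	fmt.Println(mergeSameLabelset(a, b)) // [{0 -2} {1200000 -4}] <nil>
}
```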
diff --git a/promql/engine_internal_test.go b/promql/engine_internal_test.go
index 4c5d532cbc..f040f53e61 100644
--- a/promql/engine_internal_test.go
+++ b/promql/engine_internal_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/engine_test.go b/promql/engine_test.go
index 80bb75c945..7b7a67a54b 100644
--- a/promql/engine_test.go
+++ b/promql/engine_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -3946,6 +3946,41 @@ eval instant at 1m histogram_fraction(-Inf, 0.7071067811865475, histogram_nan)
{case="100% NaNs"} 0.0
{case="20% NaNs"} 0.4
+# Test unary negation with non-overlapping series that have different metric names.
+# After negation, the __name__ label is dropped, so series with different names
+# but same other labels should merge if they don't overlap in time.
+clear
+load 20m
+ http_requests{job="api"} 2 _
+ http_errors{job="api"} _ 4
+
+eval instant at 0 -{job="api"}
+ {job="api"} -2
+
+eval instant at 20m -{job="api"}
+ {job="api"} -4
+
+eval range from 0 to 20m step 20m -{job="api"}
+ {job="api"} -2 -4
+
+# Test unary negation failure with overlapping timestamps (same labelset at same time).
+clear
+load 1m
+ http_requests{job="api"} 1
+ http_errors{job="api"} 2
+
+eval_fail instant at 0 -{job="api"}
+
+# Test unary negation with "or" operator combining metrics with removed names.
+clear
+load 10m
+ metric_a 1 _
+ metric_b 3 4
+
+# Use "-" unary operator as a simple way to remove the metric name.
+eval range from 0 to 20m step 10m -metric_a or -metric_b
+ {} -1 -4
+
`, engine)
}
diff --git a/promql/functions.go b/promql/functions.go
index 3d85719895..9c04392232 100644
--- a/promql/functions.go
+++ b/promql/functions.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -200,9 +200,8 @@ func extrapolatedRate(vals Matrix, args parser.Expressions, enh *EvalNodeHelper,
// We need either at least two Histograms and no Floats, or at least two
// Floats and no Histograms to calculate a rate. Otherwise, drop this
// Vector element.
- metricName := samples.Metric.Get(labels.MetricName)
if len(samples.Histograms) > 0 && len(samples.Floats) > 0 {
- return enh.Out, annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange()))
+ return enh.Out, annos.Add(annotations.NewMixedFloatsHistogramsWarning(getMetricName(samples.Metric), args[0].PositionRange()))
}
switch {
@@ -211,7 +210,7 @@ func extrapolatedRate(vals Matrix, args parser.Expressions, enh *EvalNodeHelper,
firstT = samples.Histograms[0].T
lastT = samples.Histograms[numSamplesMinusOne].T
var newAnnos annotations.Annotations
- resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, metricName, args[0].PositionRange())
+ resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, samples.Metric, args[0].PositionRange())
annos.Merge(newAnnos)
if resultHistogram == nil {
// The histograms are not compatible with each other.
@@ -305,7 +304,7 @@ func extrapolatedRate(vals Matrix, args parser.Expressions, enh *EvalNodeHelper,
// points[0] to be a histogram. It returns nil if any other Point in points is
// not a histogram, and a warning wrapped in an annotation in that case.
// Otherwise, it returns the calculated histogram and an empty annotation.
-func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) {
+func histogramRate(points []HPoint, isCounter bool, labels labels.Labels, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) {
var (
prev = points[0].H
usingCustomBuckets = prev.UsesCustomBuckets()
@@ -314,14 +313,14 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
)
if last == nil {
- return nil, annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos))
+ return nil, annos.Add(annotations.NewMixedFloatsHistogramsWarning(getMetricName(labels), pos))
}
// We check for gauge type histograms in the loop below, but the loop
// below does not run on the first and last point, so check the first
// and last point now.
if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) {
- annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos))
+ annos.Add(annotations.NewNativeHistogramNotCounterWarning(getMetricName(labels), pos))
}
// Null out the 1st sample if there is a counter reset between the 1st
@@ -338,7 +337,7 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
}
if last.UsesCustomBuckets() != usingCustomBuckets {
- return nil, annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
+ return nil, annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(getMetricName(labels), pos))
}
// First iteration to find out two things:
@@ -348,19 +347,19 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
for _, currPoint := range points[1 : len(points)-1] {
curr := currPoint.H
if curr == nil {
- return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos))
+ return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(getMetricName(labels), pos))
}
if !isCounter {
continue
}
if curr.CounterResetHint == histogram.GaugeType {
- annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos))
+ annos.Add(annotations.NewNativeHistogramNotCounterWarning(getMetricName(labels), pos))
}
if curr.Schema < minSchema {
minSchema = curr.Schema
}
if curr.UsesCustomBuckets() != usingCustomBuckets {
- return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
+ return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(getMetricName(labels), pos))
}
}
@@ -371,7 +370,7 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
_, _, nhcbBoundsReconciled, err := h.Sub(prev)
if err != nil {
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
- return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
+ return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(getMetricName(labels), pos))
}
}
if nhcbBoundsReconciled {
@@ -387,7 +386,7 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
_, _, nhcbBoundsReconciled, err := h.Add(prev)
if err != nil {
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
- return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos))
+ return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(getMetricName(labels), pos))
}
}
if nhcbBoundsReconciled {
@@ -397,9 +396,10 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
prev = curr
}
} else if points[0].H.CounterResetHint != histogram.GaugeType || points[len(points)-1].H.CounterResetHint != histogram.GaugeType {
- annos.Add(annotations.NewNativeHistogramNotGaugeWarning(metricName, pos))
+ annos.Add(annotations.NewNativeHistogramNotGaugeWarning(getMetricName(labels), pos))
}
+ h.CounterResetHint = histogram.GaugeType
return h.Compact(0), annos
}
@@ -430,10 +430,9 @@ func funcIdelta(_ []Vector, matrixVals Matrix, args parser.Expressions, enh *Eva
func instantValue(vals Matrix, args parser.Expressions, out Vector, isRate bool) (Vector, annotations.Annotations) {
var (
- samples = vals[0]
- metricName = samples.Metric.Get(labels.MetricName)
- ss = make([]Sample, 0, 2)
- annos annotations.Annotations
+ samples = vals[0]
+ ss = make([]Sample, 0, 2)
+ annos annotations.Annotations
)
// No sense in trying to compute a rate without at least two points. Drop
@@ -499,11 +498,11 @@ func instantValue(vals Matrix, args parser.Expressions, out Vector, isRate bool)
resultSample.H = ss[1].H.Copy()
// irate should only be applied to counters.
if isRate && (ss[1].H.CounterResetHint == histogram.GaugeType || ss[0].H.CounterResetHint == histogram.GaugeType) {
- annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, args.PositionRange()))
+ annos.Add(annotations.NewNativeHistogramNotCounterWarning(getMetricName(samples.Metric), args.PositionRange()))
}
// idelta should only be applied to gauges.
if !isRate && (ss[1].H.CounterResetHint != histogram.GaugeType || ss[0].H.CounterResetHint != histogram.GaugeType) {
- annos.Add(annotations.NewNativeHistogramNotGaugeWarning(metricName, args.PositionRange()))
+ annos.Add(annotations.NewNativeHistogramNotGaugeWarning(getMetricName(samples.Metric), args.PositionRange()))
}
if !isRate || !ss[1].H.DetectReset(ss[0].H) {
// This subtraction may deliberately include conflicting
@@ -512,7 +511,7 @@ func instantValue(vals Matrix, args parser.Expressions, out Vector, isRate bool)
// conflicting counter resets is ignored here.
_, _, nhcbBoundsReconciled, err := resultSample.H.Sub(ss[0].H)
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
- return out, annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args.PositionRange()))
+ return out, annos.Add(annotations.NewMixedExponentialCustomHistogramsWarning(getMetricName(samples.Metric), args.PositionRange()))
}
if nhcbBoundsReconciled {
annos.Add(annotations.NewMismatchedCustomBucketsHistogramsInfo(args.PositionRange(), annotations.HistogramSub))
@@ -522,7 +521,7 @@ func instantValue(vals Matrix, args parser.Expressions, out Vector, isRate bool)
resultSample.H.Compact(0)
default:
// Mix of a float and a histogram.
- return out, annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args.PositionRange()))
+ return out, annos.Add(annotations.NewMixedFloatsHistogramsWarning(getMetricName(samples.Metric), args.PositionRange()))
}
if isRate {
@@ -564,7 +563,6 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 {
// https://en.wikipedia.org/wiki/Exponential_smoothing .
func funcDoubleExponentialSmoothing(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
samples := matrixVal[0]
- metricName := samples.Metric.Get(labels.MetricName)
// The smoothing factor argument.
sf := vectorVals[0][0].F
@@ -585,7 +583,7 @@ func funcDoubleExponentialSmoothing(vectorVals []Vector, matrixVal Matrix, args
if l < 2 {
// Annotate mix of float and histogram.
if l == 1 && len(samples.Histograms) > 0 {
- return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+ return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(samples.Metric), args[0].PositionRange()))
}
return enh.Out, nil
}
@@ -608,7 +606,7 @@ func funcDoubleExponentialSmoothing(vectorVals []Vector, matrixVal Matrix, args
s0, s1 = s1, x+y
}
if len(samples.Histograms) > 0 {
- return append(enh.Out, Sample{F: s1}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+ return append(enh.Out, Sample{F: s1}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(samples.Metric), args[0].PositionRange()))
}
return append(enh.Out, Sample{F: s1}), nil
}
@@ -794,8 +792,7 @@ func aggrHistOverTime(matrixVal Matrix, enh *EvalNodeHelper, aggrFn func(Series)
func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
firstSeries := matrixVal[0]
if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 {
- metricName := firstSeries.Metric.Get(labels.MetricName)
- return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange()))
+ return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(getMetricName(firstSeries.Metric), args[0].PositionRange()))
}
// For the average calculation of histograms, we use incremental mean
// calculation without the help of Kahan summation (but this should
@@ -870,9 +867,8 @@ func funcAvgOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
return mean, nil
})
if err != nil {
- metricName := firstSeries.Metric.Get(labels.MetricName)
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
- return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange()))
+ return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(getMetricName(firstSeries.Metric), args[0].PositionRange()))
}
}
return vec, annos
@@ -979,8 +975,7 @@ func funcMadOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
return enh.Out, nil
}
if len(samples.Histograms) > 0 {
- metricName := samples.Metric.Get(labels.MetricName)
- annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+ annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(samples.Metric), args[0].PositionRange()))
}
return aggrOverTime(matrixVal, enh, func(s Series) float64 {
values := make(vectorByValueHeap, 0, len(s.Floats))
@@ -1058,8 +1053,7 @@ func compareOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHel
return enh.Out, nil
}
if len(samples.Histograms) > 0 {
- metricName := samples.Metric.Get(labels.MetricName)
- annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+ annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(samples.Metric), args[0].PositionRange()))
}
return aggrOverTime(matrixVal, enh, func(s Series) float64 {
maxVal := s.Floats[0].F
@@ -1095,8 +1089,7 @@ func funcMinOverTime(_ []Vector, matrixVals Matrix, args parser.Expressions, enh
func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
firstSeries := matrixVal[0]
if len(firstSeries.Floats) > 0 && len(firstSeries.Histograms) > 0 {
- metricName := firstSeries.Metric.Get(labels.MetricName)
- return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange()))
+ return enh.Out, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(getMetricName(firstSeries.Metric), args[0].PositionRange()))
}
if len(firstSeries.Floats) == 0 {
// The passed values only contain histograms.
@@ -1137,9 +1130,8 @@ func funcSumOverTime(_ []Vector, matrixVal Matrix, args parser.Expressions, enh
return sum, nil
})
if err != nil {
- metricName := firstSeries.Metric.Get(labels.MetricName)
if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) {
- return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, args[0].PositionRange()))
+ return enh.Out, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(getMetricName(firstSeries.Metric), args[0].PositionRange()))
}
}
return vec, annos
@@ -1169,8 +1161,7 @@ func funcQuantileOverTime(vectorVals []Vector, matrixVal Matrix, args parser.Exp
annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange()))
}
if len(el.Histograms) > 0 {
- metricName := el.Metric.Get(labels.MetricName)
- annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+ annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(el.Metric), args[0].PositionRange()))
}
values := make(vectorByValueHeap, 0, len(el.Floats))
for _, f := range el.Floats {
@@ -1186,8 +1177,7 @@ func varianceOverTime(matrixVal Matrix, args parser.Expressions, enh *EvalNodeHe
return enh.Out, nil
}
if len(samples.Histograms) > 0 {
- metricName := samples.Metric.Get(labels.MetricName)
- annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+ annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(samples.Metric), args[0].PositionRange()))
}
return aggrOverTime(matrixVal, enh, func(s Series) float64 {
var count float64
@@ -1477,14 +1467,13 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f
// === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcDeriv(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
samples := matrixVal[0]
- metricName := samples.Metric.Get(labels.MetricName)
// No sense in trying to compute a derivative without at least two float points.
// Drop this Vector element.
if len(samples.Floats) < 2 {
// Annotate mix of float and histogram.
if len(samples.Floats) == 1 && len(samples.Histograms) > 0 {
- return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+ return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(samples.Metric), args[0].PositionRange()))
}
return enh.Out, nil
}
@@ -1494,7 +1483,7 @@ func funcDeriv(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalN
// https://github.com/prometheus/prometheus/issues/2674
slope, _ := linearRegression(samples.Floats, samples.Floats[0].T)
if len(samples.Histograms) > 0 {
- return append(enh.Out, Sample{F: slope}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+ return append(enh.Out, Sample{F: slope}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(samples.Metric), args[0].PositionRange()))
}
return append(enh.Out, Sample{F: slope}), nil
}
@@ -1503,21 +1492,20 @@ func funcDeriv(_ []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalN
func funcPredictLinear(vectorVals []Vector, matrixVal Matrix, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
samples := matrixVal[0]
duration := vectorVals[0][0].F
- metricName := samples.Metric.Get(labels.MetricName)
// No sense in trying to predict anything without at least two float points.
// Drop this Vector element.
if len(samples.Floats) < 2 {
// Annotate mix of float and histogram.
if len(samples.Floats) == 1 && len(samples.Histograms) > 0 {
- return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+ return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(samples.Metric), args[0].PositionRange()))
}
return enh.Out, nil
}
slope, intercept := linearRegression(samples.Floats, enh.Ts)
if len(samples.Histograms) > 0 {
- return append(enh.Out, Sample{F: slope*duration + intercept}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
+ return append(enh.Out, Sample{F: slope*duration + intercept}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(getMetricName(samples.Metric), args[0].PositionRange()))
}
return append(enh.Out, Sample{F: slope*duration + intercept}), nil
}
@@ -1623,7 +1611,7 @@ func funcHistogramFraction(vectorVals []Vector, _ Matrix, args parser.Expression
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropReserved(schema.IsMetadataLabel)
}
- hf, hfAnnos := HistogramFraction(lower, upper, sample.H, sample.Metric.Get(model.MetricNameLabel), args[0].PositionRange())
+ hf, hfAnnos := HistogramFraction(lower, upper, sample.H, getMetricName(sample.Metric), args[0].PositionRange())
annos.Merge(hfAnnos)
enh.Out = append(enh.Out, Sample{
Metric: sample.Metric,
@@ -1671,7 +1659,7 @@ func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expression
if !enh.enableDelayedNameRemoval {
sample.Metric = sample.Metric.DropReserved(schema.IsMetadataLabel)
}
- hq, hqAnnos := HistogramQuantile(q, sample.H, sample.Metric.Get(model.MetricNameLabel), args[0].PositionRange())
+ hq, hqAnnos := HistogramQuantile(q, sample.H, getMetricName(sample.Metric), args[0].PositionRange())
annos.Merge(hqAnnos)
enh.Out = append(enh.Out, Sample{
Metric: sample.Metric,
@@ -1686,7 +1674,7 @@ func funcHistogramQuantile(vectorVals []Vector, _ Matrix, args parser.Expression
res, forcedMonotonicity, _ := BucketQuantile(q, mb.buckets)
if forcedMonotonicity {
if enh.enableDelayedNameRemoval {
- annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(mb.metric.Get(labels.MetricName), args[1].PositionRange()))
+ annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(getMetricName(mb.metric), args[1].PositionRange()))
} else {
annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo("", args[1].PositionRange()))
}
@@ -1858,11 +1846,8 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio
}
}
}
- if matrix.ContainsSameLabelset() {
- ev.errorf("vector cannot contain metrics with the same labelset")
- }
- return matrix, ws
+ return ev.mergeSeriesWithSameLabelset(matrix), ws
}
// === Vector(s Scalar) (Vector, Annotations) ===
@@ -1912,11 +1897,8 @@ func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions)
matrix[i].DropName = el.DropName
}
}
- if matrix.ContainsSameLabelset() {
- ev.errorf("vector cannot contain metrics with the same labelset")
- }
- return matrix, ws
+ return ev.mergeSeriesWithSameLabelset(matrix), ws
}
// Common code for date related functions.
@@ -2229,3 +2211,7 @@ func stringSliceFromArgs(args parser.Expressions) []string {
}
return tmp
}
+
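+// getMetricName returns the value of the __name__ label from the given label set,
+// or the empty string if it is not set.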
+func getMetricName(metric labels.Labels) string {
+ return metric.Get(model.MetricNameLabel)
+}
diff --git a/promql/functions_internal_test.go b/promql/functions_internal_test.go
index 658eb7550d..bb52e4976b 100644
--- a/promql/functions_internal_test.go
+++ b/promql/functions_internal_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -18,9 +18,27 @@ import (
"math"
"testing"
+ "github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/promql/parser/posrange"
)
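+// TestHistogramRateCounterResetHint verifies that histogramRate sets the
+// GaugeType counter reset hint on its result regardless of the isCounter flag.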
+func TestHistogramRateCounterResetHint(t *testing.T) {
+ points := []HPoint{
+ {T: 0, H: &histogram.FloatHistogram{CounterResetHint: histogram.CounterReset, Count: 5, Sum: 5}},
+ {T: 1, H: &histogram.FloatHistogram{CounterResetHint: histogram.UnknownCounterReset, Count: 10, Sum: 10}},
+ }
+ lbls := labels.FromMap(map[string]string{model.MetricNameLabel: "foo"})
+ fh, _ := histogramRate(points, false, lbls, posrange.PositionRange{})
+ require.Equal(t, histogram.GaugeType, fh.CounterResetHint)
+
+ fh, _ = histogramRate(points, true, lbls, posrange.PositionRange{})
+ require.Equal(t, histogram.GaugeType, fh.CounterResetHint)
+}
+
func TestKahanSumInc(t *testing.T) {
testCases := map[string]struct {
first float64
diff --git a/promql/functions_test.go b/promql/functions_test.go
index 8dd91e7537..2566843092 100644
--- a/promql/functions_test.go
+++ b/promql/functions_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/fuzz.go b/promql/fuzz.go
index a71a63f8eb..f9cc4794a6 100644
--- a/promql/fuzz.go
+++ b/promql/fuzz.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/fuzz_test.go b/promql/fuzz_test.go
index 4a26798ded..a24da48e63 100644
--- a/promql/fuzz_test.go
+++ b/promql/fuzz_test.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/histogram_stats_iterator.go b/promql/histogram_stats_iterator.go
index e58cc7d848..87cc5acfbd 100644
--- a/promql/histogram_stats_iterator.go
+++ b/promql/histogram_stats_iterator.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/histogram_stats_iterator_test.go b/promql/histogram_stats_iterator_test.go
index 80bfee519d..cfea8a568e 100644
--- a/promql/histogram_stats_iterator_test.go
+++ b/promql/histogram_stats_iterator_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/info.go b/promql/info.go
index d5ffda6af2..ab4250104d 100644
--- a/promql/info.go
+++ b/promql/info.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/parser/ast.go b/promql/parser/ast.go
index 67ecb190fe..130f9aefb7 100644
--- a/promql/parser/ast.go
+++ b/promql/parser/ast.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -116,8 +116,8 @@ type DurationExpr struct {
LHS, RHS Expr // The operands on the respective sides of the operator.
Wrapped bool // Set when the duration is wrapped in parentheses.
- StartPos posrange.Pos // For unary operations and step(), the start position of the operator.
- EndPos posrange.Pos // For step(), the end position of the operator.
+ StartPos posrange.Pos // For unary operations, step(), and range(), the start position of the operator.
+ EndPos posrange.Pos // For step() and range(), the end position of the operator.
}
// Call represents a function call.
@@ -474,7 +474,7 @@ func (e *BinaryExpr) PositionRange() posrange.PositionRange {
}
func (e *DurationExpr) PositionRange() posrange.PositionRange {
- if e.Op == STEP {
+ if e.Op == STEP || e.Op == RANGE {
return posrange.PositionRange{
Start: e.StartPos,
End: e.EndPos,
diff --git a/promql/parser/features.go b/promql/parser/features.go
new file mode 100644
index 0000000000..ec64678237
--- /dev/null
+++ b/promql/parser/features.go
@@ -0,0 +1,57 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import "github.com/prometheus/prometheus/util/features"
+
+// RegisterFeatures registers all PromQL features with the feature registry.
+// This includes language keywords, operators (arithmetic and comparison/set),
+// aggregators (standard and experimental), functions, and experimental parser features.
+func RegisterFeatures(r features.Collector) {
+ // Register core PromQL language keywords.
+ for keyword, itemType := range key {
+ if itemType.IsKeyword() {
+ // Handle experimental keywords separately.
+ switch keyword {
+ case "anchored", "smoothed":
+ r.Set(features.PromQL, keyword, EnableExtendedRangeSelectors)
+ default:
+ r.Enable(features.PromQL, keyword)
+ }
+ }
+ }
+
+ // Register operators.
+ for o := ItemType(operatorsStart + 1); o < operatorsEnd; o++ {
+ if o.IsOperator() {
+ r.Set(features.PromQLOperators, o.String(), true)
+ }
+ }
+
+ // Register aggregators.
+ for a := ItemType(aggregatorsStart + 1); a < aggregatorsEnd; a++ {
+ if a.IsAggregator() {
+ experimental := a.IsExperimentalAggregator() && !EnableExperimentalFunctions
+ r.Set(features.PromQLOperators, a.String(), !experimental)
+ }
+ }
+
+ // Register functions.
+ for f, fc := range Functions {
+ r.Set(features.PromQLFunctions, f, !fc.Experimental || EnableExperimentalFunctions)
+ }
+
+ // Register experimental parser features.
+ r.Set(features.PromQL, "duration_expr", ExperimentalDurationExpr)
+}
diff --git a/promql/parser/functions.go b/promql/parser/functions.go
index a471cb3a6d..2f2b1c68e4 100644
--- a/promql/parser/functions.go
+++ b/promql/parser/functions.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y
index d9bbb10b28..47776f53d0 100644
--- a/promql/parser/generated_parser.y
+++ b/promql/parser/generated_parser.y
@@ -153,6 +153,7 @@ WITHOUT
START
END
STEP
+RANGE
%token preprocessorEnd
// Counter reset hints.
@@ -465,7 +466,7 @@ offset_expr: expr OFFSET offset_duration_expr
$$ = $1
}
| expr OFFSET error
- { yylex.(*parser).unexpected("offset", "number, duration, or step()"); $$ = $1 }
+ { yylex.(*parser).unexpected("offset", "number, duration, step(), or range()"); $$ = $1 }
;
/*
@@ -575,11 +576,11 @@ subquery_expr : expr LEFT_BRACKET positive_duration_expr COLON positive_durati
| expr LEFT_BRACKET positive_duration_expr COLON positive_duration_expr error
{ yylex.(*parser).unexpected("subquery selector", "\"]\""); $$ = $1 }
| expr LEFT_BRACKET positive_duration_expr COLON error
- { yylex.(*parser).unexpected("subquery selector", "number, duration, or step() or \"]\""); $$ = $1 }
+ { yylex.(*parser).unexpected("subquery selector", "number, duration, step(), range(), or \"]\""); $$ = $1 }
| expr LEFT_BRACKET positive_duration_expr error
{ yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\""); $$ = $1 }
| expr LEFT_BRACKET error
- { yylex.(*parser).unexpected("subquery or range selector", "number, duration, or step()"); $$ = $1 }
+ { yylex.(*parser).unexpected("subquery or range selector", "number, duration, step(), or range()"); $$ = $1 }
;
/*
@@ -696,7 +697,7 @@ metric : metric_identifier label_set
;
-metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT | START | END | LIMITK | LIMIT_RATIO | STEP | ANCHORED | SMOOTHED;
+metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT | START | END | LIMITK | LIMIT_RATIO | STEP | RANGE | ANCHORED | SMOOTHED;
label_set : LEFT_BRACE label_set_list RIGHT_BRACE
{ $$ = labels.New($2...) }
@@ -953,7 +954,7 @@ counter_reset_hint : UNKNOWN_COUNTER_RESET | COUNTER_RESET | NOT_COUNTER_RESET |
aggregate_op : AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK | LIMITK | LIMIT_RATIO;
// Inside of grouping options label names can be recognized as keywords by the lexer. This is a list of keywords that could also be a label name.
-maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2 | LIMITK | LIMIT_RATIO | STEP | ANCHORED | SMOOTHED;
+maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2 | LIMITK | LIMIT_RATIO | STEP | RANGE | ANCHORED | SMOOTHED;
unary_op : ADD | SUB;
@@ -1088,6 +1089,14 @@ offset_duration_expr : number_duration_literal
EndPos: $3.PositionRange().End,
}
}
+ | RANGE LEFT_PAREN RIGHT_PAREN
+ {
+ $$ = &DurationExpr{
+ Op: RANGE,
+ StartPos: $1.PositionRange().Start,
+ EndPos: $3.PositionRange().End,
+ }
+ }
| unary_op STEP LEFT_PAREN RIGHT_PAREN
{
$$ = &DurationExpr{
@@ -1100,6 +1109,18 @@ offset_duration_expr : number_duration_literal
StartPos: $1.Pos,
}
}
+ | unary_op RANGE LEFT_PAREN RIGHT_PAREN
+ {
+ $$ = &DurationExpr{
+ Op: $1.Typ,
+ RHS: &DurationExpr{
+ Op: RANGE,
+ StartPos: $2.PositionRange().Start,
+ EndPos: $4.PositionRange().End,
+ },
+ StartPos: $1.Pos,
+ }
+ }
| min_max LEFT_PAREN duration_expr COMMA duration_expr RIGHT_PAREN
{
$$ = &DurationExpr{
@@ -1234,6 +1255,14 @@ duration_expr : number_duration_literal
EndPos: $3.PositionRange().End,
}
}
+ | RANGE LEFT_PAREN RIGHT_PAREN
+ {
+ $$ = &DurationExpr{
+ Op: RANGE,
+ StartPos: $1.PositionRange().Start,
+ EndPos: $3.PositionRange().End,
+ }
+ }
| min_max LEFT_PAREN duration_expr COMMA duration_expr RIGHT_PAREN
{
$$ = &DurationExpr{
diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go
index eb4b32129a..f5feec0b55 100644
--- a/promql/parser/generated_parser.y.go
+++ b/promql/parser/generated_parser.y.go
@@ -124,19 +124,20 @@ const preprocessorStart = 57431
const START = 57432
const END = 57433
const STEP = 57434
-const preprocessorEnd = 57435
-const counterResetHintsStart = 57436
-const UNKNOWN_COUNTER_RESET = 57437
-const COUNTER_RESET = 57438
-const NOT_COUNTER_RESET = 57439
-const GAUGE_TYPE = 57440
-const counterResetHintsEnd = 57441
-const startSymbolsStart = 57442
-const START_METRIC = 57443
-const START_SERIES_DESCRIPTION = 57444
-const START_EXPRESSION = 57445
-const START_METRIC_SELECTOR = 57446
-const startSymbolsEnd = 57447
+const RANGE = 57435
+const preprocessorEnd = 57436
+const counterResetHintsStart = 57437
+const UNKNOWN_COUNTER_RESET = 57438
+const COUNTER_RESET = 57439
+const NOT_COUNTER_RESET = 57440
+const GAUGE_TYPE = 57441
+const counterResetHintsEnd = 57442
+const startSymbolsStart = 57443
+const START_METRIC = 57444
+const START_SERIES_DESCRIPTION = 57445
+const START_EXPRESSION = 57446
+const START_METRIC_SELECTOR = 57447
+const startSymbolsEnd = 57448
var yyToknames = [...]string{
"$end",
@@ -231,6 +232,7 @@ var yyToknames = [...]string{
"START",
"END",
"STEP",
+ "RANGE",
"preprocessorEnd",
"counterResetHintsStart",
"UNKNOWN_COUNTER_RESET",
@@ -256,344 +258,344 @@ var yyExca = [...]int16{
-1, 1,
1, -1,
-2, 0,
- -1, 40,
- 1, 149,
- 10, 149,
- 24, 149,
+ -1, 41,
+ 1, 150,
+ 10, 150,
+ 24, 150,
-2, 0,
- -1, 70,
- 2, 192,
- 15, 192,
- 79, 192,
- 87, 192,
- -2, 107,
- -1, 71,
+ -1, 72,
2, 193,
15, 193,
79, 193,
87, 193,
- -2, 108,
- -1, 72,
+ -2, 107,
+ -1, 73,
2, 194,
15, 194,
79, 194,
87, 194,
- -2, 110,
- -1, 73,
+ -2, 108,
+ -1, 74,
2, 195,
15, 195,
79, 195,
87, 195,
- -2, 111,
- -1, 74,
+ -2, 110,
+ -1, 75,
2, 196,
15, 196,
79, 196,
87, 196,
- -2, 112,
- -1, 75,
+ -2, 111,
+ -1, 76,
2, 197,
15, 197,
79, 197,
87, 197,
- -2, 117,
- -1, 76,
+ -2, 112,
+ -1, 77,
2, 198,
15, 198,
79, 198,
87, 198,
- -2, 119,
- -1, 77,
+ -2, 117,
+ -1, 78,
2, 199,
15, 199,
79, 199,
87, 199,
- -2, 121,
- -1, 78,
+ -2, 119,
+ -1, 79,
2, 200,
15, 200,
79, 200,
87, 200,
- -2, 122,
- -1, 79,
+ -2, 121,
+ -1, 80,
2, 201,
15, 201,
79, 201,
87, 201,
- -2, 123,
- -1, 80,
+ -2, 122,
+ -1, 81,
2, 202,
15, 202,
79, 202,
87, 202,
- -2, 124,
- -1, 81,
+ -2, 123,
+ -1, 82,
2, 203,
15, 203,
79, 203,
87, 203,
- -2, 125,
- -1, 82,
+ -2, 124,
+ -1, 83,
2, 204,
15, 204,
79, 204,
87, 204,
- -2, 129,
- -1, 83,
+ -2, 125,
+ -1, 84,
2, 205,
15, 205,
79, 205,
87, 205,
+ -2, 129,
+ -1, 85,
+ 2, 206,
+ 15, 206,
+ 79, 206,
+ 87, 206,
-2, 130,
- -1, 135,
- 41, 270,
- 42, 270,
- 52, 270,
- 53, 270,
- 57, 270,
+ -1, 137,
+ 41, 274,
+ 42, 274,
+ 52, 274,
+ 53, 274,
+ 57, 274,
-2, 22,
- -1, 245,
- 9, 257,
- 12, 257,
- 13, 257,
- 18, 257,
- 19, 257,
- 25, 257,
- 41, 257,
- 47, 257,
- 48, 257,
- 51, 257,
- 57, 257,
- 62, 257,
- 63, 257,
- 64, 257,
- 65, 257,
- 66, 257,
- 67, 257,
- 68, 257,
- 69, 257,
- 70, 257,
- 71, 257,
- 72, 257,
- 73, 257,
- 74, 257,
- 75, 257,
- 79, 257,
- 83, 257,
- 84, 257,
- 85, 257,
- 87, 257,
- 90, 257,
- 91, 257,
- 92, 257,
+ -1, 251,
+ 9, 259,
+ 12, 259,
+ 13, 259,
+ 18, 259,
+ 19, 259,
+ 25, 259,
+ 41, 259,
+ 47, 259,
+ 48, 259,
+ 51, 259,
+ 57, 259,
+ 62, 259,
+ 63, 259,
+ 64, 259,
+ 65, 259,
+ 66, 259,
+ 67, 259,
+ 68, 259,
+ 69, 259,
+ 70, 259,
+ 71, 259,
+ 72, 259,
+ 73, 259,
+ 74, 259,
+ 75, 259,
+ 79, 259,
+ 83, 259,
+ 84, 259,
+ 85, 259,
+ 87, 259,
+ 90, 259,
+ 91, 259,
+ 92, 259,
+ 93, 259,
-2, 0,
- -1, 246,
- 9, 257,
- 12, 257,
- 13, 257,
- 18, 257,
- 19, 257,
- 25, 257,
- 41, 257,
- 47, 257,
- 48, 257,
- 51, 257,
- 57, 257,
- 62, 257,
- 63, 257,
- 64, 257,
- 65, 257,
- 66, 257,
- 67, 257,
- 68, 257,
- 69, 257,
- 70, 257,
- 71, 257,
- 72, 257,
- 73, 257,
- 74, 257,
- 75, 257,
- 79, 257,
- 83, 257,
- 84, 257,
- 85, 257,
- 87, 257,
- 90, 257,
- 91, 257,
- 92, 257,
+ -1, 252,
+ 9, 259,
+ 12, 259,
+ 13, 259,
+ 18, 259,
+ 19, 259,
+ 25, 259,
+ 41, 259,
+ 47, 259,
+ 48, 259,
+ 51, 259,
+ 57, 259,
+ 62, 259,
+ 63, 259,
+ 64, 259,
+ 65, 259,
+ 66, 259,
+ 67, 259,
+ 68, 259,
+ 69, 259,
+ 70, 259,
+ 71, 259,
+ 72, 259,
+ 73, 259,
+ 74, 259,
+ 75, 259,
+ 79, 259,
+ 83, 259,
+ 84, 259,
+ 85, 259,
+ 87, 259,
+ 90, 259,
+ 91, 259,
+ 92, 259,
+ 93, 259,
-2, 0,
}
const yyPrivate = 57344
-const yyLast = 1071
+const yyLast = 1050
var yyAct = [...]int16{
- 57, 182, 401, 399, 185, 406, 278, 237, 193, 332,
- 93, 47, 346, 141, 68, 221, 91, 413, 414, 415,
- 416, 127, 128, 64, 156, 186, 66, 126, 347, 326,
- 129, 243, 122, 125, 130, 244, 245, 246, 119, 122,
- 118, 124, 123, 121, 327, 151, 124, 118, 214, 123,
- 121, 396, 373, 124, 120, 364, 395, 366, 323, 385,
- 328, 354, 352, 133, 216, 135, 6, 98, 100, 101,
- 364, 102, 103, 104, 105, 106, 107, 108, 109, 110,
- 111, 324, 112, 113, 117, 99, 42, 131, 315, 112,
- 144, 117, 136, 400, 241, 350, 191, 143, 128, 349,
- 142, 137, 270, 314, 322, 320, 129, 268, 317, 114,
- 116, 115, 192, 95, 233, 178, 114, 116, 115, 195,
- 199, 200, 201, 202, 203, 204, 174, 321, 319, 177,
- 196, 196, 196, 196, 196, 196, 196, 232, 175, 217,
- 267, 130, 197, 197, 197, 197, 197, 197, 197, 132,
- 196, 134, 138, 205, 390, 407, 239, 207, 210, 227,
- 206, 223, 197, 229, 428, 2, 3, 4, 5, 360,
- 190, 194, 429, 389, 359, 7, 266, 240, 61, 86,
- 189, 231, 269, 427, 181, 150, 426, 262, 60, 358,
- 264, 119, 122, 196, 425, 209, 271, 272, 266, 197,
- 152, 225, 123, 121, 230, 197, 124, 120, 208, 196,
- 84, 224, 226, 119, 122, 38, 384, 213, 222, 383,
- 223, 197, 10, 382, 123, 121, 85, 235, 124, 120,
- 143, 190, 88, 318, 238, 381, 180, 179, 241, 242,
- 380, 189, 379, 378, 247, 248, 249, 250, 251, 252,
- 253, 254, 255, 256, 257, 258, 259, 260, 261, 348,
- 225, 198, 325, 191, 94, 377, 351, 376, 97, 353,
- 224, 226, 344, 345, 92, 195, 375, 196, 374, 192,
- 196, 39, 228, 355, 61, 55, 196, 95, 1, 197,
- 181, 87, 197, 149, 60, 148, 172, 69, 197, 54,
- 157, 158, 159, 160, 161, 162, 163, 164, 165, 166,
- 167, 168, 169, 170, 171, 417, 84, 362, 65, 53,
- 190, 9, 9, 144, 52, 51, 363, 365, 196, 367,
- 189, 155, 85, 142, 275, 368, 369, 184, 274, 50,
- 197, 140, 180, 179, 190, 49, 95, 48, 372, 119,
- 122, 386, 191, 273, 189, 8, 46, 153, 211, 40,
- 123, 121, 196, 371, 124, 120, 392, 198, 192, 394,
- 370, 388, 94, 45, 197, 154, 191, 402, 403, 404,
- 398, 44, 92, 405, 43, 409, 408, 411, 410, 418,
- 90, 281, 192, 56, 236, 95, 422, 316, 419, 420,
- 196, 291, 361, 421, 393, 119, 122, 297, 329, 423,
- 96, 391, 197, 234, 280, 276, 123, 121, 424, 89,
- 124, 120, 412, 119, 122, 187, 188, 183, 431, 196,
- 279, 119, 122, 58, 123, 121, 293, 294, 124, 120,
- 295, 197, 123, 121, 139, 0, 124, 120, 308, 0,
- 0, 282, 284, 286, 287, 288, 296, 298, 301, 302,
- 303, 304, 305, 309, 310, 0, 281, 283, 285, 289,
- 290, 292, 299, 313, 312, 300, 291, 0, 220, 306,
- 307, 311, 297, 219, 0, 0, 277, 387, 0, 280,
- 147, 0, 190, 61, 0, 146, 218, 0, 0, 265,
- 0, 0, 189, 60, 430, 0, 119, 122, 145, 0,
- 0, 293, 294, 0, 0, 295, 0, 123, 121, 0,
- 0, 124, 120, 308, 191, 84, 282, 284, 286, 287,
- 288, 296, 298, 301, 302, 303, 304, 305, 309, 310,
- 192, 85, 283, 285, 289, 290, 292, 299, 313, 312,
- 300, 180, 179, 0, 306, 307, 311, 61, 0, 118,
- 59, 86, 0, 62, 0, 0, 22, 60, 0, 0,
- 212, 0, 0, 63, 0, 0, 263, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 98, 100, 0, 84,
- 0, 0, 0, 0, 0, 18, 19, 109, 110, 20,
- 0, 112, 113, 117, 99, 85, 0, 0, 0, 0,
- 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
- 80, 81, 82, 83, 0, 0, 0, 13, 114, 116,
- 115, 24, 37, 36, 215, 30, 0, 0, 31, 32,
- 67, 61, 41, 0, 59, 86, 0, 62, 0, 0,
- 22, 60, 0, 119, 122, 0, 0, 63, 0, 0,
- 0, 0, 0, 0, 123, 121, 0, 0, 124, 120,
- 0, 357, 0, 84, 0, 0, 0, 0, 61, 18,
- 19, 0, 0, 20, 181, 0, 0, 0, 60, 85,
- 356, 0, 0, 0, 70, 71, 72, 73, 74, 75,
- 76, 77, 78, 79, 80, 81, 82, 83, 0, 0,
- 84, 13, 0, 0, 0, 24, 37, 36, 0, 30,
- 0, 0, 31, 32, 67, 61, 85, 0, 59, 86,
- 0, 62, 331, 0, 22, 60, 180, 179, 0, 330,
- 0, 63, 0, 334, 335, 333, 340, 342, 339, 341,
- 336, 337, 338, 343, 0, 0, 0, 84, 0, 0,
- 0, 198, 0, 18, 19, 0, 0, 20, 0, 0,
- 0, 0, 0, 85, 0, 0, 0, 0, 70, 71,
- 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
- 82, 83, 17, 86, 0, 13, 0, 0, 22, 24,
- 37, 36, 397, 30, 0, 0, 31, 32, 67, 0,
- 0, 0, 0, 334, 335, 333, 340, 342, 339, 341,
- 336, 337, 338, 343, 0, 0, 0, 18, 19, 0,
- 0, 20, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 11, 12, 14, 15, 16, 21, 23, 25,
- 26, 27, 28, 29, 33, 34, 17, 38, 0, 13,
- 0, 0, 22, 24, 37, 36, 0, 30, 0, 0,
- 31, 32, 35, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 18, 19, 0, 0, 20, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 11, 12, 14, 15,
- 16, 21, 23, 25, 26, 27, 28, 29, 33, 34,
- 118, 0, 0, 13, 0, 0, 0, 24, 37, 36,
- 0, 30, 0, 0, 31, 32, 35, 0, 0, 118,
- 0, 0, 0, 0, 0, 0, 0, 98, 100, 101,
- 0, 102, 103, 104, 105, 106, 107, 108, 109, 110,
- 111, 0, 112, 113, 117, 99, 98, 100, 101, 0,
- 102, 103, 104, 0, 106, 107, 108, 109, 110, 111,
- 173, 112, 113, 117, 99, 118, 0, 61, 0, 114,
- 116, 115, 0, 181, 118, 0, 0, 60, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 114, 116,
- 115, 0, 98, 100, 101, 0, 102, 103, 0, 84,
- 106, 107, 100, 109, 110, 111, 0, 112, 113, 117,
- 99, 0, 109, 110, 0, 85, 112, 0, 117, 99,
- 0, 0, 0, 0, 0, 180, 179, 0, 0, 0,
- 0, 0, 0, 0, 114, 116, 115, 0, 0, 0,
- 0, 0, 0, 114, 116, 115, 0, 0, 0, 0,
- 176,
+ 58, 186, 413, 411, 341, 418, 286, 243, 197, 95,
+ 189, 48, 355, 144, 70, 227, 93, 251, 252, 356,
+ 159, 190, 65, 120, 17, 88, 127, 130, 128, 129,
+ 22, 425, 426, 427, 428, 131, 249, 121, 124, 335,
+ 250, 67, 132, 126, 408, 407, 377, 332, 125, 123,
+ 331, 102, 126, 122, 336, 154, 324, 6, 397, 18,
+ 19, 111, 112, 20, 135, 114, 137, 119, 101, 375,
+ 337, 323, 375, 330, 11, 12, 14, 15, 16, 21,
+ 23, 25, 26, 27, 28, 29, 33, 34, 43, 133,
+ 329, 13, 116, 118, 117, 24, 38, 37, 146, 30,
+ 402, 124, 31, 32, 35, 36, 130, 412, 138, 396,
+ 194, 125, 123, 328, 131, 126, 365, 182, 239, 401,
+ 193, 199, 204, 205, 206, 207, 208, 209, 177, 363,
+ 362, 181, 200, 200, 200, 200, 200, 200, 200, 178,
+ 120, 238, 223, 201, 201, 201, 201, 201, 201, 201,
+ 212, 215, 134, 200, 136, 211, 210, 2, 3, 4,
+ 5, 222, 233, 221, 201, 245, 235, 384, 333, 371,
+ 228, 247, 229, 360, 370, 359, 246, 358, 188, 273,
+ 140, 368, 114, 195, 119, 194, 277, 139, 62, 369,
+ 268, 237, 229, 271, 185, 193, 441, 200, 61, 196,
+ 367, 201, 273, 383, 155, 278, 279, 280, 201, 116,
+ 118, 117, 231, 200, 236, 121, 124, 195, 382, 440,
+ 86, 218, 230, 232, 201, 381, 125, 123, 276, 275,
+ 126, 122, 231, 196, 274, 146, 87, 132, 439, 327,
+ 429, 438, 230, 232, 248, 141, 184, 183, 419, 253,
+ 254, 255, 256, 257, 258, 259, 260, 261, 262, 263,
+ 264, 265, 266, 267, 334, 357, 191, 192, 214, 353,
+ 354, 202, 203, 361, 121, 124, 88, 364, 283, 7,
+ 39, 213, 282, 199, 200, 125, 123, 395, 200, 126,
+ 122, 366, 10, 194, 200, 201, 394, 281, 393, 201,
+ 392, 391, 90, 193, 390, 201, 160, 161, 162, 163,
+ 164, 165, 166, 167, 168, 169, 170, 171, 172, 173,
+ 174, 389, 194, 388, 120, 195, 373, 387, 386, 385,
+ 153, 99, 193, 62, 442, 374, 376, 200, 378, 185,
+ 56, 196, 40, 61, 379, 380, 89, 152, 201, 151,
+ 1, 100, 102, 103, 195, 104, 105, 175, 71, 108,
+ 109, 398, 111, 112, 113, 86, 114, 115, 119, 101,
+ 196, 66, 200, 55, 9, 9, 54, 404, 8, 53,
+ 406, 87, 41, 201, 52, 158, 410, 51, 414, 415,
+ 416, 184, 183, 116, 118, 117, 421, 420, 423, 422,
+ 417, 430, 50, 49, 289, 47, 156, 216, 147, 46,
+ 431, 432, 200, 372, 299, 433, 202, 203, 145, 96,
+ 305, 435, 157, 201, 403, 437, 326, 288, 147, 94,
+ 436, 97, 45, 44, 57, 242, 434, 234, 145, 338,
+ 443, 200, 97, 98, 121, 124, 143, 240, 284, 301,
+ 302, 97, 201, 303, 91, 125, 123, 424, 187, 126,
+ 122, 316, 287, 59, 290, 292, 294, 295, 296, 304,
+ 306, 309, 310, 311, 312, 313, 317, 318, 142, 0,
+ 291, 293, 297, 298, 300, 307, 322, 321, 308, 289,
+ 96, 0, 314, 315, 319, 320, 226, 150, 405, 299,
+ 94, 225, 149, 0, 0, 305, 0, 0, 92, 285,
+ 0, 0, 288, 97, 224, 148, 62, 121, 124, 0,
+ 0, 0, 272, 0, 0, 0, 61, 0, 125, 123,
+ 0, 0, 126, 122, 301, 302, 0, 0, 303, 0,
+ 0, 0, 0, 0, 0, 0, 316, 0, 86, 290,
+ 292, 294, 295, 296, 304, 306, 309, 310, 311, 312,
+ 313, 317, 318, 0, 87, 291, 293, 297, 298, 300,
+ 307, 322, 321, 308, 184, 183, 0, 314, 315, 319,
+ 320, 62, 0, 120, 60, 88, 0, 63, 0, 0,
+ 22, 61, 0, 0, 217, 0, 0, 64, 0, 269,
+ 270, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 100, 102, 0, 86, 0, 0, 0, 0, 0, 18,
+ 19, 111, 112, 20, 0, 114, 115, 119, 101, 87,
+ 0, 0, 0, 0, 72, 73, 74, 75, 76, 77,
+ 78, 79, 80, 81, 82, 83, 84, 85, 0, 0,
+ 400, 13, 116, 118, 117, 24, 38, 37, 399, 30,
+ 0, 0, 31, 32, 68, 69, 62, 42, 0, 60,
+ 88, 0, 63, 0, 0, 22, 61, 121, 124, 0,
+ 0, 0, 64, 0, 121, 124, 0, 0, 125, 123,
+ 0, 0, 126, 122, 0, 125, 123, 0, 86, 126,
+ 122, 0, 0, 0, 18, 19, 0, 0, 20, 0,
+ 0, 0, 0, 0, 87, 0, 0, 0, 0, 72,
+ 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
+ 83, 84, 85, 0, 0, 0, 13, 0, 0, 220,
+ 24, 38, 37, 0, 30, 0, 325, 31, 32, 68,
+ 69, 62, 0, 0, 60, 88, 0, 63, 121, 124,
+ 22, 61, 0, 0, 0, 0, 0, 64, 0, 125,
+ 123, 0, 0, 126, 122, 0, 0, 0, 0, 0,
+ 121, 124, 0, 86, 0, 0, 0, 0, 0, 18,
+ 19, 125, 123, 20, 0, 126, 122, 0, 0, 87,
+ 0, 0, 0, 0, 72, 73, 74, 75, 76, 77,
+ 78, 79, 80, 81, 82, 83, 84, 85, 17, 39,
+ 0, 13, 0, 0, 22, 24, 38, 37, 0, 30,
+ 340, 0, 31, 32, 68, 69, 0, 339, 0, 0,
+ 0, 343, 344, 342, 349, 351, 348, 350, 345, 346,
+ 347, 352, 241, 18, 19, 0, 194, 20, 0, 244,
+ 0, 0, 0, 247, 0, 0, 193, 0, 11, 12,
+ 14, 15, 16, 21, 23, 25, 26, 27, 28, 29,
+ 33, 34, 0, 0, 120, 13, 0, 0, 195, 24,
+ 38, 37, 219, 30, 0, 0, 31, 32, 35, 36,
+ 0, 0, 0, 120, 196, 0, 0, 0, 0, 0,
+ 0, 100, 102, 103, 0, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 0, 114, 115, 119, 101,
+ 100, 102, 103, 0, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112, 113, 198, 114, 115, 119, 101, 120,
+ 0, 62, 0, 116, 118, 117, 0, 185, 176, 0,
+ 0, 61, 0, 0, 0, 62, 0, 0, 0, 0,
+ 0, 185, 116, 118, 117, 61, 100, 102, 103, 0,
+ 104, 105, 106, 86, 108, 109, 110, 111, 112, 113,
+ 0, 114, 115, 119, 101, 0, 0, 86, 0, 87,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 184,
+ 183, 0, 0, 87, 0, 0, 0, 0, 116, 118,
+ 117, 0, 0, 184, 183, 409, 0, 0, 0, 0,
+ 0, 0, 0, 0, 202, 203, 343, 344, 342, 349,
+ 351, 348, 350, 345, 346, 347, 352, 0, 179, 180,
}
var yyPact = [...]int16{
- 64, 165, 844, 844, 632, 780, -1000, -1000, -1000, 202,
+ 55, 269, 806, 806, 657, 12, -1000, -1000, -1000, 267,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 370, -1000,
- 266, -1000, 906, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -3, 19, 126,
- -1000, -1000, 716, -1000, 716, 166, -1000, 86, 137, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, 321, -1000, -1000, 488,
- -1000, -1000, 291, 181, -1000, -1000, 21, -1000, -54, -54,
- -54, -54, -54, -54, -54, -54, -54, -54, -54, -54,
- -54, -54, -54, -54, 978, -1000, -1000, 335, 169, 275,
- 275, 275, 275, 275, 275, 126, -57, -1000, 193, 193,
- 548, -1000, 26, 612, 33, -15, -1000, 42, 275, 476,
- -1000, -1000, 216, 157, -1000, -1000, 262, -1000, 179, -1000,
- 112, 222, 716, -1000, -51, -44, -1000, 716, 716, 716,
- 716, 716, 716, 716, 716, 716, 716, 716, 716, 716,
- 716, 716, -1000, -1000, -1000, 484, 125, 92, -3, -1000,
- -1000, 275, -1000, 87, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, 161, 161, 332, -1000, -3, -1000, 275, 86, -10,
- -10, -15, -15, -15, -15, -1000, -1000, -1000, 464, -1000,
- -1000, 81, -1000, 906, -1000, -1000, -1000, 390, -1000, 88,
- -1000, 103, -1000, -1000, -1000, -1000, -1000, 102, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, 32, 55, 3, -1000, -1000,
- -1000, 715, 980, 193, 193, 193, 193, 33, 33, 545,
- 545, 545, 971, 925, 545, 545, 971, 33, 33, 545,
- 33, 980, -1000, 84, 80, 275, -15, 40, 275, 612,
- 39, -1000, -1000, -1000, 669, -1000, 167, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 488,
+ -1000, 329, -1000, 889, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -4, 27,
+ 222, -1000, -1000, 742, -1000, 742, 263, -1000, 172, 165,
+ 230, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 426, -1000,
+ -1000, 495, -1000, -1000, 345, 326, -1000, -1000, 31, -1000,
+ -58, -58, -58, -58, -58, -58, -58, -58, -58, -58,
+ -58, -58, -58, -58, -58, -58, 956, -1000, -1000, 176,
+ 942, 324, 324, 324, 324, 324, 324, 222, -52, -1000,
+ 266, 266, 572, -1000, 870, 717, 126, -13, -1000, 141,
+ 139, 324, 494, -1000, -1000, 168, 188, -1000, -1000, 417,
+ -1000, 189, -1000, 116, 847, 742, -1000, -46, -63, -1000,
+ 742, 742, 742, 742, 742, 742, 742, 742, 742, 742,
+ 742, 742, 742, 742, 742, -1000, -1000, -1000, 507, 219,
+ 214, 213, -4, -1000, -1000, 324, -1000, 190, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, 101, 101, 276, -1000, -4,
+ -1000, 324, 172, 165, 59, 59, -13, -13, -13, -13,
+ -1000, -1000, -1000, 487, -1000, -1000, 49, -1000, 889, -1000,
+ -1000, -1000, -1000, 739, -1000, 406, -1000, 88, -1000, -1000,
+ -1000, -1000, -1000, 48, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, 21, 142, 13, -1000, -1000, -1000, 813, 9, 266,
+ 266, 266, 266, 126, 126, 569, 569, 569, 310, 935,
+ 569, 569, 310, 126, 126, 569, 126, 9, -1000, 162,
+ 160, 158, 324, -13, 108, 107, 324, 717, 94, -1000,
+ -1000, -1000, 179, -1000, 167, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, 716, 275, -1000, -1000, -1000,
- -1000, -1000, -1000, 51, 51, 31, 51, 78, 78, 346,
- 35, -1000, -1000, 272, 270, 261, 259, 237, 236, 234,
- 229, 217, 213, 210, -1000, -1000, -1000, -1000, -1000, 37,
- 275, 465, -1000, 364, -1000, 152, -1000, -1000, -1000, 389,
- -1000, 906, 382, -1000, -1000, -1000, 51, -1000, 30, 25,
- 785, -1000, -1000, -1000, 36, 311, 311, 311, 161, 141,
- 141, 36, 141, 36, -78, -1000, 308, -1000, 275, -1000,
- -1000, -1000, -1000, -1000, -1000, 51, 51, -1000, -1000, -1000,
- 51, -1000, -1000, -1000, -1000, -1000, -1000, 311, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, 275, 172, -1000,
- -1000, -1000, 162, -1000, 150, -1000, 483, -1000, -1000, -1000,
- -1000, -1000,
+ -1000, -1000, -1000, -1000, 742, 324, -1000, -1000, -1000, -1000,
+ -1000, -1000, 53, 53, 20, 53, 155, 155, 201, 150,
+ -1000, -1000, 323, 322, 321, 317, 315, 298, 295, 294,
+ 292, 290, 281, -1000, -1000, -1000, -1000, -1000, 87, 36,
+ 324, 636, -1000, -1000, 643, -1000, 98, -1000, -1000, -1000,
+ 402, -1000, 889, 476, -1000, -1000, -1000, 53, -1000, 19,
+ 18, 1008, -1000, -1000, -1000, 50, 284, 284, 284, 101,
+ 234, 234, 50, 234, 50, -65, -1000, -1000, 233, -1000,
+ 324, -1000, -1000, -1000, -1000, -1000, -1000, 53, 53, -1000,
+ -1000, -1000, 53, -1000, -1000, -1000, -1000, -1000, -1000, 284,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 324,
+ 403, -1000, -1000, -1000, 217, -1000, 174, -1000, 313, -1000,
+ -1000, -1000, -1000, -1000,
}
var yyPgo = [...]int16{
- 0, 444, 13, 433, 6, 15, 430, 318, 23, 427,
- 10, 422, 14, 222, 355, 419, 16, 415, 28, 12,
- 413, 410, 7, 408, 9, 5, 396, 3, 2, 4,
- 394, 25, 1, 393, 384, 33, 200, 381, 375, 86,
- 373, 358, 27, 357, 26, 356, 11, 347, 345, 339,
- 331, 325, 324, 319, 299, 285, 0, 297, 8, 296,
- 288, 281,
+ 0, 478, 13, 463, 6, 15, 462, 371, 22, 458,
+ 9, 457, 14, 292, 378, 454, 16, 448, 19, 12,
+ 447, 443, 7, 439, 4, 5, 436, 3, 2, 10,
+ 435, 21, 1, 434, 433, 26, 204, 432, 422, 88,
+ 409, 407, 28, 406, 41, 405, 11, 403, 402, 387,
+ 385, 384, 379, 376, 373, 340, 0, 358, 8, 357,
+ 350, 342,
}
var yyR1 = [...]int8{
@@ -610,22 +612,22 @@ var yyR1 = [...]int8{
2, 2, 2, 2, 2, 14, 14, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 13, 13, 13, 13, 15, 15,
- 15, 16, 16, 16, 16, 16, 16, 16, 61, 21,
- 21, 21, 21, 20, 20, 20, 20, 20, 20, 20,
- 20, 20, 30, 30, 30, 22, 22, 22, 22, 23,
- 23, 23, 24, 24, 24, 24, 24, 24, 24, 24,
- 24, 24, 24, 25, 25, 26, 26, 26, 11, 11,
- 11, 11, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 13, 13, 13, 13, 15,
+ 15, 15, 16, 16, 16, 16, 16, 16, 16, 61,
+ 21, 21, 21, 21, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 30, 30, 30, 22, 22, 22, 22,
+ 23, 23, 23, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 25, 25, 26, 26, 26, 11,
+ 11, 11, 11, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 6, 6, 6, 8, 8,
- 5, 5, 5, 5, 46, 46, 29, 29, 31, 31,
- 32, 32, 28, 27, 27, 52, 10, 19, 19, 59,
- 59, 59, 59, 59, 59, 59, 59, 12, 12, 56,
- 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
- 57,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 8, 8, 5, 5, 5, 5, 46, 46, 29, 29,
+ 31, 31, 32, 32, 28, 27, 27, 52, 10, 19,
+ 19, 59, 59, 59, 59, 59, 59, 59, 59, 59,
+ 59, 12, 12, 56, 56, 56, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 57,
}
var yyR2 = [...]int8{
@@ -642,116 +644,118 @@ var yyR2 = [...]int8{
1, 3, 3, 2, 1, 2, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 3, 4, 2, 0, 3, 1,
- 2, 3, 3, 1, 3, 3, 2, 1, 2, 0,
- 3, 2, 1, 1, 3, 1, 3, 4, 1, 3,
- 5, 5, 1, 1, 1, 4, 3, 3, 2, 3,
- 1, 2, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 4, 3, 3, 1, 2, 1, 1,
+ 1, 1, 1, 1, 1, 3, 4, 2, 0, 3,
+ 1, 2, 3, 3, 1, 3, 3, 2, 1, 2,
+ 0, 3, 2, 1, 1, 3, 1, 3, 4, 1,
+ 3, 5, 5, 1, 1, 1, 4, 3, 3, 2,
+ 3, 1, 2, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 4, 3, 3, 1, 2, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
- 1, 1, 1, 2, 1, 1, 1, 0, 1, 1,
- 2, 3, 4, 6, 7, 4, 1, 1, 1, 1,
- 2, 3, 3, 3, 3, 3, 3, 3, 6, 1,
- 3,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 1, 1, 1, 2, 1, 1, 1, 0,
+ 1, 1, 2, 3, 3, 4, 4, 6, 7, 4,
+ 1, 1, 1, 1, 2, 3, 3, 3, 3, 3,
+ 3, 3, 3, 6, 1, 3,
}
var yyChk = [...]int16{
- -1000, -60, 101, 102, 103, 104, 2, 10, -14, -7,
+ -1000, -60, 102, 103, 104, 105, 2, 10, -14, -7,
-13, 62, 63, 79, 64, 65, 66, 12, 47, 48,
51, 67, 18, 68, 83, 69, 70, 71, 72, 73,
- 87, 90, 91, 74, 75, 92, 85, 84, 13, -61,
- -14, 10, -39, -34, -37, -40, -45, -46, -47, -48,
- -49, -51, -52, -53, -54, -55, -33, -56, -3, 12,
- 19, 9, 15, 25, -8, -7, -44, 92, -12, -57,
- 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
- 72, 73, 74, 75, 41, 57, 13, -55, -13, -15,
- 20, -16, 12, -10, 2, 25, -21, 2, 41, 59,
- 42, 43, 45, 46, 47, 48, 49, 50, 51, 52,
- 53, 54, 56, 57, 83, 85, 84, 58, 14, 41,
- 57, 53, 42, 52, 56, -35, -42, 2, 79, 87,
- 15, -42, -39, -56, -39, -56, -44, 15, 15, -1,
- 20, -2, 12, -10, 2, 20, 7, 2, 4, 2,
- 4, 24, -36, -43, -38, -50, 78, -36, -36, -36,
+ 87, 90, 91, 74, 75, 92, 93, 85, 84, 13,
+ -61, -14, 10, -39, -34, -37, -40, -45, -46, -47,
+ -48, -49, -51, -52, -53, -54, -55, -33, -56, -3,
+ 12, 19, 9, 15, 25, -8, -7, -44, 92, 93,
+ -12, -57, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 41, 57, 13, -55,
+ -13, -15, 20, -16, 12, -10, 2, 25, -21, 2,
+ 41, 59, 42, 43, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 56, 57, 83, 85, 84, 58,
+ 14, 41, 57, 53, 42, 52, 56, -35, -42, 2,
+ 79, 87, 15, -42, -39, -56, -39, -56, -44, 15,
+ 15, 15, -1, 20, -2, 12, -10, 2, 20, 7,
+ 2, 4, 2, 4, 24, -36, -43, -38, -50, 78,
-36, -36, -36, -36, -36, -36, -36, -36, -36, -36,
- -36, -36, -59, 2, -46, -8, 92, -12, -56, 68,
- 67, 15, -32, -9, 2, -29, -31, 90, 91, 19,
- 9, 41, 57, -58, 2, -56, -46, -8, 92, -56,
- -56, -56, -56, -56, -56, -42, -35, -18, 15, 2,
- -18, -41, 22, -39, 22, 22, 22, -56, 20, 7,
- 2, -5, 2, 4, 54, 44, 55, -5, 20, -16,
- 25, 2, 25, 2, -20, 5, -30, -22, 12, -29,
- -31, 16, -39, 82, 86, 80, 81, -39, -39, -39,
- -39, -39, -39, -39, -39, -39, -39, -39, -39, -39,
- -39, -39, -46, 92, -12, 15, -56, 15, 15, -56,
- 15, -29, -29, 21, 6, 2, -17, 22, -4, -6,
- 25, 2, 62, 78, 63, 79, 64, 65, 66, 80,
- 81, 12, 82, 47, 48, 51, 67, 18, 68, 83,
- 86, 69, 70, 71, 72, 73, 90, 91, 59, 74,
- 75, 92, 85, 84, 22, 7, 7, 20, -2, 25,
- 2, 25, 2, 26, 26, -31, 26, 41, 57, -23,
- 24, 17, -24, 30, 28, 29, 35, 36, 37, 33,
- 31, 34, 32, 38, -18, -18, -19, -18, -19, 15,
- 15, -56, 22, -56, 22, -58, 21, 2, 22, 7,
- 2, -39, -56, -28, 19, -28, 26, -28, -22, -22,
- 24, 17, 2, 17, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 22, -56, 22, 7, 21,
- 2, 22, -4, 22, -28, 26, 26, 17, -24, -27,
- 57, -28, -32, -32, -32, -29, -25, 14, -25, -27,
- -25, -27, -11, 95, 96, 97, 98, 7, -56, -28,
- -28, -28, -26, -32, -56, 22, 24, 21, 2, 22,
- 21, -32,
+ -36, -36, -36, -36, -36, -59, 2, -46, -8, 92,
+ 93, -12, -56, 68, 67, 15, -32, -9, 2, -29,
+ -31, 90, 91, 19, 9, 41, 57, -58, 2, -56,
+ -46, -8, 92, 93, -56, -56, -56, -56, -56, -56,
+ -42, -35, -18, 15, 2, -18, -41, 22, -39, 22,
+ 22, 22, 22, -56, 20, 7, 2, -5, 2, 4,
+ 54, 44, 55, -5, 20, -16, 25, 2, 25, 2,
+ -20, 5, -30, -22, 12, -29, -31, 16, -39, 82,
+ 86, 80, 81, -39, -39, -39, -39, -39, -39, -39,
+ -39, -39, -39, -39, -39, -39, -39, -39, -46, 92,
+ 93, -12, 15, -56, 15, 15, 15, -56, 15, -29,
+ -29, 21, 6, 2, -17, 22, -4, -6, 25, 2,
+ 62, 78, 63, 79, 64, 65, 66, 80, 81, 12,
+ 82, 47, 48, 51, 67, 18, 68, 83, 86, 69,
+ 70, 71, 72, 73, 90, 91, 59, 74, 75, 92,
+ 93, 85, 84, 22, 7, 7, 20, -2, 25, 2,
+ 25, 2, 26, 26, -31, 26, 41, 57, -23, 24,
+ 17, -24, 30, 28, 29, 35, 36, 37, 33, 31,
+ 34, 32, 38, -18, -18, -19, -18, -19, 15, 15,
+ 15, -56, 22, 22, -56, 22, -58, 21, 2, 22,
+ 7, 2, -39, -56, -28, 19, -28, 26, -28, -22,
+ -22, 24, 17, 2, 17, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 22, 22, -56, 22,
+ 7, 21, 2, 22, -4, 22, -28, 26, 26, 17,
+ -24, -27, 57, -28, -32, -32, -32, -29, -25, 14,
+ -25, -27, -25, -27, -11, 96, 97, 98, 99, 7,
+ -56, -28, -28, -28, -26, -32, -56, 22, 24, 21,
+ 2, 22, 21, -32,
}
var yyDef = [...]int16{
- 0, -2, 137, 137, 0, 0, 7, 6, 1, 137,
+ 0, -2, 138, 138, 0, 0, 7, 6, 1, 138,
106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
- 126, 127, 128, 129, 130, 131, 132, 133, 0, 2,
- -2, 3, 4, 8, 9, 10, 11, 12, 13, 14,
- 15, 16, 17, 18, 19, 20, 21, 22, 0, 113,
- 244, 245, 0, 255, 0, 90, 91, 131, 0, 279,
- -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
- -2, -2, -2, -2, 238, 239, 0, 5, 105, 0,
- 136, 139, 0, 143, 147, 256, 148, 152, 46, 46,
+ 126, 127, 128, 129, 130, 131, 132, 133, 134, 0,
+ 2, -2, 3, 4, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 0,
+ 113, 246, 247, 0, 257, 0, 90, 91, 131, 132,
+ 0, 284, -2, -2, -2, -2, -2, -2, -2, -2,
+ -2, -2, -2, -2, -2, -2, 240, 241, 0, 5,
+ 105, 0, 137, 140, 0, 144, 148, 258, 149, 153,
46, 46, 46, 46, 46, 46, 46, 46, 46, 46,
- 46, 46, 46, 46, 0, 74, 75, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 25, 26, 0, 0,
- 0, 64, 0, 22, 88, -2, 89, 0, 0, 0,
- 94, 96, 0, 100, 104, 134, 0, 140, 0, 146,
- 0, 151, 0, 45, 50, 51, 47, 0, 0, 0,
+ 46, 46, 46, 46, 46, 46, 0, 74, 75, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 25, 26,
+ 0, 0, 0, 64, 0, 22, 88, -2, 89, 0,
+ 0, 0, 0, 94, 96, 0, 100, 104, 135, 0,
+ 141, 0, 147, 0, 152, 0, 45, 50, 51, 47,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 72, 73, 259, 0, 0, 0, 266, 267,
- 268, 0, 76, 0, 78, 250, 251, 79, 80, 246,
- 247, 0, 0, 0, 87, 71, 269, 0, 0, 271,
- 272, 273, 274, 275, 276, 23, 24, 27, 0, 57,
- 28, 0, 66, 68, 70, 280, 277, 0, 92, 0,
- 97, 0, 103, 240, 241, 242, 243, 0, 135, 138,
- 141, 144, 142, 145, 150, 153, 155, 158, 162, 163,
- 164, 0, 29, 0, 0, -2, -2, 30, 31, 32,
- 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
- 43, 44, 260, 0, 0, 0, 270, 0, 0, 0,
- 0, 248, 249, 81, 0, 86, 0, 56, 59, 61,
- 62, 63, 206, 207, 208, 209, 210, 211, 212, 213,
- 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
- 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
- 234, 235, 236, 237, 65, 69, 0, 93, 95, 98,
- 102, 99, 101, 0, 0, 0, 0, 0, 0, 0,
- 0, 168, 170, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 48, 49, 52, 258, 53, 0,
- 0, 0, 261, 0, 77, 0, 83, 85, 54, 0,
- 60, 67, 0, 154, 252, 156, 0, 159, 0, 0,
- 0, 166, 171, 167, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 262, 0, 265, 0, 82,
- 84, 55, 58, 278, 157, 0, 0, 165, 169, 172,
- 0, 254, 173, 174, 175, 176, 177, 0, 178, 179,
- 180, 181, 182, 188, 189, 190, 191, 0, 0, 160,
- 161, 253, 0, 186, 0, 263, 0, 184, 187, 264,
- 183, 185,
+ 0, 0, 0, 0, 0, 72, 73, 261, 0, 0,
+ 0, 0, 270, 271, 272, 0, 76, 0, 78, 252,
+ 253, 79, 80, 248, 249, 0, 0, 0, 87, 71,
+ 273, 0, 0, 0, 275, 276, 277, 278, 279, 280,
+ 23, 24, 27, 0, 57, 28, 0, 66, 68, 70,
+ 285, 281, 282, 0, 92, 0, 97, 0, 103, 242,
+ 243, 244, 245, 0, 136, 139, 142, 145, 143, 146,
+ 151, 154, 156, 159, 163, 164, 165, 0, 29, 0,
+ 0, -2, -2, 30, 31, 32, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 262, 0,
+ 0, 0, 0, 274, 0, 0, 0, 0, 0, 250,
+ 251, 81, 0, 86, 0, 56, 59, 61, 62, 63,
+ 207, 208, 209, 210, 211, 212, 213, 214, 215, 216,
+ 217, 218, 219, 220, 221, 222, 223, 224, 225, 226,
+ 227, 228, 229, 230, 231, 232, 233, 234, 235, 236,
+ 237, 238, 239, 65, 69, 0, 93, 95, 98, 102,
+ 99, 101, 0, 0, 0, 0, 0, 0, 0, 0,
+ 169, 171, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 48, 49, 52, 260, 53, 0, 0,
+ 0, 0, 263, 264, 0, 77, 0, 83, 85, 54,
+ 0, 60, 67, 0, 155, 254, 157, 0, 160, 0,
+ 0, 0, 167, 172, 168, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 265, 266, 0, 269,
+ 0, 82, 84, 55, 58, 283, 158, 0, 0, 166,
+ 170, 173, 0, 256, 174, 175, 176, 177, 178, 0,
+ 179, 180, 181, 182, 183, 189, 190, 191, 192, 0,
+ 0, 161, 162, 255, 0, 187, 0, 267, 0, 185,
+ 188, 268, 184, 186,
}
var yyTok1 = [...]int8{
@@ -769,7 +773,7 @@ var yyTok2 = [...]int8{
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
- 102, 103, 104, 105,
+ 102, 103, 104, 105, 106,
}
var yyTok3 = [...]int8{
@@ -1434,7 +1438,7 @@ yydefault:
case 73:
yyDollar = yyS[yypt-3 : yypt+1]
{
- yylex.(*parser).unexpected("offset", "number, duration, or step()")
+ yylex.(*parser).unexpected("offset", "number, duration, step(), or range()")
yyVAL.node = yyDollar[1].node
}
case 74:
@@ -1541,7 +1545,7 @@ yydefault:
case 85:
yyDollar = yyS[yypt-5 : yypt+1]
{
- yylex.(*parser).unexpected("subquery selector", "number, duration, or step() or \"]\"")
+ yylex.(*parser).unexpected("subquery selector", "number, duration, step(), range(), or \"]\"")
yyVAL.node = yyDollar[1].node
}
case 86:
@@ -1553,7 +1557,7 @@ yydefault:
case 87:
yyDollar = yyS[yypt-3 : yypt+1]
{
- yylex.(*parser).unexpected("subquery or range selector", "number, duration, or step()")
+ yylex.(*parser).unexpected("subquery or range selector", "number, duration, step(), or range()")
yyVAL.node = yyDollar[1].node
}
case 88:
@@ -1691,63 +1695,57 @@ yydefault:
{
yyVAL.labels = yyDollar[1].labels
}
- case 134:
- yyDollar = yyS[yypt-3 : yypt+1]
- {
- yyVAL.labels = labels.New(yyDollar[2].lblList...)
- }
case 135:
- yyDollar = yyS[yypt-4 : yypt+1]
+ yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.labels = labels.New(yyDollar[2].lblList...)
}
case 136:
- yyDollar = yyS[yypt-2 : yypt+1]
+ yyDollar = yyS[yypt-4 : yypt+1]
{
- yyVAL.labels = labels.New()
+ yyVAL.labels = labels.New(yyDollar[2].lblList...)
}
case 137:
- yyDollar = yyS[yypt-0 : yypt+1]
+ yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.labels = labels.New()
}
case 138:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ {
+ yyVAL.labels = labels.New()
+ }
+ case 139:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.lblList = append(yyDollar[1].lblList, yyDollar[3].label)
}
- case 139:
+ case 140:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.lblList = []labels.Label{yyDollar[1].label}
}
- case 140:
+ case 141:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("label set", "\",\" or \"}\"")
yyVAL.lblList = yyDollar[1].lblList
}
- case 141:
- yyDollar = yyS[yypt-3 : yypt+1]
- {
- yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)}
- }
case 142:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)}
}
case 143:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ {
+ yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)}
+ }
+ case 144:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.label = labels.Label{Name: labels.MetricName, Value: yyDollar[1].item.Val}
}
- case 144:
- yyDollar = yyS[yypt-3 : yypt+1]
- {
- yylex.(*parser).unexpected("label set", "string")
- yyVAL.label = labels.Label{}
- }
case 145:
yyDollar = yyS[yypt-3 : yypt+1]
{
@@ -1755,18 +1753,24 @@ yydefault:
yyVAL.label = labels.Label{}
}
case 146:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ {
+ yylex.(*parser).unexpected("label set", "string")
+ yyVAL.label = labels.Label{}
+ }
+ case 147:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("label set", "\"=\"")
yyVAL.label = labels.Label{}
}
- case 147:
+ case 148:
yyDollar = yyS[yypt-1 : yypt+1]
{
yylex.(*parser).unexpected("label set", "identifier or \"}\"")
yyVAL.label = labels.Label{}
}
- case 148:
+ case 149:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).generatedParserResult = &seriesDescription{
@@ -1774,33 +1778,33 @@ yydefault:
values: yyDollar[2].series,
}
}
- case 149:
+ case 150:
yyDollar = yyS[yypt-0 : yypt+1]
{
yyVAL.series = []SequenceValue{}
}
- case 150:
+ case 151:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...)
}
- case 151:
+ case 152:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.series = yyDollar[1].series
}
- case 152:
+ case 153:
yyDollar = yyS[yypt-1 : yypt+1]
{
yylex.(*parser).unexpected("series values", "")
yyVAL.series = nil
}
- case 153:
+ case 154:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.series = []SequenceValue{{Omitted: true}}
}
- case 154:
+ case 155:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.series = []SequenceValue{}
@@ -1808,12 +1812,12 @@ yydefault:
yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true})
}
}
- case 155:
+ case 156:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}}
}
- case 156:
+ case 157:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.series = []SequenceValue{}
@@ -1822,7 +1826,7 @@ yydefault:
yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float})
}
}
- case 157:
+ case 158:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.series = []SequenceValue{}
@@ -1832,12 +1836,12 @@ yydefault:
yyDollar[1].float += yyDollar[2].float
}
}
- case 158:
+ case 159:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.series = []SequenceValue{{Histogram: yyDollar[1].histogram}}
}
- case 159:
+ case 160:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.series = []SequenceValue{}
@@ -1847,7 +1851,7 @@ yydefault:
//$1 += $2
}
}
- case 160:
+ case 161:
yyDollar = yyS[yypt-5 : yypt+1]
{
val, err := yylex.(*parser).histogramsIncreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint)
@@ -1856,7 +1860,7 @@ yydefault:
}
yyVAL.series = val
}
- case 161:
+ case 162:
yyDollar = yyS[yypt-5 : yypt+1]
{
val, err := yylex.(*parser).histogramsDecreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint)
@@ -1865,7 +1869,7 @@ yydefault:
}
yyVAL.series = val
}
- case 162:
+ case 163:
yyDollar = yyS[yypt-1 : yypt+1]
{
if yyDollar[1].item.Val != "stale" {
@@ -1873,130 +1877,130 @@ yydefault:
}
yyVAL.float = math.Float64frombits(value.StaleNaN)
}
- case 165:
- yyDollar = yyS[yypt-4 : yypt+1]
- {
- yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors)
- }
case 166:
- yyDollar = yyS[yypt-3 : yypt+1]
+ yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors)
}
case 167:
yyDollar = yyS[yypt-3 : yypt+1]
{
- m := yylex.(*parser).newMap()
- yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m)
+ yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors)
}
case 168:
- yyDollar = yyS[yypt-2 : yypt+1]
+ yyDollar = yyS[yypt-3 : yypt+1]
{
m := yylex.(*parser).newMap()
yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m)
}
case 169:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ {
+ m := yylex.(*parser).newMap()
+ yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m)
+ }
+ case 170:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = *(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors))
}
- case 170:
+ case 171:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.descriptors = yyDollar[1].descriptors
}
- case 171:
+ case 172:
yyDollar = yyS[yypt-2 : yypt+1]
{
yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. buckets:[5 10 7]")
}
- case 172:
- yyDollar = yyS[yypt-3 : yypt+1]
- {
- yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["schema"] = yyDollar[3].int
- }
case 173:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["sum"] = yyDollar[3].float
+ yyVAL.descriptors["schema"] = yyDollar[3].int
}
case 174:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["count"] = yyDollar[3].float
+ yyVAL.descriptors["sum"] = yyDollar[3].float
}
case 175:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["z_bucket"] = yyDollar[3].float
+ yyVAL.descriptors["count"] = yyDollar[3].float
}
case 176:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float
+ yyVAL.descriptors["z_bucket"] = yyDollar[3].float
}
case 177:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set
+ yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float
}
case 178:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set
+ yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set
}
case 179:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["offset"] = yyDollar[3].int
+ yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set
}
case 180:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set
+ yyVAL.descriptors["offset"] = yyDollar[3].int
}
case 181:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["n_offset"] = yyDollar[3].int
+ yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set
}
case 182:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.descriptors = yylex.(*parser).newMap()
- yyVAL.descriptors["counter_reset_hint"] = yyDollar[3].item
+ yyVAL.descriptors["n_offset"] = yyDollar[3].int
}
case 183:
- yyDollar = yyS[yypt-4 : yypt+1]
+ yyDollar = yyS[yypt-3 : yypt+1]
{
- yyVAL.bucket_set = yyDollar[2].bucket_set
+ yyVAL.descriptors = yylex.(*parser).newMap()
+ yyVAL.descriptors["counter_reset_hint"] = yyDollar[3].item
}
case 184:
- yyDollar = yyS[yypt-3 : yypt+1]
+ yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.bucket_set = yyDollar[2].bucket_set
}
case 185:
yyDollar = yyS[yypt-3 : yypt+1]
{
- yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float)
+ yyVAL.bucket_set = yyDollar[2].bucket_set
}
case 186:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ {
+ yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float)
+ }
+ case 187:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.bucket_set = []float64{yyDollar[1].float}
}
- case 244:
+ case 246:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.node = &NumberLiteral{
@@ -2004,7 +2008,7 @@ yydefault:
PosRange: yyDollar[1].item.PositionRange(),
}
}
- case 245:
+ case 247:
yyDollar = yyS[yypt-1 : yypt+1]
{
var err error
@@ -2019,12 +2023,12 @@ yydefault:
Duration: true,
}
}
- case 246:
+ case 248:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val)
}
- case 247:
+ case 249:
yyDollar = yyS[yypt-1 : yypt+1]
{
var err error
@@ -2035,17 +2039,17 @@ yydefault:
}
yyVAL.float = dur.Seconds()
}
- case 248:
+ case 250:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.float = yyDollar[2].float
}
- case 249:
+ case 251:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.float = -yyDollar[2].float
}
- case 252:
+ case 254:
yyDollar = yyS[yypt-1 : yypt+1]
{
var err error
@@ -2054,17 +2058,17 @@ yydefault:
yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err)
}
}
- case 253:
+ case 255:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.int = -int64(yyDollar[2].uint)
}
- case 254:
+ case 256:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.int = int64(yyDollar[1].uint)
}
- case 255:
+ case 257:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.node = &StringLiteral{
@@ -2072,7 +2076,7 @@ yydefault:
PosRange: yyDollar[1].item.PositionRange(),
}
}
- case 256:
+ case 258:
yyDollar = yyS[yypt-1 : yypt+1]
{
yyVAL.item = Item{
@@ -2081,12 +2085,12 @@ yydefault:
Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val),
}
}
- case 257:
+ case 259:
yyDollar = yyS[yypt-0 : yypt+1]
{
yyVAL.strings = nil
}
- case 259:
+ case 261:
yyDollar = yyS[yypt-1 : yypt+1]
{
nl := yyDollar[1].node.(*NumberLiteral)
@@ -2097,7 +2101,7 @@ yydefault:
}
yyVAL.node = nl
}
- case 260:
+ case 262:
yyDollar = yyS[yypt-2 : yypt+1]
{
nl := yyDollar[2].node.(*NumberLiteral)
@@ -2112,7 +2116,7 @@ yydefault:
nl.PosRange.Start = yyDollar[1].item.Pos
yyVAL.node = nl
}
- case 261:
+ case 263:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2121,7 +2125,16 @@ yydefault:
EndPos: yyDollar[3].item.PositionRange().End,
}
}
- case 262:
+ case 264:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ {
+ yyVAL.node = &DurationExpr{
+ Op: RANGE,
+ StartPos: yyDollar[1].item.PositionRange().Start,
+ EndPos: yyDollar[3].item.PositionRange().End,
+ }
+ }
+ case 265:
yyDollar = yyS[yypt-4 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2134,7 +2147,20 @@ yydefault:
StartPos: yyDollar[1].item.Pos,
}
}
- case 263:
+ case 266:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ {
+ yyVAL.node = &DurationExpr{
+ Op: yyDollar[1].item.Typ,
+ RHS: &DurationExpr{
+ Op: RANGE,
+ StartPos: yyDollar[2].item.PositionRange().Start,
+ EndPos: yyDollar[4].item.PositionRange().End,
+ },
+ StartPos: yyDollar[1].item.Pos,
+ }
+ }
+ case 267:
yyDollar = yyS[yypt-6 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2145,7 +2171,7 @@ yydefault:
RHS: yyDollar[5].node.(Expr),
}
}
- case 264:
+ case 268:
yyDollar = yyS[yypt-7 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2161,7 +2187,7 @@ yydefault:
},
}
}
- case 265:
+ case 269:
yyDollar = yyS[yypt-4 : yypt+1]
{
de := yyDollar[3].node.(*DurationExpr)
@@ -2176,7 +2202,7 @@ yydefault:
}
yyVAL.node = yyDollar[3].node
}
- case 269:
+ case 273:
yyDollar = yyS[yypt-1 : yypt+1]
{
nl := yyDollar[1].node.(*NumberLiteral)
@@ -2187,7 +2213,7 @@ yydefault:
}
yyVAL.node = nl
}
- case 270:
+ case 274:
yyDollar = yyS[yypt-2 : yypt+1]
{
switch expr := yyDollar[2].node.(type) {
@@ -2220,25 +2246,25 @@ yydefault:
break
}
}
- case 271:
+ case 275:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
yyVAL.node = &DurationExpr{Op: ADD, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 272:
+ case 276:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
yyVAL.node = &DurationExpr{Op: SUB, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 273:
+ case 277:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
yyVAL.node = &DurationExpr{Op: MUL, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 274:
+ case 278:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
@@ -2249,7 +2275,7 @@ yydefault:
}
yyVAL.node = &DurationExpr{Op: DIV, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 275:
+ case 279:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
@@ -2260,13 +2286,13 @@ yydefault:
}
yyVAL.node = &DurationExpr{Op: MOD, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 276:
+ case 280:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[1].node.(Expr))
yyVAL.node = &DurationExpr{Op: POW, LHS: yyDollar[1].node.(Expr), RHS: yyDollar[3].node.(Expr)}
}
- case 277:
+ case 281:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2275,7 +2301,16 @@ yydefault:
EndPos: yyDollar[3].item.PositionRange().End,
}
}
- case 278:
+ case 282:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ {
+ yyVAL.node = &DurationExpr{
+ Op: RANGE,
+ StartPos: yyDollar[1].item.PositionRange().Start,
+ EndPos: yyDollar[3].item.PositionRange().End,
+ }
+ }
+ case 283:
yyDollar = yyS[yypt-6 : yypt+1]
{
yyVAL.node = &DurationExpr{
@@ -2286,7 +2321,7 @@ yydefault:
RHS: yyDollar[5].node.(Expr),
}
}
- case 280:
+ case 285:
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).experimentalDurationExpr(yyDollar[2].node.(Expr))
diff --git a/promql/parser/lex.go b/promql/parser/lex.go
index 296b91d1ae..b3a82dc0c6 100644
--- a/promql/parser/lex.go
+++ b/promql/parser/lex.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -143,6 +143,7 @@ var key = map[string]ItemType{
"start": START,
"end": END,
"step": STEP,
+ "range": RANGE,
}
var histogramDesc = map[string]ItemType{
@@ -915,6 +916,9 @@ func (l *Lexer) scanDurationKeyword() bool {
case "step":
l.emit(STEP)
return true
+ case "range":
+ l.emit(RANGE)
+ return true
case "min":
l.emit(MIN)
return true
@@ -1175,7 +1179,7 @@ func lexDurationExpr(l *Lexer) stateFn {
case r == ',':
l.emit(COMMA)
return lexDurationExpr
- case r == 's' || r == 'S' || r == 'm' || r == 'M':
+ case r == 's' || r == 'S' || r == 'm' || r == 'M' || r == 'r' || r == 'R':
if l.scanDurationKeyword() {
return lexDurationExpr
}
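As an aside (not part of the patch): the lexer change above makes `range` a duration keyword, the grammar additions earlier accept `range()` wherever `step()` was allowed, and the printer change further down renders it back. A minimal sketch of exercising this end to end is shown below. It assumes `EnableExtendedRangeSelectors` is a package-level flag in `parser`, as its unqualified use in printer_test.go later in this diff suggests; whether additional duration-expression flags must also be enabled is likewise an assumption.

```go
// Sketch only: parse and re-print an expression using the new range() keyword.
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Assumption: this flag gates the extended range selectors added in this change.
	parser.EnableExtendedRangeSelectors = true

	expr, err := parser.ParseExpr(`foo[max(range(), 5s)]`)
	if err != nil {
		panic(err)
	}
	// Expected to round-trip to the same text, cf. the printer tests below.
	fmt.Println(expr.String())
}
```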
diff --git a/promql/parser/lex_test.go b/promql/parser/lex_test.go
index f86f282089..5c915ec74f 100644
--- a/promql/parser/lex_test.go
+++ b/promql/parser/lex_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/parser/parse.go b/promql/parser/parse.go
index bcd511f467..817e0d02d9 100644
--- a/promql/parser/parse.go
+++ b/promql/parser/parse.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go
index b5d7c288d1..ab5564f0ff 100644
--- a/promql/parser/parse_test.go
+++ b/promql/parser/parse_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -2708,7 +2708,7 @@ var testExpr = []struct {
errors: ParseErrors{
ParseErr{
PositionRange: posrange.PositionRange{Start: 4, End: 5},
- Err: errors.New("unexpected \"]\" in subquery or range selector, expected number, duration, or step()"),
+ Err: errors.New("unexpected \"]\" in subquery or range selector, expected number, duration, step(), or range()"),
Query: `foo[]`,
},
},
@@ -2741,7 +2741,7 @@ var testExpr = []struct {
errors: ParseErrors{
ParseErr{
PositionRange: posrange.PositionRange{Start: 22, End: 22},
- Err: errors.New("unexpected end of input in offset, expected number, duration, or step()"),
+ Err: errors.New("unexpected end of input in offset, expected number, duration, step(), or range()"),
Query: `some_metric[5m] OFFSET`,
},
},
@@ -4698,6 +4698,100 @@ var testExpr = []struct {
},
},
},
+ {
+ input: `foo[range()]`,
+ expected: &MatrixSelector{
+ VectorSelector: &VectorSelector{
+ Name: "foo",
+ LabelMatchers: []*labels.Matcher{
+ MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+ },
+ PosRange: posrange.PositionRange{Start: 0, End: 3},
+ },
+ RangeExpr: &DurationExpr{
+ Op: RANGE,
+ StartPos: 4,
+ EndPos: 11,
+ },
+ EndPos: 12,
+ },
+ },
+ {
+ input: `foo[-range()]`,
+ expected: &MatrixSelector{
+ VectorSelector: &VectorSelector{
+ Name: "foo",
+ LabelMatchers: []*labels.Matcher{
+ MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+ },
+ PosRange: posrange.PositionRange{Start: 0, End: 3},
+ },
+ RangeExpr: &DurationExpr{
+ Op: SUB,
+ StartPos: 4,
+ RHS: &DurationExpr{Op: RANGE, StartPos: 5, EndPos: 12},
+ },
+ EndPos: 13,
+ },
+ },
+ {
+ input: `foo offset range()`,
+ expected: &VectorSelector{
+ Name: "foo",
+ LabelMatchers: []*labels.Matcher{
+ MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+ },
+ PosRange: posrange.PositionRange{Start: 0, End: 18},
+ OriginalOffsetExpr: &DurationExpr{
+ Op: RANGE,
+ StartPos: 11,
+ EndPos: 18,
+ },
+ },
+ },
+ {
+ input: `foo offset -range()`,
+ expected: &VectorSelector{
+ Name: "foo",
+ LabelMatchers: []*labels.Matcher{
+ MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+ },
+ PosRange: posrange.PositionRange{Start: 0, End: 19},
+ OriginalOffsetExpr: &DurationExpr{
+ Op: SUB,
+ RHS: &DurationExpr{Op: RANGE, StartPos: 12, EndPos: 19},
+ StartPos: 11,
+ },
+ },
+ },
+ {
+ input: `foo[max(range(),5s)]`,
+ expected: &MatrixSelector{
+ VectorSelector: &VectorSelector{
+ Name: "foo",
+ LabelMatchers: []*labels.Matcher{
+ MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+ },
+ PosRange: posrange.PositionRange{Start: 0, End: 3},
+ },
+ RangeExpr: &DurationExpr{
+ Op: MAX,
+ LHS: &DurationExpr{
+ Op: RANGE,
+ StartPos: 8,
+ EndPos: 15,
+ },
+ RHS: &NumberLiteral{
+ Val: 5,
+ Duration: true,
+ PosRange: posrange.PositionRange{Start: 16, End: 18},
+ },
+ StartPos: 4,
+ EndPos: 19,
+ },
+ EndPos: 20,
+ },
+ },
{
input: `foo[4s+4s:1s*2] offset (5s-8)`,
expected: &SubqueryExpr{
@@ -4942,7 +5036,7 @@ var testExpr = []struct {
errors: ParseErrors{
ParseErr{
PositionRange: posrange.PositionRange{Start: 8, End: 9},
- Err: errors.New(`unexpected "]" in subquery or range selector, expected number, duration, or step()`),
+ Err: errors.New(`unexpected "]" in subquery or range selector, expected number, duration, step(), or range()`),
Query: `foo[step]`,
},
},
diff --git a/promql/parser/posrange/posrange.go b/promql/parser/posrange/posrange.go
index f883a91bbb..c5cdc4b91b 100644
--- a/promql/parser/posrange/posrange.go
+++ b/promql/parser/posrange/posrange.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/parser/prettier.go b/promql/parser/prettier.go
index 90fb7a0cf9..a0ab9e1219 100644
--- a/promql/parser/prettier.go
+++ b/promql/parser/prettier.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/parser/prettier_test.go b/promql/parser/prettier_test.go
index ea9a7a1a26..8ba5134d4a 100644
--- a/promql/parser/prettier_test.go
+++ b/promql/parser/prettier_test.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/parser/printer.go b/promql/parser/printer.go
index a562b88044..01e2c46c1b 100644
--- a/promql/parser/printer.go
+++ b/promql/parser/printer.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -37,15 +37,16 @@ func tree(node Node, level string) string {
}
typs := strings.Split(fmt.Sprintf("%T", node), ".")[1]
- t := fmt.Sprintf("%s |---- %s :: %s\n", level, typs, node)
+ var t strings.Builder
+ t.WriteString(fmt.Sprintf("%s |---- %s :: %s\n", level, typs, node))
level += " · · ·"
for e := range ChildrenIter(node) {
- t += tree(e, level)
+ t.WriteString(tree(e, level))
}
- return t
+ return t.String()
}
func (node *EvalStmt) String() string {
@@ -108,7 +109,7 @@ func writeLabels(b *bytes.Buffer, ss []string) {
if i > 0 {
b.WriteString(", ")
}
- if !model.LegacyValidation.IsValidMetricName(s) {
+ if !model.LegacyValidation.IsValidLabelName(s) {
b.Write(strconv.AppendQuote(b.AvailableBuffer(), s))
} else {
b.WriteString(s)
@@ -146,20 +147,30 @@ func (node *BinaryExpr) ShortString() string {
func (node *BinaryExpr) getMatchingStr() string {
matching := ""
+ var b bytes.Buffer
vm := node.VectorMatching
- if vm != nil && (len(vm.MatchingLabels) > 0 || vm.On) {
- vmTag := "ignoring"
- if vm.On {
- vmTag = "on"
+ if vm != nil {
+ if len(vm.MatchingLabels) > 0 || vm.On || vm.Card == CardManyToOne || vm.Card == CardOneToMany {
+ vmTag := "ignoring"
+ if vm.On {
+ vmTag = "on"
+ }
+ b.WriteString(" " + vmTag + " (")
+ writeLabels(&b, vm.MatchingLabels)
+ b.WriteString(")")
+ matching = b.String()
}
- matching = fmt.Sprintf(" %s (%s)", vmTag, strings.Join(vm.MatchingLabels, ", "))
if vm.Card == CardManyToOne || vm.Card == CardOneToMany {
vmCard := "right"
if vm.Card == CardManyToOne {
vmCard = "left"
}
- matching += fmt.Sprintf(" group_%s (%s)", vmCard, strings.Join(vm.Include, ", "))
+ b.Reset()
+ b.WriteString(" group_" + vmCard + " (")
+ writeLabels(&b, vm.Include)
+ b.WriteString(")")
+ matching += b.String()
}
}
return matching
@@ -179,6 +190,8 @@ func (node *DurationExpr) writeTo(b *bytes.Buffer) {
switch {
case node.Op == STEP:
b.WriteString("step()")
+ case node.Op == RANGE:
+ b.WriteString("range()")
case node.Op == MIN:
b.WriteString("min(")
b.WriteString(node.LHS.String())
diff --git a/promql/parser/printer_test.go b/promql/parser/printer_test.go
index aadfd5688a..4499fa7860 100644
--- a/promql/parser/printer_test.go
+++ b/promql/parser/printer_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -94,6 +94,25 @@ func TestExprString(t *testing.T) {
in: `a - ignoring() c`,
out: `a - c`,
},
+ {
+ // This is a bit of an odd case, but valid. If the user specifies ignoring() with
+ // no labels, it means that both label sets have to be exactly the same on both
+ // sides (except for the metric name). This is the same behavior as specifying
+ // no matching modifier at all, but if the user wants to include the metric name
+ // from either side in the output via group_x(__name__), they have to specify
+ // ignoring() explicitly to be able to do so, since the grammar does not allow
+ // grouping modifiers without either ignoring(...) or on(...). So we need to
+ // preserve the empty ignoring() clause in this case.
+ //
+ // a - group_left(__name__) c <--- Parse error
+ // a - ignoring() group_left(__name__) c <--- Valid
+ in: `a - ignoring() group_left(__metric__) c`,
+ out: `a - ignoring () group_left (__metric__) c`,
+ },
+ {
+ in: `a - ignoring() group_left c`,
+ out: `a - ignoring () group_left () c`,
+ },
{
in: `up > bool 0`,
},
@@ -247,9 +266,36 @@ func TestExprString(t *testing.T) {
{
in: "foo[200 - min(step() + 10s, -max(step() ^ 2, 3))]",
},
+ {
+ in: "foo[range()]",
+ },
+ {
+ in: "foo[-range()]",
+ },
+ {
+ in: "foo offset range()",
+ },
+ {
+ in: "foo offset -range()",
+ },
+ {
+ in: "foo[max(range(), 5s)]",
+ },
{
in: `predict_linear(foo[1h], 3000)`,
},
+ {
+ in: `sum by("üüü") (foo)`,
+ out: `sum by ("üüü") (foo)`,
+ },
+ {
+ in: `sum without("äää") (foo)`,
+ out: `sum without ("äää") (foo)`,
+ },
+ {
+ in: `count by("ööö", job) (foo)`,
+ out: `count by ("ööö", job) (foo)`,
+ },
}
EnableExtendedRangeSelectors = true
@@ -375,3 +421,55 @@ func TestVectorSelector_String(t *testing.T) {
})
}
}
+
+func TestBinaryExprUTF8Labels(t *testing.T) {
+ testCases := []struct {
+ name string
+ input string
+ expected string
+ }{
+ {
+ name: "UTF-8 labels in on clause",
+ input: `foo / on("äää") bar`,
+ expected: `foo / on ("äää") bar`,
+ },
+ {
+ name: "UTF-8 labels in ignoring clause",
+ input: `foo / ignoring("üüü") bar`,
+ expected: `foo / ignoring ("üüü") bar`,
+ },
+ {
+ name: "UTF-8 labels in group_left clause",
+ input: `foo / on("äää") group_left("ööö") bar`,
+ expected: `foo / on ("äää") group_left ("ööö") bar`,
+ },
+ {
+ name: "UTF-8 labels in group_right clause",
+ input: `foo / on("äää") group_right("ööö") bar`,
+ expected: `foo / on ("äää") group_right ("ööö") bar`,
+ },
+ {
+ name: "Mixed legacy and UTF-8 labels",
+ input: `foo / on(legacy, "üüü") bar`,
+ expected: `foo / on (legacy, "üüü") bar`,
+ },
+ {
+ name: "Legacy labels only (should not quote)",
+ input: `foo / on(job, instance) bar`,
+ expected: `foo / on (job, instance) bar`,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ expr, err := ParseExpr(tc.input)
+ if err != nil {
+ t.Fatalf("Failed to parse: %v", err)
+ }
+ result := expr.String()
+ if result != tc.expected {
+ t.Errorf("Expected: %s\nGot: %s", tc.expected, result)
+ }
+ })
+ }
+}
diff --git a/promql/parser/value.go b/promql/parser/value.go
index f882f9f0be..3c1c8571dc 100644
--- a/promql/parser/value.go
+++ b/promql/parser/value.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/promql_test.go b/promql/promql_test.go
index 92d933f1ee..fc13f7e64f 100644
--- a/promql/promql_test.go
+++ b/promql/promql_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/promqltest/cmd/migrate/main.go b/promql/promqltest/cmd/migrate/main.go
index a506f084c5..b570b1dfaa 100644
--- a/promql/promqltest/cmd/migrate/main.go
+++ b/promql/promqltest/cmd/migrate/main.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go
index b16433c14e..1c4226b461 100644
--- a/promql/promqltest/test.go
+++ b/promql/promqltest/test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -231,7 +231,7 @@ func raise(line int, format string, v ...any) error {
}
}
-func parseLoad(lines []string, i int) (int, *loadCmd, error) {
+func parseLoad(lines []string, i int, startTime time.Time) (int, *loadCmd, error) {
if !patLoad.MatchString(lines[i]) {
return i, nil, raise(i, "invalid load command. (load[_with_nhcb] )")
}
@@ -245,6 +245,7 @@ func parseLoad(lines []string, i int) (int, *loadCmd, error) {
return i, nil, raise(i, "invalid step definition %q: %s", step, err)
}
cmd := newLoadCmd(time.Duration(gap), withNHCB)
+ cmd.startTime = startTime
for i+1 < len(lines) {
i++
defLine := lines[i]
@@ -579,7 +580,7 @@ func (t *test) parse(input string) error {
case c == "clear":
cmd = &clearCmd{}
case strings.HasPrefix(c, "load"):
- i, cmd, err = parseLoad(lines, i)
+ i, cmd, err = parseLoad(lines, i, testStartTime)
case strings.HasPrefix(c, "eval"):
i, cmd, err = t.parseEval(lines, i)
default:
@@ -611,6 +612,7 @@ type loadCmd struct {
defs map[uint64][]promql.Sample
exemplars map[uint64][]exemplar.Exemplar
withNHCB bool
+ startTime time.Time
}
func newLoadCmd(gap time.Duration, withNHCB bool) *loadCmd {
@@ -620,6 +622,7 @@ func newLoadCmd(gap time.Duration, withNHCB bool) *loadCmd {
defs: map[uint64][]promql.Sample{},
exemplars: map[uint64][]exemplar.Exemplar{},
withNHCB: withNHCB,
+ startTime: testStartTime,
}
}
@@ -632,7 +635,7 @@ func (cmd *loadCmd) set(m labels.Labels, vals ...parser.SequenceValue) {
h := m.Hash()
samples := make([]promql.Sample, 0, len(vals))
- ts := testStartTime
+ ts := cmd.startTime
for _, v := range vals {
if !v.Omitted {
samples = append(samples, promql.Sample{
@@ -1516,6 +1519,10 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq
// Check query returns same result in range mode,
// by checking against the middle step.
+	// Skip this check for queries containing range(), since range() resolves to the
+	// query range and would therefore differ between this instant query and the
+	// range query used for the cross-check.
+ if strings.Contains(iq.expr, "range()") {
+ return nil
+ }
q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
if err != nil {
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
@@ -1627,6 +1634,8 @@ type LazyLoaderOpts struct {
// Currently defaults to false, matches the "promql-delayed-name-removal"
// feature flag.
EnableDelayedNameRemoval bool
+ // StartTime is the start time for the test. If zero, defaults to Unix epoch.
+ StartTime time.Time
}
// NewLazyLoader returns an initialized empty LazyLoader.
@@ -1652,7 +1661,12 @@ func (ll *LazyLoader) parse(input string) error {
continue
}
if strings.HasPrefix(strings.ToLower(patSpace.Split(l, 2)[0]), "load") {
- _, cmd, err := parseLoad(lines, i)
+ // Determine the start time to use for loading samples.
+ startTime := testStartTime
+ if !ll.opts.StartTime.IsZero() {
+ startTime = ll.opts.StartTime
+ }
+ _, cmd, err := parseLoad(lines, i, startTime)
if err != nil {
return err
}
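A brief, hedged illustration of the new option: test authors can now anchor loaded samples at a wall-clock time other than the Unix epoch. Only the `StartTime` field is introduced by this change; the `NewLazyLoader` signature below is an assumption based on the current promqltest package, not something stated in the patch.

```go
// Sketch only: build a LazyLoader whose load commands start at a chosen time.
package example

import (
	"time"

	"github.com/prometheus/prometheus/promql/promqltest"
)

func newLoaderAt(start time.Time) (*promqltest.LazyLoader, error) {
	const input = `
load 1m
    metric_total 0+1x10
`
	return promqltest.NewLazyLoader(input, promqltest.LazyLoaderOpts{
		// New in this change; the zero value keeps the epoch default.
		StartTime: start,
	})
}
```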
diff --git a/promql/promqltest/test_migrate.go b/promql/promqltest/test_migrate.go
index 0b233e7592..693b773b7d 100644
--- a/promql/promqltest/test_migrate.go
+++ b/promql/promqltest/test_migrate.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/promqltest/test_migrate_test.go b/promql/promqltest/test_migrate_test.go
index fcf7e9db03..6c9784b56f 100644
--- a/promql/promqltest/test_migrate_test.go
+++ b/promql/promqltest/test_migrate_test.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/promqltest/test_test.go b/promql/promqltest/test_test.go
index f441d148d6..cbb73a5651 100644
--- a/promql/promqltest/test_test.go
+++ b/promql/promqltest/test_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/promqltest/testdata/duration_expression.test b/promql/promqltest/testdata/duration_expression.test
index db8253777b..e58b34131b 100644
--- a/promql/promqltest/testdata/duration_expression.test
+++ b/promql/promqltest/testdata/duration_expression.test
@@ -225,4 +225,27 @@ eval range from 50s to 60s step 5s metric1_total offset max(3s,min(step(), 1s))+
{} 8047 8052 8057
eval range from 50s to 60s step 5s metric1_total offset -(min(step(), 2s)-5)+8000
- {} 8047 8052 8057
\ No newline at end of file
+ {} 8047 8052 8057
+
+# Test range() function - resolves to query range (end - start).
+# For a range query from 50s to 60s, range() = 10s.
+eval range from 50s to 60s step 10s count_over_time(metric1_total[range()])
+ {} 10 10
+
+eval range from 50s to 60s step 5s count_over_time(metric1_total[range()])
+ {} 10 10 10
+
+eval range from 50s to 60s step 5s metric1_total offset range()
+ metric1_total{} 40 45 50
+
+eval range from 50s to 60s step 5s metric1_total offset min(range(), 8s)
+ metric1_total{} 42 47 52
+
+clear
+
+load 1s
+ metric1_total 0+1x100
+
+# For an instant query (start == end), range() = 0s, offset 0s.
+eval instant at 50s metric1_total offset range()
+ metric1_total{} 50
diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test
index ba3df76ff6..7bc4bcb624 100644
--- a/promql/promqltest/testdata/functions.test
+++ b/promql/promqltest/testdata/functions.test
@@ -2014,3 +2014,38 @@ eval instant at 0m scalar({type="histogram"})
# One float in the vector.
eval instant at 0m scalar({l="x"})
1
+
+clear
+load 20m
+ series{label="a", idx="1"} 2 _
+ series{label="a", idx="2"} _ 4
+
+eval instant at 0 label_replace(series, "idx", "replaced", "idx", ".*")
+ series{label="a", idx="replaced"} 2
+
+eval instant at 20m label_replace(series, "idx", "replaced", "idx", ".*")
+ series{label="a", idx="replaced"} 4
+
+eval range from 0 to 20m step 20m label_replace(series, "idx", "replaced", "idx", ".*")
+ series{label="a", idx="replaced"} 2 4
+
+# Test label_join with non-overlapping series.
+eval instant at 0 label_join(series, "idx", ",", "label", "label")
+ series{label="a", idx="a,a"} 2
+
+eval instant at 20m label_join(series, "idx", ",", "label", "label")
+ series{label="a", idx="a,a"} 4
+
+eval range from 0 to 20m step 20m label_join(series, "idx", ",", "label", "label")
+ series{label="a", idx="a,a"} 2 4
+
+# Test label_replace failure with overlapping timestamps (same labelset at same time).
+clear
+load 1m
+ overlap{label="a", idx="1"} 1
+ overlap{label="a", idx="2"} 2
+
+eval_fail instant at 0 label_replace(overlap, "idx", "same", "idx", ".*")
+
+# Test label_join failure with overlapping timestamps (same labelset at same time).
+eval_fail instant at 0 label_join(overlap, "idx", ",", "label", "label")
diff --git a/promql/promqltest/testdata/histograms.test b/promql/promqltest/testdata/histograms.test
index 84a467a314..436390ee41 100644
--- a/promql/promqltest/testdata/histograms.test
+++ b/promql/promqltest/testdata/histograms.test
@@ -158,6 +158,383 @@ eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3_bucket[10m]))
{start="positive"} 0.6363636363636364
{start="negative"} 0
+# Positive buckets, lower falls in the first bucket.
+load_with_nhcb 5m
+ positive_buckets_lower_falls_in_the_first_bucket_bucket{le="1"} 1+0x10
+ positive_buckets_lower_falls_in_the_first_bucket_bucket{le="2"} 3+0x10
+ positive_buckets_lower_falls_in_the_first_bucket_bucket{le="3"} 6+0x10
+ positive_buckets_lower_falls_in_the_first_bucket_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [0, 1]: contributes 1.0 observation (full bucket).
+# - Bucket [1, 2]: contributes (1.5-1)/(2-1) * (3-1) = 0.5 * 2 = 1.0 observations.
+# Total: (1.0 + 1.0) / 100.0 = 0.02
+
+eval instant at 50m histogram_fraction(0, 1.5, positive_buckets_lower_falls_in_the_first_bucket_bucket)
+ expect no_warn
+ {} 0.02
+
+eval instant at 50m histogram_fraction(0, 1.5, positive_buckets_lower_falls_in_the_first_bucket)
+ expect no_warn
+ {} 0.02
+
+# Negative buckets, lower falls in the first bucket.
+load_with_nhcb 5m
+ negative_buckets_lower_falls_in_the_first_bucket_bucket{le="-3"} 10+0x10
+ negative_buckets_lower_falls_in_the_first_bucket_bucket{le="-2"} 12+0x10
+ negative_buckets_lower_falls_in_the_first_bucket_bucket{le="-1"} 15+0x10
+ negative_buckets_lower_falls_in_the_first_bucket_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [-Inf, -3]: contributes zero observations (no interpolation with infinite width bucket).
+# - Bucket [-3, -2]: contributes 12-10 = 2.0 observations (full bucket).
+# Total: 2.0 / 100.0 = 0.02
+
+eval instant at 50m histogram_fraction(-4, -2, negative_buckets_lower_falls_in_the_first_bucket_bucket)
+ expect no_warn
+ {} 0.02
+
+eval instant at 50m histogram_fraction(-4, -2, negative_buckets_lower_falls_in_the_first_bucket)
+ expect no_warn
+ {} 0.02
+
+# Lower is -Inf.
+load_with_nhcb 5m
+ lower_is_negative_Inf_bucket{le="-3"} 10+0x10
+ lower_is_negative_Inf_bucket{le="-2"} 12+0x10
+ lower_is_negative_Inf_bucket{le="-1"} 15+0x10
+ lower_is_negative_Inf_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [-Inf, -3]: contributes 10.0 observations (full bucket).
+# - Bucket [-3, -2]: contributes 12-10 = 2.0 observations (full bucket).
+# - Bucket [-2, -1]: contributes (-1.5-(-2))/(-1-(-2)) * (15-12) = 0.5 * 3 = 1.5 observations.
+# Total: (10.0 + 2.0 + 1.5) / 100.0 = 0.135
+
+eval instant at 50m histogram_fraction(-Inf, -1.5, lower_is_negative_Inf_bucket)
+ expect no_warn
+ {} 0.135
+
+eval instant at 50m histogram_fraction(-Inf, -1.5, lower_is_negative_Inf)
+ expect no_warn
+ {} 0.135
+
+# Lower is -Inf and upper is +Inf (positive buckets).
+load_with_nhcb 5m
+ lower_is_negative_Inf_and_upper_is_positive_Inf__positive_buckets__bucket{le="1"} 1+0x10
+ lower_is_negative_Inf_and_upper_is_positive_Inf__positive_buckets__bucket{le="2"} 3+0x10
+ lower_is_negative_Inf_and_upper_is_positive_Inf__positive_buckets__bucket{le="3"} 6+0x10
+ lower_is_negative_Inf_and_upper_is_positive_Inf__positive_buckets__bucket{le="+Inf"} 100+0x10
+
+# Range [-Inf, +Inf] captures all observations.
+
+eval instant at 50m histogram_fraction(-Inf, +Inf, lower_is_negative_Inf_and_upper_is_positive_Inf__positive_buckets__bucket)
+ expect no_warn
+ {} 1.0
+
+eval instant at 50m histogram_fraction(-Inf, +Inf, lower_is_negative_Inf_and_upper_is_positive_Inf__positive_buckets_)
+ expect no_warn
+ {} 1.0
+
+# Lower is -Inf and upper is +Inf (negative buckets).
+load_with_nhcb 5m
+ lower_is_negative_Inf_and_upper_is_positive_Inf__negative_buckets__bucket{le="-3"} 10+0x10
+ lower_is_negative_Inf_and_upper_is_positive_Inf__negative_buckets__bucket{le="-2"} 12+0x10
+ lower_is_negative_Inf_and_upper_is_positive_Inf__negative_buckets__bucket{le="-1"} 15+0x10
+ lower_is_negative_Inf_and_upper_is_positive_Inf__negative_buckets__bucket{le="+Inf"} 100+0x10
+
+# Range [-Inf, +Inf] captures all observations.
+
+eval instant at 50m histogram_fraction(-Inf, +Inf, lower_is_negative_Inf_and_upper_is_positive_Inf__negative_buckets__bucket)
+ expect no_warn
+ {} 1.0
+
+eval instant at 50m histogram_fraction(-Inf, +Inf, lower_is_negative_Inf_and_upper_is_positive_Inf__negative_buckets_)
+ expect no_warn
+ {} 1.0
+
+# Lower and upper fall in last bucket (positive buckets).
+load_with_nhcb 5m
+ lower_and_upper_fall_in_last_bucket__positive_buckets__bucket{le="1"} 1+0x10
+ lower_and_upper_fall_in_last_bucket__positive_buckets__bucket{le="2"} 3+0x10
+ lower_and_upper_fall_in_last_bucket__positive_buckets__bucket{le="3"} 6+0x10
+ lower_and_upper_fall_in_last_bucket__positive_buckets__bucket{le="+Inf"} 100+0x10
+
+# - Bucket [3, +Inf]: contributes zero observations (no interpolation with infinite width bucket).
+# Total: 0.0 / 100.0 = 0.0
+
+eval instant at 50m histogram_fraction(4, 5, lower_and_upper_fall_in_last_bucket__positive_buckets__bucket)
+ expect no_warn
+ {} 0.0
+
+eval instant at 50m histogram_fraction(4, 5, lower_and_upper_fall_in_last_bucket__positive_buckets_)
+ expect no_warn
+ {} 0.0
+
+# Lower and upper fall in last bucket (negative buckets).
+load_with_nhcb 5m
+ lower_and_upper_fall_in_last_bucket__negative_buckets__bucket{le="-3"} 10+0x10
+ lower_and_upper_fall_in_last_bucket__negative_buckets__bucket{le="-2"} 12+0x10
+ lower_and_upper_fall_in_last_bucket__negative_buckets__bucket{le="-1"} 15+0x10
+ lower_and_upper_fall_in_last_bucket__negative_buckets__bucket{le="+Inf"} 100+0x10
+
+# - Bucket [-1, +Inf]: contributes zero observations (no interpolation with infinite width bucket).
+# Total: 0.0 / 100.0 = 0.0
+
+eval instant at 50m histogram_fraction(0, 1, lower_and_upper_fall_in_last_bucket__negative_buckets__bucket)
+ expect no_warn
+ {} 0.0
+
+eval instant at 50m histogram_fraction(0, 1, lower_and_upper_fall_in_last_bucket__negative_buckets_)
+ expect no_warn
+ {} 0.0
+
+# Upper falls in last bucket.
+load_with_nhcb 5m
+ upper_falls_in_last_bucket_bucket{le="1"} 1+0x10
+ upper_falls_in_last_bucket_bucket{le="2"} 3+0x10
+ upper_falls_in_last_bucket_bucket{le="3"} 6+0x10
+ upper_falls_in_last_bucket_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [2, 3]: 6-3 = 3.0 observations (full bucket).
+# - Bucket [3, +Inf]: contributes zero observations (no interpolation with infinite width bucket).
+# Total: 3.0 / 100.0 = 0.03
+
+eval instant at 50m histogram_fraction(2, 5, upper_falls_in_last_bucket_bucket)
+ expect no_warn
+ {} 0.03
+
+eval instant at 50m histogram_fraction(2, 5, upper_falls_in_last_bucket)
+ expect no_warn
+ {} 0.03
+
+# Upper is +Inf.
+load_with_nhcb 5m
+ upper_is_positive_Inf_bucket{le="1"} 1+0x10
+ upper_is_positive_Inf_bucket{le="2"} 3+0x10
+ upper_is_positive_Inf_bucket{le="3"} 6+0x10
+ upper_is_positive_Inf_bucket{le="+Inf"} 100+0x10
+
+# All observations in +Inf bucket: 100-6 = 94.0 observations.
+# Total: 94.0 / 100.0 = 0.94
+
+eval instant at 50m histogram_fraction(400, +Inf, upper_is_positive_Inf_bucket)
+ expect no_warn
+ {} 0.94
+
+eval instant at 50m histogram_fraction(400, +Inf, upper_is_positive_Inf)
+ expect no_warn
+ {} 0.94
+
+# Lower equals upper.
+load_with_nhcb 5m
+ lower_equals_upper_bucket{le="1"} 1+0x10
+ lower_equals_upper_bucket{le="2"} 3+0x10
+ lower_equals_upper_bucket{le="3"} 6+0x10
+ lower_equals_upper_bucket{le="+Inf"} 100+0x10
+
+# No observations can be captured in a zero-width range.
+
+eval instant at 50m histogram_fraction(2, 2, lower_equals_upper_bucket)
+ expect no_warn
+ {} 0.0
+
+eval instant at 50m histogram_fraction(2, 2, lower_equals_upper)
+ expect no_warn
+ {} 0.0
+
+# Lower greater than upper.
+load_with_nhcb 5m
+ lower_greater_than_upper_bucket{le="1"} 1+0x10
+ lower_greater_than_upper_bucket{le="2"} 3+0x10
+ lower_greater_than_upper_bucket{le="3"} 6+0x10
+ lower_greater_than_upper_bucket{le="+Inf"} 100+0x10
+
+eval instant at 50m histogram_fraction(3, 2, lower_greater_than_upper_bucket)
+ expect no_warn
+ {} 0.0
+
+eval instant at 50m histogram_fraction(3, 2, lower_greater_than_upper)
+ expect no_warn
+ {} 0.0
+
+# Single bucket.
+load_with_nhcb 5m
+ single_bucket_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [0, +Inf]: contributes zero observations (no interpolation with infinite width bucket).
+# Total: 0.0 / 100.0 = 0.0
+
+eval instant at 50m histogram_fraction(0, 1, single_bucket_bucket)
+ expect no_warn
+ {} 0.0
+
+eval instant at 50m histogram_fraction(0, 1, single_bucket)
+ expect no_warn
+ {} 0.0
+
+# All zero counts.
+load_with_nhcb 5m
+ all_zero_counts_bucket{le="1"} 0+0x10
+ all_zero_counts_bucket{le="2"} 0+0x10
+ all_zero_counts_bucket{le="3"} 0+0x10
+ all_zero_counts_bucket{le="+Inf"} 0+0x10
+
+eval instant at 50m histogram_fraction(0, 5, all_zero_counts_bucket)
+ expect no_warn
+ {} NaN
+
+eval instant at 50m histogram_fraction(0, 5, all_zero_counts)
+ expect no_warn
+ {} NaN
+
+# Lower exactly on bucket boundary.
+load_with_nhcb 5m
+ lower_exactly_on_bucket_boundary_bucket{le="1"} 1+0x10
+ lower_exactly_on_bucket_boundary_bucket{le="2"} 3+0x10
+ lower_exactly_on_bucket_boundary_bucket{le="3"} 6+0x10
+ lower_exactly_on_bucket_boundary_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [2, 3]: 6-3 = 3.0 observations (full bucket).
+# - Bucket [3, +Inf]: contributes zero observations (no interpolation with infinite width bucket).
+# Total: 3.0 / 100.0 = 0.03
+
+eval instant at 50m histogram_fraction(2, 3.5, lower_exactly_on_bucket_boundary_bucket)
+ expect no_warn
+ {} 0.03
+
+eval instant at 50m histogram_fraction(2, 3.5, lower_exactly_on_bucket_boundary)
+ expect no_warn
+ {} 0.03
+
+# Upper exactly on bucket boundary.
+load_with_nhcb 5m
+ upper_exactly_on_bucket_boundary_bucket{le="1"} 1+0x10
+ upper_exactly_on_bucket_boundary_bucket{le="2"} 3+0x10
+ upper_exactly_on_bucket_boundary_bucket{le="3"} 6+0x10
+ upper_exactly_on_bucket_boundary_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [0, 1]: (1.0-0.5)/(1.0-0.0) * 1.0 = 0.5 * 1.0 = 0.5 observations.
+# - Bucket [1, 2]: 3-1 = 2.0 observations (full bucket).
+# Total: (0.5 + 2.0) / 100.0 = 0.025
+
+eval instant at 50m histogram_fraction(0.5, 2, upper_exactly_on_bucket_boundary_bucket)
+ expect no_warn
+ {} 0.025
+
+eval instant at 50m histogram_fraction(0.5, 2, upper_exactly_on_bucket_boundary)
+ expect no_warn
+ {} 0.025
+
+# Both bounds exactly on bucket boundaries.
+load_with_nhcb 5m
+ both_bounds_exactly_on_bucket_boundaries_bucket{le="1"} 1+0x10
+ both_bounds_exactly_on_bucket_boundaries_bucket{le="2"} 3+0x10
+ both_bounds_exactly_on_bucket_boundaries_bucket{le="3"} 6+0x10
+ both_bounds_exactly_on_bucket_boundaries_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [1, 2]: 3-1 = 2.0 observations (full bucket).
+# - Bucket [2, 3]: 6-3 = 3.0 observations (full bucket).
+# Total: (2.0 + 3.0) / 100.0 = 0.05
+
+eval instant at 50m histogram_fraction(1, 3, both_bounds_exactly_on_bucket_boundaries_bucket)
+ expect no_warn
+ {} 0.05
+
+eval instant at 50m histogram_fraction(1, 3, both_bounds_exactly_on_bucket_boundaries)
+ expect no_warn
+ {} 0.05
+
+# Fractional bucket bounds.
+load_with_nhcb 5m
+ fractional_bucket_bounds_bucket{le="0.5"} 2.5+0x10
+ fractional_bucket_bounds_bucket{le="1"} 7.5+0x10
+ fractional_bucket_bounds_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [0, 0.5]: (0.5-0.1)/(0.5-0.0) * 2.5 = 0.8 * 2.5 = 2.0 observations.
+# - Bucket [0.5, 1.0]: (0.75-0.5)/(1.0-0.5) * (7.5-2.5) = 0.5 * 5.0 = 2.5 observations.
+# Total: (2.0 + 2.5) / 100.0 = 0.045
+
+eval instant at 50m histogram_fraction(0.1, 0.75, fractional_bucket_bounds_bucket)
+ expect no_warn
+ {} 0.045
+
+eval instant at 50m histogram_fraction(0.1, 0.75, fractional_bucket_bounds)
+ expect no_warn
+ {} 0.045
+
+# Range crosses zero.
+load_with_nhcb 5m
+ range_crosses_zero_bucket{le="-2"} 5+0x10
+ range_crosses_zero_bucket{le="-1"} 10+0x10
+ range_crosses_zero_bucket{le="0"} 15+0x10
+ range_crosses_zero_bucket{le="1"} 20+0x10
+ range_crosses_zero_bucket{le="+Inf"} 100+0x10
+
+# - Bucket [-1, 0]: 15-10 = 5.0 observations (full bucket).
+# - Bucket [0, 1]: 20-15 = 5.0 observations (full bucket).
+# Total: (5.0 + 5.0) / 100.0 = 0.1
+
+eval instant at 50m histogram_fraction(-1, 1, range_crosses_zero_bucket)
+ expect no_warn
+ {} 0.1
+
+eval instant at 50m histogram_fraction(-1, 1, range_crosses_zero)
+ expect no_warn
+ {} 0.1
+
+# Lower is NaN.
+load_with_nhcb 5m
+ lower_is_NaN_bucket{le="1"} 1+0x10
+ lower_is_NaN_bucket{le="+Inf"} 100+0x10
+
+eval instant at 50m histogram_fraction(NaN, 1, lower_is_NaN_bucket)
+ expect no_warn
+ {} NaN
+
+eval instant at 50m histogram_fraction(NaN, 1, lower_is_NaN)
+ expect no_warn
+ {} NaN
+
+# Upper is NaN.
+load_with_nhcb 5m
+ upper_is_NaN_bucket{le="1"} 1+0x10
+ upper_is_NaN_bucket{le="+Inf"} 100+0x10
+
+eval instant at 50m histogram_fraction(0, NaN, upper_is_NaN_bucket)
+ expect no_warn
+ {} NaN
+
+eval instant at 50m histogram_fraction(0, NaN, upper_is_NaN)
+ expect no_warn
+ {} NaN
+
+# Range entirely below all buckets.
+load_with_nhcb 5m
+ range_entirely_below_all_buckets_bucket{le="1"} 1+0x10
+ range_entirely_below_all_buckets_bucket{le="2"} 3+0x10
+ range_entirely_below_all_buckets_bucket{le="+Inf"} 10+0x10
+
+eval instant at 50m histogram_fraction(-10, -5, range_entirely_below_all_buckets_bucket)
+ expect no_warn
+ {} 0.0
+
+eval instant at 50m histogram_fraction(-10, -5, range_entirely_below_all_buckets)
+ expect no_warn
+ {} 0.0
+
+# Range entirely above all buckets.
+load_with_nhcb 5m
+ range_entirely_above_all_buckets_bucket{le="1"} 1+0x10
+ range_entirely_above_all_buckets_bucket{le="2"} 3+0x10
+ range_entirely_above_all_buckets_bucket{le="+Inf"} 10+0x10
+
+eval instant at 50m histogram_fraction(5, 10, range_entirely_above_all_buckets_bucket)
+ expect no_warn
+ {} 0.0
+
+eval instant at 50m histogram_fraction(5, 10, range_entirely_above_all_buckets)
+ expect no_warn
+ {} 0.0
+
+
# In the classic histogram, we can access the corresponding bucket (if
# it exists) and divide by the count to get the same result.
diff --git a/promql/promqltest/testdata/name_label_dropping.test b/promql/promqltest/testdata/name_label_dropping.test
index 3a6f4098df..e0180c7ffe 100644
--- a/promql/promqltest/testdata/name_label_dropping.test
+++ b/promql/promqltest/testdata/name_label_dropping.test
@@ -126,3 +126,13 @@ eval instant at 10m sum by (__name__) (metric_total{env="3"} or rate(metric_tota
# Same as above, but with reversed order.
eval instant at 10m sum by (__name__) (rate(metric_total{env="3"}[5m]) or metric_total{env="1"})
metric_total 10
+
+clear
+
+# Test delayed name removal with range queries and OR operator.
+load 10m
+ metric_a 1 _
+ metric_b 3 4
+
+eval range from 0 to 20m step 10m -metric_a or -metric_b
+ {} -1 -4 _
diff --git a/promql/promqltest/testdata/operators.test b/promql/promqltest/testdata/operators.test
index 0e779f192c..cd608b3c36 100644
--- a/promql/promqltest/testdata/operators.test
+++ b/promql/promqltest/testdata/operators.test
@@ -316,6 +316,27 @@ eval instant at 5m http_requests_histogram == http_requests_histogram
eval instant at 5m http_requests_histogram != http_requests_histogram
expect no_info
+clear
+
+# Check that many-to-one vector matching errors are still reported even when the
+# comparison operator filters away most or all of the series on the "many" side.
+load 5m
+ many_side{label="foo",job="test"} 0
+ many_side{label="bar",job="test"} 1
+ one_side{job="test"} 1
+
+# Check that the error is raised even when 0 series survive the filtering.
+eval instant at 0m many_side > on(job) one_side
+ expect fail
+
+# Check that the error is raised even when 1 series survives the filtering.
+eval instant at 0m many_side >= on(job) one_side
+ expect fail
+
+# Check that the error is raised even when 2 series survive the filtering.
+eval instant at 0m many_side <= on(job) one_side
+ expect fail
+
# group_left/group_right.
clear
@@ -959,3 +980,40 @@ eval instant at 10m (testhistogram) and on() (vector(-1) == 1)
eval range from 0 to 10m step 5m (testhistogram) and on() (vector(-1) == 1)
clear
+
+# Test unary negation with non-overlapping series that have different metric names.
+# After negation, the __name__ label is dropped, so series with different names
+# but same other labels should merge if they don't overlap in time.
+load 20m
+ http_requests{job="api"} 2 _
+ http_errors{job="api"} _ 4
+
+eval instant at 0 -{job="api"}
+ {job="api"} -2
+
+eval instant at 20m -{job="api"}
+ {job="api"} -4
+
+eval range from 0 to 20m step 20m -{job="api"}
+ {job="api"} -2 -4
+
+# Test unary negation failure with overlapping timestamps (same labelset at same time).
+clear
+load 1m
+ http_requests{job="api"} 1
+ http_errors{job="api"} 2
+
+eval_fail instant at 0 -{job="api"}
+
+clear
+
+# Test unary negation with "or" operator combining metrics with removed names.
+load 10m
+ metric_a 1 _
+ metric_b 3 4
+
+# Use "-" unary operator as a simple way to remove the metric name.
+eval range from 0 to 20m step 10m -metric_a or -metric_b
+ {} -1 -4
+
+clear
diff --git a/promql/quantile.go b/promql/quantile.go
index 1454974107..c44eb89e68 100644
--- a/promql/quantile.go
+++ b/promql/quantile.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -406,6 +406,18 @@ func HistogramFraction(lower, upper float64, h *histogram.FloatHistogram, metric
// consistent with the linear interpolation known from classic
// histograms. It is also used for the zero bucket.
interpolateLinearly := func(v float64) float64 {
+ // Note: `v` is a finite value.
+ // For buckets with infinite bounds, we cannot interpolate meaningfully.
+ // For +Inf upper bound, interpolation returns the cumulative count of the previous bucket
+ // as the second term in the interpolation formula yields 0 (finite/Inf).
+ // In other words, no observations from the last bucket are considered in the fraction calculation.
+ // For -Inf lower bound, however, the second term would be (v-(-Inf))/(upperBound-(-Inf)) = Inf/Inf = NaN.
+ // To achieve the same effect of no contribution as the +Inf bucket, handle the -Inf case by returning
+ // the cumulative count at the first bucket (which equals the bucket's count).
+ // In both cases, we effectively skip interpolation within the infinite-width bucket.
+ if b.Lower == math.Inf(-1) {
+ return b.Count
+ }
return rank + b.Count*(v-b.Lower)/(b.Upper-b.Lower)
}
@@ -531,14 +543,34 @@ func BucketFraction(lower, upper float64, buckets Buckets) float64 {
rank, lowerRank, upperRank float64
lowerSet, upperSet bool
)
+
+ // If the upper bound of the first bucket is greater than 0, we assume
+ // we are dealing with positive buckets only and lowerBound for the
+ // first bucket is set to 0; otherwise it is set to -Inf.
+ lowerBound := 0.0
+ if buckets[0].UpperBound <= 0 {
+ lowerBound = math.Inf(-1)
+ }
+
for i, b := range buckets {
- lowerBound := math.Inf(-1)
if i > 0 {
lowerBound = buckets[i-1].UpperBound
}
upperBound := b.UpperBound
interpolateLinearly := func(v float64) float64 {
+ // Note: `v` is a finite value.
+ // For buckets with infinite bounds, we cannot interpolate meaningfully.
+ // For +Inf upper bound, interpolation returns the cumulative count of the previous bucket
+ // as the second term in the interpolation formula yields 0 (finite/Inf).
+ // In other words, no observations from the last bucket are considered in the fraction calculation.
+ // For -Inf lower bound, however, the second term would be (v-(-Inf))/(upperBound-(-Inf)) = Inf/Inf = NaN.
+ // To achieve the same effect of no contribution as the +Inf bucket, handle the -Inf case by returning
+ // the cumulative count at the first bucket.
+ // In both cases, we effectively skip interpolation within the infinite-width bucket.
+ if lowerBound == math.Inf(-1) {
+ return b.Count
+ }
return rank + (b.Count-rank)*(v-lowerBound)/(upperBound-lowerBound)
}
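The guarded interpolation above is easiest to verify against a concrete classic-histogram case. The sketch below (illustrative only, plain floats, no Prometheus types) reproduces the finite-bucket branch: for buckets le=1 -> 1, le=2 -> 3, le=3 -> 6, le=+Inf -> 100 and histogram_fraction(0, 1.5, ...), the cumulative count below 1.5 is 1 + (3-1)*(1.5-1)/(2-1) = 2, giving a fraction of 2/100 = 0.02, which is the value asserted by the new histograms.test case.

```go
// Illustrative only: the finite-bucket linear interpolation used by histogram_fraction.
package example

// cumulativeBelow estimates the cumulative count of observations below v, where v
// falls into the finite bucket (lowerBound, upperBound] and cumPrev/cumThis are the
// cumulative counts up to the previous and the current bucket respectively.
func cumulativeBelow(v, lowerBound, upperBound, cumPrev, cumThis float64) float64 {
	return cumPrev + (cumThis-cumPrev)*(v-lowerBound)/(upperBound-lowerBound)
}
```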
diff --git a/promql/quantile_test.go b/promql/quantile_test.go
index a1047d73f4..c97ff7c3c4 100644
--- a/promql/quantile_test.go
+++ b/promql/quantile_test.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/query_logger.go b/promql/query_logger.go
index 5923223aa0..954f8b1a5b 100644
--- a/promql/query_logger.go
+++ b/promql/query_logger.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/query_logger_test.go b/promql/query_logger_test.go
index 47a6d1a25d..8c88757bd7 100644
--- a/promql/query_logger_test.go
+++ b/promql/query_logger_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/value.go b/promql/value.go
index b909085b17..02cb021024 100644
--- a/promql/value.go
+++ b/promql/value.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/promql/value_test.go b/promql/value_test.go
index 0017b41e2c..c7454284ff 100644
--- a/promql/value_test.go
+++ b/promql/value_test.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/renovate.json b/renovate.json
index 175e1d6464..350cfe2a0d 100644
--- a/renovate.json
+++ b/renovate.json
@@ -1,62 +1,89 @@
{
- "$schema": "https://docs.renovatebot.com/renovate-schema.json",
- "extends": [
- "config:recommended"
- ],
- "separateMultipleMajor": true,
- "baseBranches": ["main"],
- "postUpdateOptions": [
- "gomodTidy",
- "gomodUpdateImportPaths"
- ],
- "schedule": ["* 0-8 * * 1"],
- "timezone": "UTC",
- "packageRules": [
- {
- "description": "Don't update replace directives",
- "matchPackageNames": [
- "github.com/fsnotify/fsnotify"
- ],
- "enabled": false
- },
- {
- "description": "Don't update prometheus-io namespace packages",
- "matchPackageNames": ["@prometheus-io/**"],
- "enabled": false
- },
- {
- "description": "Group Mantine UI dependencies",
- "matchFileNames": [
- "web/ui/mantine-ui/package.json"
- ],
- "groupName": "Mantine UI",
- "matchUpdateTypes": ["minor", "patch"],
- "enabled": true
- },
- {
- "description": "Group React App dependencies",
- "matchFileNames": [
- "web/ui/react-app/package.json"
- ],
- "groupName": "React App",
- "matchUpdateTypes": ["minor", "patch"],
- "enabled": true
- },
- {
- "description": "Group module dependencies",
- "matchFileNames": [
- "web/ui/module/**/package.json"
- ],
- "groupName": "Modules",
- "matchUpdateTypes": ["minor", "patch"],
- "enabled": true
- }
- ],
- "branchPrefix": "deps-update/",
- "vulnerabilityAlerts": {
- "enabled": true,
- "labels": ["security-update"]
+ "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+ "extends": [
+ "config:recommended"
+ ],
+ "separateMultipleMajor": true,
+ "baseBranches": ["main"],
+ "postUpdateOptions": [
+ "gomodTidy",
+ "gomodUpdateImportPaths"
+ ],
+ "schedule": ["* 11 21 * *"],
+ "timezone": "UTC",
+ "github-actions": {
+ "managerFilePatterns": ["scripts/**"]
+ },
+ "packageRules": [
+ {
+ "description": "Don't update replace directives",
+ "matchPackageNames": [
+ "github.com/fsnotify/fsnotify"
+ ],
+ "enabled": false
},
- "osvVulnerabilityAlerts": true,
- "dependencyDashboardApproval": false
+ {
+ "description": "Don't update prometheus-io namespace packages",
+ "matchPackageNames": ["@prometheus-io/**"],
+ "enabled": false
+ },
+ {
+ "description": "Group AWS Go dependencies",
+ "matchManagers": ["gomod"],
+ "matchPackageNames": ["github.com/aws/**"],
+ "groupName": "AWS Go dependencies"
+ },
+ {
+ "description": "Group Azure Go dependencies",
+ "matchManagers": ["gomod"],
+ "matchPackageNames": ["github.com/Azure/**"],
+ "groupName": "Azure Go dependencies"
+ },
+ {
+ "description": "Group Kubernetes Go dependencies",
+ "matchManagers": ["gomod"],
+ "matchPackageNames": ["k8s.io/**"],
+ "groupName": "Kubernetes Go dependencies"
+ },
+ {
+ "description": "Group OpenTelemetry Go dependencies",
+ "matchManagers": ["gomod"],
+ "matchPackageNames": ["go.opentelemetry.io/**"],
+ "groupName": "OpenTelemetry Go dependencies"
+ },
+ {
+ "description": "Group Mantine UI dependencies",
+ "matchFileNames": [
+ "web/ui/mantine-ui/package.json"
+ ],
+ "groupName": "Mantine UI",
+ "matchUpdateTypes": ["minor", "patch"],
+ "enabled": true
+ },
+ {
+ "description": "Group React App dependencies",
+ "matchFileNames": [
+ "web/ui/react-app/package.json"
+ ],
+ "groupName": "React App",
+ "matchUpdateTypes": ["minor", "patch"],
+ "enabled": true
+ },
+ {
+ "description": "Group module dependencies",
+ "matchFileNames": [
+ "web/ui/module/**/package.json"
+ ],
+ "groupName": "Modules",
+ "matchUpdateTypes": ["minor", "patch"],
+ "enabled": true
+ }
+ ],
+ "branchPrefix": "deps-update/",
+ "vulnerabilityAlerts": {
+ "enabled": true,
+ "labels": ["security-update"]
+ },
+ "osvVulnerabilityAlerts": true,
+ "dependencyDashboardApproval": false
}
diff --git a/rules/alerting.go b/rules/alerting.go
index b0151d7cb3..d94113b46b 100644
--- a/rules/alerting.go
+++ b/rules/alerting.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -46,6 +46,10 @@ const (
alertStateLabel = "alertstate"
)
+// ErrDuplicateAlertLabelSet is returned when an alerting rule evaluation produces
+// metrics with identical labelsets after applying alert labels.
+var ErrDuplicateAlertLabelSet = errors.New("vector contains metrics with the same labelset after applying alert labels")
+
// AlertState denotes the state of an active alert.
type AlertState int
@@ -441,7 +445,7 @@ func (r *AlertingRule) Eval(ctx context.Context, queryOffset time.Duration, ts t
resultFPs[h] = struct{}{}
if _, ok := alerts[h]; ok {
- return nil, errors.New("vector contains metrics with the same labelset after applying alert labels")
+ return nil, ErrDuplicateAlertLabelSet
}
alerts[h] = &Alert{
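Aside on the change above: exporting ErrDuplicateAlertLabelSet lets callers match the failure with errors.Is instead of comparing error strings, which keeps working even when the error is later wrapped or the message is reworded. A minimal, self-contained sketch of that sentinel-error pattern; the rule name and helper below are illustrative, not the actual Prometheus code:

    package main

    import (
        "errors"
        "fmt"
    )

    // Sentinel error, exported so callers can match it with errors.Is.
    var ErrDuplicateLabelSet = errors.New("vector contains metrics with the same labelset")

    func eval() error {
        // Wrap the sentinel with extra context; errors.Is still matches it.
        return fmt.Errorf("rule %q: %w", "my_rule", ErrDuplicateLabelSet)
    }

    func main() {
        if err := eval(); errors.Is(err, ErrDuplicateLabelSet) {
            fmt.Println("duplicate labelset detected:", err)
        }
    }

This is also why the tests below switch from require.EqualError (string comparison) to require.ErrorIs.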
diff --git a/rules/alerting_test.go b/rules/alerting_test.go
index dc5a6d1c43..a2c7abcd56 100644
--- a/rules/alerting_test.go
+++ b/rules/alerting_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -612,7 +612,7 @@ func TestAlertingRuleDuplicate(t *testing.T) {
)
_, err := rule.Eval(ctx, 0, now, EngineQueryFunc(engine, storage), nil, 0)
require.Error(t, err)
- require.EqualError(t, err, "vector contains metrics with the same labelset after applying alert labels")
+ require.ErrorIs(t, err, ErrDuplicateAlertLabelSet)
}
func TestAlertingRuleLimit(t *testing.T) {
diff --git a/rules/group.go b/rules/group.go
index 8cedcd40d1..704fd13d85 100644
--- a/rules/group.go
+++ b/rules/group.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -519,6 +519,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
since := time.Since(t)
g.metrics.EvalDuration.Observe(since.Seconds())
+ g.metrics.EvalDurationHistogram.Observe(since.Seconds())
rule.SetEvaluationDuration(since)
rule.SetEvaluationTimestamp(t)
}(time.Now())
@@ -910,19 +911,21 @@ const namespace = "prometheus"
// Metrics for rule evaluation.
type Metrics struct {
- EvalDuration prometheus.Summary
- IterationDuration prometheus.Summary
- IterationsMissed *prometheus.CounterVec
- IterationsScheduled *prometheus.CounterVec
- EvalTotal *prometheus.CounterVec
- EvalFailures *prometheus.CounterVec
- GroupInterval *prometheus.GaugeVec
- GroupLastEvalTime *prometheus.GaugeVec
- GroupLastDuration *prometheus.GaugeVec
- GroupLastRuleDurationSum *prometheus.GaugeVec
- GroupLastRestoreDuration *prometheus.GaugeVec
- GroupRules *prometheus.GaugeVec
- GroupSamples *prometheus.GaugeVec
+ EvalDuration prometheus.Summary
+ EvalDurationHistogram prometheus.Histogram
+ IterationDuration prometheus.Summary
+ IterationDurationHistogram prometheus.Histogram
+ IterationsMissed *prometheus.CounterVec
+ IterationsScheduled *prometheus.CounterVec
+ EvalTotal *prometheus.CounterVec
+ EvalFailures *prometheus.CounterVec
+ GroupInterval *prometheus.GaugeVec
+ GroupLastEvalTime *prometheus.GaugeVec
+ GroupLastDuration *prometheus.GaugeVec
+ GroupLastRuleDurationSum *prometheus.GaugeVec
+ GroupLastRestoreDuration *prometheus.GaugeVec
+ GroupRules *prometheus.GaugeVec
+ GroupSamples *prometheus.GaugeVec
}
// NewGroupMetrics creates a new instance of Metrics and registers it with the provided registerer,
@@ -936,12 +939,30 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
Help: "The duration for a rule to execute.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}),
+ EvalDurationHistogram: prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Name: "rule_evaluation_duration_histogram_seconds",
+ Help: "The duration for a rule to execute.",
+ Buckets: []float64{.01, .1, 1, 10},
+ NativeHistogramBucketFactor: 1.1,
+ NativeHistogramMaxBucketNumber: 100,
+ NativeHistogramMinResetDuration: 1 * time.Hour,
+ }),
IterationDuration: prometheus.NewSummary(prometheus.SummaryOpts{
Namespace: namespace,
Name: "rule_group_duration_seconds",
Help: "The duration of rule group evaluations.",
Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
}),
+ IterationDurationHistogram: prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: namespace,
+ Name: "rule_group_duration_histogram_seconds",
+ Help: "The duration of rule group evaluations.",
+ Buckets: []float64{.01, .1, 1, 10},
+ NativeHistogramBucketFactor: 1.1,
+ NativeHistogramMaxBucketNumber: 100,
+ NativeHistogramMinResetDuration: 1 * time.Hour,
+ }),
IterationsMissed: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
@@ -1035,7 +1056,9 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
if reg != nil {
reg.MustRegister(
m.EvalDuration,
+ m.EvalDurationHistogram,
m.IterationDuration,
+ m.IterationDurationHistogram,
m.IterationsMissed,
m.IterationsScheduled,
m.EvalTotal,
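The new *_histogram_seconds metrics above combine classic buckets with client_golang's native-histogram options, so scrapers that understand native histograms get high-resolution sparse buckets while older ones still see the fixed buckets. A minimal sketch of defining and observing such a histogram outside the rules package (namespace and metric name are illustrative):

    package main

    import (
        "time"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        reg := prometheus.NewRegistry()

        evalDuration := prometheus.NewHistogram(prometheus.HistogramOpts{
            Namespace: "example",
            Name:      "rule_evaluation_duration_histogram_seconds",
            Help:      "The duration for a rule to execute.",
            // Classic buckets for consumers without native-histogram support.
            Buckets: []float64{.01, .1, 1, 10},
            // Native-histogram settings: sparse, automatically sized buckets.
            NativeHistogramBucketFactor:     1.1,
            NativeHistogramMaxBucketNumber:  100,
            NativeHistogramMinResetDuration: 1 * time.Hour,
        })
        reg.MustRegister(evalDuration)

        start := time.Now()
        // ... work being measured ...
        evalDuration.Observe(time.Since(start).Seconds())
    }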
diff --git a/rules/group_test.go b/rules/group_test.go
index ff1ef3d6c1..a110c78510 100644
--- a/rules/group_test.go
+++ b/rules/group_test.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/rules/manager.go b/rules/manager.go
index d2fb0a7797..c835a7c6e8 100644
--- a/rules/manager.go
+++ b/rules/manager.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -37,6 +37,7 @@ import (
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/util/features"
"github.com/prometheus/prometheus/util/strutil"
)
@@ -85,6 +86,7 @@ func DefaultEvalIterationFunc(ctx context.Context, g *Group, evalTimestamp time.
timeSinceStart := time.Since(start)
g.metrics.IterationDuration.Observe(timeSinceStart.Seconds())
+ g.metrics.IterationDurationHistogram.Observe(timeSinceStart.Seconds())
g.updateRuleEvaluationTimeSum()
g.setEvaluationTime(timeSinceStart)
g.setLastEvaluation(start)
@@ -133,6 +135,9 @@ type ManagerOptions struct {
RestoreNewRuleGroups bool
Metrics *Metrics
+
+ // FeatureRegistry is used to register rule manager features.
+ FeatureRegistry features.Collector
}
// NewManager returns an implementation of Manager, ready to be started
@@ -173,6 +178,13 @@ func NewManager(o *ManagerOptions) *Manager {
o.Logger = promslog.NewNopLogger()
}
+ // Register rule manager features if a registry is provided.
+ if o.FeatureRegistry != nil {
+ o.FeatureRegistry.Set(features.Rules, "concurrent_rule_eval", o.ConcurrentEvalsEnabled)
+ o.FeatureRegistry.Enable(features.Rules, "query_offset")
+ o.FeatureRegistry.Enable(features.Rules, "keep_firing_for")
+ }
+
m := &Manager{
groups: map[string]*Group{},
opts: o,
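The ManagerOptions change wires an optional feature registry into the rule manager and guards every call with a nil check, so embedders and tests that don't care about feature reporting can leave it unset. A minimal sketch of the same optional-collaborator pattern with a toy interface; the interface, names, and string constants below are illustrative stand-ins, not the actual util/features API:

    package main

    import "fmt"

    // FeatureCollector is a toy stand-in for an optional feature registry.
    type FeatureCollector interface {
        Set(area, name string, enabled bool)
        Enable(area, name string)
    }

    type Options struct {
        ConcurrentEvalsEnabled bool
        // FeatureRegistry may be nil; callers that don't report features skip it.
        FeatureRegistry FeatureCollector
    }

    type logCollector struct{}

    func (logCollector) Set(area, name string, enabled bool) { fmt.Println(area, name, enabled) }
    func (logCollector) Enable(area, name string)            { fmt.Println(area, name, true) }

    func NewManager(o Options) {
        // Only report features when a registry was provided.
        if o.FeatureRegistry != nil {
            o.FeatureRegistry.Set("rules", "concurrent_rule_eval", o.ConcurrentEvalsEnabled)
            o.FeatureRegistry.Enable("rules", "query_offset")
        }
    }

    func main() {
        NewManager(Options{ConcurrentEvalsEnabled: true, FeatureRegistry: logCollector{}})
    }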
diff --git a/rules/manager_test.go b/rules/manager_test.go
index a88be1e5d1..0991e8198a 100644
--- a/rules/manager_test.go
+++ b/rules/manager_test.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/rules/origin.go b/rules/origin.go
index 695fc5f838..683568c71f 100644
--- a/rules/origin.go
+++ b/rules/origin.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/rules/origin_test.go b/rules/origin_test.go
index 16f87de716..55ad927fd9 100644
--- a/rules/origin_test.go
+++ b/rules/origin_test.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/rules/recording.go b/rules/recording.go
index 2da6885f5b..61a27aceb6 100644
--- a/rules/recording.go
+++ b/rules/recording.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -30,6 +30,10 @@ import (
"github.com/prometheus/prometheus/promql/parser"
)
+// ErrDuplicateRecordingLabelSet is returned when a recording rule evaluation produces
+// metrics with identical labelsets after applying rule labels.
+var ErrDuplicateRecordingLabelSet = errors.New("vector contains metrics with the same labelset after applying rule labels")
+
// A RecordingRule records its vector expression into new timeseries.
type RecordingRule struct {
name string
@@ -104,7 +108,7 @@ func (rule *RecordingRule) Eval(ctx context.Context, queryOffset time.Duration,
// Check that the rule does not produce identical metrics after applying
// labels.
if vector.ContainsSameLabelset() {
- return nil, errors.New("vector contains metrics with the same labelset after applying rule labels")
+ return nil, ErrDuplicateRecordingLabelSet
}
numSeries := len(vector)
diff --git a/rules/recording_test.go b/rules/recording_test.go
index 014aa85ceb..1fee5ede72 100644
--- a/rules/recording_test.go
+++ b/rules/recording_test.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -176,7 +176,7 @@ func TestRuleEvalDuplicate(t *testing.T) {
rule := NewRecordingRule("foo", expr, labels.FromStrings("test", "test"))
_, err := rule.Eval(ctx, 0, now, EngineQueryFunc(engine, storage), nil, 0)
require.Error(t, err)
- require.EqualError(t, err, "vector contains metrics with the same labelset after applying rule labels")
+ require.ErrorIs(t, err, ErrDuplicateRecordingLabelSet)
}
func TestRecordingRuleLimit(t *testing.T) {
diff --git a/rules/rule.go b/rules/rule.go
index 33f1755ac5..fc88e22840 100644
--- a/rules/rule.go
+++ b/rules/rule.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/schema/labels.go b/schema/labels.go
index 05329af7f6..c71e352640 100644
--- a/schema/labels.go
+++ b/schema/labels.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/schema/labels_test.go b/schema/labels_test.go
index ae1ec9e90b..c2ba576c4a 100644
--- a/schema/labels_test.go
+++ b/schema/labels_test.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/scrape/clientprotobuf.go b/scrape/clientprotobuf.go
index 6dc22c959f..d84d4bebfc 100644
--- a/scrape/clientprotobuf.go
+++ b/scrape/clientprotobuf.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/scrape/helpers_test.go b/scrape/helpers_test.go
index ff7a7bf65a..dd5179b360 100644
--- a/scrape/helpers_test.go
+++ b/scrape/helpers_test.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -17,240 +17,127 @@ import (
"bytes"
"context"
"encoding/binary"
- "fmt"
- "math"
- "strings"
- "sync"
+ "net/http"
"testing"
+ "time"
"github.com/gogo/protobuf/proto"
dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
- "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
- "github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/util/pool"
+ "github.com/prometheus/prometheus/util/teststorage"
)
-type nopAppendable struct{}
+// For readability.
+type sample = teststorage.Sample
-func (nopAppendable) Appender(context.Context) storage.Appender {
- return nopAppender{}
-}
-
-type nopAppender struct{}
-
-func (nopAppender) SetOptions(*storage.AppendOptions) {}
-
-func (nopAppender) Append(storage.SeriesRef, labels.Labels, int64, float64) (storage.SeriesRef, error) {
- return 1, nil
-}
-
-func (nopAppender) AppendExemplar(storage.SeriesRef, labels.Labels, exemplar.Exemplar) (storage.SeriesRef, error) {
- return 2, nil
-}
-
-func (nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *histogram.Histogram, *histogram.FloatHistogram) (storage.SeriesRef, error) {
- return 3, nil
-}
-
-func (nopAppender) AppendHistogramSTZeroSample(storage.SeriesRef, labels.Labels, int64, int64, *histogram.Histogram, *histogram.FloatHistogram) (storage.SeriesRef, error) {
- return 0, nil
-}
-
-func (nopAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) {
- return 4, nil
-}
-
-func (nopAppender) AppendSTZeroSample(storage.SeriesRef, labels.Labels, int64, int64) (storage.SeriesRef, error) {
- return 5, nil
-}
-
-func (nopAppender) Commit() error { return nil }
-func (nopAppender) Rollback() error { return nil }
-
-type floatSample struct {
- metric labels.Labels
- t int64
- f float64
-}
-
-func equalFloatSamples(a, b floatSample) bool {
- // Compare Float64bits so NaN values which are exactly the same will compare equal.
- return labels.Equal(a.metric, b.metric) && a.t == b.t && math.Float64bits(a.f) == math.Float64bits(b.f)
-}
-
-type histogramSample struct {
- metric labels.Labels
- t int64
- h *histogram.Histogram
- fh *histogram.FloatHistogram
-}
-
-type metadataEntry struct {
- m metadata.Metadata
- metric labels.Labels
-}
-
-func metadataEntryEqual(a, b metadataEntry) bool {
- if !labels.Equal(a.metric, b.metric) {
- return false
+func withCtx(ctx context.Context) func(sl *scrapeLoop) {
+ return func(sl *scrapeLoop) {
+ sl.ctx = ctx
}
- if a.m.Type != b.m.Type {
- return false
- }
- if a.m.Unit != b.m.Unit {
- return false
- }
- if a.m.Help != b.m.Help {
- return false
- }
- return true
}
-type collectResultAppendable struct {
- *collectResultAppender
+func withAppendable(appendable storage.Appendable) func(sl *scrapeLoop) {
+ return func(sl *scrapeLoop) {
+ sl.appendable = appendable
+ }
}
-func (a *collectResultAppendable) Appender(context.Context) storage.Appender {
- return a
+// newTestScrapeLoop builds the initial scrape loop used by all tests.
+// It returns the scrapeLoop and a mock scraper you can customize.
+//
+// It's recommended to use the withXYZ functions for simple option customizations, e.g.:
+//
+// appTest := teststorage.NewAppendable()
+// sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
+//
+// However, when changing more than one scrapeLoop option, it's more readable to use one explicit opt function:
+//
+// ctx, cancel := context.WithCancel(t.Context())
+// appTest := teststorage.NewAppendable()
+// sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+// sl.ctx = ctx
+// sl.appendable = appTest
+// // Since we're writing samples directly below, we need to provide a protocol fallback.
+// sl.fallbackScrapeProtocol = "text/plain"
+// })
+//
+// NOTE: Try NOT to add more parameters to this function, and try NOT to add more
+// newTestScrapeLoop-like constructors. The scrapeLoop struct used for the initial
+// options should be flexible enough.
+func newTestScrapeLoop(t testing.TB, opts ...func(sl *scrapeLoop)) (_ *scrapeLoop, scraper *testScraper) {
+ metrics := newTestScrapeMetrics(t)
+ sl := &scrapeLoop{
+ stopped: make(chan struct{}),
+
+ l: promslog.NewNopLogger(),
+ cache: newScrapeCache(metrics),
+
+ interval: 10 * time.Millisecond,
+ timeout: 1 * time.Hour,
+ sampleMutator: nopMutator,
+ reportSampleMutator: nopMutator,
+
+ appendable: teststorage.NewAppendable(),
+ buffers: pool.New(1e3, 1e6, 3, func(sz int) any { return make([]byte, 0, sz) }),
+ metrics: metrics,
+ maxSchema: histogram.ExponentialSchemaMax,
+ honorTimestamps: true,
+ enableCompression: true,
+ validationScheme: model.UTF8Validation,
+ symbolTable: labels.NewSymbolTable(),
+ appendMetadataToWAL: true, // Tests assumes it's enabled, unless explicitly turned off.
+ }
+ for _, o := range opts {
+ o(sl)
+ }
+ // Validate user opts for convenience.
+ require.Nil(t, sl.parentCtx, "newTestScrapeLoop does not support injecting non-nil parent context")
+ require.Nil(t, sl.appenderCtx, "newTestScrapeLoop does not support injecting non-nil appender context")
+ require.Nil(t, sl.cancel, "newTestScrapeLoop does not support injecting custom cancel function")
+ require.Nil(t, sl.scraper, "newTestScrapeLoop does not support injecting scraper, it's mocked, use the returned scraper")
+
+ rootCtx := t.Context()
+ // Use sl.ctx for context injection.
+ // The actual contexts (sl.appenderCtx, sl.parentCtx, sl.ctx) are populated from it.
+ if sl.ctx != nil {
+ rootCtx = sl.ctx
+ }
+ ctx, cancel := context.WithCancel(rootCtx)
+ sl.ctx = ctx
+ sl.cancel = cancel
+ sl.appenderCtx = rootCtx
+ sl.parentCtx = rootCtx
+
+ scraper = &testScraper{}
+ sl.scraper = scraper
+ return sl, scraper
}
-// collectResultAppender records all samples that were added through the appender.
-// It can be used as its zero value or be backed by another appender it writes samples through.
-type collectResultAppender struct {
- mtx sync.Mutex
+func newTestScrapePool(t *testing.T, injectNewLoop func(options scrapeLoopOptions) loop) *scrapePool {
+ return &scrapePool{
+ ctx: t.Context(),
+ cancel: func() {},
+ logger: promslog.NewNopLogger(),
+ config: &config.ScrapeConfig{},
+ options: &Options{},
+ client: http.DefaultClient,
- next storage.Appender
- resultFloats []floatSample
- pendingFloats []floatSample
- rolledbackFloats []floatSample
- resultHistograms []histogramSample
- pendingHistograms []histogramSample
- rolledbackHistograms []histogramSample
- resultExemplars []exemplar.Exemplar
- pendingExemplars []exemplar.Exemplar
- resultMetadata []metadataEntry
- pendingMetadata []metadataEntry
-}
+ activeTargets: map[uint64]*Target{},
+ loops: map[uint64]loop{},
+ injectTestNewLoop: injectNewLoop,
-func (*collectResultAppender) SetOptions(*storage.AppendOptions) {}
-
-func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
- a.mtx.Lock()
- defer a.mtx.Unlock()
- a.pendingFloats = append(a.pendingFloats, floatSample{
- metric: lset,
- t: t,
- f: v,
- })
-
- if a.next == nil {
- if ref == 0 {
- // Use labels hash as a stand-in for unique series reference, to avoid having to track all series.
- ref = storage.SeriesRef(lset.Hash())
- }
- return ref, nil
+ appendable: teststorage.NewAppendable(),
+ symbolTable: labels.NewSymbolTable(),
+ metrics: newTestScrapeMetrics(t),
}
-
- ref, err := a.next.Append(ref, lset, t, v)
- if err != nil {
- return 0, err
- }
- return ref, nil
-}
-
-func (a *collectResultAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
- a.mtx.Lock()
- defer a.mtx.Unlock()
- a.pendingExemplars = append(a.pendingExemplars, e)
- if a.next == nil {
- return 0, nil
- }
-
- return a.next.AppendExemplar(ref, l, e)
-}
-
-func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
- a.mtx.Lock()
- defer a.mtx.Unlock()
- a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, fh: fh, t: t, metric: l})
- if a.next == nil {
- return 0, nil
- }
-
- return a.next.AppendHistogram(ref, l, t, h, fh)
-}
-
-func (a *collectResultAppender) AppendHistogramSTZeroSample(ref storage.SeriesRef, l labels.Labels, _, st int64, h *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
- if h != nil {
- return a.AppendHistogram(ref, l, st, &histogram.Histogram{}, nil)
- }
- return a.AppendHistogram(ref, l, st, nil, &histogram.FloatHistogram{})
-}
-
-func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
- a.mtx.Lock()
- defer a.mtx.Unlock()
- a.pendingMetadata = append(a.pendingMetadata, metadataEntry{metric: l, m: m})
- if a.next == nil {
- if ref == 0 {
- ref = storage.SeriesRef(l.Hash())
- }
- return ref, nil
- }
-
- return a.next.UpdateMetadata(ref, l, m)
-}
-
-func (a *collectResultAppender) AppendSTZeroSample(ref storage.SeriesRef, l labels.Labels, _, st int64) (storage.SeriesRef, error) {
- return a.Append(ref, l, st, 0.0)
-}
-
-func (a *collectResultAppender) Commit() error {
- a.mtx.Lock()
- defer a.mtx.Unlock()
- a.resultFloats = append(a.resultFloats, a.pendingFloats...)
- a.resultExemplars = append(a.resultExemplars, a.pendingExemplars...)
- a.resultHistograms = append(a.resultHistograms, a.pendingHistograms...)
- a.resultMetadata = append(a.resultMetadata, a.pendingMetadata...)
- a.pendingFloats = nil
- a.pendingExemplars = nil
- a.pendingHistograms = nil
- a.pendingMetadata = nil
- if a.next == nil {
- return nil
- }
- return a.next.Commit()
-}
-
-func (a *collectResultAppender) Rollback() error {
- a.mtx.Lock()
- defer a.mtx.Unlock()
- a.rolledbackFloats = a.pendingFloats
- a.rolledbackHistograms = a.pendingHistograms
- a.pendingFloats = nil
- a.pendingHistograms = nil
- if a.next == nil {
- return nil
- }
- return a.next.Rollback()
-}
-
-func (a *collectResultAppender) String() string {
- var sb strings.Builder
- for _, s := range a.resultFloats {
- sb.WriteString(fmt.Sprintf("committed: %s %f %d\n", s.metric, s.f, s.t))
- }
- for _, s := range a.pendingFloats {
- sb.WriteString(fmt.Sprintf("pending: %s %f %d\n", s.metric, s.f, s.t))
- }
- for _, s := range a.rolledbackFloats {
- sb.WriteString(fmt.Sprintf("rolledback: %s %f %d\n", s.metric, s.f, s.t))
- }
- return sb.String()
}
// protoMarshalDelimited marshals a MetricFamily into a delimited
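newTestScrapeLoop above uses the functional-options pattern: small withXYZ helpers mutate the partially built struct before the constructor fills in the contexts and the mock scraper. A generic, self-contained sketch of the same pattern, with illustrative types rather than the real scrapeLoop:

    package main

    import "fmt"

    type loop struct {
        interval int // seconds
        name     string
    }

    // Option mutates a loop before defaults are finalized.
    type Option func(*loop)

    func withInterval(seconds int) Option { return func(l *loop) { l.interval = seconds } }
    func withName(name string) Option     { return func(l *loop) { l.name = name } }

    func newLoop(opts ...Option) *loop {
        l := &loop{interval: 10, name: "default"} // sensible defaults first
        for _, o := range opts {
            o(l) // user options override defaults
        }
        return l
    }

    func main() {
        l := newLoop(withName("test"), withInterval(1))
        fmt.Printf("%+v\n", *l)
    }

The design choice mirrored here is that the constructor stays flexible without growing a parameter per knob, which is exactly what the NOTE in the doc comment asks for.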
diff --git a/scrape/manager.go b/scrape/manager.go
index c63d7d0eae..a2297aa824 100644
--- a/scrape/manager.go
+++ b/scrape/manager.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -33,13 +33,14 @@ import (
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/util/features"
"github.com/prometheus/prometheus/util/logging"
"github.com/prometheus/prometheus/util/osutil"
"github.com/prometheus/prometheus/util/pool"
)
-// NewManager is the Manager constructor.
-func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error), app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) {
+// NewManager is the Manager constructor; it writes scraped samples through the provided Appendable.
+func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error), appendable storage.Appendable, registerer prometheus.Registerer) (*Manager, error) {
if o == nil {
o = &Options{}
}
@@ -53,7 +54,7 @@ func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(str
}
m := &Manager{
- append: app,
+ appendable: appendable,
opts: o,
logger: logger,
newScrapeFailureLogger: newScrapeFailureLogger,
@@ -67,32 +68,42 @@ func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(str
m.metrics.setTargetMetadataCacheGatherer(m)
+ // Register scrape features.
+ if r := o.FeatureRegistry; r != nil {
+ // "Extra scrape metrics" is always enabled because it moved from a feature flag to the config file.
+ r.Enable(features.Scrape, "extra_scrape_metrics")
+ r.Set(features.Scrape, "start_timestamp_zero_ingestion", o.EnableStartTimestampZeroIngestion)
+ r.Set(features.Scrape, "type_and_unit_labels", o.EnableTypeAndUnitLabels)
+ }
+
return m, nil
}
// Options are the configuration parameters to the scrape manager.
type Options struct {
- ExtraMetrics bool
// Option used by downstream scraper users like OpenTelemetry Collector
// to help lookup metric metadata. Should be false for Prometheus.
PassMetadataInContext bool
// Option to enable appending of scraped Metadata to the TSDB/other appenders. Individual appenders
// can decide what to do with metadata, but for practical purposes this flag exists so that metadata
// can be written to the WAL and thus read for remote write.
- // TODO: implement some form of metadata storage
AppendMetadata bool
// Option to increase the interval used by scrape manager to throttle target groups updates.
DiscoveryReloadInterval model.Duration
+
// Option to enable the ingestion of the created timestamp as a synthetic zero sample.
// See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md
EnableStartTimestampZeroIngestion bool
- // EnableTypeAndUnitLabels
+ // EnableTypeAndUnitLabels represents the type-and-unit-labels feature flag.
EnableTypeAndUnitLabels bool
// Optional HTTP client options to use when scraping.
HTTPClientOptions []config_util.HTTPClientOption
+ // FeatureRegistry is the registry for tracking enabled/disabled features.
+ FeatureRegistry features.Collector
+
// private option for testability.
skipOffsetting bool
}
@@ -100,9 +111,11 @@ type Options struct {
// Manager maintains a set of scrape pools and manages start/stop cycles
// when receiving new target groups from the discovery manager.
type Manager struct {
- opts *Options
- logger *slog.Logger
- append storage.Appendable
+ opts *Options
+ logger *slog.Logger
+
+ appendable storage.Appendable
+
graceShut chan struct{}
offsetSeed uint64 // Global offsetSeed seed is used to spread scrape workload across HA setup.
@@ -183,7 +196,7 @@ func (m *Manager) reload() {
continue
}
m.metrics.targetScrapePools.Inc()
- sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, m.logger.With("scrape_pool", setName), m.buffers, m.opts, m.metrics)
+ sp, err := newScrapePool(scrapeConfig, m.appendable, m.offsetSeed, m.logger.With("scrape_pool", setName), m.buffers, m.opts, m.metrics)
if err != nil {
m.metrics.targetScrapePoolsFailed.Inc()
m.logger.Error("error creating new scrape pool", "err", err, "scrape_pool", setName)
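The rename from append to appendable follows the storage.Appendable interface: the manager holds a factory that hands out a transactional storage.Appender per scrape, rather than a single appender. A hedged sketch of a trivially forwarding wrapper around that interface; only the Appender(ctx) method visible in this diff is relied on, and the wrapper itself is hypothetical:

    package main

    import (
        "context"
        "fmt"

        "github.com/prometheus/prometheus/storage"
    )

    // countingAppendable wraps another Appendable and counts how many
    // appenders were handed out (e.g. one per scrape or rule evaluation).
    // Not safe for concurrent use; a real wrapper would guard count with a mutex or atomic.
    type countingAppendable struct {
        next  storage.Appendable
        count int
    }

    func (c *countingAppendable) Appender(ctx context.Context) storage.Appender {
        c.count++
        return c.next.Appender(ctx)
    }

    // Compile-time check that the wrapper satisfies storage.Appendable.
    var _ storage.Appendable = (*countingAppendable)(nil)

    func main() {
        fmt.Println("countingAppendable forwards Appender(ctx) and counts calls")
    }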
diff --git a/scrape/manager_test.go b/scrape/manager_test.go
index 1ec4875d19..d4898eb996 100644
--- a/scrape/manager_test.go
+++ b/scrape/manager_test.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -51,6 +51,7 @@ import (
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/runutil"
+ "github.com/prometheus/prometheus/util/teststorage"
"github.com/prometheus/prometheus/util/testutil"
)
@@ -527,21 +528,12 @@ scrape_configs:
ch <- struct{}{}
return noopLoop()
}
- sp := &scrapePool{
- appendable: &nopAppendable{},
- activeTargets: map[uint64]*Target{
- 1: {},
- },
- loops: map[uint64]loop{
- 1: noopLoop(),
- },
- newLoop: newLoop,
- logger: nil,
- config: cfg1.ScrapeConfigs[0],
- client: http.DefaultClient,
- metrics: scrapeManager.metrics,
- symbolTable: labels.NewSymbolTable(),
- }
+ sp := newTestScrapePool(t, newLoop)
+ sp.activeTargets[1] = &Target{}
+ sp.loops[1] = noopLoop()
+ sp.config = cfg1.ScrapeConfigs[0]
+ sp.metrics = scrapeManager.metrics
+
scrapeManager.scrapePools = map[string]*scrapePool{
"job1": sp,
}
@@ -691,18 +683,11 @@ scrape_configs:
for _, sc := range cfg.ScrapeConfigs {
_, cancel := context.WithCancel(context.Background())
defer cancel()
- sp := &scrapePool{
- appendable: &nopAppendable{},
- activeTargets: map[uint64]*Target{},
- loops: map[uint64]loop{
- 1: noopLoop(),
- },
- newLoop: newLoop,
- logger: nil,
- config: sc,
- client: http.DefaultClient,
- cancel: cancel,
- }
+
+ sp := newTestScrapePool(t, newLoop)
+ sp.loops[1] = noopLoop()
+ sp.config = cfg1.ScrapeConfigs[0]
+ sp.metrics = scrapeManager.metrics
for _, c := range sc.ServiceDiscoveryConfigs {
staticConfig := c.(discovery.StaticConfig)
for _, group := range staticConfig {
@@ -764,7 +749,7 @@ func TestManagerSTZeroIngestion(t *testing.T) {
for _, testWithST := range []bool{false, true} {
t.Run(fmt.Sprintf("withST=%v", testWithST), func(t *testing.T) {
for _, testSTZeroIngest := range []bool{false, true} {
- t.Run(fmt.Sprintf("ctZeroIngest=%v", testSTZeroIngest), func(t *testing.T) {
+ t.Run(fmt.Sprintf("stZeroIngest=%v", testSTZeroIngest), func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -777,11 +762,11 @@ func TestManagerSTZeroIngestion(t *testing.T) {
// TODO(bwplotka): Add more types than just counter?
encoded := prepareTestEncodedCounter(t, testFormat, expectedMetricName, expectedSampleValue, sampleTs, stTs)
- app := &collectResultAppender{}
+ app := teststorage.NewAppendable()
discoveryManager, scrapeManager := runManagers(t, ctx, &Options{
EnableStartTimestampZeroIngestion: testSTZeroIngest,
skipOffsetting: true,
- }, &collectResultAppendable{app})
+ }, app)
defer scrapeManager.Stop()
server := setupTestServer(t, config.ScrapeProtocolsHeaders[testFormat], encoded)
@@ -806,11 +791,8 @@ scrape_configs:
ctx, cancel = context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error {
- app.mtx.Lock()
- defer app.mtx.Unlock()
-
// Check if scrape happened and grab the relevant samples.
- if len(app.resultFloats) > 0 {
+ if len(app.ResultSamples()) > 0 {
return nil
}
return errors.New("expected some float samples, got none")
@@ -818,32 +800,32 @@ scrape_configs:
// Verify results.
// Verify what we got vs expectations around ST injection.
- samples := findSamplesForMetric(app.resultFloats, expectedMetricName)
+ got := findSamplesForMetric(app.ResultSamples(), expectedMetricName)
if testWithST && testSTZeroIngest {
- require.Len(t, samples, 2)
- require.Equal(t, 0.0, samples[0].f)
- require.Equal(t, timestamp.FromTime(stTs), samples[0].t)
- require.Equal(t, expectedSampleValue, samples[1].f)
- require.Equal(t, timestamp.FromTime(sampleTs), samples[1].t)
+ require.Len(t, got, 2)
+ require.Equal(t, 0.0, got[0].V)
+ require.Equal(t, timestamp.FromTime(stTs), got[0].T)
+ require.Equal(t, expectedSampleValue, got[1].V)
+ require.Equal(t, timestamp.FromTime(sampleTs), got[1].T)
} else {
- require.Len(t, samples, 1)
- require.Equal(t, expectedSampleValue, samples[0].f)
- require.Equal(t, timestamp.FromTime(sampleTs), samples[0].t)
+ require.Len(t, got, 1)
+ require.Equal(t, expectedSampleValue, got[0].V)
+ require.Equal(t, timestamp.FromTime(sampleTs), got[0].T)
}
// Verify what we got vs expectations around additional _created series for OM text.
// enableSTZeroInjection also kills that _created line.
- createdSeriesSamples := findSamplesForMetric(app.resultFloats, expectedCreatedMetricName)
+ gotSTSeries := findSamplesForMetric(app.ResultSamples(), expectedCreatedMetricName)
if testFormat == config.OpenMetricsText1_0_0 && testWithST && !testSTZeroIngest {
// For OM Text, when counter has ST, and feature flag disabled we should see _created lines.
- require.Len(t, createdSeriesSamples, 1)
+ require.Len(t, gotSTSeries, 1)
// Conversion taken from common/expfmt.writeOpenMetricsFloat.
// We don't check the st timestamp as explicit ts was not implemented in expfmt.Encoder,
// but exists in OM https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#:~:text=An%20example%20with%20a%20Metric%20with%20no%20labels%2C%20and%20a%20MetricPoint%20with%20a%20timestamp%20and%20a%20created
// We can implement this, but we want to potentially get rid of OM 1.0 ST lines
- require.Equal(t, float64(timestamppb.New(stTs).AsTime().UnixNano())/1e9, createdSeriesSamples[0].f)
+ require.Equal(t, float64(timestamppb.New(stTs).AsTime().UnixNano())/1e9, gotSTSeries[0].V)
} else {
- require.Empty(t, createdSeriesSamples)
+ require.Empty(t, gotSTSeries)
}
})
}
@@ -885,9 +867,9 @@ func prepareTestEncodedCounter(t *testing.T, format config.ScrapeProtocol, mName
}
}
-func findSamplesForMetric(floats []floatSample, metricName string) (ret []floatSample) {
+func findSamplesForMetric(floats []sample, metricName string) (ret []sample) {
for _, f := range floats {
- if f.metric.Get(model.MetricNameLabel) == metricName {
+ if f.L.Get(model.MetricNameLabel) == metricName {
ret = append(ret, f)
}
}
@@ -964,11 +946,11 @@ func TestManagerSTZeroIngestionHistogram(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- app := &collectResultAppender{}
+ app := teststorage.NewAppendable()
discoveryManager, scrapeManager := runManagers(t, ctx, &Options{
EnableStartTimestampZeroIngestion: tc.enableSTZeroIngestion,
skipOffsetting: true,
- }, &collectResultAppendable{app})
+ }, app)
defer scrapeManager.Stop()
once := sync.Once{}
@@ -1012,43 +994,33 @@ scrape_configs:
`, serverURL.Host)
applyConfig(t, testConfig, scrapeManager, discoveryManager)
- var got []histogramSample
-
// Wait for one scrape.
ctx, cancel = context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error {
- app.mtx.Lock()
- defer app.mtx.Unlock()
-
- // Check if scrape happened and grab the relevant histograms, they have to be there - or it's a bug
- // and it's not worth waiting.
- for _, h := range app.resultHistograms {
- if h.metric.Get(model.MetricNameLabel) == mName {
- got = append(got, h)
- }
- }
- if len(app.resultHistograms) > 0 {
+ if len(app.ResultSamples()) > 0 {
return nil
}
return errors.New("expected some histogram samples, got none")
}), "after 1 minute")
+ got := findSamplesForMetric(app.ResultSamples(), mName)
+
// Check for zero samples, assuming we only injected always one histogram sample.
// Did it contain ST to inject? If yes, was ST zero enabled?
if tc.inputHistSample.CreatedTimestamp.IsValid() && tc.enableSTZeroIngestion {
require.Len(t, got, 2)
// Zero sample.
- require.Equal(t, histogram.Histogram{}, *got[0].h)
+ require.Equal(t, histogram.Histogram{}, *got[0].H)
// Quick soft check to make sure it's the same sample or at least not zero.
- require.Equal(t, tc.inputHistSample.GetSampleSum(), got[1].h.Sum)
+ require.Equal(t, tc.inputHistSample.GetSampleSum(), got[1].H.Sum)
return
}
// Expect only one, valid sample.
require.Len(t, got, 1)
// Quick soft check to make sure it's the same sample or at least not zero.
- require.Equal(t, tc.inputHistSample.GetSampleSum(), got[0].h.Sum)
+ require.Equal(t, tc.inputHistSample.GetSampleSum(), got[0].H.Sum)
})
}
}
@@ -1083,11 +1055,11 @@ func TestNHCBAndSTZeroIngestion(t *testing.T) {
ctx := t.Context()
- app := &collectResultAppender{}
+ app := teststorage.NewAppendable()
discoveryManager, scrapeManager := runManagers(t, ctx, &Options{
EnableStartTimestampZeroIngestion: true,
skipOffsetting: true,
- }, &collectResultAppendable{app})
+ }, app)
defer scrapeManager.Stop()
once := sync.Once{}
@@ -1146,33 +1118,19 @@ scrape_configs:
return exists
}, 5*time.Second, 100*time.Millisecond, "scrape pool should be created for job 'test'")
- // Helper function to get matching histograms to avoid race conditions.
- getMatchingHistograms := func() []histogramSample {
- app.mtx.Lock()
- defer app.mtx.Unlock()
-
- var got []histogramSample
- for _, h := range app.resultHistograms {
- if h.metric.Get(model.MetricNameLabel) == mName {
- got = append(got, h)
- }
- }
- return got
- }
-
require.Eventually(t, func() bool {
- return len(getMatchingHistograms()) > 0
+ return len(app.ResultSamples()) > 0
}, 1*time.Minute, 100*time.Millisecond, "expected histogram samples, got none")
// Verify that samples were ingested (proving both features work together).
- got := getMatchingHistograms()
+ got := findSamplesForMetric(app.ResultSamples(), mName)
// With ST zero ingestion enabled and a created timestamp present, we expect 2 samples:
// one zero sample and one actual sample.
require.Len(t, got, 2, "expected 2 histogram samples (zero sample + actual sample)")
- require.Equal(t, histogram.Histogram{}, *got[0].h, "first sample should be zero sample")
- require.InDelta(t, expectedHistogramSum, got[1].h.Sum, 1e-9, "second sample should retain the expected sum")
- require.Len(t, app.resultExemplars, 2, "expected 2 exemplars from histogram buckets")
+ require.Equal(t, histogram.Histogram{}, *got[0].H, "first sample should be zero sample")
+ require.InDelta(t, expectedHistogramSum, got[1].H.Sum, 1e-9, "second sample should retain the expected sum")
+ require.Len(t, got[1].ES, 2, "expected 2 exemplars on second histogram")
}
func applyConfig(
@@ -1203,7 +1161,7 @@ func runManagers(t *testing.T, ctx context.Context, opts *Options, app storage.A
}
opts.DiscoveryReloadInterval = model.Duration(100 * time.Millisecond)
if app == nil {
- app = nopAppendable{}
+ app = teststorage.NewAppendable()
}
reg := prometheus.NewRegistry()
@@ -1601,7 +1559,7 @@ scrape_configs:
cfg := loadConfiguration(t, cfgText)
- m, err := NewManager(&Options{}, nil, nil, &nopAppendable{}, prometheus.NewRegistry())
+ m, err := NewManager(&Options{}, nil, nil, teststorage.NewAppendable(), prometheus.NewRegistry())
require.NoError(t, err)
defer m.Stop()
require.NoError(t, m.ApplyConfig(cfg))
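The rewritten tests above no longer sleep for a fixed time; they poll for asynchronously scraped results with runutil.Retry or require.Eventually and only then inspect the collected samples. A minimal sketch of that polling style using testify's Eventually, with an illustrative atomic counter standing in for the scraped-sample count:

    package scrape_example

    import (
        "sync/atomic"
        "testing"
        "time"

        "github.com/stretchr/testify/require"
    )

    func TestEventuallyPolling(t *testing.T) {
        var scraped atomic.Int64

        // Simulate an asynchronous scrape finishing after a short delay.
        go func() {
            time.Sleep(50 * time.Millisecond)
            scraped.Add(1)
        }()

        // Poll every 10ms for up to 1s instead of sleeping a fixed amount.
        require.Eventually(t, func() bool {
            return scraped.Load() > 0
        }, time.Second, 10*time.Millisecond, "expected at least one scraped sample")
    }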
diff --git a/scrape/metrics.go b/scrape/metrics.go
index e7395c6191..4662a9fd9e 100644
--- a/scrape/metrics.go
+++ b/scrape/metrics.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -15,6 +15,7 @@ package scrape
import (
"fmt"
+ "time"
"github.com/prometheus/client_golang/prometheus"
)
@@ -36,6 +37,7 @@ type scrapeMetrics struct {
targetScrapePoolTargetsAdded *prometheus.GaugeVec
targetScrapePoolSymbolTableItems *prometheus.GaugeVec
targetSyncIntervalLength *prometheus.SummaryVec
+ targetSyncIntervalLengthHistogram *prometheus.HistogramVec
targetSyncFailed *prometheus.CounterVec
// Used by targetScraper.
@@ -46,6 +48,7 @@ type scrapeMetrics struct {
// Used by scrapeLoop.
targetIntervalLength *prometheus.SummaryVec
+ targetIntervalLengthHistogram *prometheus.HistogramVec
targetScrapeSampleLimit prometheus.Counter
targetScrapeSampleDuplicate prometheus.Counter
targetScrapeSampleOutOfOrder prometheus.Counter
@@ -152,6 +155,17 @@ func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
},
[]string{"scrape_job"},
)
+ sm.targetSyncIntervalLengthHistogram = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "prometheus_target_sync_length_histogram_seconds",
+ Help: "Actual interval to sync the scrape pool.",
+ Buckets: []float64{.01, .1, 1, 10},
+ NativeHistogramBucketFactor: 1.1,
+ NativeHistogramMaxBucketNumber: 100,
+ NativeHistogramMinResetDuration: 1 * time.Hour,
+ },
+ []string{"scrape_job"},
+ )
sm.targetSyncFailed = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "prometheus_target_sync_failed_total",
@@ -185,6 +199,17 @@ func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
},
[]string{"interval"},
)
+ sm.targetIntervalLengthHistogram = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "prometheus_target_interval_length_histogram_seconds",
+ Help: "Actual intervals between scrapes.",
+ Buckets: []float64{.01, .1, 1, 10},
+ NativeHistogramBucketFactor: 1.1,
+ NativeHistogramMaxBucketNumber: 100,
+ NativeHistogramMinResetDuration: 1 * time.Hour,
+ },
+ []string{"interval"},
+ )
sm.targetScrapeSampleLimit = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "prometheus_target_scrapes_exceeded_sample_limit_total",
@@ -238,6 +263,7 @@ func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
sm.targetScrapePoolReloads,
sm.targetScrapePoolReloadsFailed,
sm.targetSyncIntervalLength,
+ sm.targetSyncIntervalLengthHistogram,
sm.targetScrapePoolSyncsCounter,
sm.targetScrapePoolExceededTargetLimit,
sm.targetScrapePoolTargetLimit,
@@ -250,6 +276,7 @@ func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
sm.targetScrapeCacheFlushForced,
// Used by scrapeLoop.
sm.targetIntervalLength,
+ sm.targetIntervalLengthHistogram,
sm.targetScrapeSampleLimit,
sm.targetScrapeSampleDuplicate,
sm.targetScrapeSampleOutOfOrder,
@@ -279,6 +306,7 @@ func (sm *scrapeMetrics) Unregister() {
sm.reg.Unregister(sm.targetScrapePoolReloads)
sm.reg.Unregister(sm.targetScrapePoolReloadsFailed)
sm.reg.Unregister(sm.targetSyncIntervalLength)
+ sm.reg.Unregister(sm.targetSyncIntervalLengthHistogram)
sm.reg.Unregister(sm.targetScrapePoolSyncsCounter)
sm.reg.Unregister(sm.targetScrapePoolExceededTargetLimit)
sm.reg.Unregister(sm.targetScrapePoolTargetLimit)
@@ -288,6 +316,7 @@ func (sm *scrapeMetrics) Unregister() {
sm.reg.Unregister(sm.targetScrapeExceededBodySizeLimit)
sm.reg.Unregister(sm.targetScrapeCacheFlushForced)
sm.reg.Unregister(sm.targetIntervalLength)
+ sm.reg.Unregister(sm.targetIntervalLengthHistogram)
sm.reg.Unregister(sm.targetScrapeSampleLimit)
sm.reg.Unregister(sm.targetScrapeSampleDuplicate)
sm.reg.Unregister(sm.targetScrapeSampleOutOfOrder)
diff --git a/scrape/scrape.go b/scrape/scrape.go
index db662cb089..1a99155d09 100644
--- a/scrape/scrape.go
+++ b/scrape/scrape.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -59,6 +59,8 @@ import (
"github.com/prometheus/prometheus/util/pool"
)
+var aOptionRejectEarlyOOO = storage.AppendOptions{DiscardOutOfOrder: true}
+
// ScrapeTimestampTolerance is the tolerance for scrape appends timestamps
// alignment, to enable better compression at the TSDB level.
// See https://github.com/prometheus/prometheus/issues/7846
@@ -67,7 +69,7 @@ var ScrapeTimestampTolerance = 2 * time.Millisecond
// AlignScrapeTimestamps enables the tolerance for scrape appends timestamps described above.
var AlignScrapeTimestamps = true
-var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", labels.MetricName)
+var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", model.MetricNameLabel)
var _ FailureLogger = (*logging.JSONFileLogger)(nil)
@@ -82,8 +84,9 @@ type FailureLogger interface {
type scrapePool struct {
appendable storage.Appendable
logger *slog.Logger
+ ctx context.Context
cancel context.CancelFunc
- httpOpts []config_util.HTTPClientOption
+ options *Options
// mtx must not be taken after targetMtx.
mtx sync.Mutex
@@ -102,16 +105,15 @@ type scrapePool struct {
droppedTargets []*Target // Subject to KeepDroppedTargets limit.
droppedTargetsCount int // Count of all dropped targets.
- // Constructor for new scrape loops. This is settable for testing convenience.
- newLoop func(scrapeLoopOptions) loop
+ // newLoop injection for testing purposes.
+ injectTestNewLoop func(scrapeLoopOptions) loop
- metrics *scrapeMetrics
+ metrics *scrapeMetrics
+ buffers *pool.Pool
+ offsetSeed uint64
scrapeFailureLogger FailureLogger
scrapeFailureLoggerMtx sync.RWMutex
-
- validationScheme model.ValidationScheme
- escapingScheme model.EscapingScheme
}
type labelLimits struct {
@@ -120,118 +122,80 @@ type labelLimits struct {
labelValueLengthLimit int
}
-type scrapeLoopOptions struct {
- target *Target
- scraper scraper
- sampleLimit int
- bucketLimit int
- maxSchema int32
- labelLimits *labelLimits
- honorLabels bool
- honorTimestamps bool
- trackTimestampsStaleness bool
- interval time.Duration
- timeout time.Duration
- scrapeNativeHist bool
- alwaysScrapeClassicHist bool
- convertClassicHistToNHCB bool
- fallbackScrapeProtocol string
-
- mrc []*relabel.Config
- cache *scrapeCache
- enableCompression bool
-}
-
const maxAheadTime = 10 * time.Minute
// returning an empty label set is interpreted as "drop".
type labelsMutator func(labels.Labels) labels.Labels
-func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger *slog.Logger, buffers *pool.Pool, options *Options, metrics *scrapeMetrics) (*scrapePool, error) {
+// scrapeLoopAppendAdapter allows support for multiple storage.Appender versions.
+type scrapeLoopAppendAdapter interface {
+ Commit() error
+ Rollback() error
+
+ addReportSample(s reportSample, t int64, v float64, b *labels.Builder, rejectOOO bool) error
+ append(b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error)
+}
+
+func newScrapePool(
+ cfg *config.ScrapeConfig,
+ appendable storage.Appendable,
+ offsetSeed uint64,
+ logger *slog.Logger,
+ buffers *pool.Pool,
+ options *Options,
+ metrics *scrapeMetrics,
+) (*scrapePool, error) {
if logger == nil {
logger = promslog.NewNopLogger()
}
+ if buffers == nil {
+ buffers = pool.New(1e3, 1e6, 3, func(sz int) any { return make([]byte, 0, sz) })
+ }
client, err := newScrapeClient(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...)
if err != nil {
return nil, err
}
+ // Validate scheme so we don't need to do it later.
+ // We also do it in scrapePool.reload(...).
+ // TODO(bwplotka): Can we move it to scrape config validation?
if err := namevalidationutil.CheckNameValidationScheme(cfg.MetricNameValidationScheme); err != nil {
return nil, errors.New("newScrapePool: MetricNameValidationScheme must be set in scrape configuration")
}
- var escapingScheme model.EscapingScheme
- escapingScheme, err = config.ToEscapingScheme(cfg.MetricNameEscapingScheme, cfg.MetricNameValidationScheme)
- if err != nil {
+ if _, err = config.ToEscapingScheme(cfg.MetricNameEscapingScheme, cfg.MetricNameValidationScheme); err != nil {
return nil, fmt.Errorf("invalid metric name escaping scheme, %w", err)
}
+ symbols := labels.NewSymbolTable()
ctx, cancel := context.WithCancel(context.Background())
sp := &scrapePool{
+ appendable: appendable,
+ logger: logger,
+ ctx: ctx,
cancel: cancel,
- appendable: app,
+ options: options,
config: cfg,
client: client,
- activeTargets: map[uint64]*Target{},
loops: map[uint64]loop{},
- symbolTable: labels.NewSymbolTable(),
+ symbolTable: symbols,
lastSymbolTableCheck: time.Now(),
- logger: logger,
+ activeTargets: map[uint64]*Target{},
metrics: metrics,
- httpOpts: options.HTTPClientOptions,
- validationScheme: cfg.MetricNameValidationScheme,
- escapingScheme: escapingScheme,
- }
- sp.newLoop = func(opts scrapeLoopOptions) loop {
- // Update the targets retrieval function for metadata to a new scrape cache.
- cache := opts.cache
- if cache == nil {
- cache = newScrapeCache(metrics)
- }
- opts.target.SetMetadataStore(cache)
-
- return newScrapeLoop(
- ctx,
- opts.scraper,
- logger.With("target", opts.target),
- buffers,
- func(l labels.Labels) labels.Labels {
- return mutateSampleLabels(l, opts.target, opts.honorLabels, opts.mrc)
- },
- func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, opts.target) },
- func(ctx context.Context) storage.Appender { return app.Appender(ctx) },
- cache,
- sp.symbolTable,
- offsetSeed,
- opts.honorTimestamps,
- opts.trackTimestampsStaleness,
- opts.enableCompression,
- opts.sampleLimit,
- opts.bucketLimit,
- opts.maxSchema,
- opts.labelLimits,
- opts.interval,
- opts.timeout,
- opts.alwaysScrapeClassicHist,
- opts.convertClassicHistToNHCB,
- cfg.ScrapeNativeHistogramsEnabled(),
- options.EnableStartTimestampZeroIngestion,
- options.EnableTypeAndUnitLabels,
- options.ExtraMetrics,
- options.AppendMetadata,
- opts.target,
- options.PassMetadataInContext,
- metrics,
- options.skipOffsetting,
- sp.validationScheme,
- sp.escapingScheme,
- opts.fallbackScrapeProtocol,
- )
+ buffers: buffers,
+ offsetSeed: offsetSeed,
}
sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit))
return sp, nil
}
+func (sp *scrapePool) newLoop(opts scrapeLoopOptions) loop {
+ if sp.injectTestNewLoop != nil {
+ return sp.injectTestNewLoop(opts)
+ }
+ return newScrapeLoop(opts)
+}
+
func (sp *scrapePool) ActiveTargets() []*Target {
sp.targetMtx.Lock()
defer sp.targetMtx.Unlock()
@@ -309,6 +273,7 @@ func (sp *scrapePool) stop() {
sp.metrics.targetScrapePoolTargetsAdded.DeleteLabelValues(sp.config.JobName)
sp.metrics.targetScrapePoolSymbolTableItems.DeleteLabelValues(sp.config.JobName)
sp.metrics.targetSyncIntervalLength.DeleteLabelValues(sp.config.JobName)
+ sp.metrics.targetSyncIntervalLengthHistogram.DeleteLabelValues(sp.config.JobName)
sp.metrics.targetSyncFailed.DeleteLabelValues(sp.config.JobName)
}
}
@@ -322,7 +287,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
sp.metrics.targetScrapePoolReloads.Inc()
start := time.Now()
- client, err := newScrapeClient(cfg.HTTPClientConfig, cfg.JobName, sp.httpOpts...)
+ client, err := newScrapeClient(cfg.HTTPClientConfig, cfg.JobName, sp.options.HTTPClientOptions...)
if err != nil {
sp.metrics.targetScrapePoolReloadsFailed.Inc()
return err
@@ -332,17 +297,14 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
sp.config = cfg
oldClient := sp.client
sp.client = client
+
+ // Validate scheme so we don't need to do it later.
if err := namevalidationutil.CheckNameValidationScheme(cfg.MetricNameValidationScheme); err != nil {
return errors.New("scrapePool.reload: MetricNameValidationScheme must be set in scrape configuration")
}
- sp.validationScheme = cfg.MetricNameValidationScheme
- var escapingScheme model.EscapingScheme
- escapingScheme, err = model.ToEscapingScheme(cfg.MetricNameEscapingScheme)
- if err != nil {
- return fmt.Errorf("invalid metric name escaping scheme, %w", err)
+ if _, err = config.ToEscapingScheme(cfg.MetricNameEscapingScheme, cfg.MetricNameValidationScheme); err != nil {
+ return fmt.Errorf("scrapePool.reload: invalid metric name escaping scheme, %w", err)
}
- sp.escapingScheme = escapingScheme
-
sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit))
sp.restartLoops(reuseCache)
@@ -354,30 +316,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
}
func (sp *scrapePool) restartLoops(reuseCache bool) {
- var (
- wg sync.WaitGroup
- interval = time.Duration(sp.config.ScrapeInterval)
- timeout = time.Duration(sp.config.ScrapeTimeout)
- bodySizeLimit = int64(sp.config.BodySizeLimit)
- sampleLimit = int(sp.config.SampleLimit)
- bucketLimit = int(sp.config.NativeHistogramBucketLimit)
- maxSchema = pickSchema(sp.config.NativeHistogramMinBucketFactor)
- labelLimits = &labelLimits{
- labelLimit: int(sp.config.LabelLimit),
- labelNameLengthLimit: int(sp.config.LabelNameLengthLimit),
- labelValueLengthLimit: int(sp.config.LabelValueLengthLimit),
- }
- honorLabels = sp.config.HonorLabels
- honorTimestamps = sp.config.HonorTimestamps
- enableCompression = sp.config.EnableCompression
- trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
- mrc = sp.config.MetricRelabelConfigs
- fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType()
- scrapeNativeHist = sp.config.ScrapeNativeHistogramsEnabled()
- alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistogramsEnabled()
- convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCBEnabled()
- )
-
+ var wg sync.WaitGroup
sp.targetMtx.Lock()
forcedErr := sp.refreshTargetLimitErr()
@@ -391,38 +330,27 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
}
t := sp.activeTargets[fp]
- targetInterval, targetTimeout, err := t.intervalAndTimeout(interval, timeout)
- var (
- s = &targetScraper{
+ targetInterval, targetTimeout, err := t.intervalAndTimeout(
+ time.Duration(sp.config.ScrapeInterval),
+ time.Duration(sp.config.ScrapeTimeout),
+ )
+ escapingScheme, _ := config.ToEscapingScheme(sp.config.MetricNameEscapingScheme, sp.config.MetricNameValidationScheme)
+ newLoop := sp.newLoop(scrapeLoopOptions{
+ target: t,
+ scraper: &targetScraper{
Target: t,
client: sp.client,
timeout: targetTimeout,
- bodySizeLimit: bodySizeLimit,
- acceptHeader: acceptHeader(sp.config.ScrapeProtocols, sp.escapingScheme),
- acceptEncodingHeader: acceptEncodingHeader(enableCompression),
+ bodySizeLimit: int64(sp.config.BodySizeLimit),
+ acceptHeader: acceptHeader(sp.config.ScrapeProtocols, escapingScheme),
+ acceptEncodingHeader: acceptEncodingHeader(sp.config.EnableCompression),
metrics: sp.metrics,
- }
- newLoop = sp.newLoop(scrapeLoopOptions{
- target: t,
- scraper: s,
- sampleLimit: sampleLimit,
- bucketLimit: bucketLimit,
- maxSchema: maxSchema,
- labelLimits: labelLimits,
- honorLabels: honorLabels,
- honorTimestamps: honorTimestamps,
- enableCompression: enableCompression,
- trackTimestampsStaleness: trackTimestampsStaleness,
- mrc: mrc,
- cache: cache,
- interval: targetInterval,
- timeout: targetTimeout,
- fallbackScrapeProtocol: fallbackScrapeProtocol,
- scrapeNativeHist: scrapeNativeHist,
- alwaysScrapeClassicHist: alwaysScrapeClassicHist,
- convertClassicHistToNHCB: convertClassicHistToNHCB,
- })
- )
+ },
+ cache: cache,
+ interval: targetInterval,
+ timeout: targetTimeout,
+ sp: sp,
+ })
if err != nil {
newLoop.setForcedError(err)
}
@@ -505,6 +433,9 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
sp.metrics.targetSyncIntervalLength.WithLabelValues(sp.config.JobName).Observe(
time.Since(start).Seconds(),
)
+ sp.metrics.targetSyncIntervalLengthHistogram.WithLabelValues(sp.config.JobName).Observe(
+ time.Since(start).Seconds(),
+ )
sp.metrics.targetScrapePoolSyncsCounter.WithLabelValues(sp.config.JobName).Inc()
}
@@ -512,31 +443,10 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
// scrape loops for new targets, and stops scrape loops for disappeared targets.
// It returns after all stopped scrape loops terminated.
func (sp *scrapePool) sync(targets []*Target) {
- var (
- uniqueLoops = make(map[uint64]loop)
- interval = time.Duration(sp.config.ScrapeInterval)
- timeout = time.Duration(sp.config.ScrapeTimeout)
- bodySizeLimit = int64(sp.config.BodySizeLimit)
- sampleLimit = int(sp.config.SampleLimit)
- bucketLimit = int(sp.config.NativeHistogramBucketLimit)
- maxSchema = pickSchema(sp.config.NativeHistogramMinBucketFactor)
- labelLimits = &labelLimits{
- labelLimit: int(sp.config.LabelLimit),
- labelNameLengthLimit: int(sp.config.LabelNameLengthLimit),
- labelValueLengthLimit: int(sp.config.LabelValueLengthLimit),
- }
- honorLabels = sp.config.HonorLabels
- honorTimestamps = sp.config.HonorTimestamps
- enableCompression = sp.config.EnableCompression
- trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
- mrc = sp.config.MetricRelabelConfigs
- fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType()
- scrapeNativeHist = sp.config.ScrapeNativeHistogramsEnabled()
- alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistogramsEnabled()
- convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCBEnabled()
- )
+ uniqueLoops := make(map[uint64]loop)
sp.targetMtx.Lock()
+ escapingScheme, _ := config.ToEscapingScheme(sp.config.MetricNameEscapingScheme, sp.config.MetricNameValidationScheme)
for _, t := range targets {
hash := t.hash()
@@ -545,34 +455,25 @@ func (sp *scrapePool) sync(targets []*Target) {
// so whether changed via relabeling or not, they'll exist and hold the correct values
// for every target.
var err error
- interval, timeout, err = t.intervalAndTimeout(interval, timeout)
- s := &targetScraper{
- Target: t,
- client: sp.client,
- timeout: timeout,
- bodySizeLimit: bodySizeLimit,
- acceptHeader: acceptHeader(sp.config.ScrapeProtocols, sp.escapingScheme),
- acceptEncodingHeader: acceptEncodingHeader(enableCompression),
- metrics: sp.metrics,
- }
+ targetInterval, targetTimeout, err := t.intervalAndTimeout(
+ time.Duration(sp.config.ScrapeInterval),
+ time.Duration(sp.config.ScrapeTimeout),
+ )
l := sp.newLoop(scrapeLoopOptions{
- target: t,
- scraper: s,
- sampleLimit: sampleLimit,
- bucketLimit: bucketLimit,
- maxSchema: maxSchema,
- labelLimits: labelLimits,
- honorLabels: honorLabels,
- honorTimestamps: honorTimestamps,
- enableCompression: enableCompression,
- trackTimestampsStaleness: trackTimestampsStaleness,
- mrc: mrc,
- interval: interval,
- timeout: timeout,
- scrapeNativeHist: scrapeNativeHist,
- alwaysScrapeClassicHist: alwaysScrapeClassicHist,
- convertClassicHistToNHCB: convertClassicHistToNHCB,
- fallbackScrapeProtocol: fallbackScrapeProtocol,
+ target: t,
+ scraper: &targetScraper{
+ Target: t,
+ client: sp.client,
+ timeout: targetTimeout,
+ bodySizeLimit: int64(sp.config.BodySizeLimit),
+ acceptHeader: acceptHeader(sp.config.ScrapeProtocols, escapingScheme),
+ acceptEncodingHeader: acceptEncodingHeader(sp.config.EnableCompression),
+ metrics: sp.metrics,
+ },
+ cache: newScrapeCache(sp.metrics),
+ interval: targetInterval,
+ timeout: targetTimeout,
+ sp: sp,
})
if err != nil {
l.setForcedError(err)
@@ -657,7 +558,7 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
return nil
}
- met := lset.Get(labels.MetricName)
+ met := lset.Get(model.MetricNameLabel)
if limits.labelLimit > 0 {
nbLabels := lset.Len()
if nbLabels > limits.labelLimit {
@@ -712,13 +613,11 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re
}
}
- res := lb.Labels()
-
- if len(rc) > 0 {
- res, _ = relabel.Process(res, rc...)
+ if keep := relabel.ProcessBuilder(lb, rc...); !keep {
+ return labels.EmptyLabels()
}
- return res
+ return lb.Labels()
}
func resolveConflictingExposedLabels(lb *labels.Builder, conflictingExposedLabels []labels.Label) {
@@ -749,8 +648,8 @@ func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels
return lb.Labels()
}
-// appender returns an appender for ingested samples from the target.
-func appender(app storage.Appender, sampleLimit, bucketLimit int, maxSchema int32) storage.Appender {
+// appenderWithLimits returns the appender wrapped with timestamp, sample-limit, bucket-limit and schema validation.
+func appenderWithLimits(app storage.Appender, sampleLimit, bucketLimit int, maxSchema int32) storage.Appender {
app = &timeLimitAppender{
Appender: app,
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
@@ -927,55 +826,63 @@ type cacheEntry struct {
}
type scrapeLoop struct {
- scraper scraper
- l *slog.Logger
- scrapeFailureLogger FailureLogger
- scrapeFailureLoggerMtx sync.RWMutex
- cache *scrapeCache
- lastScrapeSize int
- buffers *pool.Pool
- offsetSeed uint64
- honorTimestamps bool
- trackTimestampsStaleness bool
- enableCompression bool
- forcedErr error
- forcedErrMtx sync.Mutex
- sampleLimit int
- bucketLimit int
- maxSchema int32
- labelLimits *labelLimits
- interval time.Duration
- timeout time.Duration
- validationScheme model.ValidationScheme
- escapingScheme model.EscapingScheme
-
- alwaysScrapeClassicHist bool
- convertClassicHistToNHCB bool
- enableSTZeroIngestion bool
- enableTypeAndUnitLabels bool
- fallbackScrapeProtocol string
-
- enableNativeHistogramScraping bool
-
- appender func(ctx context.Context) storage.Appender
- symbolTable *labels.SymbolTable
- sampleMutator labelsMutator
- reportSampleMutator labelsMutator
-
- parentCtx context.Context
- appenderCtx context.Context
+ // Parameters.
ctx context.Context
cancel func()
stopped chan struct{}
+ parentCtx context.Context
+ appenderCtx context.Context
+ l *slog.Logger
+ cache *scrapeCache
+ interval time.Duration
+ timeout time.Duration
+ sampleMutator labelsMutator
+ reportSampleMutator labelsMutator
+ scraper scraper
+
+ // Static params per scrapePool.
+ appendable storage.Appendable
+ buffers *pool.Pool
+ offsetSeed uint64
+ symbolTable *labels.SymbolTable
+ metrics *scrapeMetrics
+
+ // Options from config.ScrapeConfig.
+ sampleLimit int
+ bucketLimit int
+ maxSchema int32
+ labelLimits *labelLimits
+ honorLabels bool
+ honorTimestamps bool
+ trackTimestampsStaleness bool
+ enableNativeHistogramScraping bool
+ alwaysScrapeClassicHist bool
+ convertClassicHistToNHCB bool
+ fallbackScrapeProtocol string
+ enableCompression bool
+ mrc []*relabel.Config
+ validationScheme model.ValidationScheme
+
+ // Options from scrape.Options.
+ enableSTZeroIngestion bool
+ enableTypeAndUnitLabels bool
+ reportExtraMetrics bool
+ appendMetadataToWAL bool
+ passMetadataInContext bool
+ skipOffsetting bool // For testability.
+
+	// Error injection through setForcedError.
+ forcedErr error
+ forcedErrMtx sync.Mutex
+
+	// Special logger set via setScrapeFailureLogger.
+ scrapeFailureLoggerMtx sync.RWMutex
+ scrapeFailureLogger FailureLogger
+
+ // Locally cached data.
+ lastScrapeSize int
disabledEndOfRunStalenessMarkers atomic.Bool
-
- reportExtraMetrics bool
- appendMetadataToWAL bool
-
- metrics *scrapeMetrics
-
- skipOffsetting bool // For testability.
}
// scrapeCache tracks mappings of exposed metric strings to label sets and
@@ -1000,8 +907,8 @@ type scrapeCache struct {
seriesCur map[storage.SeriesRef]*cacheEntry
seriesPrev map[storage.SeriesRef]*cacheEntry
- // TODO(bwplotka): Consider moving Metadata API to use WAL instead of scrape loop to
- // avoid locking (using metadata API can block scraping).
+ // TODO(bwplotka): Consider moving metadata caching to head. See
+ // https://github.com/prometheus/prometheus/issues/17619.
metaMtx sync.Mutex // Mutex is needed due to api touching it when metadata is queried.
metadata map[string]*metaEntry // metadata by metric family name.
@@ -1236,99 +1143,87 @@ func (c *scrapeCache) LengthMetadata() int {
return len(c.metadata)
}
-func newScrapeLoop(ctx context.Context,
- sc scraper,
- l *slog.Logger,
- buffers *pool.Pool,
- sampleMutator labelsMutator,
- reportSampleMutator labelsMutator,
- appender func(ctx context.Context) storage.Appender,
- cache *scrapeCache,
- symbolTable *labels.SymbolTable,
- offsetSeed uint64,
- honorTimestamps bool,
- trackTimestampsStaleness bool,
- enableCompression bool,
- sampleLimit int,
- bucketLimit int,
- maxSchema int32,
- labelLimits *labelLimits,
- interval time.Duration,
- timeout time.Duration,
- alwaysScrapeClassicHist bool,
- convertClassicHistToNHCB bool,
- enableNativeHistogramScraping bool,
- enableSTZeroIngestion bool,
- enableTypeAndUnitLabels bool,
- reportExtraMetrics bool,
- appendMetadataToWAL bool,
- target *Target,
- passMetadataInContext bool,
- metrics *scrapeMetrics,
- skipOffsetting bool,
- validationScheme model.ValidationScheme,
- escapingScheme model.EscapingScheme,
- fallbackScrapeProtocol string,
-) *scrapeLoop {
- if l == nil {
- l = promslog.NewNopLogger()
- }
- if buffers == nil {
- buffers = pool.New(1e3, 1e6, 3, func(sz int) any { return make([]byte, 0, sz) })
- }
- if cache == nil {
- cache = newScrapeCache(metrics)
- }
+// scrapeLoopOptions contains the per-loop construction options; they do not change for the lifetime of the scrapeLoop.
+type scrapeLoopOptions struct {
+ target *Target
+ scraper scraper
+ cache *scrapeCache
+ interval, timeout time.Duration
- appenderCtx := ctx
+ sp *scrapePool
+}
- if passMetadataInContext {
+// newScrapeLoop constructs a new scrapeLoop.
+// NOTE: Technically this could be a scrapePool method, but it's a standalone function to make it clear that scrapeLoop
+// can be used outside the scrapePool lifecycle (e.g. in tests).
+func newScrapeLoop(opts scrapeLoopOptions) *scrapeLoop {
+	// Update the target's metadata store to point at the new scrape cache.
+ opts.target.SetMetadataStore(opts.cache)
+
+ appenderCtx := opts.sp.ctx
+ if opts.sp.options.PassMetadataInContext {
// Store the cache and target in the context. This is then used by downstream OTel Collector
// to lookup the metadata required to process the samples. Not used by Prometheus itself.
// TODO(gouthamve) We're using a dedicated context because using the parentCtx caused a memory
// leak. We should ideally fix the main leak. See: https://github.com/prometheus/prometheus/pull/10590
- appenderCtx = ContextWithMetricMetadataStore(appenderCtx, cache)
- appenderCtx = ContextWithTarget(appenderCtx, target)
+ // TODO(bwplotka): Remove once OpenTelemetry collector uses AppenderV2 (add issue)
+ appenderCtx = ContextWithMetricMetadataStore(appenderCtx, opts.cache)
+ appenderCtx = ContextWithTarget(appenderCtx, opts.target)
}
- sl := &scrapeLoop{
- scraper: sc,
- buffers: buffers,
- cache: cache,
- appender: appender,
- symbolTable: symbolTable,
- sampleMutator: sampleMutator,
- reportSampleMutator: reportSampleMutator,
- stopped: make(chan struct{}),
- offsetSeed: offsetSeed,
- l: l,
- parentCtx: ctx,
- appenderCtx: appenderCtx,
- honorTimestamps: honorTimestamps,
- trackTimestampsStaleness: trackTimestampsStaleness,
- enableCompression: enableCompression,
- sampleLimit: sampleLimit,
- bucketLimit: bucketLimit,
- maxSchema: maxSchema,
- labelLimits: labelLimits,
- interval: interval,
- timeout: timeout,
- alwaysScrapeClassicHist: alwaysScrapeClassicHist,
- convertClassicHistToNHCB: convertClassicHistToNHCB,
- enableSTZeroIngestion: enableSTZeroIngestion,
- enableTypeAndUnitLabels: enableTypeAndUnitLabels,
- fallbackScrapeProtocol: fallbackScrapeProtocol,
- enableNativeHistogramScraping: enableNativeHistogramScraping,
- reportExtraMetrics: reportExtraMetrics,
- appendMetadataToWAL: appendMetadataToWAL,
- metrics: metrics,
- skipOffsetting: skipOffsetting,
- validationScheme: validationScheme,
- escapingScheme: escapingScheme,
- }
- sl.ctx, sl.cancel = context.WithCancel(ctx)
+ ctx, cancel := context.WithCancel(opts.sp.ctx)
+ return &scrapeLoop{
+ ctx: ctx,
+ cancel: cancel,
+ stopped: make(chan struct{}),
+ parentCtx: opts.sp.ctx,
+ appenderCtx: appenderCtx,
+ l: opts.sp.logger.With("target", opts.target),
+ cache: opts.cache,
- return sl
+ interval: opts.interval,
+ timeout: opts.timeout,
+ sampleMutator: func(l labels.Labels) labels.Labels {
+ return mutateSampleLabels(l, opts.target, opts.sp.config.HonorLabels, opts.sp.config.MetricRelabelConfigs)
+ },
+ reportSampleMutator: func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, opts.target) },
+ scraper: opts.scraper,
+
+ // Static params per scrapePool.
+ appendable: opts.sp.appendable,
+ buffers: opts.sp.buffers,
+ offsetSeed: opts.sp.offsetSeed,
+ symbolTable: opts.sp.symbolTable,
+ metrics: opts.sp.metrics,
+
+ // config.ScrapeConfig.
+ sampleLimit: int(opts.sp.config.SampleLimit),
+ bucketLimit: int(opts.sp.config.NativeHistogramBucketLimit),
+ maxSchema: pickSchema(opts.sp.config.NativeHistogramMinBucketFactor),
+ labelLimits: &labelLimits{
+ labelLimit: int(opts.sp.config.LabelLimit),
+ labelNameLengthLimit: int(opts.sp.config.LabelNameLengthLimit),
+ labelValueLengthLimit: int(opts.sp.config.LabelValueLengthLimit),
+ },
+ honorLabels: opts.sp.config.HonorLabels,
+ honorTimestamps: opts.sp.config.HonorTimestamps,
+ trackTimestampsStaleness: opts.sp.config.TrackTimestampsStaleness,
+ enableNativeHistogramScraping: opts.sp.config.ScrapeNativeHistogramsEnabled(),
+ alwaysScrapeClassicHist: opts.sp.config.AlwaysScrapeClassicHistogramsEnabled(),
+ convertClassicHistToNHCB: opts.sp.config.ConvertClassicHistogramsToNHCBEnabled(),
+ fallbackScrapeProtocol: opts.sp.config.ScrapeFallbackProtocol.HeaderMediaType(),
+ enableCompression: opts.sp.config.EnableCompression,
+ mrc: opts.sp.config.MetricRelabelConfigs,
+ reportExtraMetrics: opts.sp.config.ExtraScrapeMetricsEnabled(),
+ validationScheme: opts.sp.config.MetricNameValidationScheme,
+
+ // scrape.Options.
+ enableSTZeroIngestion: opts.sp.options.EnableStartTimestampZeroIngestion,
+ enableTypeAndUnitLabels: opts.sp.options.EnableTypeAndUnitLabels,
+ appendMetadataToWAL: opts.sp.options.AppendMetadata,
+ passMetadataInContext: opts.sp.options.PassMetadataInContext,
+ skipOffsetting: opts.sp.options.skipOffsetting,
+ }
}
func (sl *scrapeLoop) setScrapeFailureLogger(l FailureLogger) {
@@ -1407,6 +1302,11 @@ mainLoop:
}
}
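+// appender returns a scrapeLoopAppendAdapter for a single scrape round, wrapping
+// an Appender obtained from sl.appendable against sl.appenderCtx.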
+func (sl *scrapeLoop) appender() scrapeLoopAppendAdapter {
+ // NOTE(bwplotka): Add AppenderV2 implementation, see https://github.com/prometheus/prometheus/issues/17632.
+ return &scrapeLoopAppender{scrapeLoop: sl, Appender: sl.appendable.Appender(sl.appenderCtx)}
+}
+
// scrapeAndReport performs a scrape and then appends the result to the storage
// together with reporting metrics, by using as few appenders as possible.
// In the happy scenario, a single appender is used.
@@ -1420,15 +1320,18 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
sl.metrics.targetIntervalLength.WithLabelValues(sl.interval.String()).Observe(
time.Since(last).Seconds(),
)
+ sl.metrics.targetIntervalLengthHistogram.WithLabelValues(sl.interval.String()).Observe(
+ time.Since(last).Seconds(),
+ )
}
var total, added, seriesAdded, bytesRead int
var err, appErr, scrapeErr error
- app := sl.appender(sl.appenderCtx)
+ app := sl.appender()
defer func() {
if err != nil {
- app.Rollback()
+ _ = app.Rollback()
return
}
err = app.Commit()
@@ -1446,13 +1349,16 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
if forcedErr := sl.getForcedError(); forcedErr != nil {
scrapeErr = forcedErr
// Add stale markers.
- if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil {
- app.Rollback()
- app = sl.appender(sl.appenderCtx)
+ if _, _, _, err := app.append([]byte{}, "", appendTime); err != nil {
+ _ = app.Rollback()
+ app = sl.appender()
sl.l.Warn("Append failed", "err", err)
}
if errc != nil {
- errc <- forcedErr
+ select {
+ case errc <- forcedErr:
+ case <-sl.ctx.Done():
+ }
}
return start
@@ -1489,7 +1395,10 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
}
sl.scrapeFailureLoggerMtx.RUnlock()
if errc != nil {
- errc <- scrapeErr
+ select {
+ case errc <- scrapeErr:
+ case <-sl.ctx.Done():
+ }
}
if errors.Is(scrapeErr, errBodySizeLimit) {
bytesRead = -1
@@ -1498,16 +1407,16 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
// A failed scrape is the same as an empty scrape,
// we still call sl.append to trigger stale markers.
- total, added, seriesAdded, appErr = sl.append(app, b, contentType, appendTime)
+ total, added, seriesAdded, appErr = app.append(b, contentType, appendTime)
if appErr != nil {
- app.Rollback()
- app = sl.appender(sl.appenderCtx)
+ _ = app.Rollback()
+ app = sl.appender()
sl.l.Debug("Append failed", "err", appErr)
// The append failed, probably due to a parse error or sample limit.
// Call sl.append again with an empty scrape to trigger stale markers.
- if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil {
- app.Rollback()
- app = sl.appender(sl.appenderCtx)
+ if _, _, _, err := app.append([]byte{}, "", appendTime); err != nil {
+ _ = app.Rollback()
+ app = sl.appender()
sl.l.Warn("Append failed", "err", err)
}
}
@@ -1577,11 +1486,11 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, int
// If the target has since been recreated and scraped, the
// stale markers will be out of order and ignored.
// sl.context would have been cancelled, hence using sl.appenderCtx.
- app := sl.appender(sl.appenderCtx)
+ app := sl.appender()
var err error
defer func() {
if err != nil {
- app.Rollback()
+ _ = app.Rollback()
return
}
err = app.Commit()
@@ -1589,9 +1498,9 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, int
sl.l.Warn("Stale commit failed", "err", err)
}
}()
- if _, _, _, err = sl.append(app, []byte{}, "", staleTime); err != nil {
- app.Rollback()
- app = sl.appender(sl.appenderCtx)
+ if _, _, _, err = app.append([]byte{}, "", staleTime); err != nil {
+ _ = app.Rollback()
+ app = sl.appender()
sl.l.Warn("Stale append failed", "err", err)
}
if err = sl.reportStale(app, staleTime); err != nil {
@@ -1625,7 +1534,7 @@ type appendErrors struct {
func (sl *scrapeLoop) updateStaleMarkers(app storage.Appender, defTime int64) (err error) {
sl.cache.forEachStale(func(ref storage.SeriesRef, lset labels.Labels) bool {
// Series no longer exposed, mark it stale.
- app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true})
+ app.SetOptions(&aOptionRejectEarlyOOO)
_, err = app.Append(ref, lset, defTime, math.Float64frombits(value.StaleNaN))
app.SetOptions(nil)
switch {
@@ -1639,12 +1548,20 @@ func (sl *scrapeLoop) updateStaleMarkers(app storage.Appender, defTime int64) (e
return err
}
-func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) {
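+// scrapeLoopAppender couples a storage.Appender with its scrapeLoop so the
+// append and report helpers can reach the loop's cache, limits and mutators
+// without passing them explicitly. It implements scrapeLoopAppendAdapter.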
+type scrapeLoopAppender struct {
+ *scrapeLoop
+
+ storage.Appender
+}
+
+var _ scrapeLoopAppendAdapter = &scrapeLoopAppender{}
+
+func (sl *scrapeLoopAppender) append(b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) {
defTime := timestamp.FromTime(ts)
if len(b) == 0 {
// Empty scrape. Just update the stale makers and swap the cache (but don't flush it).
- err = sl.updateStaleMarkers(app, defTime)
+ err = sl.updateStaleMarkers(sl.Appender, defTime)
sl.cache.iterDone(false)
return total, added, seriesAdded, err
}
@@ -1687,7 +1604,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
exemplars := make([]exemplar.Exemplar, 0, 1)
// Take an appender with limits.
- app = appender(app, sl.sampleLimit, sl.bucketLimit, sl.maxSchema)
+ app := appenderWithLimits(sl.Appender, sl.sampleLimit, sl.bucketLimit, sl.maxSchema)
defer func() {
if err != nil {
@@ -1776,7 +1693,7 @@ loop:
continue
}
- if !lset.Has(labels.MetricName) {
+ if !lset.Has(model.MetricNameLabel) {
err = errNameLabelMandatory
break loop
}
@@ -1850,7 +1767,7 @@ loop:
// But make sure we only do this if we have a cache entry (ce) for our series.
sl.cache.trackStaleness(ref, ce)
}
- if sampleAdded && sampleLimitErr == nil && bucketLimitErr == nil {
+ if sampleLimitErr == nil && bucketLimitErr == nil {
seriesAdded++
}
}
@@ -1908,7 +1825,7 @@ loop:
// In majority cases we can trust that the current series/histogram is matching the lastMeta and lastMFName.
// However, optional TYPE etc metadata and broken OM text can break this, detect those cases here.
// TODO(bwplotka): Consider moving this to parser as many parser users end up doing this (e.g. ST and NHCB parsing).
- if isSeriesPartOfFamily(lset.Get(labels.MetricName), lastMFName, lastMeta.Type) {
+ if isSeriesPartOfFamily(lset.Get(model.MetricNameLabel), lastMFName, lastMeta.Type) {
if _, merr := app.UpdateMetadata(ref, lset, lastMeta.Metadata); merr != nil {
// No need to fail the scrape on errors appending metadata.
sl.l.Debug("Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", lastMeta.Metadata), "err", merr)
@@ -2020,7 +1937,7 @@ func isSeriesPartOfFamily(mName string, mfName []byte, typ model.MetricType) boo
// during normal operation (e.g., accidental cardinality explosion, sudden traffic spikes).
// Current case ordering prevents exercising other cases when limits are exceeded.
// Remaining error cases typically occur only a few times, often during initial setup.
-func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) {
+func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (sampleAdded bool, _ error) {
switch {
case err == nil:
return true, nil
@@ -2132,7 +2049,7 @@ var (
}
)
-func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) {
+func (sl *scrapeLoop) report(app scrapeLoopAppendAdapter, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) {
sl.scraper.Report(start, duration, scrapeErr)
ts := timestamp.FromTime(start)
@@ -2143,71 +2060,70 @@ func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration tim
}
b := labels.NewBuilderWithSymbolTable(sl.symbolTable)
- if err = sl.addReportSample(app, scrapeHealthMetric, ts, health, b); err != nil {
+ if err = app.addReportSample(scrapeHealthMetric, ts, health, b, false); err != nil {
return err
}
- if err = sl.addReportSample(app, scrapeDurationMetric, ts, duration.Seconds(), b); err != nil {
+ if err = app.addReportSample(scrapeDurationMetric, ts, duration.Seconds(), b, false); err != nil {
return err
}
- if err = sl.addReportSample(app, scrapeSamplesMetric, ts, float64(scraped), b); err != nil {
+ if err = app.addReportSample(scrapeSamplesMetric, ts, float64(scraped), b, false); err != nil {
return err
}
- if err = sl.addReportSample(app, samplesPostRelabelMetric, ts, float64(added), b); err != nil {
+ if err = app.addReportSample(samplesPostRelabelMetric, ts, float64(added), b, false); err != nil {
return err
}
- if err = sl.addReportSample(app, scrapeSeriesAddedMetric, ts, float64(seriesAdded), b); err != nil {
+ if err = app.addReportSample(scrapeSeriesAddedMetric, ts, float64(seriesAdded), b, false); err != nil {
return err
}
if sl.reportExtraMetrics {
- if err = sl.addReportSample(app, scrapeTimeoutMetric, ts, sl.timeout.Seconds(), b); err != nil {
+ if err = app.addReportSample(scrapeTimeoutMetric, ts, sl.timeout.Seconds(), b, false); err != nil {
return err
}
- if err = sl.addReportSample(app, scrapeSampleLimitMetric, ts, float64(sl.sampleLimit), b); err != nil {
+ if err = app.addReportSample(scrapeSampleLimitMetric, ts, float64(sl.sampleLimit), b, false); err != nil {
return err
}
- if err = sl.addReportSample(app, scrapeBodySizeBytesMetric, ts, float64(bytes), b); err != nil {
+ if err = app.addReportSample(scrapeBodySizeBytesMetric, ts, float64(bytes), b, false); err != nil {
return err
}
}
return err
}
-func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err error) {
+func (sl *scrapeLoop) reportStale(app scrapeLoopAppendAdapter, start time.Time) (err error) {
ts := timestamp.FromTime(start)
- app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true})
stale := math.Float64frombits(value.StaleNaN)
b := labels.NewBuilder(labels.EmptyLabels())
- if err = sl.addReportSample(app, scrapeHealthMetric, ts, stale, b); err != nil {
+ if err = app.addReportSample(scrapeHealthMetric, ts, stale, b, true); err != nil {
return err
}
- if err = sl.addReportSample(app, scrapeDurationMetric, ts, stale, b); err != nil {
+ if err = app.addReportSample(scrapeDurationMetric, ts, stale, b, true); err != nil {
return err
}
- if err = sl.addReportSample(app, scrapeSamplesMetric, ts, stale, b); err != nil {
+ if err = app.addReportSample(scrapeSamplesMetric, ts, stale, b, true); err != nil {
return err
}
- if err = sl.addReportSample(app, samplesPostRelabelMetric, ts, stale, b); err != nil {
+ if err = app.addReportSample(samplesPostRelabelMetric, ts, stale, b, true); err != nil {
return err
}
- if err = sl.addReportSample(app, scrapeSeriesAddedMetric, ts, stale, b); err != nil {
+ if err = app.addReportSample(scrapeSeriesAddedMetric, ts, stale, b, true); err != nil {
return err
}
if sl.reportExtraMetrics {
- if err = sl.addReportSample(app, scrapeTimeoutMetric, ts, stale, b); err != nil {
+ if err = app.addReportSample(scrapeTimeoutMetric, ts, stale, b, true); err != nil {
return err
}
- if err = sl.addReportSample(app, scrapeSampleLimitMetric, ts, stale, b); err != nil {
+ if err = app.addReportSample(scrapeSampleLimitMetric, ts, stale, b, true); err != nil {
return err
}
- if err = sl.addReportSample(app, scrapeBodySizeBytesMetric, ts, stale, b); err != nil {
+ if err = app.addReportSample(scrapeBodySizeBytesMetric, ts, stale, b, true); err != nil {
return err
}
}
return err
}
-func (sl *scrapeLoop) addReportSample(app storage.Appender, s reportSample, t int64, v float64, b *labels.Builder) error {
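+// addReportSample appends a single synthetic report sample (e.g. up,
+// scrape_duration_seconds). When rejectOOO is true, the append is done with
+// aOptionRejectEarlyOOO set, as used for the stale markers written by reportStale.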
+func (sl *scrapeLoopAppender) addReportSample(s reportSample, t int64, v float64, b *labels.Builder, rejectOOO bool) (err error) {
ce, ok, _ := sl.cache.get(s.name)
var ref storage.SeriesRef
var lset labels.Labels
@@ -2219,18 +2135,26 @@ func (sl *scrapeLoop) addReportSample(app storage.Appender, s reportSample, t in
// with scraped metrics in the cache.
// We have to drop it when building the actual metric.
b.Reset(labels.EmptyLabels())
- b.Set(labels.MetricName, string(s.name[:len(s.name)-1]))
+ b.Set(model.MetricNameLabel, string(s.name[:len(s.name)-1]))
lset = sl.reportSampleMutator(b.Labels())
}
- ref, err := app.Append(ref, lset, t, v)
+ // This will be improved in AppenderV2.
+ if rejectOOO {
+ sl.SetOptions(&aOptionRejectEarlyOOO)
+ ref, err = sl.Append(ref, lset, t, v)
+ sl.SetOptions(nil)
+ } else {
+ ref, err = sl.Append(ref, lset, t, v)
+ }
+
switch {
case err == nil:
if !ok {
sl.cache.addRef(s.name, ref, lset, lset.Hash())
// We only need to add metadata once a scrape target appears.
if sl.appendMetadataToWAL {
- if _, merr := app.UpdateMetadata(ref, lset, s.Metadata); merr != nil {
+ if _, merr := sl.UpdateMetadata(ref, lset, s.Metadata); merr != nil {
sl.l.Debug("Error when appending metadata in addReportSample", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", s.Metadata), "err", merr)
}
}
diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go
index 5ccdb80019..c2b2ae132c 100644
--- a/scrape/scrape_test.go
+++ b/scrape/scrape_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -51,6 +51,7 @@ import (
"go.opentelemetry.io/otel/propagation"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
"go.uber.org/atomic"
+ "go.uber.org/goleak"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery"
@@ -88,7 +89,7 @@ func newTestScrapeMetrics(t testing.TB) *scrapeMetrics {
func TestNewScrapePool(t *testing.T) {
var (
- app = &nopAppendable{}
+ app = teststorage.NewAppendable()
cfg = &config.ScrapeConfig{
MetricNameValidationScheme: model.UTF8Validation,
MetricNameEscapingScheme: model.AllowUTF8,
@@ -97,20 +98,17 @@ func TestNewScrapePool(t *testing.T) {
)
require.NoError(t, err)
- a, ok := sp.appendable.(*nopAppendable)
+ a, ok := sp.appendable.(*teststorage.Appendable)
require.True(t, ok, "Failure to append.")
require.Equal(t, app, a, "Wrong sample appender.")
require.Equal(t, cfg, sp.config, "Wrong scrape config.")
- require.NotNil(t, sp.newLoop, "newLoop function not initialized.")
}
func TestStorageHandlesOutOfOrderTimestamps(t *testing.T) {
// Test with default OutOfOrderTimeWindow (0)
t.Run("Out-Of-Order Sample Disabled", func(t *testing.T) {
s := teststorage.New(t)
- t.Cleanup(func() {
- _ = s.Close()
- })
+ t.Cleanup(func() { _ = s.Close() })
runScrapeLoopTest(t, s, false)
})
@@ -118,19 +116,14 @@ func TestStorageHandlesOutOfOrderTimestamps(t *testing.T) {
// Test with specific OutOfOrderTimeWindow (600000)
t.Run("Out-Of-Order Sample Enabled", func(t *testing.T) {
s := teststorage.New(t, 600000)
- t.Cleanup(func() {
- _ = s.Close()
- })
+ t.Cleanup(func() { _ = s.Close() })
runScrapeLoopTest(t, s, true)
})
}
func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrder bool) {
- // Create an appender for adding samples to the storage.
- app := s.Appender(context.Background())
- capp := &collectResultAppender{next: app}
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return capp }, 0)
+ sl, _ := newTestScrapeLoop(t, withAppendable(s))
// Current time for generating timestamps.
now := time.Now()
@@ -141,37 +134,35 @@ func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrde
timestampOutOfOrder := now.Add(-5 * time.Minute)
timestampInorder2 := now.Add(5 * time.Minute)
- slApp := sl.appender(context.Background())
- _, _, _, err := sl.append(slApp, []byte(`metric_total{a="1",b="1"} 1`), "text/plain", timestampInorder1)
+ app := sl.appender()
+ _, _, _, err := app.append([]byte(`metric_total{a="1",b="1"} 1`), "text/plain", timestampInorder1)
require.NoError(t, err)
- _, _, _, err = sl.append(slApp, []byte(`metric_total{a="1",b="1"} 2`), "text/plain", timestampOutOfOrder)
+ _, _, _, err = app.append([]byte(`metric_total{a="1",b="1"} 2`), "text/plain", timestampOutOfOrder)
require.NoError(t, err)
- _, _, _, err = sl.append(slApp, []byte(`metric_total{a="1",b="1"} 3`), "text/plain", timestampInorder2)
+ _, _, _, err = app.append([]byte(`metric_total{a="1",b="1"} 3`), "text/plain", timestampInorder2)
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
// Query the samples back from the storage.
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
require.NoError(t, err)
- defer q.Close()
+ t.Cleanup(func() { _ = q.Close() })
// Use a matcher to filter the metric name.
- series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "metric_total"))
+ series := q.Select(t.Context(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "metric_total"))
- var results []floatSample
+ var results []sample
for series.Next() {
it := series.At().Iterator(nil)
for it.Next() == chunkenc.ValFloat {
t, v := it.At()
- results = append(results, floatSample{
- metric: series.At().Labels(),
- t: t,
- f: v,
+ results = append(results, sample{
+ L: series.At().Labels(),
+ T: t,
+ V: v,
})
}
require.NoError(t, it.Err())
@@ -179,16 +170,16 @@ func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrde
require.NoError(t, series.Err())
// Define the expected results
- want := []floatSample{
+ want := []sample{
{
- metric: labels.FromStrings("__name__", "metric_total", "a", "1", "b", "1"),
- t: timestamp.FromTime(timestampInorder1),
- f: 1,
+ L: labels.FromStrings("__name__", "metric_total", "a", "1", "b", "1"),
+ T: timestamp.FromTime(timestampInorder1),
+ V: 1,
},
{
- metric: labels.FromStrings("__name__", "metric_total", "a", "1", "b", "1"),
- t: timestamp.FromTime(timestampInorder2),
- f: 3,
+ L: labels.FromStrings("__name__", "metric_total", "a", "1", "b", "1"),
+ T: timestamp.FromTime(timestampInorder2),
+ V: 3,
},
}
@@ -200,7 +191,7 @@ func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrde
}
// Regression test against https://github.com/prometheus/prometheus/issues/15831.
-func TestScrapeAppendMetadataUpdate(t *testing.T) {
+func TestScrapeAppend_MetadataUpdate(t *testing.T) {
const (
scrape1 = `# TYPE test_metric counter
# HELP test_metric some help text
@@ -223,60 +214,54 @@ test_metric2{foo="bar"} 22
# EOF`
)
- // Create an appender for adding samples to the storage.
- capp := &collectResultAppender{next: nopAppender{}}
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return capp }, 0)
+ appTest := teststorage.NewAppendable()
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
now := time.Now()
- slApp := sl.appender(context.Background())
- _, _, _, err := sl.append(slApp, []byte(scrape1), "application/openmetrics-text", now)
+ app := sl.appender()
+ _, _, _, err := app.append([]byte(scrape1), "application/openmetrics-text", now)
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
- testutil.RequireEqualWithOptions(t, []metadataEntry{
- {metric: labels.FromStrings("__name__", "test_metric_total"), m: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
- {metric: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), m: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}},
- }, capp.resultMetadata, []cmp.Option{cmp.Comparer(metadataEntryEqual)})
- capp.resultMetadata = nil
+ require.NoError(t, app.Commit())
+ testutil.RequireEqual(t, []sample{
+ {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
+ {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}},
+ }, appTest.ResultMetadata())
+ appTest.ResultReset()
- // Next (the same) scrape should not add new metadata entries.
- slApp = sl.appender(context.Background())
- _, _, _, err = sl.append(slApp, []byte(scrape1), "application/openmetrics-text", now.Add(15*time.Second))
+	// Next (the same) scrape should not add new metadata entries.
+ app = sl.appender()
+ _, _, _, err = app.append([]byte(scrape1), "application/openmetrics-text", now.Add(15*time.Second))
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
- testutil.RequireEqualWithOptions(t, []metadataEntry(nil), capp.resultMetadata, []cmp.Option{cmp.Comparer(metadataEntryEqual)})
+ require.NoError(t, app.Commit())
+ require.Empty(t, appTest.ResultMetadata())
+ appTest.ResultReset()
- slApp = sl.appender(context.Background())
- _, _, _, err = sl.append(slApp, []byte(scrape2), "application/openmetrics-text", now.Add(15*time.Second))
+ app = sl.appender()
+ _, _, _, err = app.append([]byte(scrape2), "application/openmetrics-text", now.Add(15*time.Second))
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
- testutil.RequireEqualWithOptions(t, []metadataEntry{
- {metric: labels.FromStrings("__name__", "test_metric_total"), m: metadata.Metadata{Type: "counter", Unit: "metric", Help: "different help text"}}, // Here, technically we should have no unit, but it's a known limitation of the current implementation.
- {metric: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), m: metadata.Metadata{Type: "gauge", Unit: "metric2", Help: "other help text"}},
- }, capp.resultMetadata, []cmp.Option{cmp.Comparer(metadataEntryEqual)})
+ require.NoError(t, app.Commit())
+ testutil.RequireEqual(t, []sample{
+ {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "different help text"}}, // Here, technically we should have no unit, but it's a known limitation of the current implementation.
+ {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "metric2", Help: "other help text"}},
+ }, appTest.ResultMetadata())
+ appTest.ResultReset()
}
-type nopScraper struct {
- scraper
-}
+func TestScrapeReportMetadata(t *testing.T) {
+ appTest := teststorage.NewAppendable()
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
+ app := sl.appender()
-func (nopScraper) Report(time.Time, time.Duration, error) {}
-
-func TestScrapeReportMetadataUpdate(t *testing.T) {
- // Create an appender for adding samples to the storage.
- capp := &collectResultAppender{next: nopAppender{}}
- sl := newBasicScrapeLoop(t, context.Background(), nopScraper{}, func(context.Context) storage.Appender { return capp }, 0)
now := time.Now()
- slApp := sl.appender(context.Background())
-
- require.NoError(t, sl.report(slApp, now, 2*time.Second, 1, 1, 1, 512, nil))
- require.NoError(t, slApp.Commit())
- testutil.RequireEqualWithOptions(t, []metadataEntry{
- {metric: labels.FromStrings("__name__", "up"), m: scrapeHealthMetric.Metadata},
- {metric: labels.FromStrings("__name__", "scrape_duration_seconds"), m: scrapeDurationMetric.Metadata},
- {metric: labels.FromStrings("__name__", "scrape_samples_scraped"), m: scrapeSamplesMetric.Metadata},
- {metric: labels.FromStrings("__name__", "scrape_samples_post_metric_relabeling"), m: samplesPostRelabelMetric.Metadata},
- {metric: labels.FromStrings("__name__", "scrape_series_added"), m: scrapeSeriesAddedMetric.Metadata},
- }, capp.resultMetadata, []cmp.Option{cmp.Comparer(metadataEntryEqual)})
+ require.NoError(t, sl.report(app, now, 2*time.Second, 1, 1, 1, 512, nil))
+ require.NoError(t, app.Commit())
+ testutil.RequireEqual(t, []sample{
+ {L: labels.FromStrings("__name__", "up"), M: scrapeHealthMetric.Metadata},
+ {L: labels.FromStrings("__name__", "scrape_duration_seconds"), M: scrapeDurationMetric.Metadata},
+ {L: labels.FromStrings("__name__", "scrape_samples_scraped"), M: scrapeSamplesMetric.Metadata},
+ {L: labels.FromStrings("__name__", "scrape_samples_post_metric_relabeling"), M: samplesPostRelabelMetric.Metadata},
+ {L: labels.FromStrings("__name__", "scrape_series_added"), M: scrapeSeriesAddedMetric.Metadata},
+ }, appTest.ResultMetadata())
}
func TestIsSeriesPartOfFamily(t *testing.T) {
@@ -329,7 +314,7 @@ func TestIsSeriesPartOfFamily(t *testing.T) {
func TestDroppedTargetsList(t *testing.T) {
var (
- app = &nopAppendable{}
+ app = teststorage.NewAppendable()
cfg = &config.ScrapeConfig{
JobName: "dropMe",
ScrapeInterval: model.Duration(1),
@@ -373,9 +358,7 @@ func TestDroppedTargetsList(t *testing.T) {
// TestDiscoveredLabelsUpdate checks that DiscoveredLabels are updated
// even when new labels don't affect the target `hash`.
func TestDiscoveredLabelsUpdate(t *testing.T) {
- sp := &scrapePool{
- metrics: newTestScrapeMetrics(t),
- }
+ sp := newTestScrapePool(t, nil)
// These are used when syncing so need this to avoid a panic.
sp.config = &config.ScrapeConfig{
@@ -447,13 +430,8 @@ func (*testLoop) getCache() *scrapeCache {
func TestScrapePoolStop(t *testing.T) {
t.Parallel()
- sp := &scrapePool{
- activeTargets: map[uint64]*Target{},
- loops: map[uint64]loop{},
- cancel: func() {},
- client: http.DefaultClient,
- metrics: newTestScrapeMetrics(t),
- }
+ sp := newTestScrapePool(t, nil)
+
var mtx sync.Mutex
stopped := map[uint64]bool{}
numTargets := 20
@@ -505,26 +483,42 @@ func TestScrapePoolStop(t *testing.T) {
require.Empty(t, sp.loops, "Loops were not cleared on stopping: %d left", len(sp.loops))
}
+// TestScrapePoolReload tests the reload logic, verifying that:
+// * all loops are reloaded, reusing the cache if the scrape config changed.
+// * reloaded loops are stopped before new ones are started.
+// * new scrapeLoops are configured with the updated scrape config.
func TestScrapePoolReload(t *testing.T) {
t.Parallel()
- var mtx sync.Mutex
- numTargets := 20
- stopped := map[uint64]bool{}
+ var (
+ mtx sync.Mutex
+ numTargets = 20
+ stopped = map[uint64]bool{}
+ )
- reloadCfg := &config.ScrapeConfig{
+ cfg0 := &config.ScrapeConfig{}
+ cfg1 := &config.ScrapeConfig{
ScrapeInterval: model.Duration(3 * time.Second),
ScrapeTimeout: model.Duration(2 * time.Second),
MetricNameValidationScheme: model.UTF8Validation,
MetricNameEscapingScheme: model.AllowUTF8,
+
+ // Test a few example options.
+ SampleLimit: 123,
+ ScrapeFallbackProtocol: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
}
- // On starting to run, new loops created on reload check whether their preceding
- // equivalents have been stopped.
- newLoop := func(opts scrapeLoopOptions) loop {
- l := &testLoop{interval: time.Duration(reloadCfg.ScrapeInterval), timeout: time.Duration(reloadCfg.ScrapeTimeout)}
+ newLoopCfg1 := func(opts scrapeLoopOptions) loop {
+ // Test cfg1 is being used.
+ require.Equal(t, cfg1, opts.sp.config)
+
+		// Inject our testLoop that allows mocking start and stop.
+ l := &testLoop{interval: opts.interval, timeout: opts.timeout}
+
+ // On start, expect previous loop instances for the same target to be stopped.
l.startFunc = func(interval, timeout time.Duration, _ chan<- error) {
- require.Equal(t, 3*time.Second, interval, "Unexpected scrape interval")
- require.Equal(t, 2*time.Second, timeout, "Unexpected scrape timeout")
+ // Ensure cfg1 interval and timeout are correctly configured.
+ require.Equal(t, time.Duration(cfg1.ScrapeInterval), interval, "Unexpected scrape interval")
+ require.Equal(t, time.Duration(cfg1.ScrapeTimeout), timeout, "Unexpected scrape timeout")
mtx.Lock()
targetScraper := opts.scraper.(*targetScraper)
@@ -534,32 +528,21 @@ func TestScrapePoolReload(t *testing.T) {
return l
}
+ // Create test pool.
reg, metrics := newTestRegistryAndScrapeMetrics(t)
- sp := &scrapePool{
- appendable: &nopAppendable{},
- activeTargets: map[uint64]*Target{},
- loops: map[uint64]loop{},
- newLoop: newLoop,
- logger: nil,
- client: http.DefaultClient,
- metrics: metrics,
- symbolTable: labels.NewSymbolTable(),
- }
-
- // Reloading a scrape pool with a new scrape configuration must stop all scrape
- // loops and start new ones. A new loop must not be started before the preceding
- // one terminated.
+ sp := newTestScrapePool(t, newLoopCfg1)
+ sp.metrics = metrics
+ // Prefill pool with 20 loops, simulating 20 scrape targets.
for i := range numTargets {
- labels := labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i))
t := &Target{
- labels: labels,
- scrapeConfig: &config.ScrapeConfig{},
+ labels: labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)),
+ scrapeConfig: cfg0,
}
l := &testLoop{}
d := time.Duration((i+1)*20) * time.Millisecond
l.stopFunc = func() {
- time.Sleep(d)
+			time.Sleep(d) // Sleep a staggered amount of time on stop.
mtx.Lock()
stopped[t.hash()] = true
@@ -569,36 +552,26 @@ func TestScrapePoolReload(t *testing.T) {
sp.activeTargets[t.hash()] = t
sp.loops[t.hash()] = l
}
- done := make(chan struct{})
beforeTargets := map[uint64]*Target{}
maps.Copy(beforeTargets, sp.activeTargets)
- reloadTime := time.Now()
-
- go func() {
- sp.reload(reloadCfg)
- close(done)
- }()
-
- select {
- case <-time.After(5 * time.Second):
- require.FailNow(t, "scrapeLoop.reload() did not return as expected")
- case <-done:
- // This should have taken at least as long as the last target slept.
- require.GreaterOrEqual(t, time.Since(reloadTime), time.Duration(numTargets*20)*time.Millisecond, "scrapeLoop.stop() exited before all targets stopped")
- }
-
+ // Reloading a scrape pool with a new scrape configuration must stop all scrape
+ // loops and start new ones. A new loop must not be started before the preceding
+ // one terminated.
+ require.NoError(t, sp.reload(cfg1))
+ var stoppedCount int
mtx.Lock()
- require.Len(t, stopped, numTargets, "Unexpected number of stopped loops")
+ stoppedCount = len(stopped)
mtx.Unlock()
-
+ require.Equal(t, numTargets, stoppedCount, "Unexpected number of stopped loops")
require.Equal(t, sp.activeTargets, beforeTargets, "Reloading affected target states unexpectedly")
- require.Len(t, sp.loops, numTargets, "Unexpected number of stopped loops after reload")
+ require.Len(t, sp.loops, numTargets, "Unexpected number of loops after reload")
+	// Check that prometheus_target_reload_length_seconds reflects cfg1.ScrapeInterval.
got, err := gatherLabels(reg, "prometheus_target_reload_length_seconds")
require.NoError(t, err)
- expectedName, expectedValue := "interval", "3s"
+ expectedName, expectedValue := "interval", cfg1.ScrapeInterval.String()
require.Equal(t, [][]*dto.LabelPair{{{Name: &expectedName, Value: &expectedValue}}}, got)
require.Equal(t, 1.0, prom_testutil.ToFloat64(sp.metrics.targetScrapePoolReloads))
}
@@ -619,22 +592,12 @@ func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) {
return l
}
reg, metrics := newTestRegistryAndScrapeMetrics(t)
- sp := &scrapePool{
- appendable: &nopAppendable{},
- activeTargets: map[uint64]*Target{
- 1: {
- labels: labels.FromStrings(model.ScrapeIntervalLabel, "5s", model.ScrapeTimeoutLabel, "3s"),
- },
- },
- loops: map[uint64]loop{
- 1: noopLoop(),
- },
- newLoop: newLoop,
- logger: nil,
- client: http.DefaultClient,
- metrics: metrics,
- symbolTable: labels.NewSymbolTable(),
+ sp := newTestScrapePool(t, newLoop)
+ sp.activeTargets[1] = &Target{
+ labels: labels.FromStrings(model.ScrapeIntervalLabel, "5s", model.ScrapeTimeoutLabel, "3s"),
}
+ sp.metrics = metrics
+ sp.loops[1] = noopLoop()
err := sp.reload(reloadCfg)
if err != nil {
@@ -680,18 +643,10 @@ func TestScrapePoolTargetLimit(t *testing.T) {
}
return l
}
- sp := &scrapePool{
- appendable: &nopAppendable{},
- activeTargets: map[uint64]*Target{},
- loops: map[uint64]loop{},
- newLoop: newLoop,
- logger: promslog.NewNopLogger(),
- client: http.DefaultClient,
- metrics: newTestScrapeMetrics(t),
- symbolTable: labels.NewSymbolTable(),
- }
- tgs := []*targetgroup.Group{}
+ sp := newTestScrapePool(t, newLoop)
+
+ var tgs []*targetgroup.Group
for i := range 50 {
tgs = append(tgs,
&targetgroup.Group{
@@ -781,12 +736,12 @@ func TestScrapePoolTargetLimit(t *testing.T) {
tgs = append(tgs,
&targetgroup.Group{
Targets: []model.LabelSet{
- {model.AddressLabel: model.LabelValue("127.0.0.1:1090")},
+ {model.AddressLabel: "127.0.0.1:1090"},
},
},
&targetgroup.Group{
Targets: []model.LabelSet{
- {model.AddressLabel: model.LabelValue("127.0.0.1:1090")},
+ {model.AddressLabel: "127.0.0.1:1090"},
},
},
)
@@ -796,62 +751,48 @@ func TestScrapePoolTargetLimit(t *testing.T) {
validateErrorMessage(false)
}
-func TestScrapePoolAppender(t *testing.T) {
- cfg := &config.ScrapeConfig{
- MetricNameValidationScheme: model.UTF8Validation,
- MetricNameEscapingScheme: model.AllowUTF8,
- }
- app := &nopAppendable{}
- sp, _ := newScrapePool(cfg, app, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+func TestScrapePoolAppenderWithLimits(t *testing.T) {
+	// Create a unique value to validate the correct chain of appenders.
+ baseAppender := struct{ storage.Appender }{}
+ appendable := appendableFunc(func(context.Context) storage.Appender { return baseAppender })
- loop := sp.newLoop(scrapeLoopOptions{
- target: &Target{},
- })
- appl, ok := loop.(*scrapeLoop)
- require.True(t, ok, "Expected scrapeLoop but got %T", loop)
-
- wrapped := appender(appl.appender(context.Background()), 0, 0, histogram.ExponentialSchemaMax)
+ sl, _ := newTestScrapeLoop(t, withAppendable(appendable))
+ wrapped := appenderWithLimits(sl.appendable.Appender(context.Background()), 0, 0, histogram.ExponentialSchemaMax)
tl, ok := wrapped.(*timeLimitAppender)
require.True(t, ok, "Expected timeLimitAppender but got %T", wrapped)
- _, ok = tl.Appender.(nopAppender)
- require.True(t, ok, "Expected base appender but got %T", tl.Appender)
+ require.Equal(t, baseAppender, tl.Appender, "Expected base appender but got %T", tl.Appender)
sampleLimit := 100
- loop = sp.newLoop(scrapeLoopOptions{
- target: &Target{},
- sampleLimit: sampleLimit,
+ sl, _ = newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendable = appendable
+ sl.sampleLimit = sampleLimit
})
- appl, ok = loop.(*scrapeLoop)
- require.True(t, ok, "Expected scrapeLoop but got %T", loop)
+ wrapped = appenderWithLimits(sl.appendable.Appender(context.Background()), sampleLimit, 0, histogram.ExponentialSchemaMax)
- wrapped = appender(appl.appender(context.Background()), sampleLimit, 0, histogram.ExponentialSchemaMax)
-
- sl, ok := wrapped.(*limitAppender)
+ la, ok := wrapped.(*limitAppender)
require.True(t, ok, "Expected limitAppender but got %T", wrapped)
- tl, ok = sl.Appender.(*timeLimitAppender)
- require.True(t, ok, "Expected timeLimitAppender but got %T", sl.Appender)
+ tl, ok = la.Appender.(*timeLimitAppender)
+ require.True(t, ok, "Expected timeLimitAppender but got %T", la.Appender)
- _, ok = tl.Appender.(nopAppender)
- require.True(t, ok, "Expected base appender but got %T", tl.Appender)
+ require.Equal(t, baseAppender, tl.Appender, "Expected base appender but got %T", tl.Appender)
- wrapped = appender(appl.appender(context.Background()), sampleLimit, 100, histogram.ExponentialSchemaMax)
+ wrapped = appenderWithLimits(sl.appendable.Appender(context.Background()), sampleLimit, 100, histogram.ExponentialSchemaMax)
bl, ok := wrapped.(*bucketLimitAppender)
require.True(t, ok, "Expected bucketLimitAppender but got %T", wrapped)
- sl, ok = bl.Appender.(*limitAppender)
+ la, ok = bl.Appender.(*limitAppender)
require.True(t, ok, "Expected limitAppender but got %T", bl)
- tl, ok = sl.Appender.(*timeLimitAppender)
- require.True(t, ok, "Expected timeLimitAppender but got %T", sl.Appender)
+ tl, ok = la.Appender.(*timeLimitAppender)
+ require.True(t, ok, "Expected timeLimitAppender but got %T", la.Appender)
- _, ok = tl.Appender.(nopAppender)
- require.True(t, ok, "Expected base appender but got %T", tl.Appender)
+ require.Equal(t, baseAppender, tl.Appender, "Expected base appender but got %T", tl.Appender)
- wrapped = appender(appl.appender(context.Background()), sampleLimit, 100, 0)
+ wrapped = appenderWithLimits(sl.appendable.Appender(context.Background()), sampleLimit, 100, 0)
ml, ok := wrapped.(*maxSchemaAppender)
require.True(t, ok, "Expected maxSchemaAppender but got %T", wrapped)
@@ -859,14 +800,13 @@ func TestScrapePoolAppender(t *testing.T) {
bl, ok = ml.Appender.(*bucketLimitAppender)
require.True(t, ok, "Expected bucketLimitAppender but got %T", wrapped)
- sl, ok = bl.Appender.(*limitAppender)
+ la, ok = bl.Appender.(*limitAppender)
require.True(t, ok, "Expected limitAppender but got %T", bl)
- tl, ok = sl.Appender.(*timeLimitAppender)
- require.True(t, ok, "Expected timeLimitAppender but got %T", sl.Appender)
+ tl, ok = la.Appender.(*timeLimitAppender)
+ require.True(t, ok, "Expected timeLimitAppender but got %T", la.Appender)
- _, ok = tl.Appender.(nopAppender)
- require.True(t, ok, "Expected base appender but got %T", tl.Appender)
+ require.Equal(t, baseAppender, tl.Appender, "Expected base appender but got %T", tl.Appender)
}
func TestScrapePoolRaces(t *testing.T) {
@@ -881,7 +821,7 @@ func TestScrapePoolRaces(t *testing.T) {
MetricNameEscapingScheme: model.AllowUTF8,
}
}
- sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ sp, _ := newScrapePool(newConfig(), teststorage.NewAppendable(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
tgts := []*targetgroup.Group{
{
Targets: []model.LabelSet{
@@ -907,7 +847,7 @@ func TestScrapePoolRaces(t *testing.T) {
for range 20 {
time.Sleep(10 * time.Millisecond)
- sp.reload(newConfig())
+ _ = sp.reload(newConfig())
}
sp.stop()
}
@@ -924,16 +864,7 @@ func TestScrapePoolScrapeLoopsStarted(t *testing.T) {
}
return l
}
- sp := &scrapePool{
- appendable: &nopAppendable{},
- activeTargets: map[uint64]*Target{},
- loops: map[uint64]loop{},
- newLoop: newLoop,
- logger: nil,
- client: http.DefaultClient,
- metrics: newTestScrapeMetrics(t),
- symbolTable: labels.NewSymbolTable(),
- }
+ sp := newTestScrapePool(t, newLoop)
tgs := []*targetgroup.Group{
{
@@ -964,51 +895,13 @@ func TestScrapePoolScrapeLoopsStarted(t *testing.T) {
}
}
-func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app func(ctx context.Context) storage.Appender, interval time.Duration) *scrapeLoop {
- return newBasicScrapeLoopWithFallback(t, ctx, scraper, app, interval, "")
-}
-
-func newBasicScrapeLoopWithFallback(t testing.TB, ctx context.Context, scraper scraper, app func(ctx context.Context) storage.Appender, interval time.Duration, fallback string) *scrapeLoop {
- return newScrapeLoop(ctx,
- scraper,
- nil, nil,
- nopMutator,
- nopMutator,
- app,
- nil,
- labels.NewSymbolTable(),
- 0,
- true,
- false,
- true,
- 0, 0, histogram.ExponentialSchemaMax,
- nil,
- interval,
- time.Hour,
- false,
- false,
- false,
- false,
- false,
- false,
- true,
- nil,
- false,
- newTestScrapeMetrics(t),
- false,
- model.UTF8Validation,
- model.NoEscaping,
- fallback,
- )
-}
-
func TestScrapeLoopStopBeforeRun(t *testing.T) {
t.Parallel()
- scraper := &testScraper{}
- sl := newBasicScrapeLoop(t, context.Background(), scraper, nil, 1)
+
+ sl, scraper := newTestScrapeLoop(t)
// The scrape pool synchronizes on stopping scrape loops. However, new scrape
- // loops are started asynchronously. Thus it's possible, that a loop is stopped
+	// loops are started asynchronously. Thus, it's possible that a loop is stopped
// again before having started properly.
// Stopping not-yet-started loops must block until the run method was called and exited.
// The run method must exit immediately.
@@ -1053,26 +946,24 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) {
func nopMutator(l labels.Labels) labels.Labels { return l }
func TestScrapeLoopStop(t *testing.T) {
- var (
- signal = make(chan struct{}, 1)
- appender = &collectResultAppender{}
- scraper = &testScraper{}
- app = func(context.Context) storage.Appender { return appender }
- )
+ signal := make(chan struct{}, 1)
- // Since we're writing samples directly below we need to provide a protocol fallback.
- sl := newBasicScrapeLoopWithFallback(t, context.Background(), scraper, app, 10*time.Millisecond, "text/plain")
+ appTest := teststorage.NewAppendable()
+ sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendable = appTest
+		// Since we're writing samples directly below, we need to provide a protocol fallback.
+ sl.fallbackScrapeProtocol = "text/plain"
+ })
// Terminate loop after 2 scrapes.
numScrapes := 0
-
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
numScrapes++
if numScrapes == 2 {
go sl.stop()
<-sl.ctx.Done()
}
- w.Write([]byte("metric_a 42\n"))
+ _, _ = w.Write([]byte("metric_a 42\n"))
return ctx.Err()
}
@@ -1087,23 +978,24 @@ func TestScrapeLoopStop(t *testing.T) {
require.FailNow(t, "Scrape wasn't stopped.")
}
+ got := appTest.ResultSamples()
// We expected 1 actual sample for each scrape plus 5 for report samples.
// At least 2 scrapes were made, plus the final stale markers.
- require.GreaterOrEqual(t, len(appender.resultFloats), 6*3, "Expected at least 3 scrapes with 6 samples each.")
- require.Zero(t, len(appender.resultFloats)%6, "There is a scrape with missing samples.")
+ require.GreaterOrEqual(t, len(got), 6*3, "Expected at least 3 scrapes with 6 samples each.")
+ require.Zero(t, len(got)%6, "There is a scrape with missing samples.")
// All samples in a scrape must have the same timestamp.
var ts int64
- for i, s := range appender.resultFloats {
+ for i, s := range got {
switch {
case i%6 == 0:
- ts = s.t
- case s.t != ts:
+ ts = s.T
+ case s.T != ts:
t.Fatalf("Unexpected multiple timestamps within single scrape")
}
}
// All samples from the last scrape must be stale markers.
- for _, s := range appender.resultFloats[len(appender.resultFloats)-5:] {
- require.True(t, value.IsStaleNaN(s.f), "Appended last sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(s.f))
+ for _, s := range got[len(got)-5:] {
+ require.True(t, value.IsStaleNaN(s.V), "Appended last sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(s.V))
}
}
@@ -1112,45 +1004,10 @@ func TestScrapeLoopRun(t *testing.T) {
var (
signal = make(chan struct{}, 1)
errc = make(chan error)
-
- scraper = &testScraper{}
- app = func(context.Context) storage.Appender { return &nopAppender{} }
- scrapeMetrics = newTestScrapeMetrics(t)
- )
-
- ctx, cancel := context.WithCancel(context.Background())
- sl := newScrapeLoop(ctx,
- scraper,
- nil, nil,
- nopMutator,
- nopMutator,
- app,
- nil,
- nil,
- 0,
- true,
- false,
- true,
- 0, 0, histogram.ExponentialSchemaMax,
- nil,
- time.Second,
- time.Hour,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- nil,
- false,
- scrapeMetrics,
- false,
- model.UTF8Validation,
- model.NoEscaping,
- "",
)
+ ctx, cancel := context.WithCancel(t.Context())
+ sl, scraper := newTestScrapeLoop(t, withCtx(ctx))
// The loop must terminate during the initial offset if the context
// is canceled.
scraper.offsetDur = time.Hour
@@ -1172,24 +1029,26 @@ func TestScrapeLoopRun(t *testing.T) {
require.FailNow(t, "Unexpected error", "err: %s", err)
}
+ ctx, cancel = context.WithCancel(t.Context())
+ sl, scraper = newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.ctx = ctx
+ sl.timeout = 100 * time.Millisecond
+ })
// The provided timeout must cause cancellation of the context passed down to the
// scraper. The scraper has to respect the context.
scraper.offsetDur = 0
- block := make(chan struct{})
+ blockCtx, blockCancel := context.WithCancel(t.Context())
scraper.scrapeFunc = func(ctx context.Context, _ io.Writer) error {
select {
- case <-block:
+ case <-blockCtx.Done():
+ cancel()
case <-ctx.Done():
return ctx.Err()
}
return nil
}
- ctx, cancel = context.WithCancel(context.Background())
- sl = newBasicScrapeLoop(t, ctx, scraper, app, time.Second)
- sl.timeout = 100 * time.Millisecond
-
go func() {
sl.run(errc)
signal <- struct{}{}
@@ -1205,9 +1064,7 @@ func TestScrapeLoopRun(t *testing.T) {
// We already caught the timeout error and are certainly in the loop.
// Let the scrapes returns immediately to cause no further timeout errors
// and check whether canceling the parent context terminates the loop.
- close(block)
- cancel()
-
+ blockCancel()
select {
case <-signal:
// Loop terminated as expected.
@@ -1222,13 +1079,10 @@ func TestScrapeLoopForcedErr(t *testing.T) {
var (
signal = make(chan struct{}, 1)
errc = make(chan error)
-
- scraper = &testScraper{}
- app = func(context.Context) storage.Appender { return &nopAppender{} }
)
- ctx, cancel := context.WithCancel(context.Background())
- sl := newBasicScrapeLoop(t, ctx, scraper, app, time.Second)
+ ctx, cancel := context.WithCancel(t.Context())
+ sl, scraper := newTestScrapeLoop(t, withCtx(ctx))
forcedErr := errors.New("forced err")
sl.setForcedError(forcedErr)
@@ -1258,51 +1112,47 @@ func TestScrapeLoopForcedErr(t *testing.T) {
}
}
-func TestScrapeLoopMetadata(t *testing.T) {
+func TestScrapeLoopRun_ContextCancelTerminatesBlockedSend(t *testing.T) {
+ // Regression test for issue #17553
+ defer goleak.VerifyNone(t)
+
var (
- signal = make(chan struct{})
- scraper = &testScraper{}
- scrapeMetrics = newTestScrapeMetrics(t)
- cache = newScrapeCache(scrapeMetrics)
+ signal = make(chan struct{})
+ errc = make(chan error)
)
- defer close(signal)
- ctx, cancel := context.WithCancel(context.Background())
- sl := newScrapeLoop(ctx,
- scraper,
- nil, nil,
- nopMutator,
- nopMutator,
- func(context.Context) storage.Appender { return nopAppender{} },
- cache,
- labels.NewSymbolTable(),
- 0,
- true,
- false,
- true,
- 0, 0, histogram.ExponentialSchemaMax,
- nil,
- 0,
- 0,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- nil,
- false,
- scrapeMetrics,
- false,
- model.UTF8Validation,
- model.NoEscaping,
- "",
- )
- defer cancel()
+ ctx, cancel := context.WithCancel(t.Context())
+ sl, scraper := newTestScrapeLoop(t, withCtx(ctx))
- slApp := sl.appender(ctx)
- total, _, _, err := sl.append(slApp, []byte(`# TYPE test_metric counter
+ forcedErr := errors.New("forced err")
+ sl.setForcedError(forcedErr)
+
+ scraper.scrapeFunc = func(context.Context, io.Writer) error {
+ return nil
+ }
+
+ go func() {
+ sl.run(errc)
+ close(signal)
+ }()
+
+ time.Sleep(50 * time.Millisecond)
+
+ cancel()
+
+ select {
+ case <-signal:
+ // success case
+ case <-time.After(3 * time.Second):
+ require.FailNow(t, "Scrape loop failed to exit on context cancellation (goroutine leak detected)")
+ }
+}
+
+func TestScrapeLoopMetadata(t *testing.T) {
+ sl, _ := newTestScrapeLoop(t)
+
+ app := sl.appender()
+ total, _, _, err := app.append([]byte(`# TYPE test_metric counter
# HELP test_metric some help text
# UNIT test_metric metric
test_metric_total 1
@@ -1310,54 +1160,42 @@ test_metric_total 1
# HELP test_metric_no_type other help text
# EOF`), "application/openmetrics-text", time.Now())
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
require.Equal(t, 1, total)
- md, ok := cache.GetMetadata("test_metric")
+ md, ok := sl.cache.GetMetadata("test_metric")
require.True(t, ok, "expected metadata to be present")
require.Equal(t, model.MetricTypeCounter, md.Type, "unexpected metric type")
require.Equal(t, "some help text", md.Help)
require.Equal(t, "metric", md.Unit)
- md, ok = cache.GetMetadata("test_metric_no_help")
+ md, ok = sl.cache.GetMetadata("test_metric_no_help")
require.True(t, ok, "expected metadata to be present")
require.Equal(t, model.MetricTypeGauge, md.Type, "unexpected metric type")
require.Empty(t, md.Help)
require.Empty(t, md.Unit)
- md, ok = cache.GetMetadata("test_metric_no_type")
+ md, ok = sl.cache.GetMetadata("test_metric_no_type")
require.True(t, ok, "expected metadata to be present")
require.Equal(t, model.MetricTypeUnknown, md.Type, "unexpected metric type")
require.Equal(t, "other help text", md.Help)
require.Empty(t, md.Unit)
}
-func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) {
- // Need a full storage for correct Add/AddFast semantics.
- s := teststorage.New(t)
- t.Cleanup(func() { s.Close() })
-
- ctx, cancel := context.WithCancel(context.Background())
- sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0)
- t.Cleanup(func() { cancel() })
-
- return ctx, sl
-}
-
func TestScrapeLoopSeriesAdded(t *testing.T) {
- ctx, sl := simpleTestScrapeLoop(t)
+ sl, _ := newTestScrapeLoop(t)
- slApp := sl.appender(ctx)
- total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "text/plain", time.Time{})
+ app := sl.appender()
+ total, added, seriesAdded, err := app.append([]byte("test_metric 1\n"), "text/plain", time.Time{})
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
require.Equal(t, 1, total)
require.Equal(t, 1, added)
require.Equal(t, 1, seriesAdded)
- slApp = sl.appender(ctx)
- total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\n"), "text/plain", time.Time{})
- require.NoError(t, slApp.Commit())
+ app = sl.appender()
+ total, added, seriesAdded, err = app.append([]byte("test_metric 1\n"), "text/plain", time.Time{})
+ require.NoError(t, app.Commit())
require.NoError(t, err)
require.Equal(t, 1, total)
require.Equal(t, 1, added)
@@ -1365,10 +1203,6 @@ func TestScrapeLoopSeriesAdded(t *testing.T) {
}
func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
- s := teststorage.New(t)
- defer s.Close()
- ctx := t.Context()
-
target := &Target{
labels: labels.FromStrings("pod_label_invalid_012\xff", "test"),
}
@@ -1379,43 +1213,41 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
Replacement: "$1",
NameValidationScheme: model.UTF8Validation,
}}
- sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0)
- sl.sampleMutator = func(l labels.Labels) labels.Labels {
- return mutateSampleLabels(l, target, true, relabelConfig)
- }
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.sampleMutator = func(l labels.Labels) labels.Labels {
+ return mutateSampleLabels(l, target, true, relabelConfig)
+ }
+ })
- slApp := sl.appender(ctx)
- total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "text/plain", time.Time{})
+ app := sl.appender()
+ total, added, seriesAdded, err := app.append([]byte("test_metric 1\n"), "text/plain", time.Time{})
require.ErrorContains(t, err, "invalid metric name or label names")
- require.NoError(t, slApp.Rollback())
+ require.NoError(t, app.Rollback())
require.Equal(t, 1, total)
require.Equal(t, 0, added)
require.Equal(t, 0, seriesAdded)
}
func TestScrapeLoopFailLegacyUnderUTF8(t *testing.T) {
- // Test that scrapes fail when default validation is utf8 but scrape config is
- // legacy.
- s := teststorage.New(t)
- defer s.Close()
- ctx := t.Context()
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.validationScheme = model.LegacyValidation
+ })
- sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0)
- sl.validationScheme = model.LegacyValidation
-
- slApp := sl.appender(ctx)
- total, added, seriesAdded, err := sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "text/plain", time.Time{})
+ app := sl.appender()
+ total, added, seriesAdded, err := app.append([]byte("{\"test.metric\"} 1\n"), "text/plain", time.Time{})
require.ErrorContains(t, err, "invalid metric name or label names")
- require.NoError(t, slApp.Rollback())
+ require.NoError(t, app.Rollback())
require.Equal(t, 1, total)
require.Equal(t, 0, added)
require.Equal(t, 0, seriesAdded)
// When scrapeloop has validation set to UTF-8, the metric is allowed.
- sl.validationScheme = model.UTF8Validation
+ sl, _ = newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.validationScheme = model.UTF8Validation
+ })
- slApp = sl.appender(ctx)
- total, added, seriesAdded, err = sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "text/plain", time.Time{})
+ app = sl.appender()
+ total, added, seriesAdded, err = app.append([]byte("{\"test.metric\"} 1\n"), "text/plain", time.Time{})
require.NoError(t, err)
require.Equal(t, 1, total)
require.Equal(t, 1, added)
@@ -1434,12 +1266,12 @@ func readTextParseTestMetrics(t testing.TB) []byte {
func makeTestGauges(n int) []byte {
sb := bytes.Buffer{}
- fmt.Fprintf(&sb, "# TYPE metric_a gauge\n")
- fmt.Fprintf(&sb, "# HELP metric_a help text\n")
+ sb.WriteString("# TYPE metric_a gauge\n")
+ sb.WriteString("# HELP metric_a help text\n")
for i := range n {
- fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100)
+ _, _ = fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100)
}
- fmt.Fprintf(&sb, "# EOF\n")
+ sb.WriteString("# EOF\n")
return sb.Bytes()
}
@@ -1510,7 +1342,7 @@ func TestPromTextToProto(t *testing.T) {
//
// Recommended CLI invocation:
/*
- export bench=append-v1 && go test ./scrape/... \
+ export bench=append && go test ./scrape/... \
-run '^$' -bench '^BenchmarkScrapeLoopAppend' \
-benchtime 5s -count 6 -cpu 2 -timeout 999m \
| tee ${bench}.txt
@@ -1536,16 +1368,19 @@ func BenchmarkScrapeLoopAppend(b *testing.B) {
{name: "PromProto", contentType: "application/vnd.google.protobuf", parsable: metricsProto},
} {
b.Run(fmt.Sprintf("fmt=%v", bcase.name), func(b *testing.B) {
- ctx, sl := simpleTestScrapeLoop(b)
+ // Need a full storage for correct Add/AddFast semantics.
+ s := teststorage.New(b)
+ b.Cleanup(func() { _ = s.Close() })
- slApp := sl.appender(ctx)
+ sl, _ := newTestScrapeLoop(b, withAppendable(s))
+ app := sl.appender()
ts := time.Time{}
b.ReportAllocs()
b.ResetTimer()
for b.Loop() {
ts = ts.Add(time.Second)
- _, _, _, err := sl.append(slApp, bcase.parsable, bcase.contentType, ts)
+ _, _, _, err := app.append(bcase.parsable, bcase.contentType, ts)
if err != nil {
b.Fatal(err)
}
@@ -1556,30 +1391,85 @@ func BenchmarkScrapeLoopAppend(b *testing.B) {
}
}
+func TestScrapeLoopScrapeAndReport(t *testing.T) {
+ parsableText := readTextParseTestMetrics(t)
+ // On Windows, \r is added when reading, but parsers do not support this. Kill it.
+ parsableText = bytes.ReplaceAll(parsableText, []byte("\r"), nil)
+
+ appTest := teststorage.NewAppendable()
+ sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendable = appTest
+ sl.fallbackScrapeProtocol = "application/openmetrics-text"
+ })
+ scraper.scrapeFunc = func(_ context.Context, writer io.Writer) error {
+ _, err := writer.Write(parsableText)
+ return err
+ }
+
+ ts := time.Time{}
+
+ sl.scrapeAndReport(time.Time{}, ts, nil)
+ require.NoError(t, scraper.lastError)
+
+ require.Len(t, appTest.ResultSamples(), 1862)
+ require.Len(t, appTest.ResultMetadata(), 1862)
+}
+
+// Recommended CLI invocation:
+/*
+ export bench=scrapeAndReport && go test ./scrape/... \
+ -run '^$' -bench '^BenchmarkScrapeLoopScrapeAndReport' \
+ -benchtime 5s -count 6 -cpu 2 -timeout 999m \
+ | tee ${bench}.txt
+*/
+func BenchmarkScrapeLoopScrapeAndReport(b *testing.B) {
+ parsableText := readTextParseTestMetrics(b)
+
+ s := teststorage.New(b)
+ b.Cleanup(func() { _ = s.Close() })
+
+ sl, scraper := newTestScrapeLoop(b, func(sl *scrapeLoop) {
+ sl.appendable = s
+ sl.fallbackScrapeProtocol = "application/openmetrics-text"
+ })
+ scraper.scrapeFunc = func(_ context.Context, writer io.Writer) error {
+ _, err := writer.Write(parsableText)
+ return err
+ }
+
+ ts := time.Time{}
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for b.Loop() {
+ ts = ts.Add(time.Second)
+ sl.scrapeAndReport(time.Time{}, ts, nil)
+ require.NoError(b, scraper.lastError)
+ }
+}
+
func TestSetOptionsHandlingStaleness(t *testing.T) {
s := teststorage.New(t, 600000)
- defer s.Close()
+ t.Cleanup(func() { _ = s.Close() })
signal := make(chan struct{}, 1)
- ctx, cancel := context.WithCancel(context.Background())
+ ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Function to run the scrape loop
runScrapeLoop := func(ctx context.Context, t *testing.T, cue int, action func(*scrapeLoop)) {
- var (
- scraper = &testScraper{}
- app = func(ctx context.Context) storage.Appender {
- return s.Appender(ctx)
- }
- )
- sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond)
+ sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.ctx = ctx
+ sl.appendable = s
+ })
+
numScrapes := 0
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
numScrapes++
if numScrapes == cue {
action(sl)
}
- fmt.Fprintf(w, "metric_a{a=\"1\",b=\"1\"} %d\n", 42+numScrapes)
+ _, _ = fmt.Fprintf(w, "metric_a{a=\"1\",b=\"1\"} %d\n", 42+numScrapes)
return nil
}
sl.run(nil)
@@ -1604,25 +1494,25 @@ func TestSetOptionsHandlingStaleness(t *testing.T) {
t.Fatalf("Scrape wasn't stopped.")
}
- ctx1, cancel := context.WithCancel(context.Background())
+ ctx1, cancel := context.WithCancel(t.Context())
defer cancel()
q, err := s.Querier(0, time.Now().UnixNano())
require.NoError(t, err)
- defer q.Close()
+ t.Cleanup(func() { _ = q.Close() })
series := q.Select(ctx1, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "metric_a"))
- var results []floatSample
+ var results []sample
for series.Next() {
it := series.At().Iterator(nil)
for it.Next() == chunkenc.ValFloat {
t, v := it.At()
- results = append(results, floatSample{
- metric: series.At().Labels(),
- t: t,
- f: v,
+ results = append(results, sample{
+ L: series.At().Labels(),
+ T: t,
+ V: v,
})
}
require.NoError(t, it.Err())
@@ -1630,7 +1520,7 @@ func TestSetOptionsHandlingStaleness(t *testing.T) {
require.NoError(t, series.Err())
var c int
for _, s := range results {
- if value.IsStaleNaN(s.f) {
+ if value.IsStaleNaN(s.V) {
c++
}
}
@@ -1638,25 +1528,25 @@ func TestSetOptionsHandlingStaleness(t *testing.T) {
}
func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
- appender := &collectResultAppender{}
- var (
- signal = make(chan struct{}, 1)
- scraper = &testScraper{}
- app = func(context.Context) storage.Appender { return appender }
- )
+ signal := make(chan struct{}, 1)
+
+ ctx, cancel := context.WithCancel(t.Context())
+ appTest := teststorage.NewAppendable()
+ sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.ctx = ctx
+ sl.appendable = appTest
+ // Since we're writing samples directly below, we need to provide a protocol fallback.
+ sl.fallbackScrapeProtocol = "text/plain"
+ })
- ctx, cancel := context.WithCancel(context.Background())
- // Since we're writing samples directly below we need to provide a protocol fallback.
- sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 10*time.Millisecond, "text/plain")
// Succeed once, several failures, then stop.
numScrapes := 0
-
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
numScrapes++
switch numScrapes {
case 1:
- w.Write([]byte("metric_a 42\n"))
+ _, _ = w.Write([]byte("metric_a 42\n"))
return nil
case 5:
cancel()
@@ -1675,36 +1565,39 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
require.FailNow(t, "Scrape wasn't stopped.")
}
- // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
- // each scrape successful or not.
- require.Len(t, appender.resultFloats, 27, "Appended samples not as expected:\n%s", appender)
- require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected")
- require.True(t, value.IsStaleNaN(appender.resultFloats[6].f),
- "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f))
+ got := appTest.ResultSamples()
+ // 1 successfully scraped sample.
+ // 1 stale marker after the first failure.
+ // 5x 5 report samples, one set for each scrape, successful or not.
+ require.Len(t, got, 27, "Appended samples not as expected:\n%s", appTest)
+ require.Equal(t, 42.0, got[0].V, "Appended first sample not as expected")
+ require.True(t, value.IsStaleNaN(got[6].V),
+ "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(got[6].V))
}
func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
- appender := &collectResultAppender{}
- var (
- signal = make(chan struct{}, 1)
- scraper = &testScraper{}
- app = func(context.Context) storage.Appender { return appender }
- numScrapes = 0
- )
+ signal := make(chan struct{}, 1)
- ctx, cancel := context.WithCancel(context.Background())
- // Since we're writing samples directly below we need to provide a protocol fallback.
- sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 10*time.Millisecond, "text/plain")
+ ctx, cancel := context.WithCancel(t.Context())
+ appTest := teststorage.NewAppendable()
+ sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.ctx = ctx
+ sl.appendable = appTest
+ // Since we're writing samples directly below, we need to provide a protocol fallback.
+ sl.fallbackScrapeProtocol = "text/plain"
+ })
// Succeed once, several failures, then stop.
+ numScrapes := 0
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
numScrapes++
+
switch numScrapes {
case 1:
- w.Write([]byte("metric_a 42\n"))
+ _, _ = w.Write([]byte("metric_a 42\n"))
return nil
case 2:
- w.Write([]byte("7&-\n"))
+ _, _ = w.Write([]byte("7&-\n"))
return nil
case 3:
cancel()
@@ -1719,46 +1612,49 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
select {
case <-signal:
+ // TODO(bwplotka): Prone to flakiness, depend on atomic numScrapes.
case <-time.After(5 * time.Second):
require.FailNow(t, "Scrape wasn't stopped.")
}
- // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
- // each scrape successful or not.
- require.Len(t, appender.resultFloats, 17, "Appended samples not as expected:\n%s", appender)
- require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected")
- require.True(t, value.IsStaleNaN(appender.resultFloats[6].f),
- "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f))
+ got := appTest.ResultSamples()
+ // 1 successfully scraped sample.
+ // 1 stale marker after the first failure.
+ // 3x 5 report samples, one set for each scrape, successful or not.
+ require.Len(t, got, 17, "Appended samples not as expected:\n%s", appTest)
+ require.Equal(t, 42.0, got[0].V, "Appended first sample not as expected")
+ require.True(t, value.IsStaleNaN(got[6].V),
+ "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(got[6].V))
}
-// If we have a target with sample_limit set and scrape initially works but then we hit the sample_limit error,
+// If we have a target with sample_limit set and scrape initially works, but then we hit the sample_limit error,
// then we don't expect to see any StaleNaNs appended for the series that disappeared due to sample_limit error.
func TestScrapeLoopRunCreatesStaleMarkersOnSampleLimit(t *testing.T) {
- appender := &collectResultAppender{}
- var (
- signal = make(chan struct{}, 1)
- scraper = &testScraper{}
- app = func(_ context.Context) storage.Appender { return appender }
- numScrapes = 0
- )
+ signal := make(chan struct{}, 1)
- ctx, cancel := context.WithCancel(context.Background())
- // Since we're writing samples directly below we need to provide a protocol fallback.
- sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 10*time.Millisecond, "text/plain")
- sl.sampleLimit = 4
+ ctx, cancel := context.WithCancel(t.Context())
+ appTest := teststorage.NewAppendable()
+ sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.ctx = ctx
+ sl.appendable = appTest
+ // Since we're writing samples directly below, we need to provide a protocol fallback.
+ sl.fallbackScrapeProtocol = "text/plain"
+ sl.sampleLimit = 4
+ })
// Succeed once, several failures, then stop.
+ numScrapes := 0
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
numScrapes++
switch numScrapes {
case 1:
- w.Write([]byte("metric_a 10\nmetric_b 10\nmetric_c 10\nmetric_d 10\n"))
+ _, _ = w.Write([]byte("metric_a 10\nmetric_b 10\nmetric_c 10\nmetric_d 10\n"))
return nil
case 2:
- w.Write([]byte("metric_a 20\nmetric_b 20\nmetric_c 20\nmetric_d 20\nmetric_e 999\n"))
+ _, _ = w.Write([]byte("metric_a 20\nmetric_b 20\nmetric_c 20\nmetric_d 20\nmetric_e 999\n"))
return nil
case 3:
- w.Write([]byte("metric_a 30\nmetric_b 30\nmetric_c 30\nmetric_d 30\n"))
+ _, _ = w.Write([]byte("metric_a 30\nmetric_b 30\nmetric_c 30\nmetric_d 30\n"))
return nil
case 4:
cancel()
@@ -1777,49 +1673,52 @@ func TestScrapeLoopRunCreatesStaleMarkersOnSampleLimit(t *testing.T) {
require.FailNow(t, "Scrape wasn't stopped.")
}
+ got := appTest.ResultSamples()
+
// 4 scrapes in total:
// #1 - success - 4 samples appended + 5 report series
// #2 - sample_limit exceeded - no samples appended, only 5 report series
// #3 - success - 4 samples appended + 5 report series
// #4 - scrape canceled - 4 StaleNaNs appended because of scrape error + 5 report series
- require.Len(t, appender.resultFloats, (4+5)+5+(4+5)+(4+5), "Appended samples not as expected:\n%s", appender)
+ require.Len(t, got, (4+5)+5+(4+5)+(4+5), "Appended samples not as expected:\n%s", appTest)
// Expect first 4 samples to be metric_X [0-3].
for i := range 4 {
- require.Equal(t, 10.0, appender.resultFloats[i].f, "Appended %d sample not as expected", i)
+ require.Equal(t, 10.0, got[i].V, "Appended %d sample not as expected", i)
}
// Next 5 samples are report series [4-8].
// Next 5 samples are report series for the second scrape [9-13].
// Expect first 4 samples to be metric_X from the third scrape [14-17].
for i := 14; i <= 17; i++ {
- require.Equal(t, 30.0, appender.resultFloats[i].f, "Appended %d sample not as expected", i)
+ require.Equal(t, 30.0, got[i].V, "Appended %d sample not as expected", i)
}
// Next 5 samples are report series [18-22].
// Next 5 samples are report series [23-26].
for i := 23; i <= 26; i++ {
- require.True(t, value.IsStaleNaN(appender.resultFloats[i].f),
- "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[i].f))
+ require.True(t, value.IsStaleNaN(got[i].V),
+ "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(got[i].V))
}
}
func TestScrapeLoopCache(t *testing.T) {
s := teststorage.New(t)
- defer s.Close()
+ t.Cleanup(func() { _ = s.Close() })
- appender := &collectResultAppender{}
- var (
- signal = make(chan struct{}, 1)
- scraper = &testScraper{}
- app = func(ctx context.Context) storage.Appender { appender.next = s.Appender(ctx); return appender }
- )
+ signal := make(chan struct{}, 1)
- ctx, cancel := context.WithCancel(context.Background())
- // Decreasing the scrape interval could make the test fail, as multiple scrapes might be initiated at identical millisecond timestamps.
- // See https://github.com/prometheus/prometheus/issues/12727.
- // Since we're writing samples directly below we need to provide a protocol fallback.
- sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 100*time.Millisecond, "text/plain")
+ ctx, cancel := context.WithCancel(t.Context())
+ appTest := teststorage.NewAppendable().Then(s)
+ sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.ctx = ctx
+ sl.l = promslog.New(&promslog.Config{})
+ sl.appendable = appTest
+ // Since we're writing samples directly below, we need to provide a protocol fallback.
+ sl.fallbackScrapeProtocol = "text/plain"
+ // Decreasing the scrape interval could make the test fail, as multiple scrapes might be initiated at identical millisecond timestamps.
+ // See https://github.com/prometheus/prometheus/issues/12727.
+ sl.interval = 100 * time.Millisecond
+ })
numScrapes := 0
-
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
switch numScrapes {
case 1, 2:
@@ -1837,10 +1736,10 @@ func TestScrapeLoopCache(t *testing.T) {
numScrapes++
switch numScrapes {
case 1:
- w.Write([]byte("metric_a 42\nmetric_b 43\n"))
+ _, _ = w.Write([]byte("metric_a 42\nmetric_b 43\n"))
return nil
case 3:
- w.Write([]byte("metric_a 44\n"))
+ _, _ = w.Write([]byte("metric_a 44\n"))
return nil
case 4:
cancel()
@@ -1859,29 +1758,23 @@ func TestScrapeLoopCache(t *testing.T) {
require.FailNow(t, "Scrape wasn't stopped.")
}
- // 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
- // each scrape successful or not.
- require.Len(t, appender.resultFloats, 26, "Appended samples not as expected:\n%s", appender)
+ // 3 successfully scraped samples.
+ // 3 stale markers after samples went missing.
+ // 4x 5 report samples, one set for each scrape, successful or not.
+ require.Len(t, appTest.ResultSamples(), 26, "Appended samples not as expected:\n%s", appTest)
}
func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
s := teststorage.New(t)
- defer s.Close()
+ t.Cleanup(func() { _ = s.Close() })
- sapp := s.Appender(context.Background())
-
- appender := &collectResultAppender{next: sapp}
- var (
- signal = make(chan struct{}, 1)
- scraper = &testScraper{}
- app = func(context.Context) storage.Appender { return appender }
- )
-
- ctx, cancel := context.WithCancel(context.Background())
- sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond)
+ signal := make(chan struct{}, 1)
+ ctx, cancel := context.WithCancel(t.Context())
+ sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.ctx = ctx
+ })
numScrapes := 0
-
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
numScrapes++
if numScrapes < 5 {
@@ -1889,7 +1782,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
for i := range 500 {
s = fmt.Sprintf("%smetric_%d_%d 42\n", s, i, numScrapes)
}
- w.Write([]byte(s + "&"))
+ _, _ = w.Write([]byte(s + "&"))
} else {
cancel()
}
@@ -1964,37 +1857,38 @@ func TestScrapeLoopAppend(t *testing.T) {
}
for _, test := range tests {
- app := &collectResultAppender{}
-
discoveryLabels := &Target{
labels: labels.FromStrings(test.discoveryLabels...),
}
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
- sl.sampleMutator = func(l labels.Labels) labels.Labels {
- return mutateSampleLabels(l, discoveryLabels, test.honorLabels, nil)
- }
- sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
- return mutateReportSampleLabels(l, discoveryLabels)
- }
+ appTest := teststorage.NewAppendable()
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendable = appTest
+ sl.sampleMutator = func(l labels.Labels) labels.Labels {
+ return mutateSampleLabels(l, discoveryLabels, test.honorLabels, nil)
+ }
+ sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
+ return mutateReportSampleLabels(l, discoveryLabels)
+ }
+ })
now := time.Now()
- slApp := sl.appender(context.Background())
- _, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "text/plain", now)
+ app := sl.appender()
+ _, _, _, err := app.append([]byte(test.scrapeLabels), "text/plain", now)
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
- expected := []floatSample{
+ expected := []sample{
{
- metric: test.expLset,
- t: timestamp.FromTime(now),
- f: test.expValue,
+ L: test.expLset,
+ T: timestamp.FromTime(now),
+ V: test.expValue,
},
}
t.Logf("Test:%s", test.title)
- requireEqual(t, expected, app.resultFloats)
+ requireEqual(t, expected, appTest.ResultSamples())
}
}
@@ -2002,13 +1896,12 @@ func requireEqual(t *testing.T, expected, actual any, msgAndArgs ...any) {
t.Helper()
testutil.RequireEqualWithOptions(t, expected, actual,
[]cmp.Option{
- cmp.Comparer(equalFloatSamples),
- cmp.AllowUnexported(histogramSample{}),
+ cmp.Comparer(func(a, b sample) bool { return a.Equals(b) }),
// StaleNaN samples are generated by iterating over a map, which means that the order
// of samples might be different on every test run. Sort series by label to avoid
// test failures because of that.
- cmpopts.SortSlices(func(a, b floatSample) int {
- return labels.Compare(a.metric, b.metric)
+ cmpopts.SortSlices(func(a, b sample) int {
+ return labels.Compare(a.L, b.L)
}),
},
msgAndArgs...)
@@ -2066,32 +1959,34 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
for name, tc := range testcases {
t.Run(name, func(t *testing.T) {
- app := &collectResultAppender{}
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
- sl.sampleMutator = func(l labels.Labels) labels.Labels {
- return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil)
- }
- slApp := sl.appender(context.Background())
- _, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "text/plain", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC))
+ appTest := teststorage.NewAppendable()
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendable = appTest
+ sl.sampleMutator = func(l labels.Labels) labels.Labels {
+ return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil)
+ }
+ })
+
+ app := sl.appender()
+ _, _, _, err := app.append([]byte(tc.exposedLabels), "text/plain", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC))
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
- requireEqual(t, []floatSample{
+ requireEqual(t, []sample{
{
- metric: labels.FromStrings(tc.expected...),
- t: timestamp.FromTime(time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)),
- f: 0,
+ L: labels.FromStrings(tc.expected...),
+ T: timestamp.FromTime(time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)),
+ V: 0,
},
- }, app.resultFloats)
+ }, appTest.ResultSamples())
})
}
}
func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
- // collectResultAppender's AddFast always returns ErrNotFound if we don't give it a next.
- app := &collectResultAppender{}
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
+ appTest := teststorage.NewAppendable()
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
fakeRef := storage.SeriesRef(1)
expValue := float64(1)
@@ -2101,7 +1996,8 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
require.NoError(t, warning)
var lset labels.Labels
- p.Next()
+ _, err := p.Next()
+ require.NoError(t, err)
p.Labels(&lset)
hash := lset.Hash()
@@ -2109,36 +2005,43 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
sl.cache.addRef(metric, fakeRef, lset, hash)
now := time.Now()
- slApp := sl.appender(context.Background())
- _, _, _, err := sl.append(slApp, metric, "text/plain", now)
+ app := sl.appender()
+ _, _, _, err = app.append(metric, "text/plain", now)
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
- expected := []floatSample{
+ expected := []sample{
{
- metric: lset,
- t: timestamp.FromTime(now),
- f: expValue,
+ L: lset,
+ T: timestamp.FromTime(now),
+ V: expValue,
},
}
- require.Equal(t, expected, app.resultFloats)
+ require.Equal(t, expected, appTest.ResultSamples())
}
+type appendableFunc func(ctx context.Context) storage.Appender
+
+func (a appendableFunc) Appender(ctx context.Context) storage.Appender { return a(ctx) }
+
func TestScrapeLoopAppendSampleLimit(t *testing.T) {
- resApp := &collectResultAppender{}
- app := &limitAppender{Appender: resApp, limit: 1}
-
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
- sl.sampleMutator = func(l labels.Labels) labels.Labels {
- if l.Has("deleteme") {
- return labels.EmptyLabels()
+ appTest := teststorage.NewAppendable()
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendable = appendableFunc(func(ctx context.Context) storage.Appender {
+ // Chain appTest to verify what samples passed through.
+ return &limitAppender{Appender: appTest.Appender(ctx), limit: 1}
+ })
+ sl.sampleMutator = func(l labels.Labels) labels.Labels {
+ if l.Has("deleteme") {
+ return labels.EmptyLabels()
+ }
+ return l
}
- return l
- }
- sl.sampleLimit = app.limit
+ sl.sampleLimit = 1 // Same as limitAppender.limit
+ })
- // Get the value of the Counter before performing the append.
+ // Get the value of the Counter before performing the append.
beforeMetric := dto.Metric{}
err := sl.metrics.targetScrapeSampleLimit.Write(&beforeMetric)
require.NoError(t, err)
@@ -2146,10 +2049,10 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
beforeMetricValue := beforeMetric.GetCounter().GetValue()
now := time.Now()
- slApp := sl.appender(context.Background())
- total, added, seriesAdded, err := sl.append(app, []byte("metric_a 1\nmetric_b 1\nmetric_c 1\n"), "text/plain", now)
+ app := sl.appender()
+ total, added, seriesAdded, err := app.append([]byte("metric_a 1\nmetric_b 1\nmetric_c 1\n"), "text/plain", now)
require.ErrorIs(t, err, errSampleLimit)
- require.NoError(t, slApp.Rollback())
+ require.NoError(t, app.Rollback())
require.Equal(t, 3, total)
require.Equal(t, 3, added)
require.Equal(t, 1, seriesAdded)
@@ -2160,42 +2063,44 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
err = sl.metrics.targetScrapeSampleLimit.Write(&metric)
require.NoError(t, err)
- value := metric.GetCounter().GetValue()
- change := value - beforeMetricValue
+ v := metric.GetCounter().GetValue()
+ change := v - beforeMetricValue
require.Equal(t, 1.0, change, "Unexpected change of sample limit metric: %f", change)
// And verify that we got the samples that fit under the limit.
- want := []floatSample{
+ want := []sample{
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
}
- requireEqual(t, want, resApp.rolledbackFloats, "Appended samples not as expected:\n%s", appender)
+ requireEqual(t, want, appTest.RolledbackSamples(), "Appended samples not as expected:\n%s", appTest)
now = time.Now()
- slApp = sl.appender(context.Background())
- total, added, seriesAdded, err = sl.append(slApp, []byte("metric_a 1\nmetric_b 1\nmetric_c{deleteme=\"yes\"} 1\nmetric_d 1\nmetric_e 1\nmetric_f 1\nmetric_g 1\nmetric_h{deleteme=\"yes\"} 1\nmetric_i{deleteme=\"yes\"} 1\n"), "text/plain", now)
+ app = sl.appender()
+ total, added, seriesAdded, err = app.append([]byte("metric_a 1\nmetric_b 1\nmetric_c{deleteme=\"yes\"} 1\nmetric_d 1\nmetric_e 1\nmetric_f 1\nmetric_g 1\nmetric_h{deleteme=\"yes\"} 1\nmetric_i{deleteme=\"yes\"} 1\n"), "text/plain", now)
require.ErrorIs(t, err, errSampleLimit)
- require.NoError(t, slApp.Rollback())
+ require.NoError(t, app.Rollback())
require.Equal(t, 9, total)
require.Equal(t, 6, added)
- require.Equal(t, 0, seriesAdded)
+ require.Equal(t, 1, seriesAdded)
}
func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
- resApp := &collectResultAppender{}
- app := &bucketLimitAppender{Appender: resApp, limit: 2}
-
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
- sl.enableNativeHistogramScraping = true
- sl.sampleMutator = func(l labels.Labels) labels.Labels {
- if l.Has("deleteme") {
- return labels.EmptyLabels()
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendable = appendableFunc(func(ctx context.Context) storage.Appender {
+ return &bucketLimitAppender{Appender: teststorage.NewAppendable().Appender(ctx), limit: 2}
+ })
+ sl.enableNativeHistogramScraping = true
+ sl.sampleMutator = func(l labels.Labels) labels.Labels {
+ if l.Has("deleteme") {
+ return labels.EmptyLabels()
+ }
+ return l
}
- return l
- }
+ })
+ app := sl.appender()
metric := dto.Metric{}
err := sl.metrics.targetScrapeNativeHistogramBucketLimit.Write(&metric)
@@ -2214,7 +2119,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
[]string{"size"},
)
registry := prometheus.NewRegistry()
- registry.Register(nativeHistogram)
+ require.NoError(t, registry.Register(nativeHistogram))
nativeHistogram.WithLabelValues("S").Observe(1.0)
nativeHistogram.WithLabelValues("M").Observe(1.0)
nativeHistogram.WithLabelValues("L").Observe(1.0)
@@ -2230,7 +2135,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
require.NoError(t, err)
now := time.Now()
- total, added, seriesAdded, err := sl.append(app, msg, "application/vnd.google.protobuf", now)
+ total, added, seriesAdded, err := app.append(msg, "application/vnd.google.protobuf", now)
require.NoError(t, err)
require.Equal(t, 3, total)
require.Equal(t, 3, added)
@@ -2253,11 +2158,11 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
require.NoError(t, err)
now = time.Now()
- total, added, seriesAdded, err = sl.append(app, msg, "application/vnd.google.protobuf", now)
+ total, added, seriesAdded, err = app.append(msg, "application/vnd.google.protobuf", now)
require.NoError(t, err)
require.Equal(t, 3, total)
require.Equal(t, 3, added)
- require.Equal(t, 3, seriesAdded)
+ require.Equal(t, 0, seriesAdded) // Series are cached.
err = sl.metrics.targetScrapeNativeHistogramBucketLimit.Write(&metric)
require.NoError(t, err)
@@ -2276,14 +2181,14 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
require.NoError(t, err)
now = time.Now()
- total, added, seriesAdded, err = sl.append(app, msg, "application/vnd.google.protobuf", now)
+ total, added, seriesAdded, err = app.append(msg, "application/vnd.google.protobuf", now)
if !errors.Is(err, errBucketLimit) {
t.Fatalf("Did not see expected histogram bucket limit error: %s", err)
}
require.NoError(t, app.Rollback())
require.Equal(t, 3, total)
require.Equal(t, 3, added)
- require.Equal(t, 0, seriesAdded)
+ require.Equal(t, 0, seriesAdded) // Series are cached.
err = sl.metrics.targetScrapeNativeHistogramBucketLimit.Write(&metric)
require.NoError(t, err)
@@ -2293,151 +2198,149 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
func TestScrapeLoop_ChangingMetricString(t *testing.T) {
// This is a regression test for the scrape loop cache not properly maintaining
- // IDs when the string representation of a metric changes across a scrape. Thus
+ // IDs when the string representation of a metric changes across a scrape. Thus,
// we use a real storage appender here.
- s := teststorage.New(t)
- defer s.Close()
-
- capp := &collectResultAppender{}
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return capp }, 0)
+ appTest := teststorage.NewAppendable()
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
now := time.Now()
- slApp := sl.appender(context.Background())
- _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1`), "text/plain", now)
+ app := sl.appender()
+ _, _, _, err := app.append([]byte(`metric_a{a="1",b="1"} 1`), "text/plain", now)
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
- slApp = sl.appender(context.Background())
- _, _, _, err = sl.append(slApp, []byte(`metric_a{b="1",a="1"} 2`), "text/plain", now.Add(time.Minute))
+ app = sl.appender()
+ _, _, _, err = app.append([]byte(`metric_a{b="1",a="1"} 2`), "text/plain", now.Add(time.Minute))
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
- want := []floatSample{
+ want := []sample{
{
- metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
{
- metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
- t: timestamp.FromTime(now.Add(time.Minute)),
- f: 2,
+ L: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
+ T: timestamp.FromTime(now.Add(time.Minute)),
+ V: 2,
},
}
- require.Equal(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", appender)
+ require.Equal(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
}
func TestScrapeLoopAppendFailsWithNoContentType(t *testing.T) {
- app := &collectResultAppender{}
-
- // Explicitly setting the lack of fallback protocol here to make it obvious.
- sl := newBasicScrapeLoopWithFallback(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0, "")
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ // Explicitly setting the lack of fallback protocol here to make it obvious.
+ sl.fallbackScrapeProtocol = ""
+ })
now := time.Now()
- slApp := sl.appender(context.Background())
- _, _, _, err := sl.append(slApp, []byte("metric_a 1\n"), "", now)
- // We expect the appropriate error.
+ app := sl.appender()
+ _, _, _, err := app.append([]byte("metric_a 1\n"), "", now)
+ // We expect the appropriate error.
require.ErrorContains(t, err, "non-compliant scrape target sending blank Content-Type and no fallback_scrape_protocol specified for target", "Expected \"non-compliant scrape\" error but got: %s", err)
}
+// TestScrapeLoopAppendEmptyWithNoContentType ensures there are no errors when we get a blank scrape or just want to append a stale marker.
func TestScrapeLoopAppendEmptyWithNoContentType(t *testing.T) {
- // This test ensures we there are no errors when we get a blank scrape or just want to append a stale marker.
- app := &collectResultAppender{}
-
- // Explicitly setting the lack of fallback protocol here to make it obvious.
- sl := newBasicScrapeLoopWithFallback(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0, "")
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ // Explicitly setting the lack of fallback protocol here to make it obvious.
+ sl.fallbackScrapeProtocol = ""
+ })
now := time.Now()
- slApp := sl.appender(context.Background())
- _, _, _, err := sl.append(slApp, []byte(""), "", now)
+ app := sl.appender()
+ _, _, _, err := app.append([]byte(""), "", now)
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
}
func TestScrapeLoopAppendStaleness(t *testing.T) {
- app := &collectResultAppender{}
-
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
+ appTest := teststorage.NewAppendable()
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
now := time.Now()
- slApp := sl.appender(context.Background())
- _, _, _, err := sl.append(slApp, []byte("metric_a 1\n"), "text/plain", now)
+ app := sl.appender()
+ _, _, _, err := app.append([]byte("metric_a 1\n"), "text/plain", now)
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
- slApp = sl.appender(context.Background())
- _, _, _, err = sl.append(slApp, []byte(""), "", now.Add(time.Second))
+ app = sl.appender()
+ _, _, _, err = app.append([]byte(""), "", now.Add(time.Second))
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
- want := []floatSample{
+ want := []sample{
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
- t: timestamp.FromTime(now.Add(time.Second)),
- f: math.Float64frombits(value.StaleNaN),
+ L: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+ T: timestamp.FromTime(now.Add(time.Second)),
+ V: math.Float64frombits(value.StaleNaN),
},
}
- requireEqual(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender)
+ requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
}
func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
- app := &collectResultAppender{}
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
+ appTest := teststorage.NewAppendable()
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
now := time.Now()
- slApp := sl.appender(context.Background())
- _, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "text/plain", now)
+ app := sl.appender()
+ _, _, _, err := app.append([]byte("metric_a 1 1000\n"), "text/plain", now)
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
- slApp = sl.appender(context.Background())
- _, _, _, err = sl.append(slApp, []byte(""), "", now.Add(time.Second))
+ app = sl.appender()
+ _, _, _, err = app.append([]byte(""), "", now.Add(time.Second))
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
- want := []floatSample{
+ want := []sample{
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
- t: 1000,
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+ T: 1000,
+ V: 1,
},
}
- require.Equal(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender)
+ require.Equal(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
}
func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) {
- app := &collectResultAppender{}
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
- sl.trackTimestampsStaleness = true
+ appTest := teststorage.NewAppendable()
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendable = appTest
+ sl.trackTimestampsStaleness = true
+ })
now := time.Now()
- slApp := sl.appender(context.Background())
- _, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "text/plain", now)
+ app := sl.appender()
+ _, _, _, err := app.append([]byte("metric_a 1 1000\n"), "text/plain", now)
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
- slApp = sl.appender(context.Background())
- _, _, _, err = sl.append(slApp, []byte(""), "", now.Add(time.Second))
+ app = sl.appender()
+ _, _, _, err = app.append([]byte(""), "", now.Add(time.Second))
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
- want := []floatSample{
+ want := []sample{
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
- t: 1000,
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+ T: 1000,
+ V: 1,
},
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
- t: timestamp.FromTime(now.Add(time.Second)),
- f: math.Float64frombits(value.StaleNaN),
+ L: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+ T: timestamp.FromTime(now.Add(time.Second)),
+ V: math.Float64frombits(value.StaleNaN),
},
}
- requireEqual(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender)
+ requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
}
func TestScrapeLoopAppendExemplar(t *testing.T) {
@@ -2448,18 +2351,16 @@ func TestScrapeLoopAppendExemplar(t *testing.T) {
scrapeText string
contentType string
discoveryLabels []string
- floats []floatSample
- histograms []histogramSample
- exemplars []exemplar.Exemplar
+ samples []sample
}{
{
title: "Metric without exemplars",
scrapeText: "metric_total{n=\"1\"} 0\n# EOF",
contentType: "application/openmetrics-text",
discoveryLabels: []string{"n", "2"},
- floats: []floatSample{{
- metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
- f: 0,
+ samples: []sample{{
+ L: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
+ V: 0,
}},
},
{
@@ -2467,26 +2368,24 @@ func TestScrapeLoopAppendExemplar(t *testing.T) {
scrapeText: "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0\n# EOF",
contentType: "application/openmetrics-text",
discoveryLabels: []string{"n", "2"},
- floats: []floatSample{{
- metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
- f: 0,
+ samples: []sample{{
+ L: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
+ V: 0,
+ ES: []exemplar.Exemplar{
+ {Labels: labels.FromStrings("a", "abc"), Value: 1},
+ },
}},
- exemplars: []exemplar.Exemplar{
- {Labels: labels.FromStrings("a", "abc"), Value: 1},
- },
},
{
title: "Metric with exemplars and TS",
scrapeText: "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0 10000\n# EOF",
contentType: "application/openmetrics-text",
discoveryLabels: []string{"n", "2"},
- floats: []floatSample{{
- metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
- f: 0,
+ samples: []sample{{
+ L: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
+ V: 0,
+ ES: []exemplar.Exemplar{{Labels: labels.FromStrings("a", "abc"), Value: 1, Ts: 10000000, HasTs: true}},
}},
- exemplars: []exemplar.Exemplar{
- {Labels: labels.FromStrings("a", "abc"), Value: 1, Ts: 10000000, HasTs: true},
- },
},
{
title: "Two metrics and exemplars",
@@ -2494,17 +2393,15 @@ func TestScrapeLoopAppendExemplar(t *testing.T) {
metric_total{n="2"} 2 # {t="2"} 2.0 20000
# EOF`,
contentType: "application/openmetrics-text",
- floats: []floatSample{{
- metric: labels.FromStrings("__name__", "metric_total", "n", "1"),
- f: 1,
+ samples: []sample{{
+ L: labels.FromStrings("__name__", "metric_total", "n", "1"),
+ V: 1,
+ ES: []exemplar.Exemplar{{Labels: labels.FromStrings("t", "1"), Value: 1, Ts: 10000000, HasTs: true}},
}, {
- metric: labels.FromStrings("__name__", "metric_total", "n", "2"),
- f: 2,
+ L: labels.FromStrings("__name__", "metric_total", "n", "2"),
+ V: 2,
+ ES: []exemplar.Exemplar{{Labels: labels.FromStrings("t", "2"), Value: 2, Ts: 20000000, HasTs: true}},
}},
- exemplars: []exemplar.Exemplar{
- {Labels: labels.FromStrings("t", "1"), Value: 1, Ts: 10000000, HasTs: true},
- {Labels: labels.FromStrings("t", "2"), Value: 2, Ts: 20000000, HasTs: true},
- },
},
{
title: "Native histogram with three exemplars from classic buckets",
@@ -2596,10 +2493,10 @@ metric: <
`,
contentType: "application/vnd.google.protobuf",
- histograms: []histogramSample{{
- t: 1234568,
- metric: labels.FromStrings("__name__", "test_histogram"),
- h: &histogram.Histogram{
+ samples: []sample{{
+ T: 1234568,
+ L: labels.FromStrings("__name__", "test_histogram"),
+ H: &histogram.Histogram{
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
@@ -2616,12 +2513,12 @@ metric: <
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
+ ES: []exemplar.Exemplar{
+ // Native histogram exemplars are arranged by timestamp, and those with missing timestamps are dropped.
+ {Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true},
+ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
+ },
}},
- exemplars: []exemplar.Exemplar{
- // Native histogram exemplars are arranged by timestamp, and those with missing timestamps are dropped.
- {Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true},
- {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
- },
},
{
title: "Native histogram with three exemplars scraped as classic histogram",
@@ -2714,46 +2611,50 @@ metric: <
`,
alwaysScrapeClassicHist: true,
contentType: "application/vnd.google.protobuf",
- floats: []floatSample{
- {metric: labels.FromStrings("__name__", "test_histogram_count"), t: 1234568, f: 175},
- {metric: labels.FromStrings("__name__", "test_histogram_sum"), t: 1234568, f: 0.0008280461746287094},
- {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0004899999999999998"), t: 1234568, f: 2},
- {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0003899999999999998"), t: 1234568, f: 4},
- {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0002899999999999998"), t: 1234568, f: 16},
- {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0001899999999999998"), t: 1234568, f: 32},
- {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), t: 1234568, f: 175},
- },
- histograms: []histogramSample{{
- t: 1234568,
- metric: labels.FromStrings("__name__", "test_histogram"),
- h: &histogram.Histogram{
- Count: 175,
- ZeroCount: 2,
- Sum: 0.0008280461746287094,
- ZeroThreshold: 2.938735877055719e-39,
- Schema: 3,
- PositiveSpans: []histogram.Span{
- {Offset: -161, Length: 1},
- {Offset: 8, Length: 3},
- },
- NegativeSpans: []histogram.Span{
- {Offset: -162, Length: 1},
- {Offset: 23, Length: 4},
- },
- PositiveBuckets: []int64{1, 2, -1, -1},
- NegativeBuckets: []int64{1, 3, -2, -1, 1},
+ samples: []sample{
+ {L: labels.FromStrings("__name__", "test_histogram_count"), T: 1234568, V: 175},
+ {L: labels.FromStrings("__name__", "test_histogram_sum"), T: 1234568, V: 0.0008280461746287094},
+ {L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0004899999999999998"), T: 1234568, V: 2},
+ {
+ L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0003899999999999998"), T: 1234568, V: 4,
+ ES: []exemplar.Exemplar{{Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true}},
+ },
+ {
+ L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0002899999999999998"), T: 1234568, V: 16,
+ ES: []exemplar.Exemplar{{Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false}},
+ },
+ {
+ L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "-0.0001899999999999998"), T: 1234568, V: 32,
+ ES: []exemplar.Exemplar{{Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true}},
+ },
+ {L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), T: 1234568, V: 175},
+ {
+ T: 1234568,
+ L: labels.FromStrings("__name__", "test_histogram"),
+ H: &histogram.Histogram{
+ Count: 175,
+ ZeroCount: 2,
+ Sum: 0.0008280461746287094,
+ ZeroThreshold: 2.938735877055719e-39,
+ Schema: 3,
+ PositiveSpans: []histogram.Span{
+ {Offset: -161, Length: 1},
+ {Offset: 8, Length: 3},
+ },
+ NegativeSpans: []histogram.Span{
+ {Offset: -162, Length: 1},
+ {Offset: 23, Length: 4},
+ },
+ PositiveBuckets: []int64{1, 2, -1, -1},
+ NegativeBuckets: []int64{1, 3, -2, -1, 1},
+ },
+ ES: []exemplar.Exemplar{
+ // Native histogram exemplars are arranged by timestamp.
+ // Exemplars with missing timestamps are dropped for native histograms.
+ {Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true},
+ {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
+ },
},
- }},
- exemplars: []exemplar.Exemplar{
- // Native histogram one is arranged by timestamp.
- // Exemplars with missing timestamps are dropped for native histograms.
- {Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true},
- {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
- // Classic histogram one is in order of appearance.
- // Exemplars with missing timestamps are supported for classic histograms.
- {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
- {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, Ts: 1234568, HasTs: false},
- {Labels: labels.FromStrings("dummyID", "58215"), Value: -0.00019, Ts: 1625851055146, HasTs: true},
},
},
{
@@ -2829,10 +2730,10 @@ metric: <
>
`,
- histograms: []histogramSample{{
- t: 1234568,
- metric: labels.FromStrings("__name__", "test_histogram"),
- h: &histogram.Histogram{
+ samples: []sample{{
+ T: 1234568,
+ L: labels.FromStrings("__name__", "test_histogram"),
+ H: &histogram.Histogram{
Count: 175,
ZeroCount: 2,
Sum: 0.0008280461746287094,
@@ -2849,12 +2750,12 @@ metric: <
PositiveBuckets: []int64{1, 2, -1, -1},
NegativeBuckets: []int64{1, 3, -2, -1, 1},
},
+ ES: []exemplar.Exemplar{
+ // Exemplars with missing timestamps are dropped for native histograms.
+ {Labels: labels.FromStrings("dummyID", "58242"), Value: -0.00019, Ts: 1625851055146, HasTs: true},
+ {Labels: labels.FromStrings("dummyID", "59732"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
+ },
}},
- exemplars: []exemplar.Exemplar{
- // Exemplars with missing timestamps are dropped for native histograms.
- {Labels: labels.FromStrings("dummyID", "58242"), Value: -0.00019, Ts: 1625851055146, HasTs: true},
- {Labels: labels.FromStrings("dummyID", "59732"), Value: -0.00039, Ts: 1625851155146, HasTs: true},
- },
},
{
title: "Native histogram with exemplars but ingestion disabled",
@@ -2929,45 +2830,50 @@ metric: <
>
`,
- floats: []floatSample{
- {metric: labels.FromStrings("__name__", "test_histogram_count"), t: 1234568, f: 175},
- {metric: labels.FromStrings("__name__", "test_histogram_sum"), t: 1234568, f: 0.0008280461746287094},
- {metric: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), t: 1234568, f: 175},
+ samples: []sample{
+ {L: labels.FromStrings("__name__", "test_histogram_count"), T: 1234568, V: 175},
+ {L: labels.FromStrings("__name__", "test_histogram_sum"), T: 1234568, V: 0.0008280461746287094},
+ {L: labels.FromStrings("__name__", "test_histogram_bucket", "le", "+Inf"), T: 1234568, V: 175},
},
},
}
for _, test := range tests {
t.Run(test.title, func(t *testing.T) {
- app := &collectResultAppender{}
-
discoveryLabels := &Target{
labels: labels.FromStrings(test.discoveryLabels...),
}
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
- sl.enableNativeHistogramScraping = test.enableNativeHistogramsIngestion
- sl.sampleMutator = func(l labels.Labels) labels.Labels {
- return mutateSampleLabels(l, discoveryLabels, false, nil)
- }
- sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
- return mutateReportSampleLabels(l, discoveryLabels)
- }
- sl.alwaysScrapeClassicHist = test.alwaysScrapeClassicHist
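+ // Collect everything the scrape loop appends (floats, histograms and exemplars) as unified samples.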
+ appTest := teststorage.NewAppendable()
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendable = appTest
+ sl.enableNativeHistogramScraping = test.enableNativeHistogramsIngestion
+ sl.sampleMutator = func(l labels.Labels) labels.Labels {
+ return mutateSampleLabels(l, discoveryLabels, false, nil)
+ }
+ sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
+ return mutateReportSampleLabels(l, discoveryLabels)
+ }
+ sl.alwaysScrapeClassicHist = test.alwaysScrapeClassicHist
+ // This test does not care about metadata. Leaving this enabled would require adding metadata to the sample
+ // expectations.
+ sl.appendMetadataToWAL = false
+ })
+ app := sl.appender()
now := time.Now()
- for i := range test.floats {
- if test.floats[i].t != 0 {
+ for i := range test.samples {
+ if test.samples[i].T != 0 {
continue
}
- test.floats[i].t = timestamp.FromTime(now)
- }
+ test.samples[i].T = timestamp.FromTime(now)
- // We need to set the timestamp for expected exemplars that does not have a timestamp.
- for i := range test.exemplars {
- if test.exemplars[i].Ts == 0 {
- test.exemplars[i].Ts = timestamp.FromTime(now)
+ // We need to set the timestamp for expected exemplars that do not have a timestamp.
+ for j := range test.samples[i].ES {
+ if test.samples[i].ES[j].Ts == 0 {
+ test.samples[i].ES[j].Ts = timestamp.FromTime(now)
+ }
}
}
@@ -2978,12 +2884,10 @@ metric: <
buf.WriteString(test.scrapeText)
}
- _, _, _, err := sl.append(app, buf.Bytes(), test.contentType, now)
+ _, _, _, err := app.append(buf.Bytes(), test.contentType, now)
require.NoError(t, err)
require.NoError(t, app.Commit())
- requireEqual(t, test.floats, app.resultFloats)
- requireEqual(t, test.histograms, app.resultHistograms)
- requireEqual(t, test.exemplars, app.resultExemplars)
+ requireEqual(t, test.samples, appTest.ResultSamples())
})
}
}
@@ -3012,152 +2916,136 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
scrapeText := []string{`metric_total{n="1"} 1 # {t="1"} 1.0 10000
# EOF`, `metric_total{n="1"} 2 # {t="2"} 2.0 20000
# EOF`}
- samples := []floatSample{{
- metric: labels.FromStrings("__name__", "metric_total", "n", "1"),
- f: 1,
+ samples := []sample{{
+ L: labels.FromStrings("__name__", "metric_total", "n", "1"),
+ V: 1,
+ ES: []exemplar.Exemplar{
+ {Labels: labels.FromStrings("t", "1"), Value: 1, Ts: 10000000, HasTs: true},
+ },
}, {
- metric: labels.FromStrings("__name__", "metric_total", "n", "1"),
- f: 2,
+ L: labels.FromStrings("__name__", "metric_total", "n", "1"),
+ V: 2,
+ ES: []exemplar.Exemplar{
+ {Labels: labels.FromStrings("t", "2"), Value: 2, Ts: 20000000, HasTs: true},
+ },
}}
- exemplars := []exemplar.Exemplar{
- {Labels: labels.FromStrings("t", "1"), Value: 1, Ts: 10000000, HasTs: true},
- {Labels: labels.FromStrings("t", "2"), Value: 2, Ts: 20000000, HasTs: true},
- }
discoveryLabels := &Target{
labels: labels.FromStrings(),
}
- app := &collectResultAppender{}
-
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
- sl.sampleMutator = func(l labels.Labels) labels.Labels {
- return mutateSampleLabels(l, discoveryLabels, false, nil)
- }
- sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
- return mutateReportSampleLabels(l, discoveryLabels)
- }
+ appTest := teststorage.NewAppendable()
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendable = appTest
+ sl.sampleMutator = func(l labels.Labels) labels.Labels {
+ return mutateSampleLabels(l, discoveryLabels, false, nil)
+ }
+ sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
+ return mutateReportSampleLabels(l, discoveryLabels)
+ }
+ // This test does not care about metadata. Leaving this enabled would require adding metadata to the sample
+ // expectations.
+ sl.appendMetadataToWAL = false
+ })
now := time.Now()
-
for i := range samples {
ts := now.Add(time.Second * time.Duration(i))
- samples[i].t = timestamp.FromTime(ts)
- }
-
- // We need to set the timestamp for expected exemplars that does not have a timestamp.
- for i := range exemplars {
- if exemplars[i].Ts == 0 {
- ts := now.Add(time.Second * time.Duration(i))
- exemplars[i].Ts = timestamp.FromTime(ts)
- }
+ samples[i].T = timestamp.FromTime(ts)
}
for i, st := range scrapeText {
- _, _, _, err := sl.append(app, []byte(st), "application/openmetrics-text", timestamp.Time(samples[i].t))
+ app := sl.appender()
+ _, _, _, err := app.append([]byte(st), "application/openmetrics-text", timestamp.Time(samples[i].T))
require.NoError(t, err)
require.NoError(t, app.Commit())
}
- requireEqual(t, samples, app.resultFloats)
- requireEqual(t, exemplars, app.resultExemplars)
+ requireEqual(t, samples, appTest.ResultSamples())
}
func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
- var (
- scraper = &testScraper{}
- appender = &collectResultAppender{}
- app = func(context.Context) storage.Appender { return appender }
- )
-
- ctx, cancel := context.WithCancel(context.Background())
- sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond)
-
+ ctx, cancel := context.WithCancel(t.Context())
+ appTest := teststorage.NewAppendable()
+ sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.ctx = ctx
+ sl.appendable = appTest
+ })
scraper.scrapeFunc = func(context.Context, io.Writer) error {
cancel()
return errors.New("scrape failed")
}
sl.run(nil)
- require.Equal(t, 0.0, appender.resultFloats[0].f, "bad 'up' value")
+ require.Equal(t, 0.0, appTest.ResultSamples()[0].V, "bad 'up' value")
}
func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
- var (
- scraper = &testScraper{}
- appender = &collectResultAppender{}
- app = func(context.Context) storage.Appender { return appender }
- )
-
- ctx, cancel := context.WithCancel(context.Background())
- sl := newBasicScrapeLoop(t, ctx, scraper, app, 10*time.Millisecond)
-
+ ctx, cancel := context.WithCancel(t.Context())
+ appTest := teststorage.NewAppendable()
+ sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.ctx = ctx
+ sl.appendable = appTest
+ })
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
cancel()
- w.Write([]byte("a{l=\"\xff\"} 1\n"))
+ _, _ = w.Write([]byte("a{l=\"\xff\"} 1\n"))
return nil
}
sl.run(nil)
- require.Equal(t, 0.0, appender.resultFloats[0].f, "bad 'up' value")
-}
-
-type errorAppender struct {
- collectResultAppender
-}
-
-func (app *errorAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
- switch lset.Get(model.MetricNameLabel) {
- case "out_of_order":
- return 0, storage.ErrOutOfOrderSample
- case "amend":
- return 0, storage.ErrDuplicateSampleForTimestamp
- case "out_of_bounds":
- return 0, storage.ErrOutOfBounds
- default:
- return app.collectResultAppender.Append(ref, lset, t, v)
- }
+ require.Equal(t, 0.0, appTest.ResultSamples()[0].V, "bad 'up' value")
}
func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T) {
- app := &errorAppender{}
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
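+ // The test appendable rejects specific series with matching storage errors so we can verify they are handled gracefully.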
+ appTest := teststorage.NewAppendable().WithErrs(
+ func(ls labels.Labels) error {
+ switch ls.Get(model.MetricNameLabel) {
+ case "out_of_order":
+ return storage.ErrOutOfOrderSample
+ case "amend":
+ return storage.ErrDuplicateSampleForTimestamp
+ case "out_of_bounds":
+ return storage.ErrOutOfBounds
+ default:
+ return nil
+ }
+ }, nil, nil)
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
now := time.Unix(1, 0)
- slApp := sl.appender(context.Background())
- total, added, seriesAdded, err := sl.append(slApp, []byte("out_of_order 1\namend 1\nnormal 1\nout_of_bounds 1\n"), "text/plain", now)
+ app := sl.appender()
+ total, added, seriesAdded, err := app.append([]byte("out_of_order 1\namend 1\nnormal 1\nout_of_bounds 1\n"), "text/plain", now)
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
- want := []floatSample{
+ want := []sample{
{
- metric: labels.FromStrings(model.MetricNameLabel, "normal"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "normal"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
}
- requireEqual(t, want, app.resultFloats, "Appended samples not as expected:\n%s", appender)
+ requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
require.Equal(t, 4, total)
require.Equal(t, 4, added)
require.Equal(t, 1, seriesAdded)
}
func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
- app := &collectResultAppender{}
- sl := newBasicScrapeLoop(t, context.Background(), nil,
- func(context.Context) storage.Appender {
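+ // Wrap the test appender in a timeLimitAppender so samples with timestamps past maxTime are rejected.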
+ sl, _ := newTestScrapeLoop(t, withAppendable(
+ appendableFunc(func(ctx context.Context) storage.Appender {
return &timeLimitAppender{
- Appender: app,
+ Appender: teststorage.NewAppendable().Appender(ctx),
maxTime: timestamp.FromTime(time.Now().Add(10 * time.Minute)),
}
- },
- 0,
- )
+ }),
+ ))
now := time.Now().Add(20 * time.Minute)
- slApp := sl.appender(context.Background())
- total, added, seriesAdded, err := sl.append(slApp, []byte("normal 1\n"), "text/plain", now)
+ app := sl.appender()
+ total, added, seriesAdded, err := app.append([]byte("normal 1\n"), "text/plain", now)
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
require.Equal(t, 1, total)
require.Equal(t, 1, added)
require.Equal(t, 0, seriesAdded)
@@ -3252,7 +3140,7 @@ func TestRequestTraceparentHeader(t *testing.T) {
resp, err := ts.scrape(context.Background())
require.NoError(t, err)
require.NotNil(t, resp)
- defer resp.Body.Close()
+ t.Cleanup(func() { _ = resp.Body.Close() })
}
func TestTargetScraperScrapeOK(t *testing.T) {
@@ -3299,7 +3187,7 @@ func TestTargetScraperScrapeOK(t *testing.T) {
} else {
w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
}
- w.Write([]byte("metric_a 1\nmetric_b 2\n"))
+ _, _ = w.Write([]byte("metric_a 1\nmetric_b 2\n"))
}),
)
defer server.Close()
@@ -3414,9 +3302,9 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {
_, err := ts.scrape(ctx)
switch {
case err == nil:
- errc <- errors.New("Expected error but got nil")
+ errc <- errors.New("expected error but got nil")
case !errors.Is(ctx.Err(), context.Canceled):
- errc <- fmt.Errorf("Expected context cancellation error but got: %w", ctx.Err())
+ errc <- fmt.Errorf("expected context cancellation error but got: %w", ctx.Err())
default:
close(errc)
}
@@ -3476,11 +3364,11 @@ func TestTargetScraperBodySizeLimit(t *testing.T) {
if gzipResponse {
w.Header().Set("Content-Encoding", "gzip")
gw := gzip.NewWriter(w)
- defer gw.Close()
- gw.Write([]byte(responseBody))
+ defer func() { _ = gw.Close() }()
+ _, _ = gw.Write([]byte(responseBody))
return
}
- w.Write([]byte(responseBody))
+ _, _ = w.Write([]byte(responseBody))
}),
)
defer server.Close()
@@ -3574,87 +3462,84 @@ func (ts *testScraper) readResponse(ctx context.Context, _ *http.Response, w io.
func TestScrapeLoop_RespectTimestamps(t *testing.T) {
s := teststorage.New(t)
- defer s.Close()
+ t.Cleanup(func() { _ = s.Close() })
- app := s.Appender(context.Background())
- capp := &collectResultAppender{next: app}
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return capp }, 0)
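+ // Record appended samples while forwarding them to the real storage.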
+ appTest := teststorage.NewAppendable().Then(s)
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
now := time.Now()
- slApp := sl.appender(context.Background())
- _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "text/plain", now)
+ app := sl.appender()
+ _, _, _, err := app.append([]byte(`metric_a{a="1",b="1"} 1 0`), "text/plain", now)
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
- want := []floatSample{
+ want := []sample{
{
- metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
- t: 0,
- f: 1,
+ L: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
+ T: 0,
+ V: 1,
},
}
- require.Equal(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", appender)
+ require.Equal(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
}
func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
s := teststorage.New(t)
- defer s.Close()
+ t.Cleanup(func() { _ = s.Close() })
- app := s.Appender(context.Background())
-
- capp := &collectResultAppender{next: app}
-
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return capp }, 0)
- sl.honorTimestamps = false
+ appTest := teststorage.NewAppendable().Then(s)
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendable = appTest
+ sl.honorTimestamps = false
+ })
now := time.Now()
- slApp := sl.appender(context.Background())
- _, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "text/plain", now)
+ app := sl.appender()
+ _, _, _, err := app.append([]byte(`metric_a{a="1",b="1"} 1 0`), "text/plain", now)
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
- want := []floatSample{
+ want := []sample{
{
- metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
}
- require.Equal(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", appender)
+ require.Equal(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
}
func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
s := teststorage.New(t)
- defer s.Close()
+ t.Cleanup(func() { _ = s.Close() })
- ctx, cancel := context.WithCancel(context.Background())
- sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0)
- defer cancel()
+ appTest := teststorage.NewAppendable().Then(s)
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
// We add a good and a bad metric to check that both are discarded.
- slApp := sl.appender(ctx)
- _, _, _, err := sl.append(slApp, []byte("test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n"), "text/plain", time.Time{})
+ app := sl.appender()
+ _, _, _, err := app.append([]byte("test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n"), "text/plain", time.Time{})
require.Error(t, err)
- require.NoError(t, slApp.Rollback())
- // We need to cycle staleness cache maps after a manual rollback. Otherwise they will have old entries in them,
+ require.NoError(t, app.Rollback())
+ // We need to cycle staleness cache maps after a manual rollback. Otherwise, they will have old entries in them,
// which would cause ErrDuplicateSampleForTimestamp errors on the next append.
sl.cache.iterDone(true)
q, err := s.Querier(time.Time{}.UnixNano(), 0)
require.NoError(t, err)
- series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
+ series := q.Select(sl.ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
require.False(t, series.Next(), "series found in tsdb")
require.NoError(t, series.Err())
// We add a good metric to check that it is recorded.
- slApp = sl.appender(ctx)
- _, _, _, err = sl.append(slApp, []byte("test_metric{le=\"500\"} 1\n"), "text/plain", time.Time{})
+ app = sl.appender()
+ _, _, _, err = app.append([]byte("test_metric{le=\"500\"} 1\n"), "text/plain", time.Time{})
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
q, err = s.Querier(time.Time{}.UnixNano(), 0)
require.NoError(t, err)
- series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "le", "500"))
+ series = q.Select(sl.ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "le", "500"))
require.True(t, series.Next(), "series not found in tsdb")
require.NoError(t, series.Err())
require.False(t, series.Next(), "more than one series found in tsdb")
@@ -3662,29 +3547,28 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
s := teststorage.New(t)
- defer s.Close()
+ t.Cleanup(func() { _ = s.Close() })
- app := s.Appender(context.Background())
-
- ctx, cancel := context.WithCancel(context.Background())
- sl := newBasicScrapeLoop(t, context.Background(), &testScraper{}, func(context.Context) storage.Appender { return app }, 0)
- sl.sampleMutator = func(l labels.Labels) labels.Labels {
- if l.Has("drop") {
- return labels.FromStrings("no", "name") // This label set will trigger an error.
+ appTest := teststorage.NewAppendable().Then(s)
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendable = appTest
+ sl.sampleMutator = func(l labels.Labels) labels.Labels {
+ if l.Has("drop") {
+ return labels.FromStrings("no", "name") // This label set will trigger an error.
+ }
+ return l
}
- return l
- }
- defer cancel()
+ })
- slApp := sl.appender(context.Background())
- _, _, _, err := sl.append(slApp, []byte("nok 1\nnok2{drop=\"drop\"} 1\n"), "text/plain", time.Time{})
+ app := sl.appender()
+ _, _, _, err := app.append([]byte("nok 1\nnok2{drop=\"drop\"} 1\n"), "text/plain", time.Time{})
require.Error(t, err)
- require.NoError(t, slApp.Rollback())
+ require.NoError(t, app.Rollback())
require.Equal(t, errNameLabelMandatory, err)
q, err := s.Querier(time.Time{}.UnixNano(), 0)
require.NoError(t, err)
- series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
+ series := q.Select(sl.ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
require.False(t, series.Next(), "series found in tsdb")
require.NoError(t, series.Err())
}
@@ -3758,7 +3642,7 @@ func TestReusableConfig(t *testing.T) {
func TestReuseScrapeCache(t *testing.T) {
var (
- app = &nopAppendable{}
+ app = teststorage.NewAppendable()
cfg = &config.ScrapeConfig{
JobName: "Prometheus",
ScrapeTimeout: model.Duration(5 * time.Second),
@@ -3924,7 +3808,7 @@ func TestReuseScrapeCache(t *testing.T) {
for i, s := range steps {
initCacheAddr := cacheAddr(sp)
- sp.reload(s.newConfig)
+ require.NoError(t, sp.reload(s.newConfig))
for fp, newCacheAddr := range cacheAddr(sp) {
if s.keep {
require.Equal(t, initCacheAddr[fp], newCacheAddr, "step %d: old cache and new cache are not the same", i)
@@ -3933,7 +3817,7 @@ func TestReuseScrapeCache(t *testing.T) {
}
}
initCacheAddr = cacheAddr(sp)
- sp.reload(s.newConfig)
+ require.NoError(t, sp.reload(s.newConfig))
for fp, newCacheAddr := range cacheAddr(sp) {
require.Equal(t, initCacheAddr[fp], newCacheAddr, "step %d: reloading the exact config invalidates the cache", i)
}
@@ -3942,16 +3826,14 @@ func TestReuseScrapeCache(t *testing.T) {
func TestScrapeAddFast(t *testing.T) {
s := teststorage.New(t)
- defer s.Close()
+ t.Cleanup(func() { _ = s.Close() })
- ctx, cancel := context.WithCancel(context.Background())
- sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0)
- defer cancel()
+ sl, _ := newTestScrapeLoop(t, withAppendable(s))
- slApp := sl.appender(ctx)
- _, _, _, err := sl.append(slApp, []byte("up 1\n"), "text/plain", time.Time{})
+ app := sl.appender()
+ _, _, _, err := app.append([]byte("up 1\n"), "text/plain", time.Time{})
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
// Poison the cache. There is just one entry, and one series in the
// storage. Changing the ref will create a 'not found' error.
@@ -3959,15 +3841,14 @@ func TestScrapeAddFast(t *testing.T) {
v.ref++
}
- slApp = sl.appender(ctx)
- _, _, _, err = sl.append(slApp, []byte("up 1\n"), "text/plain", time.Time{}.Add(time.Second))
+ app = sl.appender()
+ _, _, _, err = app.append([]byte("up 1\n"), "text/plain", time.Time{}.Add(time.Second))
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
}
func TestReuseCacheRace(t *testing.T) {
var (
- app = &nopAppendable{}
cfg = &config.ScrapeConfig{
JobName: "Prometheus",
ScrapeTimeout: model.Duration(5 * time.Second),
@@ -3977,7 +3858,7 @@ func TestReuseCacheRace(t *testing.T) {
MetricNameEscapingScheme: model.AllowUTF8,
}
buffers = pool.New(1e3, 100e6, 3, func(sz int) any { return make([]byte, 0, sz) })
- sp, _ = newScrapePool(cfg, app, 0, nil, buffers, &Options{}, newTestScrapeMetrics(t))
+ sp, _ = newScrapePool(cfg, teststorage.NewAppendable(), 0, nil, buffers, &Options{}, newTestScrapeMetrics(t))
t1 = &Target{
labels: labels.FromStrings("labelNew", "nameNew"),
scrapeConfig: &config.ScrapeConfig{},
@@ -3991,7 +3872,7 @@ func TestReuseCacheRace(t *testing.T) {
if time.Since(start) > 5*time.Second {
break
}
- sp.reload(&config.ScrapeConfig{
+ require.NoError(t, sp.reload(&config.ScrapeConfig{
JobName: "Prometheus",
ScrapeTimeout: model.Duration(1 * time.Millisecond),
ScrapeInterval: model.Duration(1 * time.Millisecond),
@@ -3999,39 +3880,42 @@ func TestReuseCacheRace(t *testing.T) {
SampleLimit: i,
MetricNameValidationScheme: model.UTF8Validation,
MetricNameEscapingScheme: model.AllowUTF8,
- })
+ }))
}
}
func TestCheckAddError(t *testing.T) {
var appErrs appendErrors
- sl := scrapeLoop{l: promslog.NewNopLogger(), metrics: newTestScrapeMetrics(t)}
- sl.checkAddError(nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs)
+ sl, _ := newTestScrapeLoop(t)
+ // TODO: Check the returned error and other return values.
+ _, _ = sl.checkAddError(nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs)
require.Equal(t, 1, appErrs.numOutOfOrder)
+
+ // TODO(bwplotka): Test partial error check and other cases
}
func TestScrapeReportSingleAppender(t *testing.T) {
t.Parallel()
s := teststorage.New(t)
- defer s.Close()
+ t.Cleanup(func() { _ = s.Close() })
- var (
- signal = make(chan struct{}, 1)
- scraper = &testScraper{}
- )
+ signal := make(chan struct{}, 1)
- ctx, cancel := context.WithCancel(context.Background())
- // Since we're writing samples directly below we need to provide a protocol fallback.
- sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, s.Appender, 10*time.Millisecond, "text/plain")
+ ctx, cancel := context.WithCancel(t.Context())
+ sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.ctx = ctx
+ sl.appendable = s
+ // Since we're writing samples directly below, we need to provide a protocol fallback.
+ sl.fallbackScrapeProtocol = "text/plain"
+ })
numScrapes := 0
-
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
numScrapes++
if numScrapes%4 == 0 {
return errors.New("scrape failed")
}
- w.Write([]byte("metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n"))
+ _, _ = w.Write([]byte("metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n"))
return nil
}
@@ -4055,7 +3939,7 @@ func TestScrapeReportSingleAppender(t *testing.T) {
}
require.Equal(t, 0, c%9, "Appended samples not as expected: %d", c)
- q.Close()
+ require.NoError(t, q.Close())
}
cancel()
@@ -4068,7 +3952,7 @@ func TestScrapeReportSingleAppender(t *testing.T) {
func TestScrapeReportLimit(t *testing.T) {
s := teststorage.New(t)
- defer s.Close()
+ t.Cleanup(func() { _ = s.Close() })
cfg := &config.ScrapeConfig{
JobName: "test",
@@ -4106,7 +3990,7 @@ func TestScrapeReportLimit(t *testing.T) {
ctx := t.Context()
q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
require.NoError(t, err)
- defer q.Close()
+ t.Cleanup(func() { _ = q.Close() })
series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "up"))
var found bool
@@ -4124,7 +4008,7 @@ func TestScrapeReportLimit(t *testing.T) {
func TestScrapeUTF8(t *testing.T) {
s := teststorage.New(t)
- defer s.Close()
+ t.Cleanup(func() { _ = s.Close() })
cfg := &config.ScrapeConfig{
JobName: "test",
@@ -4160,7 +4044,7 @@ func TestScrapeUTF8(t *testing.T) {
ctx := t.Context()
q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
require.NoError(t, err)
- defer q.Close()
+ t.Cleanup(func() { _ = q.Close() })
series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "with.dots"))
require.True(t, series.Next(), "series not found in tsdb")
@@ -4232,30 +4116,29 @@ func TestScrapeLoopLabelLimit(t *testing.T) {
}
for _, test := range tests {
- app := &collectResultAppender{}
-
discoveryLabels := &Target{
labels: labels.FromStrings(test.discoveryLabels...),
}
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(context.Context) storage.Appender { return app }, 0)
- sl.sampleMutator = func(l labels.Labels) labels.Labels {
- return mutateSampleLabels(l, discoveryLabels, false, nil)
- }
- sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
- return mutateReportSampleLabels(l, discoveryLabels)
- }
- sl.labelLimits = &test.labelLimits
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.sampleMutator = func(l labels.Labels) labels.Labels {
+ return mutateSampleLabels(l, discoveryLabels, false, nil)
+ }
+ sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
+ return mutateReportSampleLabels(l, discoveryLabels)
+ }
+ sl.labelLimits = &test.labelLimits
+ })
- slApp := sl.appender(context.Background())
- _, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "text/plain", time.Now())
+ app := sl.appender()
+ _, _, _, err := app.append([]byte(test.scrapeLabels), "text/plain", time.Now())
t.Logf("Test:%s", test.title)
if test.expectErr {
require.Error(t, err)
} else {
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
}
}
}
@@ -4263,7 +4146,7 @@ func TestScrapeLoopLabelLimit(t *testing.T) {
func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) {
interval, _ := model.ParseDuration("2s")
timeout, _ := model.ParseDuration("500ms")
- config := &config.ScrapeConfig{
+ cfg := &config.ScrapeConfig{
ScrapeInterval: interval,
ScrapeTimeout: timeout,
MetricNameValidationScheme: model.UTF8Validation,
@@ -4287,7 +4170,7 @@ func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) {
},
},
}
- sp, _ := newScrapePool(config, &nopAppendable{}, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ sp, _ := newScrapePool(cfg, teststorage.NewAppendable(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
tgts := []*targetgroup.Group{
{
Targets: []model.LabelSet{{model.AddressLabel: "127.0.0.1:9090"}},
@@ -4303,10 +4186,10 @@ func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) {
// Testing whether we can remove trailing .0 from histogram 'le' and summary 'quantile' labels.
func TestLeQuantileReLabel(t *testing.T) {
- simpleStorage := teststorage.New(t)
- defer simpleStorage.Close()
+ s := teststorage.New(t)
+ t.Cleanup(func() { _ = s.Close() })
- config := &config.ScrapeConfig{
+ cfg := &config.ScrapeConfig{
JobName: "test",
MetricRelabelConfigs: []*relabel.Config{
{
@@ -4373,7 +4256,7 @@ test_summary_count 199
ts, scrapedTwice := newScrapableServer(metricsText)
defer ts.Close()
- sp, err := newScrapePool(config, simpleStorage, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
require.NoError(t, err)
defer sp.stop()
@@ -4393,9 +4276,9 @@ test_summary_count 199
}
ctx := t.Context()
- q, err := simpleStorage.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
+ q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
require.NoError(t, err)
- defer q.Close()
+ t.Cleanup(func() { _ = q.Close() })
checkValues := func(labelName string, expectedValues []string, series storage.SeriesSet) {
foundLeValues := map[string]bool{}
@@ -4423,30 +4306,22 @@ test_summary_count 199
// Testing whether we can automatically convert scraped classic histograms into native histograms with custom buckets.
func TestConvertClassicHistogramsToNHCB(t *testing.T) {
t.Parallel()
- genTestCounterText := func(name string, value int, withMetadata bool) string {
- if withMetadata {
- return fmt.Sprintf(`
+
+ genTestCounterText := func(name string) string {
+ return fmt.Sprintf(`
# HELP %s some help text
# TYPE %s counter
-%s{address="0.0.0.0",port="5001"} %d
-`, name, name, name, value)
- }
- return fmt.Sprintf(`
-%s %d
-`, name, value)
+%s{address="0.0.0.0",port="5001"} 1
+`, name, name, name)
}
- genTestHistText := func(name string, withMetadata bool) string {
+ genTestHistText := func(name string) string {
data := map[string]any{
"name": name,
}
b := &bytes.Buffer{}
- if withMetadata {
- template.Must(template.New("").Parse(`
+ require.NoError(t, template.Must(template.New("").Parse(`
# HELP {{.name}} This is a histogram with default buckets
# TYPE {{.name}} histogram
-`)).Execute(b, data)
- }
- template.Must(template.New("").Parse(`
{{.name}}_bucket{address="0.0.0.0",port="5001",le="0.005"} 0
{{.name}}_bucket{address="0.0.0.0",port="5001",le="0.01"} 0
{{.name}}_bucket{address="0.0.0.0",port="5001",le="0.025"} 0
@@ -4461,10 +4336,10 @@ func TestConvertClassicHistogramsToNHCB(t *testing.T) {
{{.name}}_bucket{address="0.0.0.0",port="5001",le="+Inf"} 1
{{.name}}_sum{address="0.0.0.0",port="5001"} 10
{{.name}}_count{address="0.0.0.0",port="5001"} 1
-`)).Execute(b, data)
+`)).Execute(b, data))
return b.String()
}
- genTestCounterProto := func(name string, value int) string {
+ genTestCounterProto := func(name string) string {
return fmt.Sprintf(`
name: "%s"
help: "some help text"
@@ -4482,7 +4357,7 @@ metric: <
value: %d
>
>
-`, name, value)
+`, name, 1)
}
genTestHistProto := func(name string, hasClassic, hasExponential bool) string {
var classic string
@@ -4576,60 +4451,60 @@ metric: <
}{
"text": {
text: []string{
- genTestCounterText("test_metric_1", 1, true),
- genTestCounterText("test_metric_1_count", 1, true),
- genTestCounterText("test_metric_1_sum", 1, true),
- genTestCounterText("test_metric_1_bucket", 1, true),
- genTestHistText("test_histogram_1", true),
- genTestCounterText("test_metric_2", 1, true),
- genTestCounterText("test_metric_2_count", 1, true),
- genTestCounterText("test_metric_2_sum", 1, true),
- genTestCounterText("test_metric_2_bucket", 1, true),
- genTestHistText("test_histogram_2", true),
- genTestCounterText("test_metric_3", 1, true),
- genTestCounterText("test_metric_3_count", 1, true),
- genTestCounterText("test_metric_3_sum", 1, true),
- genTestCounterText("test_metric_3_bucket", 1, true),
- genTestHistText("test_histogram_3", true),
+ genTestCounterText("test_metric_1"),
+ genTestCounterText("test_metric_1_count"),
+ genTestCounterText("test_metric_1_sum"),
+ genTestCounterText("test_metric_1_bucket"),
+ genTestHistText("test_histogram_1"),
+ genTestCounterText("test_metric_2"),
+ genTestCounterText("test_metric_2_count"),
+ genTestCounterText("test_metric_2_sum"),
+ genTestCounterText("test_metric_2_bucket"),
+ genTestHistText("test_histogram_2"),
+ genTestCounterText("test_metric_3"),
+ genTestCounterText("test_metric_3_count"),
+ genTestCounterText("test_metric_3_sum"),
+ genTestCounterText("test_metric_3_bucket"),
+ genTestHistText("test_histogram_3"),
},
hasClassic: true,
},
"text, in different order": {
text: []string{
- genTestCounterText("test_metric_1", 1, true),
- genTestCounterText("test_metric_1_count", 1, true),
- genTestCounterText("test_metric_1_sum", 1, true),
- genTestCounterText("test_metric_1_bucket", 1, true),
- genTestHistText("test_histogram_1", true),
- genTestCounterText("test_metric_2", 1, true),
- genTestCounterText("test_metric_2_count", 1, true),
- genTestCounterText("test_metric_2_sum", 1, true),
- genTestCounterText("test_metric_2_bucket", 1, true),
- genTestHistText("test_histogram_2", true),
- genTestHistText("test_histogram_3", true),
- genTestCounterText("test_metric_3", 1, true),
- genTestCounterText("test_metric_3_count", 1, true),
- genTestCounterText("test_metric_3_sum", 1, true),
- genTestCounterText("test_metric_3_bucket", 1, true),
+ genTestCounterText("test_metric_1"),
+ genTestCounterText("test_metric_1_count"),
+ genTestCounterText("test_metric_1_sum"),
+ genTestCounterText("test_metric_1_bucket"),
+ genTestHistText("test_histogram_1"),
+ genTestCounterText("test_metric_2"),
+ genTestCounterText("test_metric_2_count"),
+ genTestCounterText("test_metric_2_sum"),
+ genTestCounterText("test_metric_2_bucket"),
+ genTestHistText("test_histogram_2"),
+ genTestHistText("test_histogram_3"),
+ genTestCounterText("test_metric_3"),
+ genTestCounterText("test_metric_3_count"),
+ genTestCounterText("test_metric_3_sum"),
+ genTestCounterText("test_metric_3_bucket"),
},
hasClassic: true,
},
"protobuf": {
text: []string{
- genTestCounterProto("test_metric_1", 1),
- genTestCounterProto("test_metric_1_count", 1),
- genTestCounterProto("test_metric_1_sum", 1),
- genTestCounterProto("test_metric_1_bucket", 1),
+ genTestCounterProto("test_metric_1"),
+ genTestCounterProto("test_metric_1_count"),
+ genTestCounterProto("test_metric_1_sum"),
+ genTestCounterProto("test_metric_1_bucket"),
genTestHistProto("test_histogram_1", true, false),
- genTestCounterProto("test_metric_2", 1),
- genTestCounterProto("test_metric_2_count", 1),
- genTestCounterProto("test_metric_2_sum", 1),
- genTestCounterProto("test_metric_2_bucket", 1),
+ genTestCounterProto("test_metric_2"),
+ genTestCounterProto("test_metric_2_count"),
+ genTestCounterProto("test_metric_2_sum"),
+ genTestCounterProto("test_metric_2_bucket"),
genTestHistProto("test_histogram_2", true, false),
- genTestCounterProto("test_metric_3", 1),
- genTestCounterProto("test_metric_3_count", 1),
- genTestCounterProto("test_metric_3_sum", 1),
- genTestCounterProto("test_metric_3_bucket", 1),
+ genTestCounterProto("test_metric_3"),
+ genTestCounterProto("test_metric_3_count"),
+ genTestCounterProto("test_metric_3_sum"),
+ genTestCounterProto("test_metric_3_bucket"),
genTestHistProto("test_histogram_3", true, false),
},
contentType: "application/vnd.google.protobuf",
@@ -4638,40 +4513,40 @@ metric: <
"protobuf, in different order": {
text: []string{
genTestHistProto("test_histogram_1", true, false),
- genTestCounterProto("test_metric_1", 1),
- genTestCounterProto("test_metric_1_count", 1),
- genTestCounterProto("test_metric_1_sum", 1),
- genTestCounterProto("test_metric_1_bucket", 1),
+ genTestCounterProto("test_metric_1"),
+ genTestCounterProto("test_metric_1_count"),
+ genTestCounterProto("test_metric_1_sum"),
+ genTestCounterProto("test_metric_1_bucket"),
genTestHistProto("test_histogram_2", true, false),
- genTestCounterProto("test_metric_2", 1),
- genTestCounterProto("test_metric_2_count", 1),
- genTestCounterProto("test_metric_2_sum", 1),
- genTestCounterProto("test_metric_2_bucket", 1),
+ genTestCounterProto("test_metric_2"),
+ genTestCounterProto("test_metric_2_count"),
+ genTestCounterProto("test_metric_2_sum"),
+ genTestCounterProto("test_metric_2_bucket"),
genTestHistProto("test_histogram_3", true, false),
- genTestCounterProto("test_metric_3", 1),
- genTestCounterProto("test_metric_3_count", 1),
- genTestCounterProto("test_metric_3_sum", 1),
- genTestCounterProto("test_metric_3_bucket", 1),
+ genTestCounterProto("test_metric_3"),
+ genTestCounterProto("test_metric_3_count"),
+ genTestCounterProto("test_metric_3_sum"),
+ genTestCounterProto("test_metric_3_bucket"),
},
contentType: "application/vnd.google.protobuf",
hasClassic: true,
},
"protobuf, with additional native exponential histogram": {
text: []string{
- genTestCounterProto("test_metric_1", 1),
- genTestCounterProto("test_metric_1_count", 1),
- genTestCounterProto("test_metric_1_sum", 1),
- genTestCounterProto("test_metric_1_bucket", 1),
+ genTestCounterProto("test_metric_1"),
+ genTestCounterProto("test_metric_1_count"),
+ genTestCounterProto("test_metric_1_sum"),
+ genTestCounterProto("test_metric_1_bucket"),
genTestHistProto("test_histogram_1", true, true),
- genTestCounterProto("test_metric_2", 1),
- genTestCounterProto("test_metric_2_count", 1),
- genTestCounterProto("test_metric_2_sum", 1),
- genTestCounterProto("test_metric_2_bucket", 1),
+ genTestCounterProto("test_metric_2"),
+ genTestCounterProto("test_metric_2_count"),
+ genTestCounterProto("test_metric_2_sum"),
+ genTestCounterProto("test_metric_2_bucket"),
genTestHistProto("test_histogram_2", true, true),
- genTestCounterProto("test_metric_3", 1),
- genTestCounterProto("test_metric_3_count", 1),
- genTestCounterProto("test_metric_3_sum", 1),
- genTestCounterProto("test_metric_3_bucket", 1),
+ genTestCounterProto("test_metric_3"),
+ genTestCounterProto("test_metric_3_count"),
+ genTestCounterProto("test_metric_3_sum"),
+ genTestCounterProto("test_metric_3_bucket"),
genTestHistProto("test_histogram_3", true, true),
},
contentType: "application/vnd.google.protobuf",
@@ -4680,20 +4555,20 @@ metric: <
},
"protobuf, with only native exponential histogram": {
text: []string{
- genTestCounterProto("test_metric_1", 1),
- genTestCounterProto("test_metric_1_count", 1),
- genTestCounterProto("test_metric_1_sum", 1),
- genTestCounterProto("test_metric_1_bucket", 1),
+ genTestCounterProto("test_metric_1"),
+ genTestCounterProto("test_metric_1_count"),
+ genTestCounterProto("test_metric_1_sum"),
+ genTestCounterProto("test_metric_1_bucket"),
genTestHistProto("test_histogram_1", false, true),
- genTestCounterProto("test_metric_2", 1),
- genTestCounterProto("test_metric_2_count", 1),
- genTestCounterProto("test_metric_2_sum", 1),
- genTestCounterProto("test_metric_2_bucket", 1),
+ genTestCounterProto("test_metric_2"),
+ genTestCounterProto("test_metric_2_count"),
+ genTestCounterProto("test_metric_2_sum"),
+ genTestCounterProto("test_metric_2_bucket"),
genTestHistProto("test_histogram_2", false, true),
- genTestCounterProto("test_metric_3", 1),
- genTestCounterProto("test_metric_3_count", 1),
- genTestCounterProto("test_metric_3_sum", 1),
- genTestCounterProto("test_metric_3_bucket", 1),
+ genTestCounterProto("test_metric_3"),
+ genTestCounterProto("test_metric_3_count"),
+ genTestCounterProto("test_metric_3_sum"),
+ genTestCounterProto("test_metric_3_bucket"),
genTestHistProto("test_histogram_3", false, true),
},
contentType: "application/vnd.google.protobuf",
@@ -4701,7 +4576,7 @@ metric: <
},
}
- checkBucketValues := func(expectedCount int, series storage.SeriesSet) {
+ checkBucketValues := func(t testing.TB, expectedCount int, series storage.SeriesSet) {
labelName := "le"
var expectedValues []string
if expectedCount > 0 {
@@ -4723,7 +4598,7 @@ metric: <
}
// Checks that the expected series is present and runs a basic sanity check of the float values.
- checkFloatSeries := func(series storage.SeriesSet, expectedCount int, expectedFloat float64) {
+ checkFloatSeries := func(t testing.TB, series storage.SeriesSet, expectedCount int, expectedFloat float64) {
count := 0
for series.Next() {
i := series.At().Iterator(nil)
@@ -4749,7 +4624,7 @@ metric: <
}
// Checks that the expected series is present and runs a basic sanity check of the histogram values.
- checkHistSeries := func(series storage.SeriesSet, expectedCount int, expectedSchema int32) {
+ checkHistSeries := func(t testing.TB, series storage.SeriesSet, expectedCount int, expectedSchema int32) {
count := 0
for series.Next() {
i := series.At().Iterator(nil)
@@ -4831,14 +4706,15 @@ metric: <
t.Run(fmt.Sprintf("%s with %s", name, metricsTextName), func(t *testing.T) {
t.Parallel()
- simpleStorage := teststorage.New(t)
- defer simpleStorage.Close()
+ s := teststorage.New(t)
+ t.Cleanup(func() { _ = s.Close() })
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return simpleStorage.Appender(ctx) }, 0)
- sl.alwaysScrapeClassicHist = tc.alwaysScrapeClassicHistograms
- sl.convertClassicHistToNHCB = tc.convertClassicHistToNHCB
- sl.enableNativeHistogramScraping = true
- app := simpleStorage.Appender(context.Background())
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendable = s
+ sl.alwaysScrapeClassicHist = tc.alwaysScrapeClassicHistograms
+ sl.convertClassicHistToNHCB = tc.convertClassicHistToNHCB
+ sl.enableNativeHistogramScraping = true
+ })
var content []byte
contentType := metricsText.contentType
@@ -4862,47 +4738,50 @@ metric: <
default:
t.Error("unexpected content type")
}
- sl.append(app, content, contentType, time.Now())
+ now := time.Now()
+ app := sl.appender()
+ _, _, _, err := app.append(content, contentType, now)
+ require.NoError(t, err)
require.NoError(t, app.Commit())
+ var expectedSchema int32
+ if expectCustomBuckets {
+ expectedSchema = histogram.CustomBucketsSchema
+ } else {
+ expectedSchema = 3
+ }
+
+ // Validate that what was appended can be queried.
ctx := t.Context()
- q, err := simpleStorage.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
+ q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
require.NoError(t, err)
- defer q.Close()
+ t.Cleanup(func() { _ = q.Close() })
var series storage.SeriesSet
-
for i := 1; i <= 3; i++ {
series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_metric_%d", i)))
- checkFloatSeries(series, 1, 1.)
+ checkFloatSeries(t, series, 1, 1.)
series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_metric_%d_count", i)))
- checkFloatSeries(series, 1, 1.)
+ checkFloatSeries(t, series, 1, 1.)
series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_metric_%d_sum", i)))
- checkFloatSeries(series, 1, 1.)
+ checkFloatSeries(t, series, 1, 1.)
series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_metric_%d_bucket", i)))
- checkFloatSeries(series, 1, 1.)
+ checkFloatSeries(t, series, 1, 1.)
series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_histogram_%d_count", i)))
- checkFloatSeries(series, expectedClassicHistCount, 1.)
+ checkFloatSeries(t, series, expectedClassicHistCount, 1.)
series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_histogram_%d_sum", i)))
- checkFloatSeries(series, expectedClassicHistCount, 10.)
+ checkFloatSeries(t, series, expectedClassicHistCount, 10.)
series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_histogram_%d_bucket", i)))
- checkBucketValues(expectedClassicHistCount, series)
+ checkBucketValues(t, expectedClassicHistCount, series)
series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", fmt.Sprintf("test_histogram_%d", i)))
-
- var expectedSchema int32
- if expectCustomBuckets {
- expectedSchema = histogram.CustomBucketsSchema
- } else {
- expectedSchema = 3
- }
- checkHistSeries(series, expectedNativeHistCount, expectedSchema)
+ checkHistSeries(t, series, expectedNativeHistCount, expectedSchema)
}
})
}
@@ -4910,10 +4789,10 @@ metric: <
}
func TestTypeUnitReLabel(t *testing.T) {
- simpleStorage := teststorage.New(t)
- defer simpleStorage.Close()
+ s := teststorage.New(t)
+ t.Cleanup(func() { _ = s.Close() })
- config := &config.ScrapeConfig{
+ cfg := &config.ScrapeConfig{
JobName: "test",
MetricRelabelConfigs: []*relabel.Config{
{
@@ -4958,7 +4837,7 @@ disk_usage_bytes 456
ts, scrapedTwice := newScrapableServer(metricsText)
defer ts.Close()
- sp, err := newScrapePool(config, simpleStorage, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
require.NoError(t, err)
defer sp.stop()
@@ -4978,9 +4857,9 @@ disk_usage_bytes 456
}
ctx := t.Context()
- q, err := simpleStorage.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
+ q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
require.NoError(t, err)
- defer q.Close()
+ t.Cleanup(func() { _ = q.Close() })
series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*_total$"))
for series.Next() {
@@ -4996,26 +4875,25 @@ disk_usage_bytes 456
}
func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *testing.T) {
- appender := &collectResultAppender{}
- var (
- signal = make(chan struct{}, 1)
- scraper = &testScraper{}
- app = func(context.Context) storage.Appender { return appender }
- )
+ signal := make(chan struct{}, 1)
+
+ ctx, cancel := context.WithCancel(t.Context())
+ appTest := teststorage.NewAppendable()
+ sl, scraper := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.ctx = ctx
+ sl.appendable = appTest
+ // Since we're writing samples directly below, we need to provide a protocol fallback.
+ sl.fallbackScrapeProtocol = "text/plain"
+ sl.trackTimestampsStaleness = true
+ })
- ctx, cancel := context.WithCancel(context.Background())
- // Since we're writing samples directly below we need to provide a protocol fallback.
- sl := newBasicScrapeLoopWithFallback(t, ctx, scraper, app, 10*time.Millisecond, "text/plain")
- sl.trackTimestampsStaleness = true
// Succeed once, several failures, then stop.
numScrapes := 0
-
scraper.scrapeFunc = func(_ context.Context, w io.Writer) error {
numScrapes++
switch numScrapes {
case 1:
- fmt.Fprintf(w, "metric_a 42 %d\n", time.Now().UnixNano()/int64(time.Millisecond))
+ _, _ = fmt.Fprintf(w, "metric_a 42 %d\n", time.Now().UnixNano()/int64(time.Millisecond))
return nil
case 5:
cancel()
@@ -5033,17 +4911,19 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *
case <-time.After(5 * time.Second):
t.Fatalf("Scrape wasn't stopped.")
}
+
+ got := appTest.ResultSamples()
// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
// each scrape successful or not.
- require.Len(t, appender.resultFloats, 27, "Appended samples not as expected:\n%s", appender)
- require.Equal(t, 42.0, appender.resultFloats[0].f, "Appended first sample not as expected")
- require.True(t, value.IsStaleNaN(appender.resultFloats[6].f),
- "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.resultFloats[6].f))
+ require.Len(t, got, 27, "Appended samples not as expected:\n%s", appTest)
+ require.Equal(t, 42.0, got[0].V, "Appended first sample not as expected")
+ require.True(t, value.IsStaleNaN(got[6].V),
+ "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(got[6].V))
}
func TestScrapeLoopCompression(t *testing.T) {
- simpleStorage := teststorage.New(t)
- defer simpleStorage.Close()
+ s := teststorage.New(t)
+ t.Cleanup(func() { _ = s.Close() })
metricsText := makeTestGauges(10)
@@ -5065,12 +4945,12 @@ func TestScrapeLoopCompression(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
require.Equal(t, tc.acceptEncoding, r.Header.Get("Accept-Encoding"), "invalid value of the Accept-Encoding header")
- fmt.Fprint(w, string(metricsText))
+ _, _ = fmt.Fprint(w, string(metricsText))
close(scraped)
}))
defer ts.Close()
- config := &config.ScrapeConfig{
+ cfg := &config.ScrapeConfig{
JobName: "test",
SampleLimit: 100,
Scheme: "http",
@@ -5081,7 +4961,7 @@ func TestScrapeLoopCompression(t *testing.T) {
MetricNameEscapingScheme: model.AllowUTF8,
}
- sp, err := newScrapePool(config, simpleStorage, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
require.NoError(t, err)
defer sp.stop()
@@ -5191,11 +5071,11 @@ func BenchmarkTargetScraperGzip(b *testing.B) {
gw := gzip.NewWriter(&buf)
for j := 0; j < scenarios[i].metricsCount; j++ {
name = fmt.Sprintf("go_memstats_alloc_bytes_total_%d", j)
- fmt.Fprintf(gw, "# HELP %s Total number of bytes allocated, even if freed.\n", name)
- fmt.Fprintf(gw, "# TYPE %s counter\n", name)
- fmt.Fprintf(gw, "%s %d\n", name, i*j)
+ _, _ = fmt.Fprintf(gw, "# HELP %s Total number of bytes allocated, even if freed.\n", name)
+ _, _ = fmt.Fprintf(gw, "# TYPE %s counter\n", name)
+ _, _ = fmt.Fprintf(gw, "%s %d\n", name, i*j)
}
- gw.Close()
+ require.NoError(b, gw.Close())
scenarios[i].body = buf.Bytes()
}
@@ -5204,7 +5084,7 @@ func BenchmarkTargetScraperGzip(b *testing.B) {
w.Header().Set("Content-Encoding", "gzip")
for _, scenario := range scenarios {
if strconv.Itoa(scenario.metricsCount) == r.URL.Query()["count"][0] {
- w.Write(scenario.body)
+ _, _ = w.Write(scenario.body)
return
}
}
@@ -5253,31 +5133,31 @@ func BenchmarkTargetScraperGzip(b *testing.B) {
// When a scrape contains multiple instances for the same time series we should increment
// prometheus_target_scrapes_sample_duplicate_timestamp_total metric.
func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) {
- ctx, sl := simpleTestScrapeLoop(t)
+ sl, _ := newTestScrapeLoop(t)
- slApp := sl.appender(ctx)
- total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\ntest_metric 2\ntest_metric 3\n"), "text/plain", time.Time{})
+ app := sl.appender()
+ total, added, seriesAdded, err := app.append([]byte("test_metric 1\ntest_metric 2\ntest_metric 3\n"), "text/plain", time.Time{})
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
require.Equal(t, 3, total)
require.Equal(t, 3, added)
require.Equal(t, 1, seriesAdded)
require.Equal(t, 2.0, prom_testutil.ToFloat64(sl.metrics.targetScrapeSampleDuplicate))
- slApp = sl.appender(ctx)
- total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\ntest_metric 1\ntest_metric 1\n"), "text/plain", time.Time{})
+ app = sl.appender()
+ total, added, seriesAdded, err = app.append([]byte("test_metric 1\ntest_metric 1\ntest_metric 1\n"), "text/plain", time.Time{})
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
require.Equal(t, 3, total)
require.Equal(t, 3, added)
require.Equal(t, 0, seriesAdded)
require.Equal(t, 4.0, prom_testutil.ToFloat64(sl.metrics.targetScrapeSampleDuplicate))
// When different timestamps are supplied, multiple samples are accepted.
- slApp = sl.appender(ctx)
- total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1 1001\ntest_metric 1 1002\ntest_metric 1 1003\n"), "text/plain", time.Time{})
+ app = sl.appender()
+ total, added, seriesAdded, err = app.append([]byte("test_metric 1 1001\ntest_metric 1 1002\ntest_metric 1 1003\n"), "text/plain", time.Time{})
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
require.Equal(t, 3, total)
require.Equal(t, 3, added)
require.Equal(t, 0, seriesAdded)
@@ -5325,7 +5205,7 @@ func testNativeHistogramMaxSchemaSet(t *testing.T, minBucketFactor string, expec
},
)
registry := prometheus.NewRegistry()
- registry.Register(nativeHistogram)
+ require.NoError(t, registry.Register(nativeHistogram))
nativeHistogram.Observe(1.0)
nativeHistogram.Observe(1.0)
nativeHistogram.Observe(1.0)
@@ -5339,10 +5219,10 @@ func testNativeHistogramMaxSchemaSet(t *testing.T, minBucketFactor string, expec
histogramMetricFamily := gathered[0]
buffer := protoMarshalDelimited(t, histogramMetricFamily)
- // Create a HTTP server to serve /metrics via ProtoBuf
+ // Create an HTTP server to serve /metrics via ProtoBuf
metricsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`)
- w.Write(buffer)
+ _, _ = w.Write(buffer)
}))
defer metricsServer.Close()
@@ -5361,18 +5241,17 @@ scrape_configs:
`, minBucketFactor, strings.ReplaceAll(metricsServer.URL, "http://", ""))
s := teststorage.New(t)
- defer s.Close()
+ t.Cleanup(func() { _ = s.Close() })
reg := prometheus.NewRegistry()
mng, err := NewManager(&Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond)}, nil, nil, s, reg)
require.NoError(t, err)
cfg, err := config.Load(configStr, promslog.NewNopLogger())
require.NoError(t, err)
- mng.ApplyConfig(cfg)
+ require.NoError(t, mng.ApplyConfig(cfg))
tsets := make(chan map[string][]*targetgroup.Group)
go func() {
- err = mng.Run(tsets)
- require.NoError(t, err)
+ require.NoError(t, mng.Run(tsets))
}()
defer mng.Stop()
@@ -5401,7 +5280,7 @@ scrape_configs:
q, err := s.Querier(0, math.MaxInt64)
require.NoError(t, err)
seriesS := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "__name__", "testing_example_native_histogram"))
- histogramSamples := []*histogram.Histogram{}
+ var histogramSamples []*histogram.Histogram
for seriesS.Next() {
series := seriesS.At()
it := series.Iterator(nil)
@@ -5447,7 +5326,7 @@ func TestTargetScrapeConfigWithLabels(t *testing.T) {
require.Equal(t, expectedPath, r.URL.Path)
w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
- w.Write([]byte("metric_a 1\nmetric_b 2\n"))
+ _, _ = w.Write([]byte("metric_a 1\nmetric_b 2\n"))
}),
)
t.Cleanup(server.Close)
@@ -5467,7 +5346,7 @@ func TestTargetScrapeConfigWithLabels(t *testing.T) {
}
}
- sp, err := newScrapePool(cfg, &nopAppendable{}, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ sp, err := newScrapePool(cfg, teststorage.NewAppendable(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
require.NoError(t, err)
t.Cleanup(sp.stop)
@@ -5595,7 +5474,7 @@ func newScrapableServer(scrapeText string) (s *httptest.Server, scrapedTwice cha
scrapedTwice = make(chan bool)
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
- fmt.Fprint(w, scrapeText)
+ _, _ = fmt.Fprint(w, scrapeText)
scrapes++
if scrapes == 2 {
close(scrapedTwice)
@@ -5607,7 +5486,7 @@ func newScrapableServer(scrapeText string) (s *httptest.Server, scrapedTwice cha
func TestScrapePoolScrapeAfterReload(t *testing.T) {
h := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, _ *http.Request) {
- w.Write([]byte{0x42, 0x42})
+ _, _ = w.Write([]byte{0x42, 0x42})
},
))
t.Cleanup(h.Close)
@@ -5630,7 +5509,7 @@ func TestScrapePoolScrapeAfterReload(t *testing.T) {
},
}
- p, err := newScrapePool(cfg, &nopAppendable{}, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+ p, err := newScrapePool(cfg, teststorage.NewAppendable(), 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
require.NoError(t, err)
t.Cleanup(p.stop)
@@ -5657,103 +5536,105 @@ func TestScrapeAppendWithParseError(t *testing.T) {
# EOF`
)
- sl := newBasicScrapeLoop(t, context.Background(), nil, nil, 0)
- sl.cache = newScrapeCache(sl.metrics)
-
+ appTest := teststorage.NewAppendable()
+ sl, _ := newTestScrapeLoop(t, withAppendable(appTest))
now := time.Now()
- capp := &collectResultAppender{next: nopAppender{}}
- _, _, _, err := sl.append(capp, []byte(scrape1), "application/openmetrics-text", now)
+
+ app := sl.appender()
+ _, _, _, err := app.append([]byte(scrape1), "application/openmetrics-text", now)
require.Error(t, err)
- _, _, _, err = sl.append(capp, nil, "application/openmetrics-text", now)
- require.NoError(t, err)
- require.Empty(t, capp.resultFloats)
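+ // Roll back the failed append before starting a fresh one.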
+ require.NoError(t, app.Rollback())
- capp = &collectResultAppender{next: nopAppender{}}
- _, _, _, err = sl.append(capp, []byte(scrape2), "application/openmetrics-text", now.Add(15*time.Second))
+ app = sl.appender()
+ _, _, _, err = app.append(nil, "application/openmetrics-text", now)
require.NoError(t, err)
- require.NoError(t, capp.Commit())
+ require.NoError(t, app.Commit())
+ require.Empty(t, appTest.ResultSamples())
- want := []floatSample{
+ app = sl.appender()
+ _, _, _, err = app.append([]byte(scrape2), "application/openmetrics-text", now.Add(15*time.Second))
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ want := []sample{
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
- t: timestamp.FromTime(now.Add(15 * time.Second)),
- f: 11,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+ T: timestamp.FromTime(now.Add(15 * time.Second)),
+ V: 11,
},
}
- requireEqual(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", capp)
+ requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", appTest)
}
-// This test covers a case where there's a target with sample_limit set and the some of exporter samples
+// This test covers a case where there's a target with sample_limit set and the set of exported samples
// changes between scrapes.
func TestScrapeLoopAppendSampleLimitWithDisappearingSeries(t *testing.T) {
const sampleLimit = 4
- resApp := &collectResultAppender{}
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender {
- return resApp
- }, 0)
- sl.sampleLimit = sampleLimit
+
+ appTest := teststorage.NewAppendable()
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendable = appTest
+ sl.sampleLimit = sampleLimit
+ })
now := time.Now()
- slApp := sl.appender(context.Background())
- samplesScraped, samplesAfterRelabel, createdSeries, err := sl.append(
- slApp,
+ app := sl.appender()
+ samplesScraped, samplesAfterRelabel, createdSeries, err := app.append(
// Start with 3 samples, all accepted.
[]byte("metric_a 1\nmetric_b 1\nmetric_c 1\n"),
"text/plain",
now,
)
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
require.Equal(t, 3, samplesScraped) // All on scrape.
require.Equal(t, 3, samplesAfterRelabel) // This is series after relabeling.
require.Equal(t, 3, createdSeries) // Newly added to TSDB.
- want := []floatSample{
+ want := []sample{
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_b"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_b"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_c"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_c"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
}
- requireEqual(t, want, resApp.resultFloats, "Appended samples not as expected:\n%s", slApp)
+ requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app)
now = now.Add(time.Minute)
- slApp = sl.appender(context.Background())
- samplesScraped, samplesAfterRelabel, createdSeries, err = sl.append(
- slApp,
+ app = sl.appender()
+ samplesScraped, samplesAfterRelabel, createdSeries, err = app.append(
// Start exporting 3 more samples, so we're over the limit now.
[]byte("metric_a 1\nmetric_b 1\nmetric_c 1\nmetric_d 1\nmetric_e 1\nmetric_f 1\n"),
"text/plain",
now,
)
require.ErrorIs(t, err, errSampleLimit)
- require.NoError(t, slApp.Rollback())
+ require.NoError(t, app.Rollback())
require.Equal(t, 6, samplesScraped)
require.Equal(t, 6, samplesAfterRelabel)
require.Equal(t, 1, createdSeries) // We've added one series before hitting the limit.
- requireEqual(t, want, resApp.resultFloats, "Appended samples not as expected:\n%s", slApp)
+ requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app)
sl.cache.iterDone(false)
now = now.Add(time.Minute)
- slApp = sl.appender(context.Background())
- samplesScraped, samplesAfterRelabel, createdSeries, err = sl.append(
- slApp,
+ app = sl.appender()
+ samplesScraped, samplesAfterRelabel, createdSeries, err = app.append(
// Remove all samples except first 2.
[]byte("metric_a 1\nmetric_b 1\n"),
"text/plain",
now,
)
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
require.Equal(t, 2, samplesScraped)
require.Equal(t, 2, samplesAfterRelabel)
require.Equal(t, 0, createdSeries)
@@ -5762,152 +5643,147 @@ func TestScrapeLoopAppendSampleLimitWithDisappearingSeries(t *testing.T) {
// - Append with stale markers for metric_c - this series was added during first scrape but disappeared during last scrape.
// - Append with stale marker for metric_d - this series was added during second scrape before we hit the sample_limit.
// We should NOT see:
- // - Appends with stale markers for metric_e & metric_f - both over the limit during second scrape and so they never made it into TSDB.
- want = append(want, []floatSample{
+ // - Appends with stale markers for metric_e & metric_f - both over the limit during second scrape, and so they never made it into TSDB.
+ want = append(want, []sample{
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_b"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_b"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_c"),
- t: timestamp.FromTime(now),
- f: math.Float64frombits(value.StaleNaN),
+ L: labels.FromStrings(model.MetricNameLabel, "metric_c"),
+ T: timestamp.FromTime(now),
+ V: math.Float64frombits(value.StaleNaN),
},
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_d"),
- t: timestamp.FromTime(now),
- f: math.Float64frombits(value.StaleNaN),
+ L: labels.FromStrings(model.MetricNameLabel, "metric_d"),
+ T: timestamp.FromTime(now),
+ V: math.Float64frombits(value.StaleNaN),
},
}...)
- requireEqual(t, want, resApp.resultFloats, "Appended samples not as expected:\n%s", slApp)
+ requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app)
}
// This test covers a case where there's a target with sample_limit set and each scrape sees a completely
// different set of samples.
func TestScrapeLoopAppendSampleLimitReplaceAllSamples(t *testing.T) {
const sampleLimit = 4
- resApp := &collectResultAppender{}
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender {
- return resApp
- }, 0)
- sl.sampleLimit = sampleLimit
+
+ appTest := teststorage.NewAppendable()
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.appendable = appTest
+ sl.sampleLimit = sampleLimit
+ })
now := time.Now()
- slApp := sl.appender(context.Background())
- samplesScraped, samplesAfterRelabel, createdSeries, err := sl.append(
- slApp,
+ app := sl.appender()
+ samplesScraped, samplesAfterRelabel, createdSeries, err := app.append(
// Start with 4 samples, all accepted.
[]byte("metric_a 1\nmetric_b 1\nmetric_c 1\nmetric_d 1\n"),
"text/plain",
now,
)
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
require.Equal(t, 4, samplesScraped) // All on scrape.
require.Equal(t, 4, samplesAfterRelabel) // This is series after relabeling.
require.Equal(t, 4, createdSeries) // Newly added to TSDB.
- want := []floatSample{
+ want := []sample{
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_b"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_b"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_c"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_c"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_d"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_d"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
}
- requireEqual(t, want, resApp.resultFloats, "Appended samples not as expected:\n%s", slApp)
+ requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app)
now = now.Add(time.Minute)
- slApp = sl.appender(context.Background())
- samplesScraped, samplesAfterRelabel, createdSeries, err = sl.append(
- slApp,
+ app = sl.appender()
+ samplesScraped, samplesAfterRelabel, createdSeries, err = app.append(
// Replace all samples with new time series.
[]byte("metric_e 1\nmetric_f 1\nmetric_g 1\nmetric_h 1\n"),
"text/plain",
now,
)
require.NoError(t, err)
- require.NoError(t, slApp.Commit())
+ require.NoError(t, app.Commit())
require.Equal(t, 4, samplesScraped)
require.Equal(t, 4, samplesAfterRelabel)
require.Equal(t, 4, createdSeries)
// We replaced all samples from first scrape with new set of samples.
- // We expect to see:
+ // We expected to see:
// - 4 appends for new samples.
// - 4 appends with staleness markers for old samples.
- want = append(want, []floatSample{
+ want = append(want, []sample{
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_e"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_e"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_f"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_f"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_g"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_g"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_h"),
- t: timestamp.FromTime(now),
- f: 1,
+ L: labels.FromStrings(model.MetricNameLabel, "metric_h"),
+ T: timestamp.FromTime(now),
+ V: 1,
},
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
- t: timestamp.FromTime(now),
- f: math.Float64frombits(value.StaleNaN),
+ L: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+ T: timestamp.FromTime(now),
+ V: math.Float64frombits(value.StaleNaN),
},
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_b"),
- t: timestamp.FromTime(now),
- f: math.Float64frombits(value.StaleNaN),
+ L: labels.FromStrings(model.MetricNameLabel, "metric_b"),
+ T: timestamp.FromTime(now),
+ V: math.Float64frombits(value.StaleNaN),
},
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_c"),
- t: timestamp.FromTime(now),
- f: math.Float64frombits(value.StaleNaN),
+ L: labels.FromStrings(model.MetricNameLabel, "metric_c"),
+ T: timestamp.FromTime(now),
+ V: math.Float64frombits(value.StaleNaN),
},
{
- metric: labels.FromStrings(model.MetricNameLabel, "metric_d"),
- t: timestamp.FromTime(now),
- f: math.Float64frombits(value.StaleNaN),
+ L: labels.FromStrings(model.MetricNameLabel, "metric_d"),
+ T: timestamp.FromTime(now),
+ V: math.Float64frombits(value.StaleNaN),
},
}...)
- requireEqual(t, want, resApp.resultFloats, "Appended samples not as expected:\n%s", slApp)
+ requireEqual(t, want, appTest.ResultSamples(), "Appended samples not as expected:\n%s", app)
}
func TestScrapeLoopDisableStalenessMarkerInjection(t *testing.T) {
- var (
- loopDone = atomic.NewBool(false)
- appender = &collectResultAppender{}
- scraper = &testScraper{}
- app = func(_ context.Context) storage.Appender { return appender }
- )
+ loopDone := atomic.NewBool(false)
- sl := newBasicScrapeLoop(t, context.Background(), scraper, app, 10*time.Millisecond)
+ appTest := teststorage.NewAppendable()
+ sl, scraper := newTestScrapeLoop(t, withAppendable(appTest))
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
if _, err := w.Write([]byte("metric_a 42\n")); err != nil {
return err
@@ -5923,9 +5799,7 @@ func TestScrapeLoopDisableStalenessMarkerInjection(t *testing.T) {
// Wait for some samples to be appended.
require.Eventually(t, func() bool {
- appender.mtx.Lock()
- defer appender.mtx.Unlock()
- return len(appender.resultFloats) > 2
+ return len(appTest.ResultSamples()) > 2
}, 5*time.Second, 100*time.Millisecond, "Scrape loop didn't append any samples.")
// Disable end of run staleness markers and stop the loop.
@@ -5936,9 +5810,169 @@ func TestScrapeLoopDisableStalenessMarkerInjection(t *testing.T) {
}, 5*time.Second, 100*time.Millisecond, "Scrape loop didn't stop.")
// No stale markers should be appended, since they were disabled.
- for _, s := range appender.resultFloats {
- if value.IsStaleNaN(s.f) {
- t.Fatalf("Got stale NaN samples while end of run staleness is disabled: %x", math.Float64bits(s.f))
+ for _, s := range appTest.ResultSamples() {
+ if value.IsStaleNaN(s.V) {
+ t.Fatalf("Got stale NaN samples while end of run staleness is disabled: %x", math.Float64bits(s.V))
}
}
}
+
+// Recommended CLI invocation:
+/*
+ export bench=restartLoops && go test ./scrape/... \
+ -run '^$' -bench '^BenchmarkScrapePoolRestartLoops' \
+ -benchtime 5s -count 6 -cpu 2 -timeout 999m \
+ | tee ${bench}.txt
+*/
+func BenchmarkScrapePoolRestartLoops(b *testing.B) {
+ sp, err := newScrapePool(
+ &config.ScrapeConfig{
+ MetricNameValidationScheme: model.UTF8Validation,
+ ScrapeInterval: model.Duration(1 * time.Hour),
+ ScrapeTimeout: model.Duration(1 * time.Hour),
+ },
+ nil,
+ 0,
+ nil,
+ nil,
+ &Options{},
+ newTestScrapeMetrics(b),
+ )
+ require.NoError(b, err)
+ b.Cleanup(sp.stop)
+
+ for i := range 1000 {
+ sp.activeTargets[uint64(i)] = &Target{scrapeConfig: &config.ScrapeConfig{}}
+ sp.loops[uint64(i)] = noopLoop() // First restart will supplement those with proper scrapeLoops.
+ }
+ sp.restartLoops(true)
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for b.Loop() {
+ sp.restartLoops(true)
+ }
+}
+
+// TestNewScrapeLoopHonorLabelsWiring verifies that newScrapeLoop correctly wires
+// HonorLabels (not HonorTimestamps) to the sampleMutator.
+func TestNewScrapeLoopHonorLabelsWiring(t *testing.T) {
+ // Scraped metric has label "lbl" with value "scraped".
+ // Discovery target has label "lbl" with value "discovery".
+ // With honor_labels=true, the scraped value should win.
+ // With honor_labels=false, the discovery value should win and scraped moves to exported_lbl.
+ testCases := []struct {
+ name string
+ honorLabels bool
+ expectedLbl string
+ expectedExpLbl string // exported_lbl value, empty if not expected
+ }{
+ {
+ name: "honor_labels=true",
+ honorLabels: true,
+ expectedLbl: "scraped",
+ },
+ {
+ name: "honor_labels=false",
+ honorLabels: false,
+ expectedLbl: "discovery",
+ expectedExpLbl: "scraped",
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ ts, scrapedTwice := newScrapableServer(`metric{lbl="scraped"} 1`)
+ defer ts.Close()
+
+ testURL, err := url.Parse(ts.URL)
+ require.NoError(t, err)
+
+ s := teststorage.New(t)
+ defer s.Close()
+
+ cfg := &config.ScrapeConfig{
+ JobName: "test",
+ Scheme: "http",
+ HonorLabels: tc.honorLabels,
+ HonorTimestamps: !tc.honorLabels, // Opposite of HonorLabels to catch wiring bugs
+ ScrapeInterval: model.Duration(1 * time.Second),
+ ScrapeTimeout: model.Duration(100 * time.Millisecond),
+ MetricNameValidationScheme: model.UTF8Validation,
+ }
+
+ sp, err := newScrapePool(cfg, s, 0, nil, nil, &Options{skipOffsetting: true}, newTestScrapeMetrics(t))
+ require.NoError(t, err)
+ defer sp.stop()
+
+ // Sync with a target that has a conflicting label.
+ sp.Sync([]*targetgroup.Group{{
+ Targets: []model.LabelSet{{
+ model.AddressLabel: model.LabelValue(testURL.Host),
+ "lbl": "discovery",
+ }},
+ }})
+ require.Len(t, sp.ActiveTargets(), 1)
+
+ // Wait for scrape to complete.
+ select {
+ case <-time.After(5 * time.Second):
+ t.Fatal("scrape did not complete in time")
+ case <-scrapedTwice:
+ }
+
+ // Query the storage to verify label values.
+ q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
+ require.NoError(t, err)
+ defer q.Close()
+
+ series := q.Select(t.Context(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "__name__", "metric"))
+ require.True(t, series.Next(), "metric series not found")
+ require.Equal(t, tc.expectedLbl, series.At().Labels().Get("lbl"))
+ require.Equal(t, tc.expectedExpLbl, series.At().Labels().Get("exported_lbl"))
+ })
+ }
+}
+
+func TestDropsSeriesFromMetricRelabeling(t *testing.T) {
+ target := &Target{}
+ relabelConfig := []*relabel.Config{
+ {
+ SourceLabels: model.LabelNames{"__name__"},
+ Regex: relabel.MustNewRegexp("test_metric.*$"),
+ Action: relabel.Keep,
+ NameValidationScheme: model.UTF8Validation,
+ },
+ {
+ SourceLabels: model.LabelNames{"__name__"},
+ Regex: relabel.MustNewRegexp("test_metric_2$"),
+ Action: relabel.Drop,
+ NameValidationScheme: model.UTF8Validation,
+ },
+ }
+ sl, _ := newTestScrapeLoop(t, func(sl *scrapeLoop) {
+ sl.sampleMutator = func(l labels.Labels) labels.Labels {
+ return mutateSampleLabels(l, target, true, relabelConfig)
+ }
+ })
+
+ app := sl.appender()
+ total, added, seriesAdded, err := app.append([]byte("test_metric_1 1\n"), "text/plain", time.Time{})
+ require.NoError(t, err)
+ require.Equal(t, 1, total)
+ require.Equal(t, 1, added)
+ require.Equal(t, 1, seriesAdded)
+
+ total, added, seriesAdded, err = app.append([]byte("test_metric_2 1\n"), "text/plain", time.Time{})
+ require.NoError(t, err)
+ require.Equal(t, 1, total)
+ require.Equal(t, 0, added)
+ require.Equal(t, 0, seriesAdded)
+
+ total, added, seriesAdded, err = app.append([]byte("unwanted_metric 1\n"), "text/plain", time.Time{})
+ require.NoError(t, err)
+ require.Equal(t, 1, total)
+ require.Equal(t, 0, added)
+ require.Equal(t, 0, seriesAdded)
+
+ require.NoError(t, app.Commit())
+}
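
For illustration only, not part of the patch: a minimal sketch of the test flow the updated scrape tests rely on, assuming the unexported scrape-package helpers (newTestScrapeLoop, withAppendable, sl.appender) and teststorage.NewAppendable behave exactly as used in the hunks above.

// Sketch inside package scrape; helper names are taken from the diff above.
func sketchScrapeLoopAppendFlow(t *testing.T) {
	appTest := teststorage.NewAppendable() // collects committed samples for assertions
	sl, _ := newTestScrapeLoop(t, withAppendable(appTest))

	app := sl.appender() // one appender per scrape
	_, _, _, err := app.append([]byte("metric_a 1\n"), "text/plain", time.Now())
	require.NoError(t, err)
	require.NoError(t, app.Commit()) // only committed samples are visible below

	require.Len(t, appTest.ResultSamples(), 1)
}
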
diff --git a/scrape/target.go b/scrape/target.go
index 2aabff20e2..4265f9e782 100644
--- a/scrape/target.go
+++ b/scrape/target.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/scrape/target_test.go b/scrape/target_test.go
index 582b198c79..06227da816 100644
--- a/scrape/target_test.go
+++ b/scrape/target_test.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -14,7 +14,6 @@
package scrape
import (
- "context"
"crypto/tls"
"crypto/x509"
"fmt"
@@ -36,7 +35,7 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
- "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/util/teststorage"
)
const (
@@ -611,12 +610,12 @@ func TestBucketLimitAppender(t *testing.T) {
},
}
- resApp := &collectResultAppender{}
+ appTest := teststorage.NewAppendable()
for _, c := range cases {
for _, floatHisto := range []bool{true, false} {
t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) {
- app := &bucketLimitAppender{Appender: resApp, limit: c.limit}
+ app := &bucketLimitAppender{Appender: appTest.Appender(t.Context()), limit: c.limit}
ts := int64(10 * time.Minute / time.Millisecond)
lbls := labels.FromStrings("__name__", "sparse_histogram_series")
var err error
@@ -697,12 +696,12 @@ func TestMaxSchemaAppender(t *testing.T) {
},
}
- resApp := &collectResultAppender{}
+ appTest := teststorage.NewAppendable()
for _, c := range cases {
for _, floatHisto := range []bool{true, false} {
t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) {
- app := &maxSchemaAppender{Appender: resApp, maxSchema: c.maxSchema}
+ app := &maxSchemaAppender{Appender: appTest.Appender(t.Context()), maxSchema: c.maxSchema}
ts := int64(10 * time.Minute / time.Millisecond)
lbls := labels.FromStrings("__name__", "sparse_histogram_series")
var err error
@@ -723,17 +722,12 @@ func TestMaxSchemaAppender(t *testing.T) {
}
}
-// Test sample_limit when a scrape containst Native Histograms.
+// Test sample_limit when a scrape contains Native Histograms.
func TestAppendWithSampleLimitAndNativeHistogram(t *testing.T) {
- const sampleLimit = 2
- resApp := &collectResultAppender{}
- sl := newBasicScrapeLoop(t, context.Background(), nil, func(_ context.Context) storage.Appender {
- return resApp
- }, 0)
- sl.sampleLimit = sampleLimit
+ appTest := teststorage.NewAppendable()
now := time.Now()
- app := appender(sl.appender(context.Background()), sl.sampleLimit, sl.bucketLimit, sl.maxSchema)
+ app := appenderWithLimits(appTest.Appender(t.Context()), 2, 0, histogram.ExponentialSchemaMax)
// sample_limit is set to 2, so first two scrapes should work
_, err := app.Append(0, labels.FromStrings(model.MetricNameLabel, "foo"), timestamp.FromTime(now), 1)
diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml
index 75f886d546..ae5fdc80ec 100644
--- a/scripts/golangci-lint.yml
+++ b/scripts/golangci-lint.yml
@@ -24,11 +24,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- name: Install Go
- uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
+ uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: 1.25.x
- name: Install snmp_exporter/generator dependencies
@@ -38,7 +38,7 @@ jobs:
id: golangci-lint-version
run: echo "version=$(make print-golangci-lint-version)" >> $GITHUB_OUTPUT
- name: Lint
- uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
+ uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
with:
args: --verbose
version: ${{ steps.golangci-lint-version.outputs.version }}
diff --git a/storage/buffer.go b/storage/buffer.go
index bc27948fd0..223c4fa42b 100644
--- a/storage/buffer.go
+++ b/storage/buffer.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/buffer_test.go b/storage/buffer_test.go
index 259e54d6f7..fc6603d4a5 100644
--- a/storage/buffer_test.go
+++ b/storage/buffer_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/errors.go b/storage/errors.go
index dd48066db6..4dd61e2523 100644
--- a/storage/errors.go
+++ b/storage/errors.go
@@ -1,4 +1,4 @@
-// Copyright 2014 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/errors_test.go b/storage/errors_test.go
index b3e202b49b..0e7277bf8b 100644
--- a/storage/errors_test.go
+++ b/storage/errors_test.go
@@ -1,4 +1,4 @@
-// Copyright 2014 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/fanout.go b/storage/fanout.go
index a699a97b02..246a955b73 100644
--- a/storage/fanout.go
+++ b/storage/fanout.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/fanout_test.go b/storage/fanout_test.go
index b1762ec555..ed4cf17696 100644
--- a/storage/fanout_test.go
+++ b/storage/fanout_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/generic.go b/storage/generic.go
index e5f4b4d03a..e85ac77b9c 100644
--- a/storage/generic.go
+++ b/storage/generic.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/interface.go b/storage/interface.go
index 19b4db4210..23b8b48a0c 100644
--- a/storage/interface.go
+++ b/storage/interface.go
@@ -1,4 +1,4 @@
-// Copyright 2014 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -49,6 +49,7 @@ var (
// NOTE(bwplotka): This can be both an instrumentation failure or commonly expected
// behaviour, and we currently don't have a way to determine this. As a result
// it's recommended to ignore this error for now.
+ // TODO(bwplotka): Remove with appender v1 flow; not used in v2.
ErrOutOfOrderST = errors.New("start timestamp out of order, ignoring")
ErrSTNewerThanSample = errors.New("ST is newer or the same as sample's timestamp, ignoring")
)
@@ -58,11 +59,14 @@ var (
// their own reference types.
type SeriesRef uint64
-// Appendable allows creating appenders.
+// Appendable allows creating Appender.
+//
+// WARNING: Work on AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026).
type Appendable interface {
- // Appender returns a new appender for the storage. The implementation
- // can choose whether or not to use the context, for deadlines or to check
- // for errors.
+ // Appender returns a new appender for the storage.
+ //
+ // Implementations CAN choose whether to use the context e.g. for deadlines,
+ // but it's not mandatory.
Appender(ctx context.Context) Appender
}
@@ -255,7 +259,13 @@ func (f QueryableFunc) Querier(mint, maxt int64) (Querier, error) {
return f(mint, maxt)
}
+// AppendOptions provides options for implementations of the Appender interface.
+//
+// WARNING: Work on AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026).
type AppendOptions struct {
+	// DiscardOutOfOrder tells the implementation that this append must not be out
+	// of order. An OOO append MUST be rejected with a storage.ErrOutOfOrderSample
+	// error.
DiscardOutOfOrder bool
}
@@ -264,10 +274,14 @@ type AppendOptions struct {
//
// Operations on the Appender interface are not goroutine-safe.
//
-// The order of samples appended via the Appender is preserved within each
-// series. I.e. samples are not reordered per timestamp, or by float/histogram
+// The order of samples appended via the Appender is preserved within each series.
+// I.e. timestamp order within a batch is not validated; samples are not reordered per timestamp or by float/histogram
// type.
+//
+// WARNING: Work on AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026).
type Appender interface {
+ AppenderTransaction
+
// Append adds a sample pair for the given series.
// An optional series reference can be provided to accelerate calls.
// A series reference number is returned which can be used to add further
@@ -278,16 +292,6 @@ type Appender interface {
// If the reference is 0 it must not be used for caching.
Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error)
- // Commit submits the collected samples and purges the batch. If Commit
- // returns a non-nil error, it also rolls back all modifications made in
- // the appender so far, as Rollback would do. In any case, an Appender
- // must not be used anymore after Commit has been called.
- Commit() error
-
- // Rollback rolls back all modifications made in the appender so far.
- // Appender has to be discarded after rollback.
- Rollback() error
-
// SetOptions configures the appender with specific append options such as
// discarding out-of-order samples even if out-of-order is enabled in the TSDB.
SetOptions(opts *AppendOptions)
@@ -301,8 +305,8 @@ type Appender interface {
// GetRef is an extra interface on Appenders used by downstream projects
// (e.g. Cortex) to avoid maintaining a parallel set of references.
type GetRef interface {
- // Returns reference number that can be used to pass to Appender.Append(),
- // and a set of labels that will not cause another copy when passed to Appender.Append().
+ // GetRef returns a reference number that can be used to pass to AppenderV2.Append(),
+ // and a set of labels that will not cause another copy when passed to AppenderV2.Append().
// 0 means the appender does not have a reference to this series.
// hash should be a hash of lset.
GetRef(lset labels.Labels, hash uint64) (SeriesRef, labels.Labels)
@@ -310,6 +314,8 @@ type GetRef interface {
// ExemplarAppender provides an interface for adding samples to exemplar storage, which
// within Prometheus is in-memory only.
+//
+// WARNING: Work on AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026).
type ExemplarAppender interface {
// AppendExemplar adds an exemplar for the given series labels.
// An optional reference number can be provided to accelerate calls.
@@ -326,6 +332,8 @@ type ExemplarAppender interface {
}
// HistogramAppender provides an interface for appending histograms to the storage.
+//
+// WARNING: Work on AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026).
type HistogramAppender interface {
// AppendHistogram adds a histogram for the given series labels. An
// optional reference number can be provided to accelerate calls. A
@@ -356,6 +364,8 @@ type HistogramAppender interface {
}
// MetadataUpdater provides an interface for associating metadata to stored series.
+//
+// WARNING: Work on AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026).
type MetadataUpdater interface {
// UpdateMetadata updates a metadata entry for the given series and labels.
// A series reference number is returned which can be used to modify the
@@ -368,6 +378,8 @@ type MetadataUpdater interface {
}
// StartTimestampAppender provides an interface for appending ST to storage.
+//
+// WARNING: Work on AppendableV2 is in progress. Appendable will be removed soon (ETA: Q2 2026).
type StartTimestampAppender interface {
// AppendSTZeroSample adds synthetic zero sample for the given st timestamp,
// which will be associated with given series, labels and the incoming
@@ -390,10 +402,10 @@ type SeriesSet interface {
Next() bool
// At returns full series. Returned series should be iterable even after Next is called.
At() Series
- // The error that iteration has failed with.
+ // Err returns the error that iteration has failed with.
// When an error occurs, set cannot continue to iterate.
Err() error
- // A collection of warnings for the whole set.
+ // Warnings returns a collection of warnings for the whole set.
// Warnings could be return even iteration has not failed with error.
Warnings() annotations.Annotations
}
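
For illustration only, not part of the patch: a minimal sketch of a caller using the v1 Appender and the AppendOptions documented above; with Commit and Rollback now coming from the embedded AppenderTransaction, calling code is unchanged.

package example // sketch only

import (
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendInOrder appends one float sample and rejects out-of-order data,
// even if the TSDB has out-of-order ingestion enabled.
func appendInOrder(app storage.Appender, ls labels.Labels, ts int64, v float64) error {
	app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true})
	if _, err := app.Append(0, ls, ts, v); err != nil {
		_ = app.Rollback() // Rollback comes from the embedded AppenderTransaction.
		return err
	}
	return app.Commit()
}
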
diff --git a/storage/interface_append.go b/storage/interface_append.go
new file mode 100644
index 0000000000..cc7045dbd5
--- /dev/null
+++ b/storage/interface_append.go
@@ -0,0 +1,182 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "context"
+ "errors"
+
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/metadata"
+)
+
+// AppendableV2 allows creating AppenderV2.
+type AppendableV2 interface {
+ // AppenderV2 returns a new appender for the storage.
+ //
+ // Implementations CAN choose whether to use the context e.g. for deadlines,
+ // but it's not mandatory.
+ AppenderV2(ctx context.Context) AppenderV2
+}
+
+// AOptions is a shorthand for AppendV2Options.
+// NOTE: AppendOptions is already used by the v1 Appender interface.
+type AOptions = AppendV2Options
+
+// AppendV2Options provides optional, auxiliary data and configuration for AppenderV2.Append.
+type AppendV2Options struct {
+	// MetricFamilyName (optional) provides the metric family name for the appended sample's
+	// series. If the client of the AppenderV2 has this information
+	// (e.g. from scrape), it's recommended to pass it to the appender.
+	//
+	// The provided string bytes are unsafe to reuse; they only live for the duration of the Append call.
+	//
+	// Some implementations use this to avoid slow and error-prone metric family detection for:
+	// * Storages that keep metadata per metric family (e.g. Prometheus metadata WAL/API/RW1)
+	// * Storages built strictly around complex types (e.g. OpenTelemetry Collector).
+ //
+	// NOTE(krajorama): An example use case is highlighted by OTLP ingestion: OTLP calculates the
+ // metric family name for all metrics and uses it for generating summary,
+ // histogram series by adding the magic suffixes. The metric family name is
+ // passed down to the appender in case the storage needs it for metadata updates.
+	// A known user of this is Mimir, which implements /api/v1/metadata and uses
+ // Remote-Write 1.0 for this. Might be removed later if no longer
+ // needed by any downstream project.
+	// NOTE(bwplotka): Long term, once Prometheus uses complex types at the storage level,
+	// MetricFamilyName can be removed, as it will always equal __name__.
+ MetricFamilyName string
+
+ // Metadata (optional) attached to the appended sample.
+ // Metadata strings are safe for reuse.
+	// IMPORTANT: Appender v1 only provided metadata updates. This field MUST be
+	// set (if known) even if it has not changed since the last iteration.
+	// This moves the responsibility for metadata storage options to the TSDB.
+ Metadata metadata.Metadata
+
+ // Exemplars (optional) attached to the appended sample.
+ // Exemplar slice MUST be sorted by Exemplar.TS.
+ // Exemplar slice is unsafe for reuse.
+ Exemplars []exemplar.Exemplar
+
+	// RejectOutOfOrder tells the implementation that this append must not be out
+	// of order. An OOO append MUST be rejected with a storage.ErrOutOfOrderSample
+	// error.
+ RejectOutOfOrder bool
+}
+
+// AppendPartialError represents an AppenderV2.Append error that tells
+// callers the sample was written but some auxiliary optional data (e.g. exemplars)
+// was not (or was only partially written).
+//
+// It's up to the caller to decide if it's an ignorable error or not, plus
+// it allows extra reporting (e.g. for Remote Write 2.0 X-Remote-Write-Written headers).
+type AppendPartialError struct {
+ ExemplarErrors []error
+}
+
+// Error returns combined error string.
+func (e *AppendPartialError) Error() string {
+ errs := errors.Join(e.ExemplarErrors...)
+ if errs == nil {
+ return ""
+ }
+ return errs.Error()
+}
+
+var _ error = &AppendPartialError{}
+
+// AppenderV2 provides appends against a storage for all types of samples.
+// It must be completed with a call to Commit or Rollback and must not be reused afterwards.
+//
+// Operations on the AppenderV2 interface are not goroutine-safe.
+//
+// The order of samples appended via the AppenderV2 is preserved within each series.
+// I.e. timestamp order within a batch is not validated; samples are not reordered per timestamp or by float/histogram
+// type.
+type AppenderV2 interface {
+ AppenderTransaction
+
+ // Append appends a sample and related exemplars, metadata, and start timestamp (st) to the storage.
+ //
+ // ref (optional) represents the stable ID for the given series identified by ls (excluding metadata).
+	// Callers MAY provide the ref to help the implementation avoid the ls -> ref computation; otherwise ref MUST be 0 (unknown).
+ //
+ // ls represents labels for the sample's series.
+ //
+ // st (optional) represents sample start timestamp. 0 means unknown. Implementations
+ // are responsible for any potential ST storage logic (e.g. ST zero injections).
+ //
+ // t represents sample timestamp.
+ //
+	// v, h, fh represent the sample value for each sample type.
+ // Callers MUST only provide one of the sample types (either v, h or fh).
+ // Implementations can detect the type of the sample with the following switch:
+ //
+ // switch {
+ // case fh != nil: It's a float histogram append.
+ // case h != nil: It's a histogram append.
+ // default: It's a float append.
+ // }
+	// TODO(bwplotka): We plan to experiment with using generics for complex sample types, but do it after we unify the interface (derisk) and before we add native summaries.
+ //
+	// Implementations MUST attempt to append the sample even if metadata, exemplar or (st) start timestamp appends fail.
+	// Implementations MAY return AppendPartialError as an error. Use errors.As to detect it.
+	// For a successful Append, implementations MUST return a valid SeriesRef that represents ls.
+	// NOTE(bwplotka): Given OTLP, native histograms and the relaxation of the requirement for
+	// type and unit suffixes in metric names, we start to hit cases where ls alone is not enough to
+	// identify the series (metadata matters). The current solution is to enable 'type-and-unit-label' features for those cases, but we may
+	// start to extend the id with metadata one day.
+ Append(ref SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts AppendV2Options) (SeriesRef, error)
+}
+
+// AppenderTransaction allows transactional appends.
+type AppenderTransaction interface {
+ // Commit submits the collected samples and purges the batch. If Commit
+ // returns a non-nil error, it also rolls back all modifications made in
+ // the appender so far, as Rollback would do. In any case, an Appender
+ // must not be used anymore after Commit has been called.
+ Commit() error
+
+ // Rollback rolls back all modifications made in the appender so far.
+ // Appender has to be discarded after rollback.
+ Rollback() error
+}
+
+// LimitedAppenderV1 is an Appender that only supports appending float and histogram samples.
+// This is to support migration to AppenderV2.
+// TODO(bwplotka): Remove once migration to AppenderV2 is fully complete.
+type LimitedAppenderV1 interface {
+ Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error)
+ AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error)
+}
+
+// AppenderV2AsLimitedV1 returns an appender that exposes AppenderV2 as a LimitedAppenderV1.
+// TODO(bwplotka): Remove once migration to AppenderV2 is fully complete.
+func AppenderV2AsLimitedV1(app AppenderV2) LimitedAppenderV1 {
+ return &limitedAppenderV1{AppenderV2: app}
+}
+
+type limitedAppenderV1 struct {
+ AppenderV2
+}
+
+func (a *limitedAppenderV1) Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error) {
+ return a.AppenderV2.Append(ref, l, 0, t, v, nil, nil, AppendV2Options{})
+}
+
+func (a *limitedAppenderV1) AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) {
+ return a.AppenderV2.Append(ref, l, 0, t, 0, h, fh, AppendV2Options{})
+}
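
For illustration only, not part of the patch: a minimal sketch of driving the AppenderV2 API defined above, including the documented AppendPartialError handling; the appender value would come from some AppendableV2 implementation.

package example // sketch only

import (
	"errors"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendOneFloat appends a single float sample through AppenderV2.
// st=0 (unknown), h=nil and fh=nil select the float branch of the documented switch.
func appendOneFloat(app storage.AppenderV2, ls labels.Labels, ts int64, v float64) error {
	_, err := app.Append(0, ls, 0, ts, v, nil, nil, storage.AOptions{
		RejectOutOfOrder: true, // An OOO append must fail with storage.ErrOutOfOrderSample.
	})
	var partial *storage.AppendPartialError
	switch {
	case err == nil:
	case errors.As(err, &partial):
		// The sample itself was written; only auxiliary data (e.g. exemplars) failed.
	default:
		_ = app.Rollback()
		return err
	}
	return app.Commit()
}
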
diff --git a/storage/interface_test.go b/storage/interface_test.go
index ba60721736..d28e5177e3 100644
--- a/storage/interface_test.go
+++ b/storage/interface_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/lazy.go b/storage/lazy.go
index fab974c286..2851ba7135 100644
--- a/storage/lazy.go
+++ b/storage/lazy.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/memoized_iterator.go b/storage/memoized_iterator.go
index 273b3caa1d..b248bca641 100644
--- a/storage/memoized_iterator.go
+++ b/storage/memoized_iterator.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/memoized_iterator_test.go b/storage/memoized_iterator_test.go
index 81e517f96e..1a1a5f7680 100644
--- a/storage/memoized_iterator_test.go
+++ b/storage/memoized_iterator_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/merge.go b/storage/merge.go
index f8ba1ab76a..a86a26891f 100644
--- a/storage/merge.go
+++ b/storage/merge.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/merge_test.go b/storage/merge_test.go
index 90f2097054..6e2daaeb3a 100644
--- a/storage/merge_test.go
+++ b/storage/merge_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/noop.go b/storage/noop.go
index f5092da7c7..751e6304db 100644
--- a/storage/noop.go
+++ b/storage/noop.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/azuread/azuread.go b/storage/remote/azuread/azuread.go
index ea2a816d94..fe0c4f9e21 100644
--- a/storage/remote/azuread/azuread.go
+++ b/storage/remote/azuread/azuread.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -103,6 +103,9 @@ type AzureADConfig struct { //nolint:revive // exported.
// Cloud is the Azure cloud in which the service is running. Example: AzurePublic/AzureGovernment/AzureChina.
Cloud string `yaml:"cloud,omitempty"`
+
+ // Scope is the custom OAuth 2.0 scope to request when acquiring tokens.
+ Scope string `yaml:"scope,omitempty"`
}
// azureADRoundTripper is used to store the roundtripper and the tokenprovider.
@@ -211,6 +214,12 @@ func (c *AzureADConfig) Validate() error {
}
}
+ if c.Scope != "" {
+ if matched, err := regexp.MatchString("^[\\w\\s:/.\\-]+$", c.Scope); err != nil || !matched {
+ return errors.New("the provided scope contains invalid characters")
+ }
+ }
+
return nil
}
@@ -360,14 +369,22 @@ func newSDKTokenCredential(clientOpts *azcore.ClientOptions, sdkConfig *SDKConfi
// newTokenProvider helps to fetch accessToken for different types of credential. This also takes care of
// refreshing the accessToken before expiry. This accessToken is attached to the Authorization header while making requests.
func newTokenProvider(cfg *AzureADConfig, cred azcore.TokenCredential) (*tokenProvider, error) {
- audience, err := getAudience(cfg.Cloud)
- if err != nil {
- return nil, err
+ var scopes []string
+
+	// Use custom scope if provided, otherwise fall back to the cloud-specific audience.
+ if cfg.Scope != "" {
+ scopes = []string{cfg.Scope}
+ } else {
+ audience, err := getAudience(cfg.Cloud)
+ if err != nil {
+ return nil, err
+ }
+ scopes = []string{audience}
}
tokenProvider := &tokenProvider{
credentialClient: cred,
- options: &policy.TokenRequestOptions{Scopes: []string{audience}},
+ options: &policy.TokenRequestOptions{Scopes: scopes},
}
return tokenProvider, nil
diff --git a/storage/remote/azuread/azuread_test.go b/storage/remote/azuread/azuread_test.go
index d581f0218a..857ecdba8a 100644
--- a/storage/remote/azuread/azuread_test.go
+++ b/storage/remote/azuread/azuread_test.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -198,6 +198,11 @@ func TestAzureAdConfig(t *testing.T) {
filename: "testdata/azuread_bad_workloadidentity_missingtenantid.yaml",
err: "must provide an Azure Workload Identity tenant_id in the Azure AD config",
},
+ // Invalid scope validation.
+ {
+ filename: "testdata/azuread_bad_scope_invalid.yaml",
+ err: "the provided scope contains invalid characters",
+ },
// Valid config with missing optionally cloud field.
{
filename: "testdata/azuread_good_cloudmissing.yaml",
@@ -222,6 +227,10 @@ func TestAzureAdConfig(t *testing.T) {
{
filename: "testdata/azuread_good_workloadidentity.yaml",
},
+ // Valid OAuth config with custom scope.
+ {
+ filename: "testdata/azuread_good_oauth_customscope.yaml",
+ },
}
for _, c := range cases {
_, err := loadAzureAdConfig(c.filename)
@@ -387,3 +396,87 @@ func getToken() azcore.AccessToken {
ExpiresOn: time.Now().Add(10 * time.Second),
}
}
+
+func TestCustomScopeSupport(t *testing.T) {
+ mockCredential := new(mockCredential)
+ testToken := &azcore.AccessToken{
+ Token: testTokenString,
+ ExpiresOn: testTokenExpiry(),
+ }
+
+ cases := []struct {
+ name string
+ cfg *AzureADConfig
+ expectedScope string
+ }{
+ {
+ name: "Custom scope with OAuth",
+ cfg: &AzureADConfig{
+ Cloud: "AzurePublic",
+ OAuth: &OAuthConfig{
+ ClientID: dummyClientID,
+ ClientSecret: dummyClientSecret,
+ TenantID: dummyTenantID,
+ },
+ Scope: "https://custom-app.com/.default",
+ },
+ expectedScope: "https://custom-app.com/.default",
+ },
+ {
+ name: "Custom scope with Managed Identity",
+ cfg: &AzureADConfig{
+ Cloud: "AzurePublic",
+ ManagedIdentity: &ManagedIdentityConfig{
+ ClientID: dummyClientID,
+ },
+ Scope: "https://monitor.azure.com//.default",
+ },
+ expectedScope: "https://monitor.azure.com//.default",
+ },
+ {
+ name: "Default scope fallback with OAuth",
+ cfg: &AzureADConfig{
+ Cloud: "AzurePublic",
+ OAuth: &OAuthConfig{
+ ClientID: dummyClientID,
+ ClientSecret: dummyClientSecret,
+ TenantID: dummyTenantID,
+ },
+ },
+ expectedScope: IngestionPublicAudience,
+ },
+ {
+ name: "Default scope fallback with China cloud",
+ cfg: &AzureADConfig{
+ Cloud: "AzureChina",
+ OAuth: &OAuthConfig{
+ ClientID: dummyClientID,
+ ClientSecret: dummyClientSecret,
+ TenantID: dummyTenantID,
+ },
+ },
+ expectedScope: IngestionChinaAudience,
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ // Set up mock to capture the actual scopes used
+ mockCredential.On("GetToken", mock.Anything, mock.MatchedBy(func(options policy.TokenRequestOptions) bool {
+ return len(options.Scopes) == 1 && options.Scopes[0] == c.expectedScope
+ })).Return(*testToken, nil).Once()
+
+ tokenProvider, err := newTokenProvider(c.cfg, mockCredential)
+ require.NoError(t, err)
+ require.NotNil(t, tokenProvider)
+
+ // Verify that the token provider uses the expected scope
+ token, err := tokenProvider.getAccessToken(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, testTokenString, token)
+
+ // Reset mock for next test
+ mockCredential.ExpectedCalls = nil
+ })
+ }
+}
diff --git a/storage/remote/azuread/testdata/azuread_bad_scope_invalid.yaml b/storage/remote/azuread/testdata/azuread_bad_scope_invalid.yaml
new file mode 100644
index 0000000000..2e5678d783
--- /dev/null
+++ b/storage/remote/azuread/testdata/azuread_bad_scope_invalid.yaml
@@ -0,0 +1,6 @@
+cloud: AzurePublic
+oauth:
+ client_id: 00000000-0000-0000-0000-000000000000
+ client_secret: Cl1ent$ecret!
+ tenant_id: 00000000-a12b-3cd4-e56f-000000000000
+scope: "invalid<>scope*chars"
diff --git a/storage/remote/azuread/testdata/azuread_good_oauth_customscope.yaml b/storage/remote/azuread/testdata/azuread_good_oauth_customscope.yaml
new file mode 100644
index 0000000000..f7adf8b0af
--- /dev/null
+++ b/storage/remote/azuread/testdata/azuread_good_oauth_customscope.yaml
@@ -0,0 +1,6 @@
+cloud: AzurePublic
+oauth:
+ client_id: 00000000-0000-0000-0000-000000000000
+ client_secret: Cl1ent$ecret!
+ tenant_id: 00000000-a12b-3cd4-e56f-000000000000
+scope: "https://custom-app.com/.default"
diff --git a/storage/remote/chunked.go b/storage/remote/chunked.go
index aa5addd6aa..b6cadf8691 100644
--- a/storage/remote/chunked.go
+++ b/storage/remote/chunked.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/chunked_test.go b/storage/remote/chunked_test.go
index 82ed866345..7493d734a3 100644
--- a/storage/remote/chunked_test.go
+++ b/storage/remote/chunked_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/client.go b/storage/remote/client.go
index c535ea3425..78405b378e 100644
--- a/storage/remote/client.go
+++ b/storage/remote/client.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -301,6 +301,9 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo
_ = httpResp.Body.Close()
}()
+	// NOTE(bwplotka): Only the PRW2 spec defines response HTTP headers. However, the spec does not block
+ // PRW1 from sending them too for reliability. Support this case.
+ //
// TODO(bwplotka): Pass logger and emit debug on error?
// Parsing error means there were some response header values we can't parse,
// we can continue handling.
diff --git a/storage/remote/client_test.go b/storage/remote/client_test.go
index 7fb670a24d..d5f126342a 100644
--- a/storage/remote/client_test.go
+++ b/storage/remote/client_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/codec.go b/storage/remote/codec.go
index 059d5e66ce..9f0fb7d92a 100644
--- a/storage/remote/codec.go
+++ b/storage/remote/codec.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go
index ba67ff33d9..e6e7813c7b 100644
--- a/storage/remote/codec_test.go
+++ b/storage/remote/codec_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/dial_context.go b/storage/remote/dial_context.go
index b842728e4c..f7a52442ed 100644
--- a/storage/remote/dial_context.go
+++ b/storage/remote/dial_context.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/dial_context_test.go b/storage/remote/dial_context_test.go
index 5a0cd7c88c..61b929401f 100644
--- a/storage/remote/dial_context_test.go
+++ b/storage/remote/dial_context_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/ewma.go b/storage/remote/ewma.go
index ea4472c494..27ba39c35d 100644
--- a/storage/remote/ewma.go
+++ b/storage/remote/ewma.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/googleiam/googleiam.go b/storage/remote/googleiam/googleiam.go
index acf3bd5a68..0ca7185ab7 100644
--- a/storage/remote/googleiam/googleiam.go
+++ b/storage/remote/googleiam/googleiam.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -19,6 +19,7 @@ import (
"context"
"fmt"
"net/http"
+ "os"
"golang.org/x/oauth2/google"
"google.golang.org/api/option"
@@ -41,7 +42,15 @@ func NewRoundTripper(cfg *Config, next http.RoundTripper) (http.RoundTripper, er
option.WithScopes(scopes),
}
if cfg.CredentialsFile != "" {
- opts = append(opts, option.WithCredentialsFile(cfg.CredentialsFile))
+ credBytes, err := os.ReadFile(cfg.CredentialsFile)
+ if err != nil {
+ return nil, fmt.Errorf("error reading Google credentials file: %w", err)
+ }
+ creds, err := google.CredentialsFromJSON(ctx, credBytes, scopes)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing Google credentials file: %w", err)
+ }
+ opts = append(opts, option.WithCredentials(creds))
} else {
creds, err := google.FindDefaultCredentials(ctx, scopes)
if err != nil {
diff --git a/storage/remote/intern.go b/storage/remote/intern.go
index 34edeb370e..193cdf96db 100644
--- a/storage/remote/intern.go
+++ b/storage/remote/intern.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/intern_test.go b/storage/remote/intern_test.go
index f992b2ada6..fd0ebed16f 100644
--- a/storage/remote/intern_test.go
+++ b/storage/remote/intern_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/max_timestamp.go b/storage/remote/max_timestamp.go
index bb67d9bb98..61dbda6bc6 100644
--- a/storage/remote/max_timestamp.go
+++ b/storage/remote/max_timestamp.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/metadata_watcher.go b/storage/remote/metadata_watcher.go
index b1f98038fc..f231691e30 100644
--- a/storage/remote/metadata_watcher.go
+++ b/storage/remote/metadata_watcher.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/metadata_watcher_test.go b/storage/remote/metadata_watcher_test.go
index 6c4608b3dd..f911a145bc 100644
--- a/storage/remote/metadata_watcher_test.go
+++ b/storage/remote/metadata_watcher_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go b/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go
index 753112cf82..a1a17fe82b 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/combined_appender_test.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/context.go b/storage/remote/otlptranslator/prometheusremotewrite/context.go
index 5c6dd20f18..db3c180036 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/context.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/context.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/context_test.go b/storage/remote/otlptranslator/prometheusremotewrite/context_test.go
index 4b47964313..8aa24a8110 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/context_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/context_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go
index aa54433836..7e3c9d5021 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go
index 893fe97ec4..b06bf3d416 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go
index c93a00db76..db7c0e1275 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go
index 22e654ab9c..644ec2e01b 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
index f43e4964b1..41de42548a 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go
index e409b4e8b5..8eb0029dd7 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go
index 8f30dbb6b6..e3814ce095 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go
index 32435020c5..77bc212c76 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go b/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go
index 49f96e0019..0292790156 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/testutil_test.go b/storage/remote/otlptranslator/prometheusremotewrite/testutil_test.go
index 187127fcb2..5194925cfe 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/testutil_test.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/testutil_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go
index 73a4896f19..2b26179e58 100644
--- a/storage/remote/queue_manager.go
+++ b/storage/remote/queue_manager.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -2299,7 +2299,7 @@ func (b *batchMetricsUpdater) recordRetry(sc sendBatchContext) {
b.metrics.retriedHistogramsTotal.Add(float64(sc.histogramCount))
}
-// createSpan creates and configures an OpenTelemetry span for batch sending.
+// createBatchSpan creates and configures an OpenTelemetry span for batch sending.
func createBatchSpan(ctx context.Context, sc sendBatchContext, remoteName, remoteURL string, try int) (context.Context, trace.Span) {
ctx, span := otel.Tracer("").Start(ctx, "Remote Send Batch")
span.SetAttributes(
diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go
index 704a5628d3..f1462b4406 100644
--- a/storage/remote/queue_manager_test.go
+++ b/storage/remote/queue_manager_test.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/read.go b/storage/remote/read.go
index e21d1538f5..70b55980b8 100644
--- a/storage/remote/read.go
+++ b/storage/remote/read.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/read_handler.go b/storage/remote/read_handler.go
index 3e315a6157..a628dd34ff 100644
--- a/storage/remote/read_handler.go
+++ b/storage/remote/read_handler.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go
index 355973e4be..a59c940f30 100644
--- a/storage/remote/read_handler_test.go
+++ b/storage/remote/read_handler_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -15,7 +15,6 @@ package remote
import (
"bytes"
- "context"
"errors"
"io"
"net/http"
@@ -28,6 +27,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/promql/promqltest"
@@ -64,13 +64,19 @@ func TestSampledReadEndpoint(t *testing.T) {
matcher3, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_histogram_metric1")
require.NoError(t, err)
+ matcher4, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test_nhcb_metric1")
+ require.NoError(t, err)
+
query1, err := ToQuery(0, 1, []*labels.Matcher{matcher1, matcher2}, &storage.SelectHints{Step: 0, Func: "avg"})
require.NoError(t, err)
query2, err := ToQuery(0, 1, []*labels.Matcher{matcher3, matcher2}, &storage.SelectHints{Step: 0, Func: "avg"})
require.NoError(t, err)
- req := &prompb.ReadRequest{Queries: []*prompb.Query{query1, query2}}
+ query3, err := ToQuery(0, 1, []*labels.Matcher{matcher4, matcher2}, &storage.SelectHints{Step: 0, Func: "avg"})
+ require.NoError(t, err)
+
+ req := &prompb.ReadRequest{Queries: []*prompb.Query{query1, query2, query3}}
data, err := proto.Marshal(req)
require.NoError(t, err)
@@ -97,7 +103,7 @@ func TestSampledReadEndpoint(t *testing.T) {
err = proto.Unmarshal(uncompressed, &resp)
require.NoError(t, err)
- require.Len(t, resp.Results, 2, "Expected 2 results.")
+ require.Len(t, resp.Results, 3, "Expected 3 results.")
require.Equal(t, &prompb.QueryResult{
Timeseries: []*prompb.TimeSeries{
@@ -129,6 +135,33 @@ func TestSampledReadEndpoint(t *testing.T) {
},
},
}, resp.Results[1])
+
+ require.Equal(t, &prompb.QueryResult{
+ Timeseries: []*prompb.TimeSeries{
+ {
+ Labels: []prompb.Label{
+ {Name: "__name__", Value: "test_nhcb_metric1"},
+ {Name: "b", Value: "c"},
+ {Name: "baz", Value: "qux"},
+ {Name: "d", Value: "e"},
+ },
+ Histograms: []prompb.Histogram{{
+ // We cannot use prompb.FromFloatHistogram as that's one
+ // of the things we are testing here.
+ Schema: histogram.CustomBucketsSchema,
+ Count: &prompb.Histogram_CountFloat{CountFloat: 5},
+ Sum: 18.4,
+ ZeroCount: &prompb.Histogram_ZeroCountFloat{},
+ PositiveSpans: []prompb.BucketSpan{
+ {Offset: 0, Length: 2},
+ {Offset: 1, Length: 2},
+ },
+ PositiveCounts: []float64{1, 2, 1, 1},
+ CustomValues: []float64{0, 1, 2, 3, 4},
+ }},
+ },
+ },
+ }, resp.Results[2])
}
func BenchmarkStreamReadEndpoint(b *testing.B) {
@@ -433,10 +466,17 @@ func TestStreamReadEndpoint(t *testing.T) {
func addNativeHistogramsToTestSuite(t *testing.T, storage *teststorage.TestStorage, n int) {
lbls := labels.FromStrings("__name__", "test_histogram_metric1", "baz", "qux")
- app := storage.Appender(context.TODO())
+ app := storage.Appender(t.Context())
for i, fh := range tsdbutil.GenerateTestFloatHistograms(n) {
_, err := app.AppendHistogram(0, lbls, int64(i)*int64(60*time.Second/time.Millisecond), nil, fh)
require.NoError(t, err)
}
+
+ lbls = labels.FromStrings("__name__", "test_nhcb_metric1", "baz", "qux")
+ for i, fh := range tsdbutil.GenerateTestCustomBucketsFloatHistograms(n) {
+ _, err := app.AppendHistogram(0, lbls, int64(i)*int64(60*time.Second/time.Millisecond), nil, fh)
+ require.NoError(t, err)
+ }
+
require.NoError(t, app.Commit())
}
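The extra query above exercises native histograms with custom buckets (NHCB) over sampled remote read. For reference, a sketch of a custom-buckets float histogram shaped like the series the endpoint is expected to return; the concrete counts and boundaries are assumptions chosen to line up with the asserted prompb.Histogram rather than the exact output of GenerateTestCustomBucketsFloatHistograms, and imports follow the packages already used in this file.

// appendNHCB appends one custom-buckets (NHCB) float histogram to any storage.Appender.
func appendNHCB(app storage.Appender, lbls labels.Labels, ts int64) error {
	fh := &histogram.FloatHistogram{
		Schema: histogram.CustomBucketsSchema,
		Count:  5,
		Sum:    18.4,
		PositiveSpans: []histogram.Span{
			{Offset: 0, Length: 2},
			{Offset: 1, Length: 2},
		},
		PositiveBuckets: []float64{1, 2, 1, 1},
		// Upper boundaries of the custom buckets; these surface as CustomValues in prompb.
		CustomValues: []float64{0, 1, 2, 3, 4},
	}
	_, err := app.AppendHistogram(0, lbls, ts, nil, fh)
	return err
}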
diff --git a/storage/remote/read_test.go b/storage/remote/read_test.go
index da0b7f81d4..49f29d9001 100644
--- a/storage/remote/read_test.go
+++ b/storage/remote/read_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/stats.go b/storage/remote/stats.go
index 89d00ffc31..3a1bfed805 100644
--- a/storage/remote/stats.go
+++ b/storage/remote/stats.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/storage.go b/storage/remote/storage.go
index 648c91c955..f482597249 100644
--- a/storage/remote/storage.go
+++ b/storage/remote/storage.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/storage_test.go b/storage/remote/storage_test.go
index f567c7a80b..416468cf79 100644
--- a/storage/remote/storage_test.go
+++ b/storage/remote/storage_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/write.go b/storage/remote/write.go
index 1a036c1795..92f447d624 100644
--- a/storage/remote/write.go
+++ b/storage/remote/write.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go
index b95c85b6c4..c29896b843 100644
--- a/storage/remote/write_handler.go
+++ b/storage/remote/write_handler.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -96,6 +96,10 @@ func isHistogramValidationError(err error) bool {
}
// Store implements remoteapi.writeStorage interface.
+// TODO(bwplotka): Improve the remoteapi.Store API. Right now it's confusing whether PRWv1 flows should use WriteResponse or not.
+// If it's not filled, it will be "confirmed zero", which caused partial error reporting on the client side in the past.
+// A temporary fix was made to only use WriteResponse stats for PRWv2 (see https://github.com/prometheus/client_golang/pull/1927),
+// but a better approach would be to only confirm when explicit stats were injected.
func (h *writeHandler) Store(r *http.Request, msgType remoteapi.WriteMessageType) (*remoteapi.WriteResponse, error) {
// Store receives request with decompressed content in body.
body, err := io.ReadAll(r.Body)
diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go
index afc0d985ff..ac75d56095 100644
--- a/storage/remote/write_handler_test.go
+++ b/storage/remote/write_handler_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -1608,3 +1608,74 @@ func TestHistogramsReduction(t *testing.T) {
})
}
}
+
+// Regression test for https://github.com/prometheus/prometheus/issues/17659
+func TestRemoteWriteHandler_ResponseStats(t *testing.T) {
+ payloadV1, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy")
+ require.NoError(t, err)
+ payloadV2, _, _, err := buildV2WriteRequest(nil, writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
+ require.NoError(t, err)
+
+ for _, tt := range []struct {
+ msgType remoteapi.WriteMessageType
+ payload []byte
+ forceInjectHeaders bool
+ expectHeaders bool
+ }{
+ {
+ msgType: remoteapi.WriteV1MessageType,
+ payload: payloadV1,
+ },
+ {
+ msgType: remoteapi.WriteV1MessageType,
+ payload: payloadV1,
+ forceInjectHeaders: true,
+ expectHeaders: true,
+ },
+ {
+ msgType: remoteapi.WriteV2MessageType,
+ payload: payloadV2,
+ expectHeaders: true,
+ },
+ } {
+ t.Run(fmt.Sprintf("msg=%v/force-inject-headers=%v", tt.msgType, tt.forceInjectHeaders), func(t *testing.T) {
+ // Setup server side.
+ appendable := &mockAppendable{}
+ handler := NewWriteHandler(
+ promslog.NewNopLogger(),
+ nil,
+ appendable,
+ []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType},
+ false,
+ false,
+ false,
+ )
+
+ if tt.forceInjectHeaders {
+ base := handler
+ handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+					// Inject response header. This simulates a PRWv1 server that uses PRWv2 response headers
+					// to confirm written samples. This is not against the spec, and we support it.
+ w.Header().Set(rw20WrittenSamplesHeader, "2")
+
+ base.ServeHTTP(w, r)
+ })
+ }
+
+ srv := httptest.NewServer(handler)
+
+ // Send message and do the parse response flow.
+ c := &Client{Client: srv.Client(), urlString: srv.URL, timeout: 5 * time.Minute, writeProtoMsg: tt.msgType}
+
+ stats, err := c.Store(t.Context(), tt.payload, 0)
+ require.NoError(t, err)
+
+ if tt.expectHeaders {
+ require.True(t, stats.Confirmed)
+ require.Equal(t, len(appendable.samples), stats.Samples)
+ } else {
+ require.False(t, stats.Confirmed)
+ }
+ })
+ }
+}
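The regression test above pins down when Client.Store reports confirmed write stats. A sketch of how a caller in this package might act on them; verifyConfirmed and expectedSamples are placeholders, not part of the patch.

// verifyConfirmed only trusts the written-samples counter when the server
// explicitly confirmed it (PRWv2, or a PRWv1 server injecting the v2 response headers).
func verifyConfirmed(ctx context.Context, c *Client, payload []byte, expectedSamples int) error {
	stats, err := c.Store(ctx, payload, 0)
	if err != nil {
		return err
	}
	if stats.Confirmed && stats.Samples != expectedSamples {
		return fmt.Errorf("server confirmed %d written samples, expected %d", stats.Samples, expectedSamples)
	}
	return nil
}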
diff --git a/storage/remote/write_test.go b/storage/remote/write_test.go
index 2bf317465c..099a2f1cab 100644
--- a/storage/remote/write_test.go
+++ b/storage/remote/write_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/secondary.go b/storage/secondary.go
index 1cf8024b65..a071ddcfa3 100644
--- a/storage/secondary.go
+++ b/storage/secondary.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/series.go b/storage/series.go
index 8d0a4e5494..ce989ef846 100644
--- a/storage/series.go
+++ b/storage/series.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/storage/series_test.go b/storage/series_test.go
index 1ade558648..954d62f1b3 100644
--- a/storage/series_test.go
+++ b/storage/series_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/template/template.go b/template/template.go
index ea7e93b18c..0ea7382ed3 100644
--- a/template/template.go
+++ b/template/template.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -36,6 +36,7 @@ import (
"golang.org/x/text/language"
"github.com/prometheus/prometheus/promql"
+ "github.com/prometheus/prometheus/util/features"
"github.com/prometheus/prometheus/util/strutil"
)
@@ -413,3 +414,29 @@ func floatToTime(v float64) (*time.Time, error) {
t := model.TimeFromUnixNano(int64(timestamp)).Time().UTC()
return &t, nil
}
+
+// templateFunctions returns a representative funcMap with all available template functions.
+// This is used to discover which functions are available for feature registration.
+func templateFunctions() text_template.FuncMap {
+ // Create a dummy expander to get the function map.
+ expander := NewTemplateExpander(
+ context.Background(),
+ "",
+ "",
+ nil,
+ 0,
+ nil,
+ &url.URL{},
+ nil,
+ )
+ return expander.funcMap
+}
+
+// RegisterFeatures registers all template functions with the feature registry.
+func RegisterFeatures(r features.Collector) {
+ // Get all function names from the template function map.
+ funcMap := templateFunctions()
+ for name := range funcMap {
+ r.Enable(features.TemplatingFunctions, name)
+ }
+}
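RegisterFeatures above walks the template funcMap and enables each function name under features.TemplatingFunctions. As a rough illustration, a toy collector that records what such a registration would see; whether this matches the real features.Collector interface beyond the Enable call used above is an assumption.

// recordingCollector is a stand-in that only remembers enabled names.
// The Enable signature is inferred from the loop in RegisterFeatures.
type recordingCollector struct {
	enabled map[string][]string
}

func (c *recordingCollector) Enable(feature, name string) {
	if c.enabled == nil {
		c.enabled = make(map[string][]string)
	}
	c.enabled[feature] = append(c.enabled[feature], name)
}

With such a collector, RegisterFeatures(&recordingCollector{}) would record every templating function (humanize, reReplaceAll, and so on) under the templating-functions feature.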
diff --git a/template/template_amd64_test.go b/template/template_amd64_test.go
index 913a7e2b81..15db39b646 100644
--- a/template/template_amd64_test.go
+++ b/template/template_amd64_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/template/template_test.go b/template/template_test.go
index f3348caae6..073300a39b 100644
--- a/template/template_test.go
+++ b/template/template_test.go
@@ -1,4 +1,4 @@
-// Copyright 2014 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tracing/tracing.go b/tracing/tracing.go
index 91ac48007b..b35673b2b4 100644
--- a/tracing/tracing.go
+++ b/tracing/tracing.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tracing/tracing_test.go b/tracing/tracing_test.go
index e735e1a18a..0840abafdf 100644
--- a/tracing/tracing_test.go
+++ b/tracing/tracing_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go
index 5c9774cd58..7de2ed678f 100644
--- a/tsdb/agent/db.go
+++ b/tsdb/agent/db.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -84,6 +84,15 @@ type Options struct {
// OutOfOrderTimeWindow specifies how much out of order is allowed, if any.
OutOfOrderTimeWindow int64
+
+	// EnableSTAsZeroSample represents the 'created-timestamp-zero-ingestion' feature flag.
+	// If true and ST is non-empty and earlier than the sample timestamp, ST will be
+	// stored as a zero sample before the actual sample.
+	//
+	// The zero sample is best-effort; only a debug log is emitted on failure.
+ // NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
+ // is implemented.
+ EnableSTAsZeroSample bool
}
// DefaultOptions used for the WAL storage. They are reasonable for setups using
@@ -233,8 +242,9 @@ type DB struct {
wal *wlog.WL
locker *tsdbutil.DirLocker
- appenderPool sync.Pool
- bufPool sync.Pool
+ appenderPool sync.Pool
+ appenderV2Pool sync.Pool
+ bufPool sync.Pool
// These pools are only used during WAL replay and are reset at the end.
// NOTE: Adjust resetWALReplayResources() upon changes to the pools.
@@ -303,12 +313,26 @@ func Open(l *slog.Logger, reg prometheus.Registerer, rs *remote.Storage, dir str
db.appenderPool.New = func() any {
return &appender{
- DB: db,
- pendingSeries: make([]record.RefSeries, 0, 100),
- pendingSamples: make([]record.RefSample, 0, 100),
- pendingHistograms: make([]record.RefHistogramSample, 0, 100),
- pendingFloatHistograms: make([]record.RefFloatHistogramSample, 0, 100),
- pendingExamplars: make([]record.RefExemplar, 0, 10),
+ appenderBase: appenderBase{
+ DB: db,
+ pendingSeries: make([]record.RefSeries, 0, 100),
+ pendingSamples: make([]record.RefSample, 0, 100),
+ pendingHistograms: make([]record.RefHistogramSample, 0, 100),
+ pendingFloatHistograms: make([]record.RefFloatHistogramSample, 0, 100),
+ pendingExamplars: make([]record.RefExemplar, 0, 10),
+ },
+ }
+ }
+ db.appenderV2Pool.New = func() any {
+ return &appenderV2{
+ appenderBase: appenderBase{
+ DB: db,
+ pendingSeries: make([]record.RefSeries, 0, 100),
+ pendingSamples: make([]record.RefSample, 0, 100),
+ pendingHistograms: make([]record.RefHistogramSample, 0, 100),
+ pendingFloatHistograms: make([]record.RefFloatHistogramSample, 0, 100),
+ pendingExamplars: make([]record.RefExemplar, 0, 10),
+ },
}
}
@@ -777,9 +801,8 @@ func (db *DB) Close() error {
return tsdb_errors.NewMulti(db.locker.Release(), db.wal.Close()).Err()
}
-type appender struct {
+type appenderBase struct {
*DB
- hints *storage.AppendOptions
pendingSeries []record.RefSeries
pendingSamples []record.RefSample
@@ -800,6 +823,12 @@ type appender struct {
floatHistogramSeries []*memSeries
}
+type appender struct {
+ appenderBase
+
+ hints *storage.AppendOptions
+}
+
func (a *appender) SetOptions(opts *storage.AppendOptions) {
a.hints = opts
}
@@ -810,26 +839,10 @@ func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v flo
series := a.series.GetByID(headRef)
if series == nil {
- // Ensure no empty or duplicate labels have gotten through. This mirrors the
- // equivalent validation code in the TSDB's headAppender.
- l = l.WithoutEmpty()
- if l.IsEmpty() {
- return 0, fmt.Errorf("empty labelset: %w", tsdb.ErrInvalidSample)
- }
-
- if lbl, dup := l.HasDuplicateLabelNames(); dup {
- return 0, fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidSample)
- }
-
- var created bool
- series, created = a.getOrCreate(l)
- if created {
- a.pendingSeries = append(a.pendingSeries, record.RefSeries{
- Ref: series.ref,
- Labels: l,
- })
-
- a.metrics.numActiveSeries.Inc()
+ var err error
+ series, err = a.getOrCreate(l)
+ if err != nil {
+ return 0, err
}
}
@@ -853,18 +866,35 @@ func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v flo
return storage.SeriesRef(series.ref), nil
}
-func (a *appender) getOrCreate(l labels.Labels) (series *memSeries, created bool) {
+func (a *appenderBase) getOrCreate(l labels.Labels) (series *memSeries, err error) {
+ // Ensure no empty or duplicate labels have gotten through. This mirrors the
+ // equivalent validation code in the TSDB's headAppender.
+ l = l.WithoutEmpty()
+ if l.IsEmpty() {
+ return nil, fmt.Errorf("empty labelset: %w", tsdb.ErrInvalidSample)
+ }
+
+ if lbl, dup := l.HasDuplicateLabelNames(); dup {
+ return nil, fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidSample)
+ }
+
hash := l.Hash()
series = a.series.GetByHash(hash, l)
if series != nil {
- return series, false
+ return series, nil
}
ref := chunks.HeadSeriesRef(a.nextRef.Inc())
series = &memSeries{ref: ref, lset: l, lastTs: math.MinInt64}
a.series.Set(hash, series)
- return series, true
+
+ a.pendingSeries = append(a.pendingSeries, record.RefSeries{
+ Ref: series.ref,
+ Labels: l,
+ })
+ a.metrics.numActiveSeries.Inc()
+ return series, nil
}
func (a *appender) AppendExemplar(ref storage.SeriesRef, _ labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
@@ -879,47 +909,53 @@ func (a *appender) AppendExemplar(ref storage.SeriesRef, _ labels.Labels, e exem
// Ensure no empty labels have gotten through.
e.Labels = e.Labels.WithoutEmpty()
- if lbl, dup := e.Labels.HasDuplicateLabelNames(); dup {
- return 0, fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidExemplar)
- }
-
- // Exemplar label length does not include chars involved in text rendering such as quotes
- // equals sign, or commas. See definition of const ExemplarMaxLabelLength.
- labelSetLen := 0
- err := e.Labels.Validate(func(l labels.Label) error {
- labelSetLen += utf8.RuneCountInString(l.Name)
- labelSetLen += utf8.RuneCountInString(l.Value)
-
- if labelSetLen > exemplar.ExemplarMaxLabelSetLength {
- return storage.ErrExemplarLabelLength
+ if err := a.validateExemplar(s.ref, e); err != nil {
+ if errors.Is(err, storage.ErrDuplicateExemplar) {
+ // Duplicate, don't return an error but don't accept the exemplar.
+ return 0, nil
}
- return nil
- })
- if err != nil {
return 0, err
}
- // Check for duplicate vs last stored exemplar for this series, and discard those.
- // Otherwise, record the current exemplar as the latest.
- // Prometheus' TSDB returns 0 when encountering duplicates, so we do the same here.
- prevExemplar := a.series.GetLatestExemplar(s.ref)
- if prevExemplar != nil && prevExemplar.Equals(e) {
- // Duplicate, don't return an error but don't accept the exemplar.
- return 0, nil
- }
a.series.SetLatestExemplar(s.ref, &e)
-
a.pendingExamplars = append(a.pendingExamplars, record.RefExemplar{
Ref: s.ref,
T: e.Ts,
V: e.Value,
Labels: e.Labels,
})
-
a.metrics.totalAppendedExemplars.Inc()
return storage.SeriesRef(s.ref), nil
}
+func (a *appenderBase) validateExemplar(ref chunks.HeadSeriesRef, e exemplar.Exemplar) error {
+ if lbl, dup := e.Labels.HasDuplicateLabelNames(); dup {
+ return fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidExemplar)
+ }
+
+ // Exemplar label length does not include chars involved in text rendering such as quotes
+ // equals sign, or commas. See definition of const ExemplarMaxLabelLength.
+ labelSetLen := 0
+ if err := e.Labels.Validate(func(l labels.Label) error {
+ labelSetLen += utf8.RuneCountInString(l.Name)
+ labelSetLen += utf8.RuneCountInString(l.Value)
+ if labelSetLen > exemplar.ExemplarMaxLabelSetLength {
+ return storage.ErrExemplarLabelLength
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ // Check for duplicate vs last stored exemplar for this series, and discard those.
+ // Otherwise, record the current exemplar as the latest.
+ // Prometheus' TSDB returns 0 when encountering duplicates, so we do the same here.
+ prevExemplar := a.series.GetLatestExemplar(ref)
+ if prevExemplar != nil && prevExemplar.Equals(e) {
+ return storage.ErrDuplicateExemplar
+ }
+ return nil
+}
+
func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if h != nil {
if err := h.Validate(); err != nil {
@@ -938,26 +974,10 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int
series := a.series.GetByID(headRef)
if series == nil {
- // Ensure no empty or duplicate labels have gotten through. This mirrors the
- // equivalent validation code in the TSDB's headAppender.
- l = l.WithoutEmpty()
- if l.IsEmpty() {
- return 0, fmt.Errorf("empty labelset: %w", tsdb.ErrInvalidSample)
- }
-
- if lbl, dup := l.HasDuplicateLabelNames(); dup {
- return 0, fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidSample)
- }
-
- var created bool
- series, created = a.getOrCreate(l)
- if created {
- a.pendingSeries = append(a.pendingSeries, record.RefSeries{
- Ref: series.ref,
- Labels: l,
- })
-
- a.metrics.numActiveSeries.Inc()
+ var err error
+ series, err = a.getOrCreate(l)
+ if err != nil {
+ return 0, err
}
}
@@ -1014,24 +1034,10 @@ func (a *appender) AppendHistogramSTZeroSample(ref storage.SeriesRef, l labels.L
series := a.series.GetByID(chunks.HeadSeriesRef(ref))
if series == nil {
- // Ensure no empty labels have gotten through.
- l = l.WithoutEmpty()
- if l.IsEmpty() {
- return 0, fmt.Errorf("empty labelset: %w", tsdb.ErrInvalidSample)
- }
-
- if lbl, dup := l.HasDuplicateLabelNames(); dup {
- return 0, fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidSample)
- }
-
- var created bool
- series, created = a.getOrCreate(l)
- if created {
- a.pendingSeries = append(a.pendingSeries, record.RefSeries{
- Ref: series.ref,
- Labels: l,
- })
- a.metrics.numActiveSeries.Inc()
+ var err error
+ series, err = a.getOrCreate(l)
+ if err != nil {
+ return 0, err
}
}
@@ -1046,6 +1052,9 @@ func (a *appender) AppendHistogramSTZeroSample(ref storage.SeriesRef, l labels.L
// discard the sample if it's out of order.
return 0, storage.ErrOutOfOrderST
}
+	// NOTE(bwplotka): This is a bug, as we "commit" the pending sample TS as the WAL's last TS. It was likely done
+	// to satisfy the incorrect TestDBStartTimestampSamplesIngestion test. We are leaving it as-is given the planned removal
+	// of AppenderV1 as per https://github.com/prometheus/prometheus/issues/17632.
series.lastTs = st
switch {
@@ -1077,25 +1086,11 @@ func (a *appender) AppendSTZeroSample(ref storage.SeriesRef, l labels.Labels, t,
series := a.series.GetByID(chunks.HeadSeriesRef(ref))
if series == nil {
- l = l.WithoutEmpty()
- if l.IsEmpty() {
- return 0, fmt.Errorf("empty labelset: %w", tsdb.ErrInvalidSample)
+ var err error
+ series, err = a.getOrCreate(l)
+ if err != nil {
+ return 0, err
}
-
- if lbl, dup := l.HasDuplicateLabelNames(); dup {
- return 0, fmt.Errorf(`label name "%s" is not unique: %w`, lbl, tsdb.ErrInvalidSample)
- }
-
- newSeries, created := a.getOrCreate(l)
- if created {
- a.pendingSeries = append(a.pendingSeries, record.RefSeries{
- Ref: newSeries.ref,
- Labels: l,
- })
- a.metrics.numActiveSeries.Inc()
- }
-
- series = newSeries
}
series.Lock()
@@ -1110,6 +1105,9 @@ func (a *appender) AppendSTZeroSample(ref storage.SeriesRef, l labels.Labels, t,
// discard the sample if it's out of order.
return 0, storage.ErrOutOfOrderST
}
+	// NOTE(bwplotka): This is a bug, as we "commit" the pending sample TS as the WAL's last TS. It was likely done
+	// to satisfy the incorrect TestDBStartTimestampSamplesIngestion test. We are leaving it as-is given the planned removal
+	// of AppenderV1 as per https://github.com/prometheus/prometheus/issues/17632.
series.lastTs = st
// NOTE: always modify pendingSamples and sampleSeries together.
@@ -1127,12 +1125,21 @@ func (a *appender) AppendSTZeroSample(ref storage.SeriesRef, l labels.Labels, t,
// Commit submits the collected samples and purges the batch.
func (a *appender) Commit() error {
+ defer a.appenderPool.Put(a)
+ return a.commit()
+}
+
+func (a *appender) Rollback() error {
+ defer a.appenderPool.Put(a)
+ return a.rollback()
+}
+
+func (a *appenderBase) commit() error {
if err := a.log(); err != nil {
return err
}
a.clearData()
- a.appenderPool.Put(a)
if a.writeNotified != nil {
a.writeNotified.Notify()
@@ -1141,7 +1148,7 @@ func (a *appender) Commit() error {
}
// log logs all pending data to the WAL.
-func (a *appender) log() error {
+func (a *appenderBase) log() error {
a.mtx.RLock()
defer a.mtx.RUnlock()
@@ -1235,7 +1242,7 @@ func (a *appender) log() error {
}
// clearData clears all pending data.
-func (a *appender) clearData() {
+func (a *appenderBase) clearData() {
a.pendingSeries = a.pendingSeries[:0]
a.pendingSamples = a.pendingSamples[:0]
a.pendingHistograms = a.pendingHistograms[:0]
@@ -1246,7 +1253,7 @@ func (a *appender) clearData() {
a.floatHistogramSeries = a.floatHistogramSeries[:0]
}
-func (a *appender) Rollback() error {
+func (a *appenderBase) rollback() error {
// Series are created in-memory regardless of rollback. This means we must
// log them to the WAL, otherwise subsequent commits may reference a series
// which was never written to the WAL.
@@ -1255,12 +1262,11 @@ func (a *appender) Rollback() error {
}
a.clearData()
- a.appenderPool.Put(a)
return nil
}
// logSeries logs only pending series records to the WAL.
-func (a *appender) logSeries() error {
+func (a *appenderBase) logSeries() error {
a.mtx.RLock()
defer a.mtx.RUnlock()
@@ -1283,7 +1289,7 @@ func (a *appender) logSeries() error {
// minValidTime returns the minimum timestamp that a sample can have
// and is needed for preventing underflow.
-func (a *appender) minValidTime(lastTs int64) int64 {
+func (a *appenderBase) minValidTime(lastTs int64) int64 {
if lastTs < math.MinInt64+a.opts.OutOfOrderTimeWindow {
return math.MinInt64
}
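Alongside the appender split above, Options gains EnableSTAsZeroSample. A sketch of opening the agent WAL storage with it turned on; the arguments other than the option itself are placeholders, the signature follows the hunk above, and imports follow those already used by this file.

// openAgentDB is a sketch: it enables best-effort ST zero-sample ingestion.
func openAgentDB(logger *slog.Logger, rs *remote.Storage, dir string) (*DB, error) {
	opts := DefaultOptions()
	opts.EnableSTAsZeroSample = true // store ST as a zero sample before the actual sample
	return Open(logger, prometheus.NewRegistry(), rs, dir, opts)
}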
diff --git a/tsdb/agent/db_append_v2.go b/tsdb/agent/db_append_v2.go
new file mode 100644
index 0000000000..bb2601e1e3
--- /dev/null
+++ b/tsdb/agent/db_append_v2.go
@@ -0,0 +1,218 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package agent
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/value"
+ "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/tsdb/chunks"
+ "github.com/prometheus/prometheus/tsdb/record"
+)
+
+// AppenderV2 implements storage.AppenderV2.
+func (db *DB) AppenderV2(context.Context) storage.AppenderV2 {
+ return db.appenderV2Pool.Get().(storage.AppenderV2)
+}
+
+type appenderV2 struct {
+ appenderBase
+}
+
+// Append appends a pending sample to the agent's DB.
+// TODO: Wire metadata in the Agent's appender.
+func (a *appenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
+ var (
+ // Avoid shadowing err variables for reliability.
+ valErr, partialErr error
+ sampleMetricType = sampleMetricTypeFloat
+ isStale bool
+ )
+ // Fail fast on incorrect histograms.
+ switch {
+ case fh != nil:
+ sampleMetricType = sampleMetricTypeHistogram
+ valErr = fh.Validate()
+ case h != nil:
+ sampleMetricType = sampleMetricTypeHistogram
+ valErr = h.Validate()
+ }
+ if valErr != nil {
+ return 0, valErr
+ }
+
+ // series references and chunk references are identical for agent mode.
+ s := a.series.GetByID(chunks.HeadSeriesRef(ref))
+ if s == nil {
+ var err error
+ s, err = a.getOrCreate(ls)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ s.Lock()
+ lastTS := s.lastTs
+ s.Unlock()
+
+ // TODO(bwplotka): Handle ST natively (as per PROM-60).
+ if a.opts.EnableSTAsZeroSample && st != 0 {
+ a.bestEffortAppendSTZeroSample(s, ls, lastTS, st, t, h, fh)
+ }
+
+ if t <= a.minValidTime(lastTS) {
+ a.metrics.totalOutOfOrderSamples.Inc()
+ return 0, storage.ErrOutOfOrderSample
+ }
+
+ switch {
+ case fh != nil:
+ isStale = value.IsStaleNaN(fh.Sum)
+ // NOTE: always modify pendingFloatHistograms and floatHistogramSeries together
+ a.pendingFloatHistograms = append(a.pendingFloatHistograms, record.RefFloatHistogramSample{
+ Ref: s.ref,
+ T: t,
+ FH: fh,
+ })
+ a.floatHistogramSeries = append(a.floatHistogramSeries, s)
+ case h != nil:
+ isStale = value.IsStaleNaN(h.Sum)
+ // NOTE: always modify pendingHistograms and histogramSeries together
+ a.pendingHistograms = append(a.pendingHistograms, record.RefHistogramSample{
+ Ref: s.ref,
+ T: t,
+ H: h,
+ })
+ a.histogramSeries = append(a.histogramSeries, s)
+ default:
+ isStale = value.IsStaleNaN(v)
+
+ // NOTE: always modify pendingSamples and sampleSeries together.
+ a.pendingSamples = append(a.pendingSamples, record.RefSample{
+ Ref: s.ref,
+ T: t,
+ V: v,
+ })
+ a.sampleSeries = append(a.sampleSeries, s)
+ }
+ a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricType).Inc()
+ if isStale {
+ // For stale values we never attempt to process metadata/exemplars, claim the success.
+ return storage.SeriesRef(s.ref), nil
+ }
+
+ // Append exemplars if any and if storage was configured for it.
+	// TODO(bwplotka): Agent has no equivalent of a.head.opts.EnableExemplarStorage && a.head.opts.MaxExemplars.Load() > 0; should it?
+ if len(opts.Exemplars) > 0 {
+ // Currently only exemplars can return partial errors.
+ partialErr = a.appendExemplars(s, opts.Exemplars)
+ }
+ return storage.SeriesRef(s.ref), partialErr
+}
+
+func (a *appenderV2) Commit() error {
+ defer a.appenderV2Pool.Put(a)
+ return a.commit()
+}
+
+func (a *appenderV2) Rollback() error {
+ defer a.appenderV2Pool.Put(a)
+ return a.rollback()
+}
+
+func (a *appenderV2) appendExemplars(s *memSeries, exemplar []exemplar.Exemplar) error {
+ var errs []error
+ for _, e := range exemplar {
+ // Ensure no empty labels have gotten through.
+ e.Labels = e.Labels.WithoutEmpty()
+
+ if err := a.validateExemplar(s.ref, e); err != nil {
+ if !errors.Is(err, storage.ErrDuplicateExemplar) {
+ // Except duplicates, return partial errors.
+ errs = append(errs, err)
+ continue
+ }
+ if !errors.Is(err, storage.ErrOutOfOrderExemplar) {
+				a.logger.Debug("Error while adding an exemplar on AppendSample", "exemplars", fmt.Sprintf("%+v", e), "err", err)
+ }
+ continue
+ }
+
+ a.series.SetLatestExemplar(s.ref, &e)
+ a.pendingExamplars = append(a.pendingExamplars, record.RefExemplar{
+ Ref: s.ref,
+ T: e.Ts,
+ V: e.Value,
+ Labels: e.Labels,
+ })
+ a.metrics.totalAppendedExemplars.Inc()
+ }
+ if len(errs) > 0 {
+ return &storage.AppendPartialError{ExemplarErrors: errs}
+ }
+ return nil
+}
+
+// NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
+// is implemented.
+//
+// ST is an experimental feature, we don't fail the append on errors, just debug log.
+func (a *appenderV2) bestEffortAppendSTZeroSample(s *memSeries, ls labels.Labels, lastTS, st, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) {
+	// NOTE: Use ls instead of s.lset to avoid locking memSeries. Using s.ref is acceptable without locking.
+ if st >= t {
+ a.logger.Debug("Error when appending ST", "series", ls.String(), "st", st, "t", t, "err", storage.ErrSTNewerThanSample)
+ return
+ }
+ if st <= lastTS {
+ a.logger.Debug("Error when appending ST", "series", ls.String(), "st", st, "t", t, "err", storage.ErrOutOfOrderST)
+ return
+ }
+
+ switch {
+ case fh != nil:
+ zeroFloatHistogram := &histogram.FloatHistogram{
+ // The STZeroSample represents a counter reset by definition.
+ CounterResetHint: histogram.CounterReset,
+ // Replicate other fields to avoid needless chunk creation.
+ Schema: fh.Schema,
+ ZeroThreshold: fh.ZeroThreshold,
+ CustomValues: fh.CustomValues,
+ }
+ a.pendingFloatHistograms = append(a.pendingFloatHistograms, record.RefFloatHistogramSample{Ref: s.ref, T: st, FH: zeroFloatHistogram})
+ a.floatHistogramSeries = append(a.floatHistogramSeries, s)
+ a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
+ case h != nil:
+ zeroHistogram := &histogram.Histogram{
+ // The STZeroSample represents a counter reset by definition.
+ CounterResetHint: histogram.CounterReset,
+ // Replicate other fields to avoid needless chunk creation.
+ Schema: h.Schema,
+ ZeroThreshold: h.ZeroThreshold,
+ CustomValues: h.CustomValues,
+ }
+ a.pendingHistograms = append(a.pendingHistograms, record.RefHistogramSample{Ref: s.ref, T: st, H: zeroHistogram})
+ a.histogramSeries = append(a.histogramSeries, s)
+ a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
+ default:
+ a.pendingSamples = append(a.pendingSamples, record.RefSample{Ref: s.ref, T: st, V: 0})
+ a.sampleSeries = append(a.sampleSeries, s)
+ a.metrics.totalAppendedSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
+ }
+}
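AppenderV2 above folds the value, native histograms, ST handling, and exemplars into a single Append call with per-call options. A sketch of the resulting append-and-commit flow, mirroring the tests that follow; the function name, labels, and timestamps are placeholders, and imports follow those of the file above.

// appendOne writes one float sample with a start timestamp (ST) and an exemplar.
func appendOne(db *DB, lset labels.Labels, st, ts int64, v float64) error {
	app := db.AppenderV2(context.Background())
	_, err := app.Append(0, lset, st, ts, v, nil, nil, storage.AOptions{
		Exemplars: []exemplar.Exemplar{{
			Labels: labels.FromStrings("trace_id", "abc123"),
			Ts:     ts,
			Value:  v,
			HasTs:  true,
		}},
	})
	if err != nil {
		var partial *storage.AppendPartialError
		if !errors.As(err, &partial) {
			_ = app.Rollback()
			return err
		}
		// Partial errors currently only cover exemplars; the sample itself was buffered.
	}
	return app.Commit()
}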
diff --git a/tsdb/agent/db_append_v2_test.go b/tsdb/agent/db_append_v2_test.go
new file mode 100644
index 0000000000..3e10a1163b
--- /dev/null
+++ b/tsdb/agent/db_append_v2_test.go
@@ -0,0 +1,1169 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package agent
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/common/promslog"
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/storage/remote"
+ "github.com/prometheus/prometheus/tsdb"
+ "github.com/prometheus/prometheus/tsdb/chunks"
+ "github.com/prometheus/prometheus/tsdb/record"
+ "github.com/prometheus/prometheus/tsdb/tsdbutil"
+ "github.com/prometheus/prometheus/tsdb/wlog"
+ "github.com/prometheus/prometheus/util/testutil"
+)
+
+func TestDB_InvalidSeries_AppendV2(t *testing.T) {
+ s := createTestAgentDB(t, nil, DefaultOptions())
+ defer s.Close()
+
+ app := s.AppenderV2(context.Background())
+ t.Run("Samples", func(t *testing.T) {
+ _, err := app.Append(0, labels.Labels{}, 0, 0, 0, nil, nil, storage.AOptions{})
+ require.ErrorIs(t, err, tsdb.ErrInvalidSample, "should reject empty labels")
+
+ _, err = app.Append(0, labels.FromStrings("a", "1", "a", "2"), 0, 0, 0, nil, nil, storage.AOptions{})
+ require.ErrorIs(t, err, tsdb.ErrInvalidSample, "should reject duplicate labels")
+ })
+
+ t.Run("Histograms", func(t *testing.T) {
+ _, err := app.Append(0, labels.Labels{}, 0, 0, 0, tsdbutil.GenerateTestHistograms(1)[0], nil, storage.AOptions{})
+ require.ErrorIs(t, err, tsdb.ErrInvalidSample, "should reject empty labels")
+
+ _, err = app.Append(0, labels.FromStrings("a", "1", "a", "2"), 0, 0, 0, tsdbutil.GenerateTestHistograms(1)[0], nil, storage.AOptions{})
+ require.ErrorIs(t, err, tsdb.ErrInvalidSample, "should reject duplicate labels")
+ })
+
+ t.Run("Exemplars", func(t *testing.T) {
+ e := exemplar.Exemplar{Labels: labels.FromStrings("a", "1", "a", "2")}
+ _, err := app.Append(0, labels.FromStrings("a", "1"), 0, 0, 0, nil, nil, storage.AOptions{
+ Exemplars: []exemplar.Exemplar{e},
+ })
+ partErr := &storage.AppendPartialError{}
+ require.ErrorAs(t, err, &partErr)
+ require.Len(t, partErr.ExemplarErrors, 1)
+ require.ErrorIs(t, partErr.ExemplarErrors[0], tsdb.ErrInvalidExemplar, "should reject duplicate labels")
+
+ e = exemplar.Exemplar{Labels: labels.FromStrings("a_somewhat_long_trace_id", "nYJSNtFrFTY37VR7mHzEE/LIDt7cdAQcuOzFajgmLDAdBSRHYPDzrxhMA4zz7el8naI/AoXFv9/e/G0vcETcIoNUi3OieeLfaIRQci2oa")}
+ _, err = app.Append(0, labels.FromStrings("a", "2"), 0, 0, 0, nil, nil, storage.AOptions{
+ Exemplars: []exemplar.Exemplar{e},
+ })
+ partErr = &storage.AppendPartialError{}
+ require.ErrorAs(t, err, &partErr)
+ require.Len(t, partErr.ExemplarErrors, 1)
+ require.ErrorIs(t, partErr.ExemplarErrors[0], storage.ErrExemplarLabelLength, "should reject too long label length")
+
+ // Inverse check.
+ e = exemplar.Exemplar{Labels: labels.FromStrings("a", "1"), Value: 20, Ts: 10, HasTs: true}
+ _, err = app.Append(0, labels.FromStrings("a", "1"), 0, 0, 0, nil, nil, storage.AOptions{
+ Exemplars: []exemplar.Exemplar{e},
+ })
+ require.NoError(t, err, "should not reject valid exemplars")
+ })
+}
+
+func TestCommit_AppendV2(t *testing.T) {
+ const (
+ numDatapoints = 1000
+ numHistograms = 100
+ numSeries = 8
+ )
+
+ s := createTestAgentDB(t, nil, DefaultOptions())
+ app := s.AppenderV2(context.TODO())
+
+ lbls := labelsForTest(t.Name(), numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ for i := range numDatapoints {
+ sample := chunks.GenerateSamples(0, 1)
+ _, err := app.Append(0, lset, 0, sample[0].T(), sample[0].F(), nil, nil, storage.AOptions{
+ Exemplars: []exemplar.Exemplar{{
+ Labels: lset,
+ Ts: sample[0].T() + int64(i),
+ Value: sample[0].F(),
+ HasTs: true,
+ }},
+ })
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ histograms := tsdbutil.GenerateTestHistograms(numHistograms)
+
+ for i := range numHistograms {
+ _, err := app.Append(0, lset, 0, int64(i), 0, histograms[i], nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ customBucketHistograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
+
+ for i := range numHistograms {
+ _, err := app.Append(0, lset, 0, int64(i), 0, customBucketHistograms[i], nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
+
+ for i := range numHistograms {
+ _, err := app.Append(0, lset, 0, int64(i), 0, nil, floatHistograms[i], storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ customBucketFloatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
+
+ for i := range numHistograms {
+ _, err := app.Append(0, lset, 0, int64(i), 0, nil, customBucketFloatHistograms[i], storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ require.NoError(t, app.Commit())
+ require.NoError(t, s.Close())
+
+ sr, err := wlog.NewSegmentsReader(s.wal.Dir())
+ require.NoError(t, err)
+ defer func() {
+ require.NoError(t, sr.Close())
+ }()
+
+ // Read records from WAL and check for expected count of series, samples, and exemplars.
+ var (
+ r = wlog.NewReader(sr)
+ dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
+
+ walSeriesCount, walSamplesCount, walExemplarsCount, walHistogramCount, walFloatHistogramCount int
+ )
+ for r.Next() {
+ rec := r.Record()
+ switch dec.Type(rec) {
+ case record.Series:
+ var series []record.RefSeries
+ series, err = dec.Series(rec, series)
+ require.NoError(t, err)
+ walSeriesCount += len(series)
+
+ case record.Samples:
+ var samples []record.RefSample
+ samples, err = dec.Samples(rec, samples)
+ require.NoError(t, err)
+ walSamplesCount += len(samples)
+
+ case record.HistogramSamples, record.CustomBucketsHistogramSamples:
+ var histograms []record.RefHistogramSample
+ histograms, err = dec.HistogramSamples(rec, histograms)
+ require.NoError(t, err)
+ walHistogramCount += len(histograms)
+
+ case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
+ var floatHistograms []record.RefFloatHistogramSample
+ floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
+ require.NoError(t, err)
+ walFloatHistogramCount += len(floatHistograms)
+
+ case record.Exemplars:
+ var exemplars []record.RefExemplar
+ exemplars, err = dec.Exemplars(rec, exemplars)
+ require.NoError(t, err)
+ walExemplarsCount += len(exemplars)
+
+ default:
+ }
+ }
+
+ // Check that the WAL contained the same number of committed series/samples/exemplars.
+ require.Equal(t, numSeries*5, walSeriesCount, "unexpected number of series")
+ require.Equal(t, numSeries*numDatapoints, walSamplesCount, "unexpected number of samples")
+ require.Equal(t, numSeries*numDatapoints, walExemplarsCount, "unexpected number of exemplars")
+ require.Equal(t, numSeries*numHistograms*2, walHistogramCount, "unexpected number of histograms")
+ require.Equal(t, numSeries*numHistograms*2, walFloatHistogramCount, "unexpected number of float histograms")
+
+ // Check that we can still create both kinds of Appender - see https://github.com/prometheus/prometheus/issues/17800.
+ _ = s.Appender(context.TODO())
+ _ = s.AppenderV2(context.TODO())
+}
+
+func TestRollback_AppendV2(t *testing.T) {
+ const (
+ numDatapoints = 1000
+ numHistograms = 100
+ numSeries = 8
+ )
+
+ s := createTestAgentDB(t, nil, DefaultOptions())
+ app := s.AppenderV2(context.TODO())
+
+ lbls := labelsForTest(t.Name(), numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ for range numDatapoints {
+ sample := chunks.GenerateSamples(0, 1)
+ _, err := app.Append(0, lset, 0, sample[0].T(), sample[0].F(), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ histograms := tsdbutil.GenerateTestHistograms(numHistograms)
+
+ for i := range numHistograms {
+ _, err := app.Append(0, lset, 0, int64(i), 0, histograms[i], nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
+
+ for i := range numHistograms {
+ _, err := app.Append(0, lset, 0, int64(i), 0, histograms[i], nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
+
+ for i := range numHistograms {
+ _, err := app.Append(0, lset, 0, int64(i), 0, nil, floatHistograms[i], storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
+
+ for i := range numHistograms {
+ _, err := app.Append(0, lset, 0, int64(i), 0, nil, floatHistograms[i], storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ // Do a rollback, which should clear uncommitted data. A follow-up call to
+ // Commit should persist nothing to the WAL.
+ require.NoError(t, app.Rollback())
+ require.NoError(t, app.Commit())
+ require.NoError(t, s.Close())
+
+ sr, err := wlog.NewSegmentsReader(s.wal.Dir())
+ require.NoError(t, err)
+ defer func() {
+ require.NoError(t, sr.Close())
+ }()
+
+ // Read records from WAL and check for expected count of series and samples.
+ var (
+ r = wlog.NewReader(sr)
+ dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
+
+ walSeriesCount, walSamplesCount, walHistogramCount, walFloatHistogramCount, walExemplarsCount int
+ )
+ for r.Next() {
+ rec := r.Record()
+ switch dec.Type(rec) {
+ case record.Series:
+ var series []record.RefSeries
+ series, err = dec.Series(rec, series)
+ require.NoError(t, err)
+ walSeriesCount += len(series)
+
+ case record.Samples:
+ var samples []record.RefSample
+ samples, err = dec.Samples(rec, samples)
+ require.NoError(t, err)
+ walSamplesCount += len(samples)
+
+ case record.Exemplars:
+ var exemplars []record.RefExemplar
+ exemplars, err = dec.Exemplars(rec, exemplars)
+ require.NoError(t, err)
+ walExemplarsCount += len(exemplars)
+
+ case record.HistogramSamples, record.CustomBucketsHistogramSamples:
+ var histograms []record.RefHistogramSample
+ histograms, err = dec.HistogramSamples(rec, histograms)
+ require.NoError(t, err)
+ walHistogramCount += len(histograms)
+
+ case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
+ var floatHistograms []record.RefFloatHistogramSample
+ floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
+ require.NoError(t, err)
+ walFloatHistogramCount += len(floatHistograms)
+
+ default:
+ }
+ }
+
+ // Check that only series records were written to the WAL after calling Rollback.
+ require.Equal(t, numSeries*5, walSeriesCount, "series should have been written to WAL")
+ require.Equal(t, 0, walSamplesCount, "samples should not have been written to WAL")
+ require.Equal(t, 0, walExemplarsCount, "exemplars should not have been written to WAL")
+ require.Equal(t, 0, walHistogramCount, "histograms should not have been written to WAL")
+ require.Equal(t, 0, walFloatHistogramCount, "float histograms should not have been written to WAL")
+}
+
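+// TestFullTruncateWAL_AppendV2 verifies that truncating the WAL past the newest sample
+// garbage-collects every series written through AppenderV2.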
+func TestFullTruncateWAL_AppendV2(t *testing.T) {
+ const (
+ numDatapoints = 1000
+ numHistograms = 100
+ numSeries = 800
+ lastTs = 500
+ )
+
+ reg := prometheus.NewRegistry()
+ opts := DefaultOptions()
+ opts.TruncateFrequency = time.Minute * 2
+
+ s := createTestAgentDB(t, reg, opts)
+ defer func() {
+ require.NoError(t, s.Close())
+ }()
+ app := s.AppenderV2(context.TODO())
+
+ lbls := labelsForTest(t.Name(), numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ for range numDatapoints {
+ _, err := app.Append(0, lset, 0, int64(lastTs), 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ lbls = labelsForTest(t.Name()+"_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ histograms := tsdbutil.GenerateTestHistograms(numHistograms)
+
+ for i := range numHistograms {
+ _, err := app.Append(0, lset, 0, int64(lastTs), 0, histograms[i], nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
+
+ for i := range numHistograms {
+ _, err := app.Append(0, lset, 0, int64(lastTs), 0, histograms[i], nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
+
+ for i := range numHistograms {
+ _, err := app.Append(0, lset, 0, int64(lastTs), 0, nil, floatHistograms[i], storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
+
+ for i := range numHistograms {
+ _, err := app.Append(0, lset, 0, int64(lastTs), 0, nil, floatHistograms[i], storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ // Truncate the WAL with a mint that GCs all the samples.
+ s.truncate(lastTs + 1)
+
+ m := gatherFamily(t, reg, "prometheus_agent_deleted_series")
+ require.Equal(t, float64(numSeries*5), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count")
+}
+
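+// TestPartialTruncateWAL_AppendV2 verifies that a partial truncation garbage-collects only the
+// series whose data is older than the truncation mint, keeping the newer batch intact.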
+func TestPartialTruncateWAL_AppendV2(t *testing.T) {
+ const (
+ numDatapoints = 1000
+ numSeries = 800
+ )
+
+ opts := DefaultOptions()
+
+ reg := prometheus.NewRegistry()
+ s := createTestAgentDB(t, reg, opts)
+ defer func() {
+ require.NoError(t, s.Close())
+ }()
+ app := s.AppenderV2(context.TODO())
+
+ // Create the first batch of 800 series, each with 1000 data points at a fixed lastTs of 500.
+ var lastTs int64 = 500
+ lbls := labelsForTest(t.Name()+"batch-1", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ for range numDatapoints {
+ _, err := app.Append(0, lset, 0, lastTs, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ lbls = labelsForTest(t.Name()+"_histogram_batch-1", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ histograms := tsdbutil.GenerateTestHistograms(numDatapoints)
+
+ for i := range numDatapoints {
+ _, err := app.Append(0, lset, 0, lastTs, 0, histograms[i], nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ lbls = labelsForTest(t.Name()+"_custom_buckets_histogram_batch-1", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numDatapoints)
+
+ for i := range numDatapoints {
+ _, err := app.Append(0, lset, 0, lastTs, 0, histograms[i], nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ lbls = labelsForTest(t.Name()+"_float_histogram_batch-1", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ floatHistograms := tsdbutil.GenerateTestFloatHistograms(numDatapoints)
+
+ for i := range numDatapoints {
+ _, err := app.Append(0, lset, 0, lastTs, 0, nil, floatHistograms[i], storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram_batch-1", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numDatapoints)
+
+ for i := range numDatapoints {
+ _, err := app.Append(0, lset, 0, lastTs, 0, nil, floatHistograms[i], storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ // Create the second batch of 800 series, each with 1000 data points at a fixed lastTs of 600.
+ lastTs = 600
+ lbls = labelsForTest(t.Name()+"batch-2", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ for range numDatapoints {
+ _, err := app.Append(0, lset, 0, lastTs, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ lbls = labelsForTest(t.Name()+"_histogram_batch-2", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ histograms := tsdbutil.GenerateTestHistograms(numDatapoints)
+
+ for i := range numDatapoints {
+ _, err := app.Append(0, lset, 0, lastTs, 0, histograms[i], nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ lbls = labelsForTest(t.Name()+"_custom_buckets_histogram_batch-2", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numDatapoints)
+
+ for i := range numDatapoints {
+ _, err := app.Append(0, lset, 0, lastTs, 0, histograms[i], nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ lbls = labelsForTest(t.Name()+"_float_histogram_batch-2", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ floatHistograms := tsdbutil.GenerateTestFloatHistograms(numDatapoints)
+
+ for i := range numDatapoints {
+ _, err := app.Append(0, lset, 0, lastTs, 0, nil, floatHistograms[i], storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram_batch-2", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numDatapoints)
+
+ for i := range numDatapoints {
+ _, err := app.Append(0, lset, 0, lastTs, 0, nil, floatHistograms[i], storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ // Truncate the WAL with a mint that GCs only the first batch of 800 series while retaining the second batch.
+ s.truncate(lastTs - 1)
+
+ m := gatherFamily(t, reg, "prometheus_agent_deleted_series")
+ require.Len(t, m.Metric, 1)
+ require.Equal(t, float64(numSeries*5), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count")
+}
+
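+// TestWALReplay_AppendV2 verifies that series written through AppenderV2 are restored from the
+// WAL on reopen, including the last timestamp recorded for each series.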
+func TestWALReplay_AppendV2(t *testing.T) {
+ const (
+ numDatapoints = 1000
+ numHistograms = 100
+ numSeries = 8
+ lastTs = 500
+ )
+
+ s := createTestAgentDB(t, nil, DefaultOptions())
+ app := s.AppenderV2(context.TODO())
+
+ lbls := labelsForTest(t.Name(), numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ for range numDatapoints {
+ _, err := app.Append(0, lset, 0, lastTs, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ histograms := tsdbutil.GenerateTestHistograms(numHistograms)
+
+ for i := range numHistograms {
+ _, err := app.Append(0, lset, 0, lastTs, 0, histograms[i], nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
+
+ for i := range numHistograms {
+ _, err := app.Append(0, lset, 0, lastTs, 0, histograms[i], nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
+
+ for i := range numHistograms {
+ _, err := app.Append(0, lset, 0, lastTs, 0, nil, floatHistograms[i], storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
+
+ for i := range numHistograms {
+ _, err := app.Append(0, lset, 0, lastTs, 0, nil, floatHistograms[i], storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ require.NoError(t, app.Commit())
+ require.NoError(t, s.Close())
+
+ // Hack: s.wal.Dir() is the /wal subdirectory of the original storage path.
+ // We need the original directory so we can recreate the storage for replay.
+ storageDir := filepath.Dir(s.wal.Dir())
+
+ reg := prometheus.NewRegistry()
+ replayStorage, err := Open(s.logger, reg, nil, storageDir, s.opts)
+ if err != nil {
+ t.Fatalf("unable to create storage for the agent: %v", err)
+ }
+ defer func() {
+ require.NoError(t, replayStorage.Close())
+ }()
+
+ // Check if all the series are retrieved back from the WAL.
+ m := gatherFamily(t, reg, "prometheus_agent_active_series")
+ require.Equal(t, float64(numSeries*5), m.Metric[0].Gauge.GetValue(), "agent wal replay mismatch of active series count")
+
+ // Check if lastTs of the samples retrieved from the WAL is retained.
+ metrics := replayStorage.series.series
+ for i := range metrics {
+ mp := metrics[i]
+ for _, v := range mp {
+ require.Equal(t, int64(lastTs), v.lastTs)
+ }
+ }
+}
+
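+// Test_ExistingWAL_NextRef_AppendV2 verifies that nextRef is initialized from the series refs
+// already present in an existing WAL when the agent DB is reopened.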
+func Test_ExistingWAL_NextRef_AppendV2(t *testing.T) {
+ dbDir := t.TempDir()
+ rs := remote.NewStorage(promslog.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, false)
+ defer func() {
+ require.NoError(t, rs.Close())
+ }()
+
+ db, err := Open(promslog.NewNopLogger(), nil, rs, dbDir, DefaultOptions())
+ require.NoError(t, err)
+
+ seriesCount := 10
+
+ // Append series
+ app := db.AppenderV2(context.Background())
+ for i := range seriesCount {
+ lset := labels.FromStrings(model.MetricNameLabel, fmt.Sprintf("series_%d", i))
+ _, err := app.Append(0, lset, 0, 0, 100, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+
+ histogramCount := 10
+ histograms := tsdbutil.GenerateTestHistograms(histogramCount)
+ // Append histogram series
+ for i := range histogramCount {
+ lset := labels.FromStrings(model.MetricNameLabel, fmt.Sprintf("histogram_%d", i))
+ _, err := app.Append(0, lset, 0, 0, 0, histograms[i], nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+
+ // Truncate the WAL to force creation of a new segment.
+ require.NoError(t, db.truncate(0))
+ require.NoError(t, db.Close())
+
+ // Create a new storage and see what nextRef is initialized to.
+ db, err = Open(promslog.NewNopLogger(), nil, rs, dbDir, DefaultOptions())
+ require.NoError(t, err)
+ defer func() {
+ require.NoError(t, db.Close())
+ }()
+
+ require.Equal(t, uint64(seriesCount+histogramCount), db.nextRef.Load(), "nextRef should be equal to the number of series written across the entire WAL")
+}
+
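+// TestStorage_DuplicateExemplarsIgnored_AppendV2 verifies that consecutive duplicate exemplars
+// passed via AOptions are skipped, and only unique ones are written to the WAL.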
+func TestStorage_DuplicateExemplarsIgnored_AppendV2(t *testing.T) {
+ s := createTestAgentDB(t, nil, DefaultOptions())
+ app := s.AppenderV2(context.Background())
+ defer s.Close()
+
+ // Write a few exemplars to our appender and call Commit().
+ // If the Labels, Value or Timestamp differ from the previous exemplar,
+ // a new one should be appended; otherwise, it should be skipped.
+ e1 := exemplar.Exemplar{Labels: labels.FromStrings("a", "1"), Value: 20, Ts: 10, HasTs: true}
+ e2 := exemplar.Exemplar{Labels: labels.FromStrings("b", "2"), Value: 20, Ts: 10, HasTs: true}
+ e3 := exemplar.Exemplar{Labels: labels.FromStrings("b", "2"), Value: 42, Ts: 10, HasTs: true}
+ e4 := exemplar.Exemplar{Labels: labels.FromStrings("b", "2"), Value: 42, Ts: 25, HasTs: true}
+
+ _, err := app.Append(0, labels.FromStrings("a", "1"), 0, 0, 0, nil, nil, storage.AOptions{
+ Exemplars: []exemplar.Exemplar{e1, e1, e2, e2, e2, e3, e3, e4, e4},
+ })
+ require.NoError(t, err, "should not reject valid series")
+ require.NoError(t, app.Commit())
+
+ // Read back what was written to the WAL.
+ var walExemplarsCount int
+ sr, err := wlog.NewSegmentsReader(s.wal.Dir())
+ require.NoError(t, err)
+ defer sr.Close()
+ r := wlog.NewReader(sr)
+
+ dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
+ for r.Next() {
+ rec := r.Record()
+ if dec.Type(rec) == record.Exemplars {
+ var exemplars []record.RefExemplar
+ exemplars, err = dec.Exemplars(rec, exemplars)
+ require.NoError(t, err)
+ walExemplarsCount += len(exemplars)
+ }
+ }
+
+ // We passed 9 exemplars in total, but only the 4 unique ones should have been written to the WAL.
+ require.Equal(t, 4, walExemplarsCount)
+}
+
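+// TestDBAllowOOOSamples_AppendV2 verifies that with an unlimited OutOfOrderTimeWindow the agent
+// accepts samples older than a series' last recorded timestamp, even after a WAL replay.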
+func TestDBAllowOOOSamples_AppendV2(t *testing.T) {
+ const (
+ numDatapoints = 5
+ numHistograms = 5
+ numSeries = 4
+ offset = 100
+ )
+
+ reg := prometheus.NewRegistry()
+ opts := DefaultOptions()
+ opts.OutOfOrderTimeWindow = math.MaxInt64
+ s := createTestAgentDB(t, reg, opts)
+ app := s.AppenderV2(context.TODO())
+
+ // Let's add some samples in the [offset, offset+numDatapoints) range.
+ lbls := labelsForTest(t.Name(), numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ for i := offset; i < numDatapoints+offset; i++ {
+ _, err := app.Append(0, lset, 0, int64(i), float64(i), nil, nil, storage.AOptions{
+ Exemplars: []exemplar.Exemplar{{
+ Labels: lset,
+ Ts: int64(i) * 2,
+ Value: float64(i),
+ HasTs: true,
+ }},
+ })
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ histograms := tsdbutil.GenerateTestHistograms(numHistograms)
+
+ for i := offset; i < numDatapoints+offset; i++ {
+ _, err := app.Append(0, lset, 0, int64(i), 0, histograms[i-offset], nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
+
+ for i := offset; i < numDatapoints+offset; i++ {
+ _, err := app.Append(0, lset, 0, int64(i), 0, histograms[i-offset], nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
+
+ for i := offset; i < numDatapoints+offset; i++ {
+ _, err := app.Append(0, lset, 0, int64(i), 0, nil, floatHistograms[i-offset], storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
+
+ for i := offset; i < numDatapoints+offset; i++ {
+ _, err := app.Append(0, lset, 0, int64(i), 0, nil, floatHistograms[i-offset], storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ require.NoError(t, app.Commit())
+ m := gatherFamily(t, reg, "prometheus_agent_samples_appended_total")
+ require.Equal(t, float64(20), m.Metric[0].Counter.GetValue(), "agent wal mismatch of total appended samples")
+ require.Equal(t, float64(80), m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms")
+ require.NoError(t, s.Close())
+
+ // Hack: s.wal.Dir() is the /wal subdirectory of the original storage path.
+ // We need the original directory so we can recreate the storage for replay.
+ storageDir := filepath.Dir(s.wal.Dir())
+
+ // Replay the storage so that the lastTs for each series is recorded.
+ reg2 := prometheus.NewRegistry()
+ db, err := Open(s.logger, reg2, nil, storageDir, s.opts)
+ if err != nil {
+ t.Fatalf("unable to create storage for the agent: %v", err)
+ }
+
+ app = db.AppenderV2(context.Background())
+
+ // Now the lastTs will have been recorded successfully.
+ // Let's try appending twice as many OOO samples in the [0, numDatapoints) range.
+ lbls = labelsForTest(t.Name()+"_histogram", numSeries*2)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ for i := range numDatapoints {
+ _, err := app.Append(0, lset, 0, int64(i), float64(i), nil, nil, storage.AOptions{
+ Exemplars: []exemplar.Exemplar{{
+ Labels: lset,
+ Ts: int64(i) * 2,
+ Value: float64(i),
+ HasTs: true,
+ }},
+ })
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_histogram", numSeries*2)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ histograms := tsdbutil.GenerateTestHistograms(numHistograms)
+
+ for i := range numDatapoints {
+ _, err := app.Append(0, lset, 0, int64(i), 0, histograms[i], nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries*2)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
+
+ for i := range numDatapoints {
+ _, err := app.Append(0, lset, 0, int64(i), 0, histograms[i], nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_float_histogram", numSeries*2)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
+
+ for i := range numDatapoints {
+ _, err := app.Append(0, lset, 0, int64(i), 0, nil, floatHistograms[i], storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries*2)
+ for _, l := range lbls {
+ lset := labels.New(l...)
+
+ floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
+
+ for i := range numDatapoints {
+ _, err := app.Append(0, lset, 0, int64(i), 0, nil, floatHistograms[i], storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ require.NoError(t, app.Commit())
+ m = gatherFamily(t, reg2, "prometheus_agent_samples_appended_total")
+ require.Equal(t, float64(40), m.Metric[0].Counter.GetValue(), "agent wal mismatch of total appended samples")
+ require.Equal(t, float64(160), m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms")
+ require.NoError(t, db.Close())
+}
+
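+// TestDBOutOfOrderTimeWindow_AppendV2 verifies that samples and histograms older than the
+// configured OutOfOrderTimeWindow are rejected with storage.ErrOutOfOrderSample.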
+func TestDBOutOfOrderTimeWindow_AppendV2(t *testing.T) {
+ tc := []struct {
+ outOfOrderTimeWindow, firstTs, secondTs int64
+ expectedError error
+ }{
+ {0, 100, 101, nil},
+ {0, 100, 100, storage.ErrOutOfOrderSample},
+ {0, 100, 99, storage.ErrOutOfOrderSample},
+ {100, 100, 1, nil},
+ {100, 100, 0, storage.ErrOutOfOrderSample},
+ }
+
+ for _, c := range tc {
+ t.Run(fmt.Sprintf("outOfOrderTimeWindow=%d, firstTs=%d, secondTs=%d, expectedError=%s", c.outOfOrderTimeWindow, c.firstTs, c.secondTs, c.expectedError), func(t *testing.T) {
+ reg := prometheus.NewRegistry()
+ opts := DefaultOptions()
+ opts.OutOfOrderTimeWindow = c.outOfOrderTimeWindow
+ s := createTestAgentDB(t, reg, opts)
+ app := s.AppenderV2(context.TODO())
+
+ lbls := labelsForTest(t.Name()+"_histogram", 1)
+ lset := labels.New(lbls[0]...)
+ _, err := app.Append(0, lset, 0, c.firstTs, 0, tsdbutil.GenerateTestHistograms(1)[0], nil, storage.AOptions{})
+ require.NoError(t, err)
+ err = app.Commit()
+ require.NoError(t, err)
+ _, err = app.Append(0, lset, 0, c.secondTs, 0, tsdbutil.GenerateTestHistograms(1)[0], nil, storage.AOptions{})
+ require.ErrorIs(t, err, c.expectedError)
+
+ lbls = labelsForTest(t.Name(), 1)
+ lset = labels.New(lbls[0]...)
+ _, err = app.Append(0, lset, 0, c.firstTs, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ err = app.Commit()
+ require.NoError(t, err)
+ _, err = app.Append(0, lset, 0, c.secondTs, 0, nil, nil, storage.AOptions{})
+ require.ErrorIs(t, err, c.expectedError)
+
+ expectedAppendedSamples := float64(2)
+ if c.expectedError != nil {
+ expectedAppendedSamples = 1
+ }
+ m := gatherFamily(t, reg, "prometheus_agent_samples_appended_total")
+ require.Equal(t, expectedAppendedSamples, m.Metric[0].Counter.GetValue(), "agent wal mismatch of total appended samples")
+ require.Equal(t, expectedAppendedSamples, m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms")
+ require.NoError(t, s.Close())
+ })
+ }
+}
+
+// TestDB_EnableSTZeroInjection_AppendV2 replaces TestDBStartTimestampSamplesIngestion.
+func TestDB_EnableSTZeroInjection_AppendV2(t *testing.T) {
+ t.Parallel()
+
+ // NOTE: Eventually the WAL sample and appendable sample types should be the same.
+ type appendableSample struct {
+ st, t int64
+ v float64
+ lbls labels.Labels
+ h *histogram.Histogram
+ }
+
+ testHistograms := tsdbutil.GenerateTestHistograms(2)
+ zeroHistogram := &histogram.Histogram{
+ // The STZeroSample represents a counter reset by definition.
+ CounterResetHint: histogram.CounterReset,
+ // Replicate other fields to avoid needless chunk creation.
+ Schema: testHistograms[0].Schema,
+ ZeroThreshold: testHistograms[0].ZeroThreshold,
+ CustomValues: testHistograms[0].CustomValues,
+ }
+
+ lbls := labelsForTest(t.Name(), 1)
+ defLbls := labels.New(lbls[0]...)
+
+ testCases := []struct {
+ name string
+ inputSamples []appendableSample
+ expectedSamples []walSample
+ expectedSeriesCount int
+ }{
+ {
+ name: "in order ct+normal sample/floatSamples",
+ inputSamples: []appendableSample{
+ {t: 100, st: 1, v: 10, lbls: defLbls},
+ {t: 101, st: 1, v: 10, lbls: defLbls},
+ },
+ expectedSamples: []walSample{
+ {t: 1, f: 0, lbls: defLbls, ref: 1},
+ {t: 100, f: 10, lbls: defLbls, ref: 1},
+ {t: 101, f: 10, lbls: defLbls, ref: 1},
+ },
+ },
+ {
+ name: "ST+float && ST+histogram samples",
+ inputSamples: []appendableSample{
+ {
+ t: 100,
+ st: 30,
+ v: 20,
+ lbls: defLbls,
+ },
+ {
+ t: 300,
+ st: 230,
+ h: testHistograms[0],
+ lbls: defLbls,
+ },
+ },
+ expectedSamples: []walSample{
+ {t: 30, f: 0, lbls: defLbls, ref: 1},
+ {t: 100, f: 20, lbls: defLbls, ref: 1},
+ {t: 230, h: zeroHistogram, lbls: defLbls, ref: 1},
+ {t: 300, h: testHistograms[0], lbls: defLbls, ref: 1},
+ },
+ expectedSeriesCount: 1,
+ },
+ {
+ name: "ST+float && ST+histogram samples with error; should be ignored",
+ inputSamples: []appendableSample{
+ {
+ // invalid ST
+ t: 100,
+ st: 100,
+ v: 10,
+ lbls: defLbls,
+ },
+ {
+ // invalid ST histogram
+ t: 300,
+ st: 300,
+ h: testHistograms[0],
+ lbls: defLbls,
+ },
+ },
+ expectedSamples: []walSample{
+ {t: 100, f: 10, lbls: defLbls, ref: 1},
+ {t: 300, h: testHistograms[0], lbls: defLbls, ref: 1},
+ },
+ expectedSeriesCount: 0,
+ },
+ {
+ name: "In order ct+normal sample/histogram",
+ inputSamples: []appendableSample{
+ {t: 100, h: testHistograms[0], st: 1, lbls: defLbls},
+ {t: 101, h: testHistograms[1], st: 1, lbls: defLbls},
+ },
+ expectedSamples: []walSample{
+ {t: 1, h: zeroHistogram, lbls: defLbls, ref: 1},
+ {t: 100, h: testHistograms[0], lbls: defLbls, ref: 1},
+ {t: 101, h: testHistograms[1], lbls: defLbls, ref: 1},
+ },
+ },
+ {
+ name: "ct+normal then OOO sample/float",
+ inputSamples: []appendableSample{
+ {t: 60_000, st: 40_000, v: 10, lbls: defLbls},
+ {t: 120_000, st: 40_000, v: 10, lbls: defLbls},
+ {t: 180_000, st: 40_000, v: 10, lbls: defLbls},
+ {t: 50_000, st: 40_000, v: 10, lbls: defLbls},
+ },
+ expectedSamples: []walSample{
+ {t: 40_000, f: 0, lbls: defLbls, ref: 1},
+ {t: 60_000, f: 10, lbls: defLbls, ref: 1},
+ {t: 120_000, f: 10, lbls: defLbls, ref: 1},
+ {t: 180_000, f: 10, lbls: defLbls, ref: 1},
+ {t: 50_000, f: 10, lbls: defLbls, ref: 1}, // OOO sample.
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+
+ reg := prometheus.NewRegistry()
+ opts := DefaultOptions()
+ opts.OutOfOrderTimeWindow = 360_000
+ opts.EnableSTAsZeroSample = true
+ s := createTestAgentDB(t, reg, opts)
+
+ for _, sample := range tc.inputSamples {
+ // Simulate the one-sample-per-series logic we have in all ingestion paths in Prometheus.
+ app := s.AppenderV2(t.Context())
+ _, err := app.Append(0, sample.lbls, sample.st, sample.t, sample.v, sample.h, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ }
+
+ // Close the DB to ensure all data is flushed to the WAL
+ require.NoError(t, s.Close())
+
+ // Check that we don't have any OOO samples in the WAL by checking metrics
+ families, err := reg.Gather()
+ require.NoError(t, err, "failed to gather metrics")
+ for _, f := range families {
+ if f.GetName() == "prometheus_agent_out_of_order_samples_total" {
+ t.Fatalf("unexpected metric %s", f.GetName())
+ }
+ }
+
+ got := readWALSamples(t, s.wal.Dir())
+ testutil.RequireEqualWithOptions(t, tc.expectedSamples, got, cmp.Options{cmp.AllowUnexported(walSample{})})
+ })
+ }
+}
diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go
index 7409f79ec5..31e309d3fd 100644
--- a/tsdb/agent/db_test.go
+++ b/tsdb/agent/db_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -24,6 +24,7 @@ import (
"testing"
"time"
+ "github.com/google/go-cmp/cmp"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
@@ -258,6 +259,9 @@ func TestCommit(t *testing.T) {
require.Equal(t, numSeries*numDatapoints, walExemplarsCount, "unexpected number of exemplars")
require.Equal(t, numSeries*numHistograms*2, walHistogramCount, "unexpected number of histograms")
require.Equal(t, numSeries*numHistograms*2, walFloatHistogramCount, "unexpected number of float histograms")
+
+ // Check that we can get another appender after this - see https://github.com/prometheus/prometheus/issues/17800.
+ _ = s.Appender(context.TODO())
}
func TestRollback(t *testing.T) {
@@ -1142,6 +1146,10 @@ type walSample struct {
ref storage.SeriesRef
}
+// NOTE(bwplotka): This test exercises the behaviour of the storage.Appender interface against its invariants (see
+// the storage.Appender comment) around validating the order of samples within a single Appender. This surfaces
+// a slight bug in the AppendSTZero* methods. We are leaving it as-is given the planned removal of AppenderV1 as
+// per https://github.com/prometheus/prometheus/issues/17632.
func TestDBStartTimestampSamplesIngestion(t *testing.T) {
t.Parallel()
@@ -1154,7 +1162,7 @@ func TestDBStartTimestampSamplesIngestion(t *testing.T) {
expectsError bool
}
- testHistogram := tsdbutil.GenerateTestHistograms(1)[0]
+ testHistograms := tsdbutil.GenerateTestHistograms(2)
zeroHistogram := &histogram.Histogram{}
lbls := labelsForTest(t.Name(), 1)
@@ -1163,7 +1171,7 @@ func TestDBStartTimestampSamplesIngestion(t *testing.T) {
testCases := []struct {
name string
inputSamples []appendableSample
- expectedSamples []*walSample
+ expectedSamples []walSample
expectedSeriesCount int
}{
{
@@ -1172,10 +1180,10 @@ func TestDBStartTimestampSamplesIngestion(t *testing.T) {
{t: 100, st: 1, v: 10, lbls: defLbls},
{t: 101, st: 1, v: 10, lbls: defLbls},
},
- expectedSamples: []*walSample{
- {t: 1, f: 0, lbls: defLbls},
- {t: 100, f: 10, lbls: defLbls},
- {t: 101, f: 10, lbls: defLbls},
+ expectedSamples: []walSample{
+ {t: 1, f: 0, lbls: defLbls, ref: 1},
+ {t: 100, f: 10, lbls: defLbls, ref: 1},
+ {t: 101, f: 10, lbls: defLbls, ref: 1},
},
},
{
@@ -1190,15 +1198,15 @@ func TestDBStartTimestampSamplesIngestion(t *testing.T) {
{
t: 300,
st: 230,
- h: testHistogram,
+ h: testHistograms[0],
lbls: defLbls,
},
},
- expectedSamples: []*walSample{
- {t: 30, f: 0, lbls: defLbls},
- {t: 100, f: 20, lbls: defLbls},
- {t: 230, h: zeroHistogram, lbls: defLbls},
- {t: 300, h: testHistogram, lbls: defLbls},
+ expectedSamples: []walSample{
+ {t: 30, f: 0, lbls: defLbls, ref: 1},
+ {t: 100, f: 20, lbls: defLbls, ref: 1},
+ {t: 230, h: zeroHistogram, lbls: defLbls, ref: 1},
+ {t: 300, h: testHistograms[0], lbls: defLbls, ref: 1},
},
expectedSeriesCount: 1,
},
@@ -1217,27 +1225,27 @@ func TestDBStartTimestampSamplesIngestion(t *testing.T) {
// invalid ST histogram
t: 300,
st: 300,
- h: testHistogram,
+ h: testHistograms[0],
lbls: defLbls,
expectsError: true,
},
},
- expectedSamples: []*walSample{
- {t: 100, f: 10, lbls: defLbls},
- {t: 300, h: testHistogram, lbls: defLbls},
+ expectedSamples: []walSample{
+ {t: 100, f: 10, lbls: defLbls, ref: 1},
+ {t: 300, h: testHistograms[0], lbls: defLbls, ref: 1},
},
expectedSeriesCount: 0,
},
{
name: "In order ct+normal sample/histogram",
inputSamples: []appendableSample{
- {t: 100, h: testHistogram, st: 1, lbls: defLbls},
- {t: 101, h: testHistogram, st: 1, lbls: defLbls},
+ {t: 100, h: testHistograms[0], st: 1, lbls: defLbls},
+ {t: 101, h: testHistograms[1], st: 1, lbls: defLbls},
},
- expectedSamples: []*walSample{
- {t: 1, h: &histogram.Histogram{}},
- {t: 100, h: testHistogram},
- {t: 101, h: &histogram.Histogram{CounterResetHint: histogram.NotCounterReset}},
+ expectedSamples: []walSample{
+ {t: 1, h: &histogram.Histogram{}, lbls: defLbls, ref: 1},
+ {t: 100, h: testHistograms[0], lbls: defLbls, ref: 1},
+ {t: 101, h: testHistograms[1], lbls: defLbls, ref: 1},
},
},
{
@@ -1248,12 +1256,12 @@ func TestDBStartTimestampSamplesIngestion(t *testing.T) {
{t: 180_000, st: 40_000, v: 10, lbls: defLbls},
{t: 50_000, st: 40_000, v: 10, lbls: defLbls},
},
- expectedSamples: []*walSample{
- {t: 40_000, f: 0, lbls: defLbls},
- {t: 50_000, f: 10, lbls: defLbls},
- {t: 60_000, f: 10, lbls: defLbls},
- {t: 120_000, f: 10, lbls: defLbls},
- {t: 180_000, f: 10, lbls: defLbls},
+ expectedSamples: []walSample{
+ {t: 40_000, f: 0, lbls: defLbls, ref: 1},
+ {t: 60_000, f: 10, lbls: defLbls, ref: 1},
+ {t: 120_000, f: 10, lbls: defLbls, ref: 1},
+ {t: 180_000, f: 10, lbls: defLbls, ref: 1},
+ {t: 50_000, f: 10, lbls: defLbls, ref: 1}, // OOO sample.
},
},
}
@@ -1294,7 +1302,7 @@ func TestDBStartTimestampSamplesIngestion(t *testing.T) {
// Close the DB to ensure all data is flushed to the WAL
require.NoError(t, s.Close())
- // Check that we dont have any OOO samples in the WAL by checking metrics
+ // Check that we don't have any OOO samples in the WAL by checking metrics
families, err := reg.Gather()
require.NoError(t, err, "failed to gather metrics")
for _, f := range families {
@@ -1303,26 +1311,13 @@ func TestDBStartTimestampSamplesIngestion(t *testing.T) {
}
}
- outputSamples := readWALSamples(t, s.wal.Dir())
-
- require.Len(t, outputSamples, len(tc.expectedSamples), "Expected %d samples", len(tc.expectedSamples))
-
- for i, expectedSample := range tc.expectedSamples {
- for _, sample := range outputSamples {
- if sample.t == expectedSample.t && sample.lbls.String() == expectedSample.lbls.String() {
- if expectedSample.h != nil {
- require.Equal(t, expectedSample.h, sample.h, "histogram value mismatch (sample index %d)", i)
- } else {
- require.Equal(t, expectedSample.f, sample.f, "value mismatch (sample index %d)", i)
- }
- }
- }
- }
+ got := readWALSamples(t, s.wal.Dir())
+ testutil.RequireEqualWithOptions(t, tc.expectedSamples, got, cmp.Options{cmp.AllowUnexported(walSample{})})
})
}
}
-func readWALSamples(t *testing.T, walDir string) []*walSample {
+func readWALSamples(t *testing.T, walDir string) []walSample {
t.Helper()
sr, err := wlog.NewSegmentsReader(walDir)
require.NoError(t, err)
@@ -1339,7 +1334,7 @@ func readWALSamples(t *testing.T, walDir string) []*walSample {
histograms []record.RefHistogramSample
lastSeries record.RefSeries
- outputSamples = make([]*walSample, 0)
+ outputSamples = make([]walSample, 0)
)
for r.Next() {
@@ -1353,7 +1348,7 @@ func readWALSamples(t *testing.T, walDir string) []*walSample {
samples, err = dec.Samples(rec, samples[:0])
require.NoError(t, err)
for _, s := range samples {
- outputSamples = append(outputSamples, &walSample{
+ outputSamples = append(outputSamples, walSample{
t: s.T,
f: s.V,
lbls: lastSeries.Labels.Copy(),
@@ -1364,7 +1359,7 @@ func readWALSamples(t *testing.T, walDir string) []*walSample {
histograms, err = dec.HistogramSamples(rec, histograms[:0])
require.NoError(t, err)
for _, h := range histograms {
- outputSamples = append(outputSamples, &walSample{
+ outputSamples = append(outputSamples, walSample{
t: h.T,
h: h.H,
lbls: lastSeries.Labels.Copy(),
@@ -1373,14 +1368,14 @@ func readWALSamples(t *testing.T, walDir string) []*walSample {
}
}
}
-
return outputSamples
}
-func BenchmarkCreateSeries(b *testing.B) {
+func BenchmarkGetOrCreate(b *testing.B) {
s := createTestAgentDB(b, nil, DefaultOptions())
defer s.Close()
+ // NOTE: This benchmarks appenderBase, so it does not matter if it's V1 or V2.
app := s.Appender(context.Background()).(*appender)
lbls := make([]labels.Labels, b.N)
diff --git a/tsdb/agent/series.go b/tsdb/agent/series.go
index 76e7342171..4eb691bfd5 100644
--- a/tsdb/agent/series.go
+++ b/tsdb/agent/series.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/agent/series_test.go b/tsdb/agent/series_test.go
index 036a80de4c..4b277b36b7 100644
--- a/tsdb/agent/series_test.go
+++ b/tsdb/agent/series_test.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/block.go b/tsdb/block.go
index 41b3037989..92638df164 100644
--- a/tsdb/block.go
+++ b/tsdb/block.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -102,11 +102,6 @@ type IndexReader interface {
// LabelNames returns all the unique label names present in the index in sorted order.
LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, error)
- // LabelValueFor returns label value for the given label name in the series referred to by ID.
- // If the series couldn't be found or the series doesn't have the requested label a
- // storage.ErrNotFound is returned as error.
- LabelValueFor(ctx context.Context, id storage.SeriesRef, label string) (string, error)
-
// LabelNamesFor returns all the label names for the series referred to by the postings.
// The names returned are sorted.
LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error)
@@ -567,11 +562,6 @@ func (r blockIndexReader) Close() error {
return nil
}
-// LabelValueFor returns label value for the given label name in the series referred to by ID.
-func (r blockIndexReader) LabelValueFor(ctx context.Context, id storage.SeriesRef, label string) (string, error) {
- return r.ir.LabelValueFor(ctx, id, label)
-}
-
// LabelNamesFor returns all the label names for the series referred to by the postings.
// The names returned are sorted.
func (r blockIndexReader) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) {
diff --git a/tsdb/block_test.go b/tsdb/block_test.go
index d02f83a9e9..855fa5638a 100644
--- a/tsdb/block_test.go
+++ b/tsdb/block_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/blockwriter.go b/tsdb/blockwriter.go
index 14137f12cc..af83a98083 100644
--- a/tsdb/blockwriter.go
+++ b/tsdb/blockwriter.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -86,6 +86,12 @@ func (w *BlockWriter) Appender(ctx context.Context) storage.Appender {
return w.head.Appender(ctx)
}
+// AppenderV2 returns a new appender on the database.
+// AppenderV2 can't be called concurrently. However, the returned AppenderV2 can safely be used concurrently.
+func (w *BlockWriter) AppenderV2(ctx context.Context) storage.AppenderV2 {
+ return w.head.AppenderV2(ctx)
+}
+
// Flush implements the Writer interface. This is where actual block writing
// happens. After flush completes, no writes can be done.
func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) {
diff --git a/tsdb/blockwriter_test.go b/tsdb/blockwriter_test.go
index e7c3146247..33f0e5a0f3 100644
--- a/tsdb/blockwriter_test.go
+++ b/tsdb/blockwriter_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -23,6 +23,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunks"
)
@@ -59,3 +60,37 @@ func TestBlockWriter(t *testing.T) {
require.NoError(t, w.Close())
}
+
+func TestBlockWriter_AppenderV2(t *testing.T) {
+ ctx := context.Background()
+ outputDir := t.TempDir()
+ w, err := NewBlockWriter(promslog.NewNopLogger(), outputDir, DefaultBlockDuration)
+ require.NoError(t, err)
+
+ // Add some series.
+ app := w.AppenderV2(ctx)
+ ts1, v1 := int64(44), float64(7)
+ _, err = app.Append(0, labels.FromStrings("a", "b"), 0, ts1, v1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ ts2, v2 := int64(55), float64(12)
+ _, err = app.Append(0, labels.FromStrings("c", "d"), 0, ts2, v2, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ id, err := w.Flush(ctx)
+ require.NoError(t, err)
+
+ // Confirm the block has the correct data.
+ blockpath := filepath.Join(outputDir, id.String())
+ b, err := OpenBlock(nil, blockpath, nil, nil)
+ require.NoError(t, err)
+ defer func() { require.NoError(t, b.Close()) }()
+ q, err := NewBlockQuerier(b, math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+ series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
+ sample1 := []chunks.Sample{sample{t: ts1, f: v1}}
+ sample2 := []chunks.Sample{sample{t: ts2, f: v2}}
+ expectedSeries := map[string][]chunks.Sample{"{a=\"b\"}": sample1, "{c=\"d\"}": sample2}
+ require.Equal(t, expectedSeries, series)
+
+ require.NoError(t, w.Close())
+}
diff --git a/tsdb/chunkenc/bstream.go b/tsdb/chunkenc/bstream.go
index 6e01798f72..abf6e4dbef 100644
--- a/tsdb/chunkenc/bstream.go
+++ b/tsdb/chunkenc/bstream.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunkenc/bstream_test.go b/tsdb/chunkenc/bstream_test.go
index 8ac45ef0b6..3098be5945 100644
--- a/tsdb/chunkenc/bstream_test.go
+++ b/tsdb/chunkenc/bstream_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunkenc/chunk.go b/tsdb/chunkenc/chunk.go
index 8cccb189fa..fed28c5701 100644
--- a/tsdb/chunkenc/chunk.go
+++ b/tsdb/chunkenc/chunk.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunkenc/chunk_test.go b/tsdb/chunkenc/chunk_test.go
index eac9e12b29..d2d0e4c053 100644
--- a/tsdb/chunkenc/chunk_test.go
+++ b/tsdb/chunkenc/chunk_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go
index d960e835f2..797bc596b5 100644
--- a/tsdb/chunkenc/float_histogram.go
+++ b/tsdb/chunkenc/float_histogram.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunkenc/float_histogram_test.go b/tsdb/chunkenc/float_histogram_test.go
index d112c81f1c..f27de97516 100644
--- a/tsdb/chunkenc/float_histogram_test.go
+++ b/tsdb/chunkenc/float_histogram_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go
index be1c31ae76..e05c49c81d 100644
--- a/tsdb/chunkenc/histogram.go
+++ b/tsdb/chunkenc/histogram.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunkenc/histogram_meta.go b/tsdb/chunkenc/histogram_meta.go
index 22bc4a6d3d..874e086812 100644
--- a/tsdb/chunkenc/histogram_meta.go
+++ b/tsdb/chunkenc/histogram_meta.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunkenc/histogram_meta_test.go b/tsdb/chunkenc/histogram_meta_test.go
index d3aa979b5e..3eb2a13962 100644
--- a/tsdb/chunkenc/histogram_meta_test.go
+++ b/tsdb/chunkenc/histogram_meta_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go
index c11102b470..38bbd58465 100644
--- a/tsdb/chunkenc/histogram_test.go
+++ b/tsdb/chunkenc/histogram_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunkenc/varbit.go b/tsdb/chunkenc/varbit.go
index 00ba027dda..4338555328 100644
--- a/tsdb/chunkenc/varbit.go
+++ b/tsdb/chunkenc/varbit.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunkenc/varbit_test.go b/tsdb/chunkenc/varbit_test.go
index 8042b98dc1..dcb43f08df 100644
--- a/tsdb/chunkenc/varbit_test.go
+++ b/tsdb/chunkenc/varbit_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go
index 29e2110705..bbe12a893b 100644
--- a/tsdb/chunkenc/xor.go
+++ b/tsdb/chunkenc/xor.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunkenc/xor_test.go b/tsdb/chunkenc/xor_test.go
index 609a3ac5ea..904e536b49 100644
--- a/tsdb/chunkenc/xor_test.go
+++ b/tsdb/chunkenc/xor_test.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunks/chunk_write_queue.go b/tsdb/chunks/chunk_write_queue.go
index bb9f239707..1a046ea00a 100644
--- a/tsdb/chunks/chunk_write_queue.go
+++ b/tsdb/chunks/chunk_write_queue.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunks/chunk_write_queue_test.go b/tsdb/chunks/chunk_write_queue_test.go
index fd81011091..489ff74210 100644
--- a/tsdb/chunks/chunk_write_queue_test.go
+++ b/tsdb/chunks/chunk_write_queue_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunks/chunks.go b/tsdb/chunks/chunks.go
index 8b8f5d0f81..681fceb2fb 100644
--- a/tsdb/chunks/chunks.go
+++ b/tsdb/chunks/chunks.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunks/chunks_test.go b/tsdb/chunks/chunks_test.go
index 6eb00f12ad..f40f996fde 100644
--- a/tsdb/chunks/chunks_test.go
+++ b/tsdb/chunks/chunks_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunks/head_chunks.go b/tsdb/chunks/head_chunks.go
index 5e143b8b32..ffe7e70fc6 100644
--- a/tsdb/chunks/head_chunks.go
+++ b/tsdb/chunks/head_chunks.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunks/head_chunks_other.go b/tsdb/chunks/head_chunks_other.go
index f30c5e55e9..42e94fc54d 100644
--- a/tsdb/chunks/head_chunks_other.go
+++ b/tsdb/chunks/head_chunks_other.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go
index 2d7744193d..17efd44aa6 100644
--- a/tsdb/chunks/head_chunks_test.go
+++ b/tsdb/chunks/head_chunks_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunks/head_chunks_windows.go b/tsdb/chunks/head_chunks_windows.go
index 214ee42f59..a16d0ff38e 100644
--- a/tsdb/chunks/head_chunks_windows.go
+++ b/tsdb/chunks/head_chunks_windows.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunks/queue.go b/tsdb/chunks/queue.go
index 860381a5fe..454d939ce6 100644
--- a/tsdb/chunks/queue.go
+++ b/tsdb/chunks/queue.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunks/queue_test.go b/tsdb/chunks/queue_test.go
index ab4dd14838..377a8181ff 100644
--- a/tsdb/chunks/queue_test.go
+++ b/tsdb/chunks/queue_test.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/chunks/samples.go b/tsdb/chunks/samples.go
index a5b16094df..8097bcd72b 100644
--- a/tsdb/chunks/samples.go
+++ b/tsdb/chunks/samples.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/compact.go b/tsdb/compact.go
index a5f27f9c0d..35e0a5b1fd 100644
--- a/tsdb/compact.go
+++ b/tsdb/compact.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -87,6 +87,7 @@ type LeveledCompactor struct {
maxBlockChunkSegmentSize int64
useUncachedIO bool
mergeFunc storage.VerticalChunkSeriesMergeFunc
+ blockExcludeFunc BlockExcludeFilterFunc
postingsEncoder index.PostingsEncoder
postingsDecoderFactory PostingsDecoderFactory
enableOverlappingCompaction bool
@@ -160,16 +161,24 @@ type LeveledCompactorOptions struct {
// PE specifies the postings encoder. It is called when compactor is writing out the postings for a label name/value pair during compaction.
// If it is nil then the default encoder is used. At the moment that is the "raw" encoder. See index.EncodePostingsRaw for more.
PE index.PostingsEncoder
+
// PD specifies the postings decoder factory to return different postings decoder based on BlockMeta. It is called when opening a block or opening the index file.
// If it is nil then a default decoder is used, compatible with Prometheus v2.
PD PostingsDecoderFactory
+
// MaxBlockChunkSegmentSize is the max block chunk segment size. If it is 0 then the default chunks.DefaultChunkSegmentSize is used.
MaxBlockChunkSegmentSize int64
+
// MergeFunc is used for merging series together in vertical compaction. By default storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge) is used.
MergeFunc storage.VerticalChunkSeriesMergeFunc
+
+ // BlockExcludeFilter is used to decide which blocks are excluded from compactions.
+ BlockExcludeFilter BlockExcludeFilterFunc
+
// EnableOverlappingCompaction enables compaction of overlapping blocks. In Prometheus it is always enabled.
// It is useful for downstream projects like Mimir, Cortex, Thanos where they have a separate component that does compaction.
EnableOverlappingCompaction bool
+
// Metrics is set of metrics for Compactor. By default, NewCompactorMetrics would be called to initialize metrics unless it is provided.
Metrics *CompactorMetrics
// UseUncachedIO allows bypassing the page cache when appropriate.
@@ -178,7 +187,9 @@ type LeveledCompactorOptions struct {
type PostingsDecoderFactory func(meta *BlockMeta) index.PostingsDecoder
-func DefaultPostingsDecoderFactory(*BlockMeta) index.PostingsDecoder {
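+// BlockExcludeFilterFunc reports whether the block described by meta should be excluded
+// from compaction planning.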
+type BlockExcludeFilterFunc func(meta *BlockMeta) bool
+
+func DefaultPostingsDecoderFactory(_ *BlockMeta) index.PostingsDecoder {
return index.DecodePostingsRaw
}
@@ -226,6 +237,7 @@ func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer
postingsEncoder: pe,
postingsDecoderFactory: opts.PD,
enableOverlappingCompaction: opts.EnableOverlappingCompaction,
+ blockExcludeFunc: opts.BlockExcludeFilter,
}, nil
}
@@ -250,12 +262,19 @@ func (c *LeveledCompactor) Plan(dir string) ([]string, error) {
if err != nil {
return nil, err
}
+ if c.blockExcludeFunc != nil && c.blockExcludeFunc(meta) {
+ break
+ }
dms = append(dms, dirMeta{dir, meta})
}
return c.plan(dms)
}
func (c *LeveledCompactor) plan(dms []dirMeta) ([]string, error) {
+ if len(dms) == 0 {
+ return nil, nil
+ }
+
slices.SortFunc(dms, func(a, b dirMeta) int {
switch {
case a.meta.MinTime < b.meta.MinTime:
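
The new `BlockExcludeFilterFunc` lets a caller keep selected blocks out of `Plan`; note that, as implemented above, the directory scan stops at the first excluded block rather than skipping it. Below is a minimal sketch of wiring a filter into `LeveledCompactorOptions`, with a purely hypothetical time-based criterion; it is not part of this change.

```go
package main

import (
	"time"

	"github.com/prometheus/prometheus/tsdb"
)

// excludeRecent is a hypothetical filter: blocks whose data is newer than the
// given window are kept out of compaction planning.
func excludeRecent(window time.Duration) tsdb.BlockExcludeFilterFunc {
	return func(meta *tsdb.BlockMeta) bool {
		return meta.MaxTime > time.Now().Add(-window).UnixMilli()
	}
}

func main() {
	opts := tsdb.LeveledCompactorOptions{
		EnableOverlappingCompaction: true,
		BlockExcludeFilter:          excludeRecent(2 * time.Hour),
	}
	_ = opts // Passed to NewLeveledCompactorWithOptions by the caller.
}
```
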
diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go
index 203a04dec8..29b90d9bbc 100644
--- a/tsdb/compact_test.go
+++ b/tsdb/compact_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -1257,10 +1257,7 @@ func BenchmarkCompactionFromOOOHead(b *testing.B) {
// This is needed for unit tests that rely on
// checking state before and after a compaction.
func TestDisableAutoCompactions(t *testing.T) {
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
blockRange := db.compactor.(*LeveledCompactor).ranges[0]
label := labels.FromStrings("foo", "bar")
@@ -1418,10 +1415,7 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
t.Run(title, func(t *testing.T) {
ctx := context.Background()
- db := openTestDB(t, nil, []int64{1, 100})
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t, withRngs(1, 100))
db.DisableCompactions()
expBlocks := bootStrap(db)
@@ -1993,14 +1987,11 @@ func TestDelayedCompaction(t *testing.T) {
}
t.Parallel()
- var options *Options
+ var opts *Options
if c.compactionDelay > 0 {
- options = &Options{CompactionDelay: c.compactionDelay}
+ opts = &Options{CompactionDelay: c.compactionDelay}
}
- db := openTestDB(t, options, []int64{10})
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t, withOpts(opts), withRngs(10))
label := labels.FromStrings("foo", "bar")
diff --git a/tsdb/db.go b/tsdb/db.go
index ccbd303dba..23240358ef 100644
--- a/tsdb/db.go
+++ b/tsdb/db.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -47,6 +47,7 @@ import (
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/compression"
+ "github.com/prometheus/prometheus/util/features"
)
const (
@@ -93,6 +94,7 @@ func DefaultOptions() *Options {
CompactionDelayMaxPercent: DefaultCompactionDelayMaxPercent,
CompactionDelay: time.Duration(0),
PostingsDecoderFactory: DefaultPostingsDecoderFactory,
+ BlockReloadInterval: 1 * time.Minute,
}
}
@@ -227,6 +229,30 @@ type Options struct {
// StaleSeriesCompactionThreshold is a number between 0.0-1.0 indicating the % of stale series in
// the in-memory Head block. If the % of stale series crosses this threshold, stale series compaction is run immediately.
StaleSeriesCompactionThreshold float64
+
+	// EnableSTAsZeroSample represents the 'created-timestamp-zero-ingestion' feature flag.
+	// If true, the ST, when non-zero and earlier than the sample timestamp, will be stored
+	// as a zero sample before the actual sample.
+	//
+	// The zero sample is best-effort; only a debug log is emitted on failure.
+ // NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
+ // is implemented.
+ EnableSTAsZeroSample bool
+
+ // EnableMetadataWALRecords represents 'metadata-wal-records' feature flag.
+ // NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
+ // is implemented.
+ EnableMetadataWALRecords bool
+
+ // BlockCompactionExcludeFunc is a function which returns true for blocks that should NOT be compacted.
+ // It's passed down to the TSDB compactor.
+ BlockCompactionExcludeFunc BlockExcludeFilterFunc
+
+ // BlockReloadInterval is the interval at which blocks are reloaded.
+ BlockReloadInterval time.Duration
+
+ // FeatureRegistry is used to register TSDB features.
+ FeatureRegistry features.Collector
}
type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error)
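
At the DB level the same exclusion capability is exposed through `Options`, together with the new `BlockReloadInterval`. A hedged sketch of how an embedding application might set these fields follows; the 30s interval and the size-based criterion are placeholders, not recommendations.

```go
package main

import (
	"time"

	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	opts := tsdb.DefaultOptions()

	// Reload the on-disk block list more often than the 1 minute default.
	opts.BlockReloadInterval = 30 * time.Second

	// Hypothetical criterion: keep blocks wider than 6h out of further
	// compactions; everything else is compacted as usual.
	opts.BlockCompactionExcludeFunc = func(meta *tsdb.BlockMeta) bool {
		return meta.MaxTime-meta.MinTime > 6*time.Hour.Milliseconds()
	}

	_ = opts // Handed to tsdb.Open by the embedding application.
}
```
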
@@ -787,6 +813,15 @@ func Open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, st
var rngs []int64
opts, rngs = validateOpts(opts, nil)
+ // Register TSDB features if a registry is provided.
+ if opts.FeatureRegistry != nil {
+ opts.FeatureRegistry.Set(features.TSDB, "exemplar_storage", opts.EnableExemplarStorage)
+ opts.FeatureRegistry.Set(features.TSDB, "delayed_compaction", opts.EnableDelayedCompaction)
+ opts.FeatureRegistry.Set(features.TSDB, "isolation", !opts.IsolationDisabled)
+ opts.FeatureRegistry.Set(features.TSDB, "use_uncached_io", opts.UseUncachedIO)
+ opts.FeatureRegistry.Enable(features.TSDB, "native_histograms")
+ }
+
return open(dir, l, r, opts, rngs, stats)
}
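
The feature registration above only relies on two methods, `Set` and `Enable`; the `util/features` package itself is outside this diff. The following is a rough in-memory collector with that shape, shown only to illustrate the expected call pattern; the real `features.Collector` interface and the type of `features.TSDB` may differ.

```go
package main

import (
	"fmt"
	"sync"
)

// memCollector is a purely illustrative stand-in for a features.Collector.
// Its method shapes are inferred from the calls in Open above and may not
// match the real util/features API.
type memCollector struct {
	mu    sync.Mutex
	flags map[string]bool
}

func (c *memCollector) Set(component, feature string, enabled bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.flags == nil {
		c.flags = map[string]bool{}
	}
	c.flags[component+"/"+feature] = enabled
}

func (c *memCollector) Enable(component, feature string) {
	c.Set(component, feature, true)
}

func main() {
	c := &memCollector{}
	c.Set("tsdb", "exemplar_storage", false)
	c.Enable("tsdb", "native_histograms")
	fmt.Println(c.flags) // map[tsdb/exemplar_storage:false tsdb/native_histograms:true]
}
```
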
@@ -821,6 +856,9 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) {
if opts.OutOfOrderTimeWindow < 0 {
opts.OutOfOrderTimeWindow = 0
}
+ if opts.BlockReloadInterval < 1*time.Second {
+ opts.BlockReloadInterval = 1 * time.Second
+ }
if len(rngs) == 0 {
// Start with smallest block duration and create exponential buckets until the exceed the
@@ -919,6 +957,7 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn
EnableOverlappingCompaction: opts.EnableOverlappingCompaction,
PD: opts.PostingsDecoderFactory,
UseUncachedIO: opts.UseUncachedIO,
+ BlockExcludeFilter: opts.BlockCompactionExcludeFunc,
})
}
if err != nil {
@@ -979,6 +1018,8 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn
headOpts.OutOfOrderTimeWindow.Store(opts.OutOfOrderTimeWindow)
headOpts.OutOfOrderCapMax.Store(opts.OutOfOrderCapMax)
headOpts.EnableSharding = opts.EnableSharding
+ headOpts.EnableSTAsZeroSample = opts.EnableSTAsZeroSample
+ headOpts.EnableMetadataWALRecords = opts.EnableMetadataWALRecords
if opts.WALReplayConcurrency > 0 {
headOpts.WALReplayConcurrency = opts.WALReplayConcurrency
}
@@ -1108,7 +1149,7 @@ func (db *DB) run(ctx context.Context) {
}
select {
- case <-time.After(1 * time.Minute):
+ case <-time.After(db.opts.BlockReloadInterval):
db.cmtx.Lock()
if err := db.reloadBlocks(); err != nil {
db.logger.Error("reloadBlocks", "err", err)
@@ -1154,19 +1195,16 @@ func (db *DB) run(ctx context.Context) {
}
}
-func nextStepAlignedTime(step time.Duration) (next time.Time) {
- next = time.Now().Round(step)
- if next.Before(time.Now()) {
- next = next.Add(step)
- }
- return
-}
-
-// Appender opens a new appender against the database.
+// Appender opens a new Appender against the database.
func (db *DB) Appender(ctx context.Context) storage.Appender {
return dbAppender{db: db, Appender: db.head.Appender(ctx)}
}
+// AppenderV2 opens a new AppenderV2 against the database.
+func (db *DB) AppenderV2(ctx context.Context) storage.AppenderV2 {
+ return dbAppenderV2{db: db, AppenderV2: db.head.AppenderV2(ctx)}
+}
+
// ApplyConfig applies a new config to the DB.
// Behaviour of 'OutOfOrderTimeWindow' is as follows:
// OOO enabled = oooTimeWindow > 0. OOO disabled = oooTimeWindow is 0.
@@ -1282,6 +1320,36 @@ func (a dbAppender) Commit() error {
return err
}
+// dbAppenderV2 wraps the DB's head appender and triggers compactions on commit
+// if necessary.
+type dbAppenderV2 struct {
+ storage.AppenderV2
+ db *DB
+}
+
+var _ storage.GetRef = dbAppenderV2{}
+
+func (a dbAppenderV2) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) {
+ if g, ok := a.AppenderV2.(storage.GetRef); ok {
+ return g.GetRef(lset, hash)
+ }
+ return 0, labels.EmptyLabels()
+}
+
+func (a dbAppenderV2) Commit() error {
+ err := a.AppenderV2.Commit()
+
+	// In practice we could run this check only every few minutes. But for benchmarks
+	// and high-frequency use cases this is the safer way.
+ if a.db.head.compactable() {
+ select {
+ case a.db.compactc <- struct{}{}:
+ default:
+ }
+ }
+ return err
+}
+
// waitingForCompactionDelay returns true if the DB is waiting for the Head compaction delay.
// This doesn't guarantee that the Head is really compactable.
func (db *DB) waitingForCompactionDelay() bool {
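
`dbAppenderV2.Commit` keeps the same compaction-trigger behaviour as the V1 wrapper. The `AppenderV2.Append` call shape below is taken from the new tests further down in this diff (ref, labels, ST as described by EnableSTAsZeroSample above, sample timestamp, float value, histogram, float histogram, options); this is a hedged usage sketch with placeholder paths and values, not an excerpt of this change.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	db, err := tsdb.Open("./data", nil, nil, tsdb.DefaultOptions(), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	app := db.AppenderV2(context.Background())
	now := time.Now().UnixMilli()

	// Append a float sample; the histogram arguments stay nil.
	ref, err := app.Append(0, labels.FromStrings("job", "demo"), 0, now, 42.0, nil, nil, storage.AOptions{})
	if err != nil {
		log.Fatal(err)
	}

	// Reuse the returned reference for the next sample of the same series.
	if _, err := app.Append(ref, labels.EmptyLabels(), 0, now+15_000, 43.0, nil, nil, storage.AOptions{}); err != nil {
		log.Fatal(err)
	}

	if err := app.Commit(); err != nil {
		log.Fatal(err)
	}
}
```
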
@@ -2060,6 +2128,13 @@ func (db *DB) Head() *Head {
// Close the partition.
func (db *DB) Close() error {
+ // Allow close-after-close operation for simpler use (e.g. tests).
+ select {
+ case <-db.donec:
+ return nil
+ default:
+ }
+
close(db.stopc)
if db.compactCancel != nil {
db.compactCancel()
diff --git a/tsdb/db_append_v2_test.go b/tsdb/db_append_v2_test.go
new file mode 100644
index 0000000000..344b1d6943
--- /dev/null
+++ b/tsdb/db_append_v2_test.go
@@ -0,0 +1,7621 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdb
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "math/rand"
+ "os"
+ "path"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/oklog/ulid/v2"
+ "github.com/prometheus/client_golang/prometheus"
+ prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
+ "github.com/prometheus/common/promslog"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/atomic"
+
+ "github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/tsdb/chunkenc"
+ "github.com/prometheus/prometheus/tsdb/chunks"
+ "github.com/prometheus/prometheus/tsdb/fileutil"
+ "github.com/prometheus/prometheus/tsdb/index"
+ "github.com/prometheus/prometheus/tsdb/record"
+ "github.com/prometheus/prometheus/tsdb/tombstones"
+ "github.com/prometheus/prometheus/tsdb/tsdbutil"
+ "github.com/prometheus/prometheus/tsdb/wlog"
+ "github.com/prometheus/prometheus/util/annotations"
+ "github.com/prometheus/prometheus/util/compression"
+ "github.com/prometheus/prometheus/util/testutil"
+)
+
+// TODO(bwplotka): Ensure non-ported tests are not deleted from db_test.go when removing AppenderV1 flow (#17632):
+// * TestQuerier_ShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks
+// * TestChunkQuerier_ShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks
+// * TestEmptyLabelsetCausesError
+// * TestQueryHistogramFromBlocksWithCompaction
+
+// TODO(krajorama): Add histograms test cases.
+func TestDataAvailableOnlyAfterCommit_AppendV2(t *testing.T) {
+ db := newTestDB(t)
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+
+ _, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 0, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ querier, err := db.Querier(0, 1)
+ require.NoError(t, err)
+ seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+ require.Equal(t, map[string][]chunks.Sample{}, seriesSet)
+
+ err = app.Commit()
+ require.NoError(t, err)
+
+ querier, err = db.Querier(0, 1)
+ require.NoError(t, err)
+ defer querier.Close()
+
+ seriesSet = query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+
+ require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: {sample{t: 0, f: 0}}}, seriesSet)
+}
+
+// TestNoPanicAfterWALCorruption ensures that querying the db after a WAL corruption doesn't cause a panic.
+// https://github.com/prometheus/prometheus/issues/7548
+func TestNoPanicAfterWALCorruption_AppendV2(t *testing.T) {
+ db := newTestDB(t, withOpts(&Options{WALSegmentSize: 32 * 1024}))
+
+ // Append until the first mmapped head chunk.
+ // This is to ensure that all samples can be read from the mmapped chunks when the WAL is corrupted.
+ var expSamples []chunks.Sample
+ var maxt int64
+ ctx := context.Background()
+ {
+ // Appending 121 samples because on the 121st a new chunk will be created.
+ for range 121 {
+ app := db.AppenderV2(ctx)
+ _, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, maxt, 0, nil, nil, storage.AOptions{})
+ expSamples = append(expSamples, sample{t: maxt, f: 0})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ maxt++
+ }
+ require.NoError(t, db.Close())
+ }
+
+ // Corrupt the WAL after the first sample of the series so that it has at least one sample and
+ // it is not garbage collected.
+ // The repair deletes all WAL records after the corrupted record and these are read from the mmapped chunk.
+ {
+ walFiles, err := os.ReadDir(path.Join(db.Dir(), "wal"))
+ require.NoError(t, err)
+ f, err := os.OpenFile(path.Join(db.Dir(), "wal", walFiles[0].Name()), os.O_RDWR, 0o666)
+ require.NoError(t, err)
+ r := wlog.NewReader(bufio.NewReader(f))
+ require.True(t, r.Next(), "reading the series record")
+ require.True(t, r.Next(), "reading the first sample record")
+ // Write an invalid record header to corrupt everything after the first wal sample.
+ _, err = f.WriteAt([]byte{99}, r.Offset())
+ require.NoError(t, err)
+ f.Close()
+ }
+
+ // Query the data.
+ {
+ db := newTestDB(t, withDir(db.Dir()))
+ require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.walCorruptionsTotal), "WAL corruption count mismatch")
+
+ querier, err := db.Querier(0, maxt)
+ require.NoError(t, err)
+ seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "", ""))
+ // The last sample should be missing as it was after the WAL segment corruption.
+ require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: expSamples[0 : len(expSamples)-1]}, seriesSet)
+ }
+}
+
+func TestDataNotAvailableAfterRollback_AppendV2(t *testing.T) {
+ db := newTestDB(t)
+
+ app := db.AppenderV2(context.Background())
+ _, err := app.Append(0, labels.FromStrings("type", "float"), 0, 0, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ _, err = app.Append(
+ 0, labels.FromStrings("type", "histogram"), 0, 0, 0,
+ &histogram.Histogram{Count: 42, Sum: math.NaN()}, nil,
+ storage.AOptions{},
+ )
+ require.NoError(t, err)
+
+ _, err = app.Append(
+ 0, labels.FromStrings("type", "floathistogram"), 0, 0, 0,
+ nil, &histogram.FloatHistogram{Count: 42, Sum: math.NaN()},
+ storage.AOptions{},
+ )
+ require.NoError(t, err)
+
+ err = app.Rollback()
+ require.NoError(t, err)
+
+ for _, typ := range []string{"float", "histogram", "floathistogram"} {
+ querier, err := db.Querier(0, 1)
+ require.NoError(t, err)
+ seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "type", typ))
+ require.Equal(t, map[string][]chunks.Sample{}, seriesSet)
+ }
+
+ sr, err := wlog.NewSegmentsReader(db.head.wal.Dir())
+ require.NoError(t, err)
+ defer func() {
+ require.NoError(t, sr.Close())
+ }()
+
+ // Read records from WAL and check for expected count of series and samples.
+ var (
+ r = wlog.NewReader(sr)
+ dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
+
+ walSeriesCount, walSamplesCount, walHistogramCount, walFloatHistogramCount, walExemplarsCount int
+ )
+ for r.Next() {
+ rec := r.Record()
+ switch dec.Type(rec) {
+ case record.Series:
+ var series []record.RefSeries
+ series, err = dec.Series(rec, series)
+ require.NoError(t, err)
+ walSeriesCount += len(series)
+
+ case record.Samples:
+ var samples []record.RefSample
+ samples, err = dec.Samples(rec, samples)
+ require.NoError(t, err)
+ walSamplesCount += len(samples)
+
+ case record.Exemplars:
+ var exemplars []record.RefExemplar
+ exemplars, err = dec.Exemplars(rec, exemplars)
+ require.NoError(t, err)
+ walExemplarsCount += len(exemplars)
+
+ case record.HistogramSamples, record.CustomBucketsHistogramSamples:
+ var histograms []record.RefHistogramSample
+ histograms, err = dec.HistogramSamples(rec, histograms)
+ require.NoError(t, err)
+ walHistogramCount += len(histograms)
+
+ case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
+ var floatHistograms []record.RefFloatHistogramSample
+ floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
+ require.NoError(t, err)
+ walFloatHistogramCount += len(floatHistograms)
+
+ default:
+ }
+ }
+
+ // Check that only series get stored after calling Rollback.
+ require.Equal(t, 3, walSeriesCount, "series should have been written to WAL")
+ require.Equal(t, 0, walSamplesCount, "samples should not have been written to WAL")
+ require.Equal(t, 0, walExemplarsCount, "exemplars should not have been written to WAL")
+ require.Equal(t, 0, walHistogramCount, "histograms should not have been written to WAL")
+ require.Equal(t, 0, walFloatHistogramCount, "float histograms should not have been written to WAL")
+}
+
+func TestDBAppenderV2_AddRef(t *testing.T) {
+ db := newTestDB(t)
+
+ ctx := context.Background()
+ app1 := db.AppenderV2(ctx)
+
+ ref1, err := app1.Append(0, labels.FromStrings("a", "b"), 0, 123, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ // Reference should already work before commit.
+ ref2, err := app1.Append(ref1, labels.EmptyLabels(), 0, 124, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.Equal(t, ref1, ref2)
+
+ err = app1.Commit()
+ require.NoError(t, err)
+
+ app2 := db.AppenderV2(ctx)
+
+	// First ref should already work in the next transaction.
+ ref3, err := app2.Append(ref1, labels.EmptyLabels(), 0, 125, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.Equal(t, ref1, ref3)
+
+ ref4, err := app2.Append(ref1, labels.FromStrings("a", "b"), 0, 133, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.Equal(t, ref1, ref4)
+
+ // Reference must be valid to add another sample.
+ ref5, err := app2.Append(ref2, labels.EmptyLabels(), 0, 143, 2, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.Equal(t, ref1, ref5)
+
+ // Missing labels & invalid refs should fail.
+ _, err = app2.Append(9999999, labels.EmptyLabels(), 0, 1, 1, nil, nil, storage.AOptions{})
+ require.ErrorIs(t, err, ErrInvalidSample)
+
+ require.NoError(t, app2.Commit())
+
+ q, err := db.Querier(0, 200)
+ require.NoError(t, err)
+
+ res := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+
+ require.Equal(t, map[string][]chunks.Sample{
+ labels.FromStrings("a", "b").String(): {
+ sample{t: 123, f: 0},
+ sample{t: 124, f: 1},
+ sample{t: 125, f: 0},
+ sample{t: 133, f: 1},
+ sample{t: 143, f: 2},
+ },
+ }, res)
+}
+
+func TestDBAppenderV2_EmptyLabelsIgnored(t *testing.T) {
+ db := newTestDB(t)
+
+ ctx := context.Background()
+ app1 := db.AppenderV2(ctx)
+
+ ref1, err := app1.Append(0, labels.FromStrings("a", "b"), 0, 123, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ // Add with empty label.
+ ref2, err := app1.Append(0, labels.FromStrings("a", "b", "c", ""), 0, 124, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ // Should be the same series.
+ require.Equal(t, ref1, ref2)
+
+ err = app1.Commit()
+ require.NoError(t, err)
+}
+
+func TestDBAppenderV2_EmptyLabelsetCausesError(t *testing.T) {
+ db := newTestDB(t)
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+ _, err := app.Append(0, labels.Labels{}, 0, 0, 0, nil, nil, storage.AOptions{})
+ require.Error(t, err)
+ require.Equal(t, "empty labelset: invalid sample", err.Error())
+}
+
+func TestDeleteSimple_AppendV2(t *testing.T) {
+ const numSamples int64 = 10
+
+ cases := []struct {
+ Intervals tombstones.Intervals
+ remaint []int64
+ }{
+ {
+ Intervals: tombstones.Intervals{{Mint: 0, Maxt: 3}},
+ remaint: []int64{4, 5, 6, 7, 8, 9},
+ },
+ {
+ Intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}},
+ remaint: []int64{0, 4, 5, 6, 7, 8, 9},
+ },
+ {
+ Intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 7}},
+ remaint: []int64{0, 8, 9},
+ },
+ {
+ Intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 700}},
+ remaint: []int64{0},
+ },
+ { // This case is to ensure that labels and symbols are deleted.
+ Intervals: tombstones.Intervals{{Mint: 0, Maxt: 9}},
+ remaint: []int64{},
+ },
+ }
+
+ for _, c := range cases {
+ t.Run("", func(t *testing.T) {
+ db := newTestDB(t)
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+
+ smpls := make([]float64, numSamples)
+ for i := range numSamples {
+ smpls[i] = rand.Float64()
+ app.Append(0, labels.FromStrings("a", "b"), 0, i, smpls[i], nil, nil, storage.AOptions{})
+ }
+
+ require.NoError(t, app.Commit())
+
+ // TODO(gouthamve): Reset the tombstones somehow.
+ // Delete the ranges.
+ for _, r := range c.Intervals {
+ require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
+ }
+
+ // Compare the result.
+ q, err := db.Querier(0, numSamples)
+ require.NoError(t, err)
+
+ res := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+
+ expSamples := make([]chunks.Sample, 0, len(c.remaint))
+ for _, ts := range c.remaint {
+ expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
+ }
+
+ expss := newMockSeriesSet([]storage.Series{
+ storage.NewListSeries(labels.FromStrings("a", "b"), expSamples),
+ })
+
+ for {
+ eok, rok := expss.Next(), res.Next()
+ require.Equal(t, eok, rok)
+
+ if !eok {
+ require.Empty(t, res.Warnings())
+ break
+ }
+ sexp := expss.At()
+ sres := res.At()
+
+ require.Equal(t, sexp.Labels(), sres.Labels())
+
+ smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
+ smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)
+
+ require.Equal(t, errExp, errRes)
+ require.Equal(t, smplExp, smplRes)
+ }
+ })
+ }
+}
+
+func TestAmendHistogramDatapointCausesError_AppendV2(t *testing.T) {
+ db := newTestDB(t)
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+ _, err := app.Append(0, labels.FromStrings("a", "b"), 0, 0, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ app = db.AppenderV2(ctx)
+ _, err = app.Append(0, labels.FromStrings("a", "b"), 0, 0, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, labels.FromStrings("a", "b"), 0, 0, 1, nil, nil, storage.AOptions{})
+ require.ErrorIs(t, err, storage.ErrDuplicateSampleForTimestamp)
+ require.NoError(t, app.Rollback())
+
+ h := histogram.Histogram{
+ Schema: 3,
+ Count: 52,
+ Sum: 2.7,
+ ZeroThreshold: 0.1,
+ ZeroCount: 42,
+ PositiveSpans: []histogram.Span{
+ {Offset: 0, Length: 4},
+ {Offset: 10, Length: 3},
+ },
+ PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
+ }
+ fh := h.ToFloat(nil)
+
+ app = db.AppenderV2(ctx)
+ _, err = app.Append(0, labels.FromStrings("a", "c"), 0, 0, 0, h.Copy(), nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ app = db.AppenderV2(ctx)
+ _, err = app.Append(0, labels.FromStrings("a", "c"), 0, 0, 0, h.Copy(), nil, storage.AOptions{})
+ require.NoError(t, err)
+ h.Schema = 2
+ _, err = app.Append(0, labels.FromStrings("a", "c"), 0, 0, 0, h.Copy(), nil, storage.AOptions{})
+ require.Equal(t, storage.ErrDuplicateSampleForTimestamp, err)
+ require.NoError(t, app.Rollback())
+
+ // Float histogram.
+ app = db.AppenderV2(ctx)
+ _, err = app.Append(0, labels.FromStrings("a", "d"), 0, 0, 0, nil, fh.Copy(), storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ app = db.AppenderV2(ctx)
+ _, err = app.Append(0, labels.FromStrings("a", "d"), 0, 0, 0, nil, fh.Copy(), storage.AOptions{})
+ require.NoError(t, err)
+ fh.Schema = 2
+ _, err = app.Append(0, labels.FromStrings("a", "d"), 0, 0, 0, nil, fh.Copy(), storage.AOptions{})
+ require.Equal(t, storage.ErrDuplicateSampleForTimestamp, err)
+ require.NoError(t, app.Rollback())
+}
+
+func TestDuplicateNaNDatapointNoAmendError_AppendV2(t *testing.T) {
+ db := newTestDB(t)
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+ _, err := app.Append(0, labels.FromStrings("a", "b"), 0, 0, math.NaN(), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ app = db.AppenderV2(ctx)
+ _, err = app.Append(0, labels.FromStrings("a", "b"), 0, 0, math.NaN(), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+}
+
+func TestNonDuplicateNaNDatapointsCausesAmendError_AppendV2(t *testing.T) {
+ db := newTestDB(t)
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+ _, err := app.Append(0, labels.FromStrings("a", "b"), 0, 0, math.Float64frombits(0x7ff0000000000001), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ app = db.AppenderV2(ctx)
+ _, err = app.Append(0, labels.FromStrings("a", "b"), 0, 0, math.Float64frombits(0x7ff0000000000002), nil, nil, storage.AOptions{})
+ require.ErrorIs(t, err, storage.ErrDuplicateSampleForTimestamp)
+}
+
+func TestSkippingInvalidValuesInSameTxn_AppendV2(t *testing.T) {
+ db := newTestDB(t)
+
+ // Append AmendedValue.
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+ _, err := app.Append(0, labels.FromStrings("a", "b"), 0, 0, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, labels.FromStrings("a", "b"), 0, 0, 2, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ // Make sure the right value is stored.
+ q, err := db.Querier(0, 10)
+ require.NoError(t, err)
+
+ ssMap := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+
+ require.Equal(t, map[string][]chunks.Sample{
+ labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1, nil, nil}},
+ }, ssMap)
+
+ // Append Out of Order Value.
+ app = db.AppenderV2(ctx)
+ _, err = app.Append(0, labels.FromStrings("a", "b"), 0, 10, 3, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, labels.FromStrings("a", "b"), 0, 7, 5, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ q, err = db.Querier(0, 10)
+ require.NoError(t, err)
+
+ ssMap = query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+
+ require.Equal(t, map[string][]chunks.Sample{
+ labels.New(labels.Label{Name: "a", Value: "b"}).String(): {sample{0, 1, nil, nil}, sample{10, 3, nil, nil}},
+ }, ssMap)
+}
+
+func TestDB_Snapshot_AppendV2(t *testing.T) {
+ db := newTestDB(t)
+
+ // append data
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+ mint := int64(1414141414000)
+ for i := range 1000 {
+ _, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, mint+int64(i), 1.0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+
+ // create snapshot
+ snap := t.TempDir()
+ require.NoError(t, db.Snapshot(snap, true))
+ require.NoError(t, db.Close())
+
+ // reopen DB from snapshot
+ db = newTestDB(t, withDir(snap))
+
+ querier, err := db.Querier(mint, mint+1000)
+ require.NoError(t, err)
+ defer func() { require.NoError(t, querier.Close()) }()
+
+ // sum values
+ seriesSet := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+ var series chunkenc.Iterator
+ sum := 0.0
+ for seriesSet.Next() {
+ series = seriesSet.At().Iterator(series)
+ for series.Next() == chunkenc.ValFloat {
+ _, v := series.At()
+ sum += v
+ }
+ require.NoError(t, series.Err())
+ }
+ require.NoError(t, seriesSet.Err())
+ require.Empty(t, seriesSet.Warnings())
+ require.Equal(t, 1000.0, sum)
+}
+
+// TestDB_Snapshot_ChunksOutsideOfCompactedRange ensures that a snapshot removes chunk samples
+// that are outside the set block time range.
+// See https://github.com/prometheus/prometheus/issues/5105
+func TestDB_Snapshot_ChunksOutsideOfCompactedRange_AppendV2(t *testing.T) {
+ db := newTestDB(t)
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+ mint := int64(1414141414000)
+ for i := range 1000 {
+ _, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, mint+int64(i), 1.0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+
+ snap := t.TempDir()
+
+	// Hack: introduce a "race" by making the head's max time lower than the maxTime of the last chunk.
+ db.head.maxTime.Sub(10)
+
+ require.NoError(t, db.Snapshot(snap, true))
+ require.NoError(t, db.Close())
+
+ // reopen DB from snapshot
+ db = newTestDB(t, withDir(snap))
+
+ querier, err := db.Querier(mint, mint+1000)
+ require.NoError(t, err)
+ defer func() { require.NoError(t, querier.Close()) }()
+
+ // Sum values.
+ seriesSet := querier.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+ var series chunkenc.Iterator
+ sum := 0.0
+ for seriesSet.Next() {
+ series = seriesSet.At().Iterator(series)
+ for series.Next() == chunkenc.ValFloat {
+ _, v := series.At()
+ sum += v
+ }
+ require.NoError(t, series.Err())
+ }
+ require.NoError(t, seriesSet.Err())
+ require.Empty(t, seriesSet.Warnings())
+
+	// Since we snapshotted with MaxTime - 10, expect 10 fewer samples.
+ require.Equal(t, 1000.0-10, sum)
+}
+
+func TestDB_SnapshotWithDelete_AppendV2(t *testing.T) {
+ const numSamples int64 = 10
+
+ db := newTestDB(t)
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+
+ smpls := make([]float64, numSamples)
+ for i := range numSamples {
+ smpls[i] = rand.Float64()
+ app.Append(0, labels.FromStrings("a", "b"), 0, i, smpls[i], nil, nil, storage.AOptions{})
+ }
+
+ require.NoError(t, app.Commit())
+ cases := []struct {
+ intervals tombstones.Intervals
+ remaint []int64
+ }{
+ {
+ intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 7}},
+ remaint: []int64{0, 8, 9},
+ },
+ }
+
+ for _, c := range cases {
+ t.Run("", func(t *testing.T) {
+ // TODO(gouthamve): Reset the tombstones somehow.
+ // Delete the ranges.
+ for _, r := range c.intervals {
+ require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
+ }
+
+ // create snapshot
+ snap := t.TempDir()
+
+ require.NoError(t, db.Snapshot(snap, true))
+
+ // reopen DB from snapshot
+ db := newTestDB(t, withDir(snap))
+
+ // Compare the result.
+ q, err := db.Querier(0, numSamples)
+ require.NoError(t, err)
+ defer func() { require.NoError(t, q.Close()) }()
+
+ res := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+
+ expSamples := make([]chunks.Sample, 0, len(c.remaint))
+ for _, ts := range c.remaint {
+ expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
+ }
+
+ expss := newMockSeriesSet([]storage.Series{
+ storage.NewListSeries(labels.FromStrings("a", "b"), expSamples),
+ })
+
+ if len(expSamples) == 0 {
+ require.False(t, res.Next())
+ return
+ }
+
+ for {
+ eok, rok := expss.Next(), res.Next()
+ require.Equal(t, eok, rok)
+
+ if !eok {
+ require.Empty(t, res.Warnings())
+ break
+ }
+ sexp := expss.At()
+ sres := res.At()
+
+ require.Equal(t, sexp.Labels(), sres.Labels())
+
+ smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
+ smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)
+
+ require.Equal(t, errExp, errRes)
+ require.Equal(t, smplExp, smplRes)
+ }
+ })
+ }
+}
+
+func TestDB_e2e_AppendV2(t *testing.T) {
+ const (
+ numDatapoints = 1000
+ numRanges = 1000
+ timeInterval = int64(3)
+ )
+ // Create 8 series with 1000 data-points of different ranges and run queries.
+ lbls := [][]labels.Label{
+ {
+ {Name: "a", Value: "b"},
+ {Name: "instance", Value: "localhost:9090"},
+ {Name: "job", Value: "prometheus"},
+ },
+ {
+ {Name: "a", Value: "b"},
+ {Name: "instance", Value: "127.0.0.1:9090"},
+ {Name: "job", Value: "prometheus"},
+ },
+ {
+ {Name: "a", Value: "b"},
+ {Name: "instance", Value: "127.0.0.1:9090"},
+ {Name: "job", Value: "prom-k8s"},
+ },
+ {
+ {Name: "a", Value: "b"},
+ {Name: "instance", Value: "localhost:9090"},
+ {Name: "job", Value: "prom-k8s"},
+ },
+ {
+ {Name: "a", Value: "c"},
+ {Name: "instance", Value: "localhost:9090"},
+ {Name: "job", Value: "prometheus"},
+ },
+ {
+ {Name: "a", Value: "c"},
+ {Name: "instance", Value: "127.0.0.1:9090"},
+ {Name: "job", Value: "prometheus"},
+ },
+ {
+ {Name: "a", Value: "c"},
+ {Name: "instance", Value: "127.0.0.1:9090"},
+ {Name: "job", Value: "prom-k8s"},
+ },
+ {
+ {Name: "a", Value: "c"},
+ {Name: "instance", Value: "localhost:9090"},
+ {Name: "job", Value: "prom-k8s"},
+ },
+ }
+
+ seriesMap := map[string][]chunks.Sample{}
+ for _, l := range lbls {
+ seriesMap[labels.New(l...).String()] = []chunks.Sample{}
+ }
+
+ db := newTestDB(t)
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+
+ for _, l := range lbls {
+ lset := labels.New(l...)
+ series := []chunks.Sample{}
+
+ ts := rand.Int63n(300)
+ for range numDatapoints {
+ v := rand.Float64()
+
+ series = append(series, sample{ts, v, nil, nil})
+
+ _, err := app.Append(0, lset, 0, ts, v, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ ts += rand.Int63n(timeInterval) + 1
+ }
+
+ seriesMap[lset.String()] = series
+ }
+
+ require.NoError(t, app.Commit())
+
+ // Query each selector on 1000 random time-ranges.
+ queries := []struct {
+ ms []*labels.Matcher
+ }{
+ {
+ ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "b")},
+ },
+ {
+ ms: []*labels.Matcher{
+ labels.MustNewMatcher(labels.MatchEqual, "a", "b"),
+ labels.MustNewMatcher(labels.MatchEqual, "job", "prom-k8s"),
+ },
+ },
+ {
+ ms: []*labels.Matcher{
+ labels.MustNewMatcher(labels.MatchEqual, "a", "c"),
+ labels.MustNewMatcher(labels.MatchEqual, "instance", "localhost:9090"),
+ labels.MustNewMatcher(labels.MatchEqual, "job", "prometheus"),
+ },
+ },
+ // TODO: Add Regexp Matchers.
+ }
+
+ for _, qry := range queries {
+ matched := labels.Slice{}
+ for _, l := range lbls {
+ s := labels.Selector(qry.ms)
+ ls := labels.New(l...)
+ if s.Matches(ls) {
+ matched = append(matched, ls)
+ }
+ }
+
+ sort.Sort(matched)
+
+ for range numRanges {
+ mint := rand.Int63n(300)
+ maxt := mint + rand.Int63n(timeInterval*int64(numDatapoints))
+
+ expected := map[string][]chunks.Sample{}
+
+ // Build the mockSeriesSet.
+ for _, m := range matched {
+ smpls := boundedSamples(seriesMap[m.String()], mint, maxt)
+ if len(smpls) > 0 {
+ expected[m.String()] = smpls
+ }
+ }
+
+ q, err := db.Querier(mint, maxt)
+ require.NoError(t, err)
+
+ ss := q.Select(ctx, false, nil, qry.ms...)
+ result := map[string][]chunks.Sample{}
+
+ for ss.Next() {
+ x := ss.At()
+
+ smpls, err := storage.ExpandSamples(x.Iterator(nil), newSample)
+ require.NoError(t, err)
+
+ if len(smpls) > 0 {
+ result[x.Labels().String()] = smpls
+ }
+ }
+
+ require.NoError(t, ss.Err())
+ require.Empty(t, ss.Warnings())
+ require.Equal(t, expected, result)
+
+ q.Close()
+ }
+ }
+}
+
+func TestWALFlushedOnDBClose_AppendV2(t *testing.T) {
+ db := newTestDB(t)
+
+ lbls := labels.FromStrings("labelname", "labelvalue")
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+ _, err := app.Append(0, lbls, 0, 0, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ require.NoError(t, db.Close())
+
+ db = newTestDB(t, withDir(db.Dir()))
+
+ q, err := db.Querier(0, 1)
+ require.NoError(t, err)
+
+ values, ws, err := q.LabelValues(ctx, "labelname", nil)
+ require.NoError(t, err)
+ require.Empty(t, ws)
+ require.Equal(t, []string{"labelvalue"}, values)
+}
+
+func TestWALSegmentSizeOptions_AppendV2(t *testing.T) {
+ tests := map[int]func(dbdir string, segmentSize int){
+		// Default WAL size.
+ 0: func(dbDir string, _ int) {
+ filesAndDir, err := os.ReadDir(filepath.Join(dbDir, "wal"))
+ require.NoError(t, err)
+ files := []os.FileInfo{}
+ for _, f := range filesAndDir {
+ if !f.IsDir() {
+ fi, err := f.Info()
+ require.NoError(t, err)
+ files = append(files, fi)
+ }
+ }
+ // All the full segment files (all but the last) should match the segment size option.
+ for _, f := range files[:len(files)-1] {
+ require.Equal(t, int64(DefaultOptions().WALSegmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
+ }
+ lastFile := files[len(files)-1]
+ require.Greater(t, int64(DefaultOptions().WALSegmentSize), lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
+ },
+		// Custom WAL size.
+ 2 * 32 * 1024: func(dbDir string, segmentSize int) {
+ filesAndDir, err := os.ReadDir(filepath.Join(dbDir, "wal"))
+ require.NoError(t, err)
+ files := []os.FileInfo{}
+ for _, f := range filesAndDir {
+ if !f.IsDir() {
+ fi, err := f.Info()
+ require.NoError(t, err)
+ files = append(files, fi)
+ }
+ }
+ require.NotEmpty(t, files, "current WALSegmentSize should result in more than a single WAL file.")
+ // All the full segment files (all but the last) should match the segment size option.
+ for _, f := range files[:len(files)-1] {
+ require.Equal(t, int64(segmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
+ }
+ lastFile := files[len(files)-1]
+ require.Greater(t, int64(segmentSize), lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
+ },
+		// WAL disabled.
+ -1: func(dbDir string, _ int) {
+ // Check that WAL dir is not there.
+ _, err := os.Stat(filepath.Join(dbDir, "wal"))
+ require.Error(t, err)
+ // Check that there is chunks dir.
+ _, err = os.Stat(mmappedChunksDir(dbDir))
+ require.NoError(t, err)
+ },
+ }
+ for segmentSize, testFunc := range tests {
+ t.Run(fmt.Sprintf("WALSegmentSize %d test", segmentSize), func(t *testing.T) {
+ opts := DefaultOptions()
+ opts.WALSegmentSize = segmentSize
+ db := newTestDB(t, withOpts(opts))
+
+ for i := range int64(155) {
+ app := db.AppenderV2(context.Background())
+ ref, err := app.Append(0, labels.FromStrings("wal"+strconv.Itoa(int(i)), "size"), 0, i, rand.Float64(), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ for j := int64(1); j <= 78; j++ {
+ _, err := app.Append(ref, labels.EmptyLabels(), 0, i+j, rand.Float64(), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ require.NoError(t, db.Close())
+ testFunc(db.Dir(), opts.WALSegmentSize)
+ })
+ }
+}
+
+// https://github.com/prometheus/prometheus/issues/9846
+// https://github.com/prometheus/prometheus/issues/9859
+func TestWALReplayRaceOnSamplesLoggedBeforeSeries_AppendV2(t *testing.T) {
+ const (
+ numRuns = 1
+ numSamplesBeforeSeriesCreation = 1000
+ )
+
+ // We test both with few and many samples appended after series creation. If samples are < 120 then there's no
+ // mmap-ed chunk, otherwise there's at least 1 mmap-ed chunk when replaying the WAL.
+ for _, numSamplesAfterSeriesCreation := range []int{1, 1000} {
+ for run := 1; run <= numRuns; run++ {
+ t.Run(fmt.Sprintf("samples after series creation = %d, run = %d", numSamplesAfterSeriesCreation, run), func(t *testing.T) {
+ testWALReplayRaceOnSamplesLoggedBeforeSeriesAppendV2(t, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation)
+ })
+ }
+ }
+}
+
+func testWALReplayRaceOnSamplesLoggedBeforeSeriesAppendV2(t *testing.T, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation int) {
+ const numSeries = 1000
+
+ db := newTestDB(t)
+ db.DisableCompactions()
+
+ for seriesRef := 1; seriesRef <= numSeries; seriesRef++ {
+ // Log samples before the series is logged to the WAL.
+ var enc record.Encoder
+ var samples []record.RefSample
+
+ for ts := range numSamplesBeforeSeriesCreation {
+ samples = append(samples, record.RefSample{
+ Ref: chunks.HeadSeriesRef(uint64(seriesRef)),
+ T: int64(ts),
+ V: float64(ts),
+ })
+ }
+
+ err := db.Head().wal.Log(enc.Samples(samples, nil))
+ require.NoError(t, err)
+
+ // Add samples via appender so that they're logged after the series in the WAL.
+ app := db.AppenderV2(context.Background())
+ lbls := labels.FromStrings("series_id", strconv.Itoa(seriesRef))
+
+ for ts := numSamplesBeforeSeriesCreation; ts < numSamplesBeforeSeriesCreation+numSamplesAfterSeriesCreation; ts++ {
+ _, err := app.Append(0, lbls, 0, int64(ts), float64(ts), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ require.NoError(t, db.Close())
+
+ // Reopen the DB, replaying the WAL.
+ db = newTestDB(t, withDir(db.Dir()))
+
+ // Query back chunks for all series.
+ q, err := db.ChunkQuerier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+
+ set := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "series_id", ".+"))
+ actualSeries := 0
+ var chunksIt chunks.Iterator
+
+ for set.Next() {
+ actualSeries++
+ actualChunks := 0
+
+ chunksIt = set.At().Iterator(chunksIt)
+ for chunksIt.Next() {
+ actualChunks++
+ }
+ require.NoError(t, chunksIt.Err())
+
+ // We expect 1 chunk every 120 samples after series creation.
+ require.Equalf(t, (numSamplesAfterSeriesCreation/120)+1, actualChunks, "series: %s", set.At().Labels().String())
+ }
+
+ require.NoError(t, set.Err())
+ require.Equal(t, numSeries, actualSeries)
+}
+
+func TestTombstoneClean_AppendV2(t *testing.T) {
+ t.Parallel()
+ const numSamples int64 = 10
+
+ db := newTestDB(t)
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+
+ smpls := make([]float64, numSamples)
+ for i := range numSamples {
+ smpls[i] = rand.Float64()
+ app.Append(0, labels.FromStrings("a", "b"), 0, i, smpls[i], nil, nil, storage.AOptions{})
+ }
+
+ require.NoError(t, app.Commit())
+ cases := []struct {
+ intervals tombstones.Intervals
+ remaint []int64
+ }{
+ {
+ intervals: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 7}},
+ remaint: []int64{0, 8, 9},
+ },
+ }
+
+ for _, c := range cases {
+ // Delete the ranges.
+
+ // Create snapshot.
+ snap := t.TempDir()
+ require.NoError(t, db.Snapshot(snap, true))
+ require.NoError(t, db.Close())
+
+ // Reopen DB from snapshot.
+ db := newTestDB(t, withDir(snap))
+
+ for _, r := range c.intervals {
+ require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
+ }
+
+ // All of the setup for THIS line.
+ require.NoError(t, db.CleanTombstones())
+
+ // Compare the result.
+ q, err := db.Querier(0, numSamples)
+ require.NoError(t, err)
+ defer q.Close()
+
+ res := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+
+ expSamples := make([]chunks.Sample, 0, len(c.remaint))
+ for _, ts := range c.remaint {
+ expSamples = append(expSamples, sample{ts, smpls[ts], nil, nil})
+ }
+
+ expss := newMockSeriesSet([]storage.Series{
+ storage.NewListSeries(labels.FromStrings("a", "b"), expSamples),
+ })
+
+ if len(expSamples) == 0 {
+ require.False(t, res.Next())
+ continue
+ }
+
+ for {
+ eok, rok := expss.Next(), res.Next()
+ require.Equal(t, eok, rok)
+
+ if !eok {
+ break
+ }
+ sexp := expss.At()
+ sres := res.At()
+
+ require.Equal(t, sexp.Labels(), sres.Labels())
+
+ smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
+ smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)
+
+ require.Equal(t, errExp, errRes)
+ require.Equal(t, smplExp, smplRes)
+ }
+ require.Empty(t, res.Warnings())
+
+ for _, b := range db.Blocks() {
+ require.Equal(t, tombstones.NewMemTombstones(), b.tombstones)
+ }
+ }
+}
+
+// TestTombstoneCleanResultEmptyBlock tests that a TombstoneClean that results in empty blocks (no timeseries)
+// will also delete the resultant block.
+func TestTombstoneCleanResultEmptyBlock_AppendV2(t *testing.T) {
+ t.Parallel()
+ numSamples := int64(10)
+
+ db := newTestDB(t)
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+
+ smpls := make([]float64, numSamples)
+ for i := range numSamples {
+ smpls[i] = rand.Float64()
+ app.Append(0, labels.FromStrings("a", "b"), 0, i, smpls[i], nil, nil, storage.AOptions{})
+ }
+
+ require.NoError(t, app.Commit())
+ // Interval should cover the whole block.
+ intervals := tombstones.Intervals{{Mint: 0, Maxt: numSamples}}
+
+ // Create snapshot.
+ snap := t.TempDir()
+ require.NoError(t, db.Snapshot(snap, true))
+ require.NoError(t, db.Close())
+
+ // Reopen DB from snapshot.
+ db = newTestDB(t, withDir(snap))
+
+ // Create tombstones by deleting all samples.
+ for _, r := range intervals {
+ require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
+ }
+
+ require.NoError(t, db.CleanTombstones())
+
+	// After cleaning tombstones that cover the entire block, no blocks should be left behind.
+ actualBlockDirs, err := blockDirs(db.Dir())
+ require.NoError(t, err)
+ require.Empty(t, actualBlockDirs)
+}
+
+func TestSizeRetention_AppendV2(t *testing.T) {
+ t.Parallel()
+ opts := DefaultOptions()
+ opts.OutOfOrderTimeWindow = 100
+ db := newTestDB(t, withOpts(opts), withRngs(100))
+
+ blocks := []*BlockMeta{
+ {MinTime: 100, MaxTime: 200}, // Oldest block
+ {MinTime: 200, MaxTime: 300},
+ {MinTime: 300, MaxTime: 400},
+ {MinTime: 400, MaxTime: 500},
+ {MinTime: 500, MaxTime: 600}, // Newest Block
+ }
+
+ for _, m := range blocks {
+ createBlock(t, db.Dir(), genSeries(100, 10, m.MinTime, m.MaxTime))
+ }
+
+ headBlocks := []*BlockMeta{
+ {MinTime: 700, MaxTime: 800},
+ }
+
+ // Add some data to the WAL.
+ headApp := db.Head().AppenderV2(context.Background())
+ var aSeries labels.Labels
+ var it chunkenc.Iterator
+ for _, m := range headBlocks {
+ series := genSeries(100, 10, m.MinTime, m.MaxTime+1)
+ for _, s := range series {
+ aSeries = s.Labels()
+ it = s.Iterator(it)
+ for it.Next() == chunkenc.ValFloat {
+ tim, v := it.At()
+ _, err := headApp.Append(0, s.Labels(), 0, tim, v, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, it.Err())
+ }
+ }
+ require.NoError(t, headApp.Commit())
+ db.Head().mmapHeadChunks()
+
+ require.Eventually(t, func() bool {
+ return db.Head().chunkDiskMapper.IsQueueEmpty()
+ }, 2*time.Second, 100*time.Millisecond)
+
+ // Test that registered size matches the actual disk size.
+ require.NoError(t, db.reloadBlocks()) // Reload the db to register the new db size.
+ require.Len(t, db.Blocks(), len(blocks)) // Ensure all blocks are registered.
+ blockSize := int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics.
+ walSize, err := db.Head().wal.Size()
+ require.NoError(t, err)
+ cdmSize, err := db.Head().chunkDiskMapper.Size()
+ require.NoError(t, err)
+ require.NotZero(t, cdmSize)
+ // Expected size should take into account block size + WAL size + Head
+ // chunks size
+ expSize := blockSize + walSize + cdmSize
+ actSize, err := fileutil.DirSize(db.Dir())
+ require.NoError(t, err)
+ require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
+
+ // Create a WAL checkpoint, and compare sizes.
+ first, last, err := wlog.Segments(db.Head().wal.Dir())
+ require.NoError(t, err)
+ _, err = wlog.Checkpoint(promslog.NewNopLogger(), db.Head().wal, first, last-1, func(chunks.HeadSeriesRef) bool { return false }, 0)
+ require.NoError(t, err)
+ blockSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics.
+ walSize, err = db.Head().wal.Size()
+ require.NoError(t, err)
+ cdmSize, err = db.Head().chunkDiskMapper.Size()
+ require.NoError(t, err)
+ require.NotZero(t, cdmSize)
+ expSize = blockSize + walSize + cdmSize
+ actSize, err = fileutil.DirSize(db.Dir())
+ require.NoError(t, err)
+ require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
+
+ // Truncate Chunk Disk Mapper and compare sizes.
+ require.NoError(t, db.Head().chunkDiskMapper.Truncate(900))
+ cdmSize, err = db.Head().chunkDiskMapper.Size()
+ require.NoError(t, err)
+ require.NotZero(t, cdmSize)
+ expSize = blockSize + walSize + cdmSize
+ actSize, err = fileutil.DirSize(db.Dir())
+ require.NoError(t, err)
+ require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
+
+ // Add some out of order samples to check the size of WBL.
+ headApp = db.Head().AppenderV2(context.Background())
+ for ts := int64(750); ts < 800; ts++ {
+ _, err := headApp.Append(0, aSeries, 0, ts, float64(ts), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, headApp.Commit())
+
+ walSize, err = db.Head().wal.Size()
+ require.NoError(t, err)
+ wblSize, err := db.Head().wbl.Size()
+ require.NoError(t, err)
+ require.NotZero(t, wblSize)
+ cdmSize, err = db.Head().chunkDiskMapper.Size()
+ require.NoError(t, err)
+ expSize = blockSize + walSize + wblSize + cdmSize
+ actSize, err = fileutil.DirSize(db.Dir())
+ require.NoError(t, err)
+ require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
+
+ // Decrease the max bytes limit so that a delete is triggered.
+ // Check total size, total count and check that the oldest block was deleted.
+ firstBlockSize := db.Blocks()[0].Size()
+ sizeLimit := actSize - firstBlockSize
+	db.opts.MaxBytes = sizeLimit // Set the new db size limit one block smaller than the actual size.
+ require.NoError(t, db.reloadBlocks()) // Reload the db to register the new db size.
+
+ expBlocks := blocks[1:]
+ actBlocks := db.Blocks()
+ blockSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes))
+ walSize, err = db.Head().wal.Size()
+ require.NoError(t, err)
+ cdmSize, err = db.Head().chunkDiskMapper.Size()
+ require.NoError(t, err)
+ require.NotZero(t, cdmSize)
+	// Expected size should take into account block size + WAL size + WBL size + Head chunks size.
+ expSize = blockSize + walSize + wblSize + cdmSize
+ actRetentionCount := int(prom_testutil.ToFloat64(db.metrics.sizeRetentionCount))
+ actSize, err = fileutil.DirSize(db.Dir())
+ require.NoError(t, err)
+
+ require.Equal(t, 1, actRetentionCount, "metric retention count mismatch")
+ require.Equal(t, expSize, actSize, "metric db size doesn't match actual disk size")
+ require.LessOrEqual(t, expSize, sizeLimit, "actual size (%v) is expected to be less than or equal to limit (%v)", expSize, sizeLimit)
+ require.Len(t, actBlocks, len(blocks)-1, "new block count should be decreased from:%v to:%v", len(blocks), len(blocks)-1)
+ require.Equal(t, expBlocks[0].MaxTime, actBlocks[0].meta.MaxTime, "maxT mismatch of the first block")
+ require.Equal(t, expBlocks[len(expBlocks)-1].MaxTime, actBlocks[len(actBlocks)-1].meta.MaxTime, "maxT mismatch of the last block")
+}
+
+func TestNotMatcherSelectsLabelsUnsetSeries_AppendV2(t *testing.T) {
+ db := newTestDB(t)
+
+ labelpairs := []labels.Labels{
+ labels.FromStrings("a", "abcd", "b", "abcde"),
+ labels.FromStrings("labelname", "labelvalue"),
+ }
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+ for _, lbls := range labelpairs {
+ _, err := app.Append(0, lbls, 0, 0, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+
+ cases := []struct {
+ selector labels.Selector
+ series []labels.Labels
+ }{{
+ selector: labels.Selector{
+ labels.MustNewMatcher(labels.MatchNotEqual, "lname", "lvalue"),
+ },
+ series: labelpairs,
+ }, {
+ selector: labels.Selector{
+ labels.MustNewMatcher(labels.MatchEqual, "a", "abcd"),
+ labels.MustNewMatcher(labels.MatchNotEqual, "b", "abcde"),
+ },
+ series: []labels.Labels{},
+ }, {
+ selector: labels.Selector{
+ labels.MustNewMatcher(labels.MatchEqual, "a", "abcd"),
+ labels.MustNewMatcher(labels.MatchNotEqual, "b", "abc"),
+ },
+ series: []labels.Labels{labelpairs[0]},
+ }, {
+ selector: labels.Selector{
+ labels.MustNewMatcher(labels.MatchNotRegexp, "a", "abd.*"),
+ },
+ series: labelpairs,
+ }, {
+ selector: labels.Selector{
+ labels.MustNewMatcher(labels.MatchNotRegexp, "a", "abc.*"),
+ },
+ series: labelpairs[1:],
+ }, {
+ selector: labels.Selector{
+ labels.MustNewMatcher(labels.MatchNotRegexp, "c", "abd.*"),
+ },
+ series: labelpairs,
+ }, {
+ selector: labels.Selector{
+ labels.MustNewMatcher(labels.MatchNotRegexp, "labelname", "labelvalue"),
+ },
+ series: labelpairs[:1],
+ }}
+
+ q, err := db.Querier(0, 10)
+ require.NoError(t, err)
+ defer func() { require.NoError(t, q.Close()) }()
+
+ for _, c := range cases {
+ ss := q.Select(ctx, false, nil, c.selector...)
+ lres, _, ws, err := expandSeriesSet(ss)
+ require.NoError(t, err)
+ require.Empty(t, ws)
+ require.Equal(t, c.series, lres)
+ }
+}
+
+// Regression test for https://github.com/prometheus/tsdb/issues/347
+func TestChunkAtBlockBoundary_AppendV2(t *testing.T) {
+ t.Parallel()
+ db := newTestDB(t)
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+
+ blockRange := db.compactor.(*LeveledCompactor).ranges[0]
+ label := labels.FromStrings("foo", "bar")
+
+ for i := range int64(3) {
+ _, err := app.Append(0, label, 0, i*blockRange, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, label, 0, i*blockRange+1000, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+
+ err := app.Commit()
+ require.NoError(t, err)
+
+ err = db.Compact(ctx)
+ require.NoError(t, err)
+
+ var builder labels.ScratchBuilder
+
+ for _, block := range db.Blocks() {
+ r, err := block.Index()
+ require.NoError(t, err)
+ defer r.Close()
+
+ meta := block.Meta()
+
+ k, v := index.AllPostingsKey()
+ p, err := r.Postings(ctx, k, v)
+ require.NoError(t, err)
+
+ var chks []chunks.Meta
+
+ chunkCount := 0
+
+ for p.Next() {
+ err = r.Series(p.At(), &builder, &chks)
+ require.NoError(t, err)
+ for _, c := range chks {
+ require.True(t, meta.MinTime <= c.MinTime && c.MaxTime <= meta.MaxTime,
+ "chunk spans beyond block boundaries: [block.MinTime=%d, block.MaxTime=%d]; [chunk.MinTime=%d, chunk.MaxTime=%d]",
+ meta.MinTime, meta.MaxTime, c.MinTime, c.MaxTime)
+ chunkCount++
+ }
+ }
+ require.Equal(t, 1, chunkCount, "expected 1 chunk in block %s, got %d", meta.ULID, chunkCount)
+ }
+}
+
+func TestQuerierWithBoundaryChunks_AppendV2(t *testing.T) {
+ t.Parallel()
+ db := newTestDB(t)
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+
+ blockRange := db.compactor.(*LeveledCompactor).ranges[0]
+ label := labels.FromStrings("foo", "bar")
+
+ for i := range int64(5) {
+ _, err := app.Append(0, label, 0, i*blockRange, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, labels.FromStrings("blockID", strconv.FormatInt(i, 10)), 0, i*blockRange, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+
+ err := app.Commit()
+ require.NoError(t, err)
+
+ err = db.Compact(ctx)
+ require.NoError(t, err)
+
+ require.GreaterOrEqual(t, len(db.blocks), 3, "invalid test, less than three blocks in DB")
+
+ q, err := db.Querier(blockRange, 2*blockRange)
+ require.NoError(t, err)
+ defer q.Close()
+
+ // The requested interval covers 2 blocks, so the querier's label values for blockID should give us 2 values, one from each block.
+ b, ws, err := q.LabelValues(ctx, "blockID", nil)
+ require.NoError(t, err)
+ var nilAnnotations annotations.Annotations
+ require.Equal(t, nilAnnotations, ws)
+ require.Equal(t, []string{"1", "2"}, b)
+}
+
+// TestInitializeHeadTimestamp ensures that the h.minTime is set properly.
+// - no blocks no WAL: set to the time of the first appended sample
+// - no blocks with WAL: set to the smallest sample from the WAL
+// - with blocks no WAL: set to the last block maxT
+// - with blocks with WAL: same as above
+func TestInitializeHeadTimestamp_AppendV2(t *testing.T) {
+ t.Parallel()
+ t.Run("clean", func(t *testing.T) {
+ db := newTestDB(t)
+
+ // Should be set to init values if no WAL or blocks exist so far.
+ require.Equal(t, int64(math.MaxInt64), db.head.MinTime())
+ require.Equal(t, int64(math.MinInt64), db.head.MaxTime())
+ require.False(t, db.head.initialized())
+
+ // First added sample initializes the writable range.
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+ _, err := app.Append(0, labels.FromStrings("a", "b"), 0, 1000, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ require.Equal(t, int64(1000), db.head.MinTime())
+ require.Equal(t, int64(1000), db.head.MaxTime())
+ require.True(t, db.head.initialized())
+ })
+ t.Run("wal-only", func(t *testing.T) {
+ dir := t.TempDir()
+
+ require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777))
+ w, err := wlog.New(nil, nil, path.Join(dir, "wal"), compression.None)
+ require.NoError(t, err)
+
+ var enc record.Encoder
+ err = w.Log(
+ enc.Series([]record.RefSeries{
+ {Ref: 123, Labels: labels.FromStrings("a", "1")},
+ {Ref: 124, Labels: labels.FromStrings("a", "2")},
+ }, nil),
+ enc.Samples([]record.RefSample{
+ {Ref: 123, T: 5000, V: 1},
+ {Ref: 124, T: 15000, V: 1},
+ }, nil),
+ )
+ require.NoError(t, err)
+ require.NoError(t, w.Close())
+
+ db := newTestDB(t, withDir(dir))
+
+ require.Equal(t, int64(5000), db.head.MinTime())
+ require.Equal(t, int64(15000), db.head.MaxTime())
+ require.True(t, db.head.initialized())
+ })
+ t.Run("existing-block", func(t *testing.T) {
+ dir := t.TempDir()
+
+ createBlock(t, dir, genSeries(1, 1, 1000, 2000))
+
+ db := newTestDB(t, withDir(dir))
+
+ require.Equal(t, int64(2000), db.head.MinTime())
+ require.Equal(t, int64(2000), db.head.MaxTime())
+ require.True(t, db.head.initialized())
+ })
+ t.Run("existing-block-and-wal", func(t *testing.T) {
+ dir := t.TempDir()
+
+ createBlock(t, dir, genSeries(1, 1, 1000, 6000))
+
+ require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777))
+ w, err := wlog.New(nil, nil, path.Join(dir, "wal"), compression.None)
+ require.NoError(t, err)
+
+ var enc record.Encoder
+ err = w.Log(
+ enc.Series([]record.RefSeries{
+ {Ref: 123, Labels: labels.FromStrings("a", "1")},
+ {Ref: 124, Labels: labels.FromStrings("a", "2")},
+ }, nil),
+ enc.Samples([]record.RefSample{
+ {Ref: 123, T: 5000, V: 1},
+ {Ref: 124, T: 15000, V: 1},
+ }, nil),
+ )
+ require.NoError(t, err)
+ require.NoError(t, w.Close())
+
+ db := newTestDB(t, withDir(dir))
+
+ require.Equal(t, int64(6000), db.head.MinTime())
+ require.Equal(t, int64(15000), db.head.MaxTime())
+ require.True(t, db.head.initialized())
+ // Check that old series has been GCed.
+ require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.series))
+ })
+}
+
+func TestNoEmptyBlocks_AppendV2(t *testing.T) {
+ t.Parallel()
+ db := newTestDB(t, withRngs(100))
+ ctx := context.Background()
+
+ db.DisableCompactions()
+
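+	// The head only becomes compactable once it spans more than 1.5x the block range;
+	// appending a sample rangeToTriggerCompaction past the first one crosses that threshold.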
+ rangeToTriggerCompaction := db.compactor.(*LeveledCompactor).ranges[0]/2*3 - 1
+ defaultLabel := labels.FromStrings("foo", "bar")
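+	// defaultMatcher matches every series (any value for the empty label name), so Delete calls below remove all samples.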
+ defaultMatcher := labels.MustNewMatcher(labels.MatchRegexp, "", ".*")
+
+ t.Run("Test no blocks after compact with empty head.", func(t *testing.T) {
+ require.NoError(t, db.Compact(ctx))
+ actBlocks, err := blockDirs(db.Dir())
+ require.NoError(t, err)
+ require.Len(t, actBlocks, len(db.Blocks()))
+ require.Empty(t, actBlocks)
+ require.Equal(t, 0, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "no compaction should be triggered here")
+ })
+
+ t.Run("Test no blocks after deleting all samples from head.", func(t *testing.T) {
+ app := db.AppenderV2(ctx)
+ _, err := app.Append(0, defaultLabel, 0, 1, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, defaultLabel, 0, 2, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, defaultLabel, 0, 3+rangeToTriggerCompaction, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.NoError(t, db.Delete(ctx, math.MinInt64, math.MaxInt64, defaultMatcher))
+ require.NoError(t, db.Compact(ctx))
+ require.Equal(t, 1, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "compaction should have been triggered here")
+
+ actBlocks, err := blockDirs(db.Dir())
+ require.NoError(t, err)
+ require.Len(t, actBlocks, len(db.Blocks()))
+ require.Empty(t, actBlocks)
+
+ app = db.AppenderV2(ctx)
+ _, err = app.Append(0, defaultLabel, 0, 1, 0, nil, nil, storage.AOptions{})
+ require.Equal(t, storage.ErrOutOfBounds, err, "the head should be truncated so no samples in the past should be allowed")
+
+ // Adding new blocks.
+ currentTime := db.Head().MaxTime()
+ _, err = app.Append(0, defaultLabel, 0, currentTime, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, defaultLabel, 0, currentTime+1, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, defaultLabel, 0, currentTime+rangeToTriggerCompaction, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ require.NoError(t, db.Compact(ctx))
+ require.Equal(t, 2, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "compaction should have been triggered here")
+ actBlocks, err = blockDirs(db.Dir())
+ require.NoError(t, err)
+ require.Len(t, actBlocks, len(db.Blocks()))
+ require.Len(t, actBlocks, 1, "No blocks created when compacting with >0 samples")
+ })
+
+	t.Run(`When no new block is created from the head and there are existing blocks on disk,
+	compaction should not run into an infinite loop (was seen during development).`, func(t *testing.T) {
+ oldBlocks := db.Blocks()
+ app := db.AppenderV2(ctx)
+ currentTime := db.Head().MaxTime()
+ _, err := app.Append(0, defaultLabel, 0, currentTime, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, defaultLabel, 0, currentTime+1, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, defaultLabel, 0, currentTime+rangeToTriggerCompaction, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.NoError(t, db.head.Delete(ctx, math.MinInt64, math.MaxInt64, defaultMatcher))
+ require.NoError(t, db.Compact(ctx))
+ require.Equal(t, 3, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "compaction should have been triggered here")
+ require.Equal(t, oldBlocks, db.Blocks())
+ })
+
+ t.Run("Test no blocks remaining after deleting all samples from disk.", func(t *testing.T) {
+ currentTime := db.Head().MaxTime()
+ blocks := []*BlockMeta{
+ {MinTime: currentTime, MaxTime: currentTime + db.compactor.(*LeveledCompactor).ranges[0]},
+ {MinTime: currentTime + 100, MaxTime: currentTime + 100 + db.compactor.(*LeveledCompactor).ranges[0]},
+ }
+ for _, m := range blocks {
+ createBlock(t, db.Dir(), genSeries(2, 2, m.MinTime, m.MaxTime))
+ }
+
+ oldBlocks := db.Blocks()
+ require.NoError(t, db.reloadBlocks()) // Reload the db to register the new blocks.
+ require.Len(t, db.Blocks(), len(blocks)+len(oldBlocks)) // Ensure all blocks are registered.
+ require.NoError(t, db.Delete(ctx, math.MinInt64, math.MaxInt64, defaultMatcher))
+ require.NoError(t, db.Compact(ctx))
+		require.Equal(t, 5, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "compaction should have been triggered here once for each block that has tombstones")
+
+ actBlocks, err := blockDirs(db.Dir())
+ require.NoError(t, err)
+ require.Len(t, actBlocks, len(db.Blocks()))
+ require.Len(t, actBlocks, 1, "All samples are deleted. Only the most recent block should remain after compaction.")
+ })
+}
+
+func TestDB_LabelNames_AppendV2(t *testing.T) {
+ ctx := context.Background()
+ tests := []struct {
+ // Add 'sampleLabels1' -> Test Head -> Compact -> Test Disk ->
+ // -> Add 'sampleLabels2' -> Test Head+Disk
+
+ sampleLabels1 [][2]string // For checking head and disk separately.
+ // To test Head+Disk, sampleLabels2 should have
+ // at least 1 unique label name which is not in sampleLabels1.
+ sampleLabels2 [][2]string // For checking head and disk together.
+ exp1 []string // after adding sampleLabels1.
+ exp2 []string // after adding sampleLabels1 and sampleLabels2.
+ }{
+ {
+ sampleLabels1: [][2]string{
+ {"name1", "1"},
+ {"name3", "3"},
+ {"name2", "2"},
+ },
+ sampleLabels2: [][2]string{
+ {"name4", "4"},
+ {"name1", "1"},
+ },
+ exp1: []string{"name1", "name2", "name3"},
+ exp2: []string{"name1", "name2", "name3", "name4"},
+ },
+ {
+ sampleLabels1: [][2]string{
+ {"name2", "2"},
+ {"name1", "1"},
+ {"name2", "2"},
+ },
+ sampleLabels2: [][2]string{
+ {"name6", "6"},
+ {"name0", "0"},
+ },
+ exp1: []string{"name1", "name2"},
+ exp2: []string{"name0", "name1", "name2", "name6"},
+ },
+ }
+
+ blockRange := int64(1000)
+ // Appends samples into the database.
+ appendSamples := func(db *DB, mint, maxt int64, sampleLabels [][2]string) {
+ t.Helper()
+ app := db.AppenderV2(ctx)
+ for i := mint; i <= maxt; i++ {
+ for _, tuple := range sampleLabels {
+ label := labels.FromStrings(tuple[0], tuple[1])
+ _, err := app.Append(0, label, 0, i*blockRange, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+ err := app.Commit()
+ require.NoError(t, err)
+ }
+ for _, tst := range tests {
+ t.Run("", func(t *testing.T) {
+ ctx := context.Background()
+ db := newTestDB(t)
+
+ appendSamples(db, 0, 4, tst.sampleLabels1)
+
+ // Testing head.
+ headIndexr, err := db.head.Index()
+ require.NoError(t, err)
+ labelNames, err := headIndexr.LabelNames(ctx)
+ require.NoError(t, err)
+ require.Equal(t, tst.exp1, labelNames)
+ require.NoError(t, headIndexr.Close())
+
+ // Testing disk.
+ err = db.Compact(ctx)
+ require.NoError(t, err)
+			// All blocks have the same label names, so check them individually;
+			// no need to aggregate and check.
+ for _, b := range db.Blocks() {
+ blockIndexr, err := b.Index()
+ require.NoError(t, err)
+ labelNames, err = blockIndexr.LabelNames(ctx)
+ require.NoError(t, err)
+ require.Equal(t, tst.exp1, labelNames)
+ require.NoError(t, blockIndexr.Close())
+ }
+
+ // Adding more samples to head with new label names
+ // so that we can test (head+disk).LabelNames(ctx) (the union).
+ appendSamples(db, 5, 9, tst.sampleLabels2)
+
+ // Testing DB (union).
+ q, err := db.Querier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+ var ws annotations.Annotations
+ labelNames, ws, err = q.LabelNames(ctx, nil)
+ require.NoError(t, err)
+ require.Empty(t, ws)
+ require.NoError(t, q.Close())
+ require.Equal(t, tst.exp2, labelNames)
+ })
+ }
+}
+
+func TestCorrectNumTombstones_AppendV2(t *testing.T) {
+ t.Parallel()
+ db := newTestDB(t)
+
+ blockRange := db.compactor.(*LeveledCompactor).ranges[0]
+ name, value := "foo", "bar"
+ defaultLabel := labels.FromStrings(name, value)
+ defaultMatcher := labels.MustNewMatcher(labels.MatchEqual, name, value)
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
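+	// Spread 15 samples over each of three consecutive block ranges so the head becomes compactable;
+	// the test expects a single block after compaction.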
+ for i := range int64(3) {
+ for j := range int64(15) {
+ _, err := app.Append(0, defaultLabel, 0, i*blockRange+j, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+ require.NoError(t, app.Commit())
+
+ err := db.Compact(ctx)
+ require.NoError(t, err)
+ require.Len(t, db.blocks, 1)
+
+ require.NoError(t, db.Delete(ctx, 0, 1, defaultMatcher))
+ require.Equal(t, uint64(1), db.blocks[0].meta.Stats.NumTombstones)
+
+ // {0, 1} and {2, 3} are merged to form 1 tombstone.
+ require.NoError(t, db.Delete(ctx, 2, 3, defaultMatcher))
+ require.Equal(t, uint64(1), db.blocks[0].meta.Stats.NumTombstones)
+
+ require.NoError(t, db.Delete(ctx, 5, 6, defaultMatcher))
+ require.Equal(t, uint64(2), db.blocks[0].meta.Stats.NumTombstones)
+
+ require.NoError(t, db.Delete(ctx, 9, 11, defaultMatcher))
+ require.Equal(t, uint64(3), db.blocks[0].meta.Stats.NumTombstones)
+}
+
+// TestBlockRanges_AppendV2 checks the following use cases:
+//   - No samples can be added with timestamps lower than the last block maxt.
+//   - The compactor doesn't create overlapping blocks, even when the last
+//     block is not within the default boundaries.
+//   - The lower boundary is based on the smallest sample in the head, and the
+//     upper boundary is rounded up to the configured block range.
+//
+// This ensures that a snapshot that includes the head and creates a block with a custom time range
+// will not overlap with the first block created by the next compaction.
+func TestBlockRanges_AppendV2(t *testing.T) {
+ t.Parallel()
+ logger := promslog.New(&promslog.Config{})
+ ctx := context.Background()
+
+ dir := t.TempDir()
+
+ // Test that the compactor doesn't create overlapping blocks
+	// when a non-standard block already exists.
+ firstBlockMaxT := int64(3)
+ createBlock(t, dir, genSeries(1, 1, 0, firstBlockMaxT))
+ db, err := open(dir, logger, nil, DefaultOptions(), []int64{10000}, nil)
+ require.NoError(t, err)
+
+ rangeToTriggerCompaction := db.compactor.(*LeveledCompactor).ranges[0]/2*3 + 1
+
+ app := db.AppenderV2(ctx)
+ lbl := labels.FromStrings("a", "b")
+ _, err = app.Append(0, lbl, 0, firstBlockMaxT-1, rand.Float64(), nil, nil, storage.AOptions{})
+ require.Error(t, err, "appending a sample with a timestamp covered by a previous block shouldn't be possible")
+ _, err = app.Append(0, lbl, 0, firstBlockMaxT+1, rand.Float64(), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, lbl, 0, firstBlockMaxT+2, rand.Float64(), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ secondBlockMaxt := firstBlockMaxT + rangeToTriggerCompaction
+ _, err = app.Append(0, lbl, 0, secondBlockMaxt, rand.Float64(), nil, nil, storage.AOptions{}) // Add samples to trigger a new compaction
+
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ for range 100 {
+ if len(db.Blocks()) == 2 {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ require.Len(t, db.Blocks(), 2, "no new block created after the set timeout")
+
+ require.LessOrEqual(t, db.Blocks()[1].Meta().MinTime, db.Blocks()[0].Meta().MaxTime,
+ "new block overlaps old:%v,new:%v", db.Blocks()[0].Meta(), db.Blocks()[1].Meta())
+
+	// Test that WAL records are skipped when an existing block covers the same time ranges
+ // and compaction doesn't create an overlapping block.
+ app = db.AppenderV2(ctx)
+ db.DisableCompactions()
+ _, err = app.Append(0, lbl, 0, secondBlockMaxt+1, rand.Float64(), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, lbl, 0, secondBlockMaxt+2, rand.Float64(), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, lbl, 0, secondBlockMaxt+3, rand.Float64(), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, lbl, 0, secondBlockMaxt+4, rand.Float64(), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.NoError(t, db.Close())
+
+ thirdBlockMaxt := secondBlockMaxt + 2
+ createBlock(t, dir, genSeries(1, 1, secondBlockMaxt+1, thirdBlockMaxt))
+
+ db, err = open(dir, logger, nil, DefaultOptions(), []int64{10000}, nil)
+ require.NoError(t, err)
+
+ defer db.Close()
+ require.Len(t, db.Blocks(), 3, "db doesn't include expected number of blocks")
+ require.Equal(t, db.Blocks()[2].Meta().MaxTime, thirdBlockMaxt, "unexpected maxt of the last block")
+
+ app = db.AppenderV2(ctx)
+ _, err = app.Append(0, lbl, 0, thirdBlockMaxt+rangeToTriggerCompaction, rand.Float64(), nil, nil, storage.AOptions{}) // Trigger a compaction
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ for range 100 {
+ if len(db.Blocks()) == 4 {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ require.Len(t, db.Blocks(), 4, "no new block created after the set timeout")
+
+ require.LessOrEqual(t, db.Blocks()[3].Meta().MinTime, db.Blocks()[2].Meta().MaxTime,
+ "new block overlaps old:%v,new:%v", db.Blocks()[2].Meta(), db.Blocks()[3].Meta())
+}
+
+// TestDBReadOnly_AppendV2 ensures that opening a DB in read-only mode doesn't modify any files on disk.
+// It also checks that the API calls return the same results as a DB opened in normal db.Open() mode.
+func TestDBReadOnly_AppendV2(t *testing.T) {
+ t.Parallel()
+ var (
+ dbDir = t.TempDir()
+ logger = promslog.New(&promslog.Config{})
+ expBlocks []*Block
+ expBlock *Block
+ expSeries map[string][]chunks.Sample
+ expChunks map[string][][]chunks.Sample
+ expDBHash []byte
+ matchAll = labels.MustNewMatcher(labels.MatchEqual, "", "")
+ err error
+ )
+
+ // Bootstrap the db.
+ {
+ dbBlocks := []*BlockMeta{
+ // Create three 2-sample blocks.
+ {MinTime: 10, MaxTime: 12},
+ {MinTime: 12, MaxTime: 14},
+ {MinTime: 14, MaxTime: 16},
+ }
+
+ for _, m := range dbBlocks {
+ _ = createBlock(t, dbDir, genSeries(1, 1, m.MinTime, m.MaxTime))
+ }
+
+ // Add head to test DBReadOnly WAL reading capabilities.
+ w, err := wlog.New(logger, nil, filepath.Join(dbDir, "wal"), compression.Snappy)
+ require.NoError(t, err)
+ h := createHead(t, w, genSeries(1, 1, 16, 18), dbDir)
+ require.NoError(t, h.Close())
+ }
+
+ // Open a normal db to use for a comparison.
+ {
+ dbWritable := newTestDB(t, withDir(dbDir))
+ dbWritable.DisableCompactions()
+
+ dbSizeBeforeAppend, err := fileutil.DirSize(dbWritable.Dir())
+ require.NoError(t, err)
+ app := dbWritable.AppenderV2(context.Background())
+ _, err = app.Append(0, labels.FromStrings("foo", "bar"), 0, dbWritable.Head().MaxTime()+1, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ expBlocks = dbWritable.Blocks()
+ expBlock = expBlocks[0]
+ expDbSize, err := fileutil.DirSize(dbWritable.Dir())
+ require.NoError(t, err)
+ require.Greater(t, expDbSize, dbSizeBeforeAppend, "db size didn't increase after an append")
+
+ q, err := dbWritable.Querier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+ expSeries = query(t, q, matchAll)
+ cq, err := dbWritable.ChunkQuerier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+ expChunks = queryAndExpandChunks(t, cq, matchAll)
+
+ require.NoError(t, dbWritable.Close()) // Close here to allow getting the dir hash for windows.
+ expDBHash = testutil.DirHash(t, dbWritable.Dir())
+ }
+
+ // Open a read only db and ensure that the API returns the same result as the normal DB.
+ dbReadOnly, err := OpenDBReadOnly(dbDir, "", logger)
+ require.NoError(t, err)
+ defer func() { require.NoError(t, dbReadOnly.Close()) }()
+
+ t.Run("blocks", func(t *testing.T) {
+ blocks, err := dbReadOnly.Blocks()
+ require.NoError(t, err)
+ require.Len(t, blocks, len(expBlocks))
+ for i, expBlock := range expBlocks {
+ require.Equal(t, expBlock.Meta(), blocks[i].Meta(), "block meta mismatch")
+ }
+ })
+ t.Run("block", func(t *testing.T) {
+ blockID := expBlock.meta.ULID.String()
+ block, err := dbReadOnly.Block(blockID, nil)
+ require.NoError(t, err)
+ require.Equal(t, expBlock.Meta(), block.Meta(), "block meta mismatch")
+ })
+ t.Run("invalid block ID", func(t *testing.T) {
+ blockID := "01GTDVZZF52NSWB5SXQF0P2PGF"
+ _, err := dbReadOnly.Block(blockID, nil)
+ require.Error(t, err)
+ })
+ t.Run("last block ID", func(t *testing.T) {
+ blockID, err := dbReadOnly.LastBlockID()
+ require.NoError(t, err)
+ require.Equal(t, expBlocks[2].Meta().ULID.String(), blockID)
+ })
+ t.Run("querier", func(t *testing.T) {
+ // Open a read only db and ensure that the API returns the same result as the normal DB.
+ q, err := dbReadOnly.Querier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+ readOnlySeries := query(t, q, matchAll)
+ readOnlyDBHash := testutil.DirHash(t, dbDir)
+
+ require.Len(t, readOnlySeries, len(expSeries), "total series mismatch")
+ require.Equal(t, expSeries, readOnlySeries, "series mismatch")
+ require.Equal(t, expDBHash, readOnlyDBHash, "after all read operations the db hash should remain the same")
+ })
+ t.Run("chunk querier", func(t *testing.T) {
+ cq, err := dbReadOnly.ChunkQuerier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+ readOnlySeries := queryAndExpandChunks(t, cq, matchAll)
+ readOnlyDBHash := testutil.DirHash(t, dbDir)
+
+ require.Len(t, readOnlySeries, len(expChunks), "total series mismatch")
+ require.Equal(t, expChunks, readOnlySeries, "series chunks mismatch")
+ require.Equal(t, expDBHash, readOnlyDBHash, "after all read operations the db hash should remain the same")
+ })
+}
+
+func TestDBReadOnly_FlushWAL_AppendV2(t *testing.T) {
+ t.Parallel()
+ var (
+ dbDir = t.TempDir()
+ logger = promslog.New(&promslog.Config{})
+ err error
+ maxt int
+ ctx = context.Background()
+ )
+
+ // Bootstrap the db.
+ {
+ // Append data to the WAL.
+ db := newTestDB(t, withDir(dbDir))
+ db.DisableCompactions()
+ app := db.AppenderV2(ctx)
+ maxt = 1000
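+		// 1000 samples, each with value 1, so summing the flushed series below should yield 1000.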
+ for i := 0; i < maxt; i++ {
+ _, err := app.Append(0, labels.FromStrings(defaultLabelName, "flush"), 0, int64(i), 1.0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ require.NoError(t, db.Close())
+ }
+
+ // Flush WAL.
+ db, err := OpenDBReadOnly(dbDir, "", logger)
+ require.NoError(t, err)
+
+ flush := t.TempDir()
+ require.NoError(t, db.FlushWAL(flush))
+ require.NoError(t, db.Close())
+
+ // Reopen the DB from the flushed WAL block.
+ db, err = OpenDBReadOnly(flush, "", logger)
+ require.NoError(t, err)
+ defer func() { require.NoError(t, db.Close()) }()
+ blocks, err := db.Blocks()
+ require.NoError(t, err)
+ require.Len(t, blocks, 1)
+
+ querier, err := db.Querier(0, int64(maxt)-1)
+ require.NoError(t, err)
+ defer func() { require.NoError(t, querier.Close()) }()
+
+ // Sum the values.
+ seriesSet := querier.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, defaultLabelName, "flush"))
+ var series chunkenc.Iterator
+
+ sum := 0.0
+ for seriesSet.Next() {
+ series = seriesSet.At().Iterator(series)
+ for series.Next() == chunkenc.ValFloat {
+ _, v := series.At()
+ sum += v
+ }
+ require.NoError(t, series.Err())
+ }
+ require.NoError(t, seriesSet.Err())
+ require.Empty(t, seriesSet.Warnings())
+ require.Equal(t, 1000.0, sum)
+}
+
+func TestDBReadOnly_Querier_NoAlteration_AppendV2(t *testing.T) {
+ countChunks := func(dir string) int {
+ files, err := os.ReadDir(mmappedChunksDir(dir))
+ require.NoError(t, err)
+ return len(files)
+ }
+
+ dirHash := func(dir string) (hash []byte) {
+ // Windows requires the DB to be closed: "xxx\lock: The process cannot access the file because it is being used by another process."
+ // But closing the DB alters the directory in this case (it'll cut a new chunk).
+ if runtime.GOOS != "windows" {
+ hash = testutil.DirHash(t, dir)
+ }
+ return hash
+ }
+
+ spinUpQuerierAndCheck := func(dir, sandboxDir string, chunksCount int) {
+ dBDirHash := dirHash(dir)
+ // Bootstrap a RO db from the same dir and set up a querier.
+ dbReadOnly, err := OpenDBReadOnly(dir, sandboxDir, nil)
+ require.NoError(t, err)
+ require.Equal(t, chunksCount, countChunks(dir))
+ q, err := dbReadOnly.Querier(math.MinInt, math.MaxInt)
+ require.NoError(t, err)
+ require.NoError(t, q.Close())
+ require.NoError(t, dbReadOnly.Close())
+ // The RO Head doesn't alter RW db chunks_head/.
+ require.Equal(t, chunksCount, countChunks(dir))
+ require.Equal(t, dirHash(dir), dBDirHash)
+ }
+
+ t.Run("doesn't cut chunks while replaying WAL", func(t *testing.T) {
+ db := newTestDB(t)
+
+ // Append until the first mmapped head chunk.
+ for i := range 121 {
+ app := db.AppenderV2(context.Background())
+ _, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, int64(i), 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ }
+
+ spinUpQuerierAndCheck(db.Dir(), t.TempDir(), 0)
+
+		// The RW Head should have no problem cutting its own chunk;
+		// this also proves that a chunk needed to be cut.
+ require.NotPanics(t, func() { db.ForceHeadMMap() })
+ require.Equal(t, 1, countChunks(db.Dir()))
+ })
+
+ t.Run("doesn't truncate corrupted chunks", func(t *testing.T) {
+ db := newTestDB(t)
+ require.NoError(t, db.Close())
+
+		// Simulate a corrupted chunk: a chunk file without a header.
+ chunk, err := os.Create(path.Join(mmappedChunksDir(db.Dir()), "000001"))
+ require.NoError(t, err)
+ require.NoError(t, chunk.Close())
+
+ spinUpQuerierAndCheck(db.Dir(), t.TempDir(), 1)
+
+ // The RW Head should have no problem truncating its corrupted file:
+ // this proves that the chunk needed to be truncated.
+ db = newTestDB(t, withDir(db.Dir()))
+
+ require.NoError(t, err)
+ require.Equal(t, 0, countChunks(db.Dir()))
+ })
+}
+
+func TestDBCannotSeePartialCommits_AppendV2(t *testing.T) {
+ if defaultIsolationDisabled {
+ t.Skip("skipping test since tsdb isolation is disabled")
+ }
+
+ db := newTestDB(t)
+
+ stop := make(chan struct{})
+ firstInsert := make(chan struct{})
+ ctx := context.Background()
+
+ // Insert data in batches.
+ go func() {
+ iter := 0
+ for {
+ app := db.AppenderV2(ctx)
+
+ for j := range 100 {
+ _, err := app.Append(0, labels.FromStrings("foo", "bar", "a", strconv.Itoa(j)), 0, int64(iter), float64(iter), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+
+ if iter == 0 {
+ close(firstInsert)
+ }
+ iter++
+
+ select {
+ case <-stop:
+ return
+ default:
+ }
+ }
+ }()
+
+ <-firstInsert
+
+	// This is a race condition, so run the query a few times to tickle it.
+	// Without isolation, most of these attempts would observe a partial commit.
+ inconsistencies := 0
+ for range 10 {
+ func() {
+ querier, err := db.Querier(0, 1000000)
+ require.NoError(t, err)
+ defer querier.Close()
+
+ ss := querier.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+ _, seriesSet, ws, err := expandSeriesSet(ss)
+ require.NoError(t, err)
+ require.Empty(t, ws)
+
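+			// All series appended within one batch share the same value, so an isolated read must see exactly one distinct value.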
+ values := map[float64]struct{}{}
+ for _, series := range seriesSet {
+ values[series[len(series)-1].f] = struct{}{}
+ }
+ if len(values) != 1 {
+ inconsistencies++
+ }
+ }()
+ }
+ stop <- struct{}{}
+
+ require.Equal(t, 0, inconsistencies, "Some queries saw inconsistent results.")
+}
+
+func TestDBQueryDoesntSeeAppendsAfterCreation_AppendV2(t *testing.T) {
+ if defaultIsolationDisabled {
+ t.Skip("skipping test since tsdb isolation is disabled")
+ }
+
+ db := newTestDB(t)
+ querierBeforeAdd, err := db.Querier(0, 1000000)
+ require.NoError(t, err)
+ defer querierBeforeAdd.Close()
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+ _, err = app.Append(0, labels.FromStrings("foo", "bar"), 0, 0, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ querierAfterAddButBeforeCommit, err := db.Querier(0, 1000000)
+ require.NoError(t, err)
+ defer querierAfterAddButBeforeCommit.Close()
+
+ // None of the queriers should return anything after the Add but before the commit.
+ ss := querierBeforeAdd.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+ _, seriesSet, ws, err := expandSeriesSet(ss)
+ require.NoError(t, err)
+ require.Empty(t, ws)
+ require.Equal(t, map[string][]sample{}, seriesSet)
+
+ ss = querierAfterAddButBeforeCommit.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+ _, seriesSet, ws, err = expandSeriesSet(ss)
+ require.NoError(t, err)
+ require.Empty(t, ws)
+ require.Equal(t, map[string][]sample{}, seriesSet)
+
+	// This commit happens after the queriers are created, so its data should not be returned by them.
+ err = app.Commit()
+ require.NoError(t, err)
+
+ // Nothing returned for querier created before the Add.
+ ss = querierBeforeAdd.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+ _, seriesSet, ws, err = expandSeriesSet(ss)
+ require.NoError(t, err)
+ require.Empty(t, ws)
+ require.Equal(t, map[string][]sample{}, seriesSet)
+
+ // Series exists but has no samples for querier created after Add.
+ ss = querierAfterAddButBeforeCommit.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+ _, seriesSet, ws, err = expandSeriesSet(ss)
+ require.NoError(t, err)
+ require.Empty(t, ws)
+ require.Equal(t, map[string][]sample{`{foo="bar"}`: {}}, seriesSet)
+
+ querierAfterCommit, err := db.Querier(0, 1000000)
+ require.NoError(t, err)
+ defer querierAfterCommit.Close()
+
+ // Samples are returned for querier created after Commit.
+ ss = querierAfterCommit.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+ _, seriesSet, ws, err = expandSeriesSet(ss)
+ require.NoError(t, err)
+ require.Empty(t, ws)
+ require.Equal(t, map[string][]sample{`{foo="bar"}`: {{t: 0, f: 0}}}, seriesSet)
+}
+
+// TestCompactHead_AppendV2 ensures that the head compaction
+// creates a block that is ready for loading and
+// does not cause data loss.
+// This test:
+// * opens a storage;
+// * appends values;
+// * compacts the head; and
+// * queries the db to ensure the samples are present from the compacted head.
+func TestCompactHead_AppendV2(t *testing.T) {
+ t.Parallel()
+
+ // Open a DB and append data to the WAL.
+ opts := &Options{
+ RetentionDuration: int64(time.Hour * 24 * 15 / time.Millisecond),
+ NoLockfile: true,
+ MinBlockDuration: int64(time.Hour * 2 / time.Millisecond),
+ MaxBlockDuration: int64(time.Hour * 2 / time.Millisecond),
+ WALCompression: compression.Snappy,
+ }
+ db := newTestDB(t, withOpts(opts))
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+ var expSamples []sample
+ maxt := 100
+ for i := range maxt {
+ val := rand.Float64()
+ _, err := app.Append(0, labels.FromStrings("a", "b"), 0, int64(i), val, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ expSamples = append(expSamples, sample{int64(i), val, nil, nil})
+ }
+ require.NoError(t, app.Commit())
+
+ // Compact the Head to create a new block.
+ require.NoError(t, db.CompactHead(NewRangeHead(db.Head(), 0, int64(maxt)-1)))
+ require.NoError(t, db.Close())
+
+ // Delete everything but the new block and
+ // reopen the db to query it to ensure it includes the head data.
+ require.NoError(t, deleteNonBlocks(db.Dir()))
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
+ require.Len(t, db.Blocks(), 1)
+ require.Equal(t, int64(maxt), db.Head().MinTime())
+ defer func() { require.NoError(t, db.Close()) }()
+ querier, err := db.Querier(0, int64(maxt)-1)
+ require.NoError(t, err)
+ defer func() { require.NoError(t, querier.Close()) }()
+
+ seriesSet := querier.Select(ctx, false, nil, &labels.Matcher{Type: labels.MatchEqual, Name: "a", Value: "b"})
+ var series chunkenc.Iterator
+ var actSamples []sample
+
+ for seriesSet.Next() {
+ series = seriesSet.At().Iterator(series)
+ for series.Next() == chunkenc.ValFloat {
+ time, val := series.At()
+ actSamples = append(actSamples, sample{time, val, nil, nil})
+ }
+ require.NoError(t, series.Err())
+ }
+ require.Equal(t, expSamples, actSamples)
+ require.NoError(t, seriesSet.Err())
+}
+
+// TestCompactHeadWithDeletion_AppendV2 tests https://github.com/prometheus/prometheus/issues/11585.
+func TestCompactHeadWithDeletion_AppendV2(t *testing.T) {
+ db := newTestDB(t)
+
+ ctx := context.Background()
+
+ app := db.AppenderV2(ctx)
+ _, err := app.Append(0, labels.FromStrings("a", "b"), 0, 10, rand.Float64(), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ err = db.Delete(ctx, 0, 100, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+ require.NoError(t, err)
+
+ // This recreates the bug.
+ require.NoError(t, db.CompactHead(NewRangeHead(db.Head(), 0, 100)))
+ require.NoError(t, db.Close())
+}
+
+func TestOneCheckpointPerCompactCall_AppendV2(t *testing.T) {
+ t.Parallel()
+ blockRange := int64(1000)
+ opts := &Options{
+ RetentionDuration: blockRange * 1000,
+ NoLockfile: true,
+ MinBlockDuration: blockRange,
+ MaxBlockDuration: blockRange,
+ }
+
+ ctx := context.Background()
+
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions()
+
+	// Case 1: Lots of uncompacted data in the Head.
+
+ lbls := labels.FromStrings("foo_d", "choco_bar")
+ // Append samples spanning 59 block ranges.
+ app := db.AppenderV2(context.Background())
+ for i := range int64(60) {
+ _, err := app.Append(0, lbls, 0, blockRange*i, rand.Float64(), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, lbls, 0, (blockRange*i)+blockRange/2, rand.Float64(), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+		// Rotate the WAL file so that there are >3 files, which is required for a checkpoint to happen.
+ _, err = db.head.wal.NextSegment()
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+
+ // Check the existing WAL files.
+ first, last, err := wlog.Segments(db.head.wal.Dir())
+ require.NoError(t, err)
+ require.Equal(t, 0, first)
+ require.Equal(t, 60, last)
+
+ require.Equal(t, 0.0, prom_testutil.ToFloat64(db.head.metrics.checkpointCreationTotal))
+ require.NoError(t, db.Compact(ctx))
+ require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.checkpointCreationTotal))
+
+	// As the data spans 59 block ranges, 58 go to disk and 1 remains in the Head.
+ require.Len(t, db.Blocks(), 58)
+	// Though the WAL was truncated only once, the head should be truncated after each compaction.
+ require.Equal(t, 58.0, prom_testutil.ToFloat64(db.head.metrics.headTruncateTotal))
+
+	// The compaction should have truncated only the first 2/3 of the WAL (while also rotating the files).
+ first, last, err = wlog.Segments(db.head.wal.Dir())
+ require.NoError(t, err)
+ require.Equal(t, 40, first)
+ require.Equal(t, 61, last)
+
+	// The first checkpoint would cover the first 2/3 of the WAL, hence up to segment 39.
+	// That should be the last checkpoint.
+ _, cno, err := wlog.LastCheckpoint(db.head.wal.Dir())
+ require.NoError(t, err)
+ require.Equal(t, 39, cno)
+
+ // Case 2: Old blocks on disk.
+ // The above blocks will act as old blocks.
+
+ // Creating a block to cover the data in the Head so that
+ // Head will skip the data during replay and start fresh.
+ blocks := db.Blocks()
+ newBlockMint := blocks[len(blocks)-1].Meta().MaxTime
+ newBlockMaxt := db.Head().MaxTime() + 1
+ require.NoError(t, db.Close())
+
+ createBlock(t, db.Dir(), genSeries(1, 1, newBlockMint, newBlockMaxt))
+
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
+ db.DisableCompactions()
+
+ // 1 block more.
+ require.Len(t, db.Blocks(), 59)
+ // No series in Head because of this new block.
+ require.Equal(t, 0, int(db.head.NumSeries()))
+
+	// Adding a sample far into the future.
+ app = db.AppenderV2(context.Background())
+ _, err = app.Append(0, lbls, 0, blockRange*120, rand.Float64(), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+	// The mint of the head is the last block's maxt, which means the gap between the head's mint and maxt
+	// is very large. This will trigger many compactions.
+ require.Equal(t, newBlockMaxt, db.head.MinTime())
+
+ // Another WAL file was rotated.
+ first, last, err = wlog.Segments(db.head.wal.Dir())
+ require.NoError(t, err)
+ require.Equal(t, 40, first)
+ require.Equal(t, 62, last)
+
+ require.Equal(t, 0.0, prom_testutil.ToFloat64(db.head.metrics.checkpointCreationTotal))
+ require.NoError(t, db.Compact(ctx))
+ require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.checkpointCreationTotal))
+
+	// No new blocks should be created as there was no data in between the new samples and the blocks.
+ require.Len(t, db.Blocks(), 59)
+
+	// The compaction should have truncated only the first 2/3 of the WAL (while also rotating the files).
+ first, last, err = wlog.Segments(db.head.wal.Dir())
+ require.NoError(t, err)
+ require.Equal(t, 55, first)
+ require.Equal(t, 63, last)
+
+	// The first checkpoint would cover the first 2/3 of the WAL, hence up to segment 54.
+	// That should be the last checkpoint.
+ _, cno, err = wlog.LastCheckpoint(db.head.wal.Dir())
+ require.NoError(t, err)
+ require.Equal(t, 54, cno)
+}
+
+func TestQuerierShouldNotFailIfOOOCompactionOccursAfterRetrievingQuerier_AppendV2(t *testing.T) {
+ opts := DefaultOptions()
+ opts.OutOfOrderTimeWindow = 3 * DefaultBlockDuration
+ db := newTestDB(t, withOpts(opts))
+
+ // Disable compactions so we can control it.
+ db.DisableCompactions()
+
+ metric := labels.FromStrings(labels.MetricName, "test_metric")
+ ctx := context.Background()
+ interval := int64(15 * time.Second / time.Millisecond)
+ ts := int64(0)
+ samplesWritten := 0
+
+ // Capture the first timestamp - this will be the timestamp of the OOO sample we'll append below.
+ oooTS := ts
+ ts += interval
+
+ // Push samples after the OOO sample we'll write below.
+ for ; ts < 10*interval; ts += interval {
+ app := db.AppenderV2(ctx)
+ _, err := app.Append(0, metric, 0, ts, float64(ts), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ samplesWritten++
+ }
+
+ // Push a single OOO sample.
+ app := db.AppenderV2(ctx)
+ _, err := app.Append(0, metric, 0, oooTS, float64(ts), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ samplesWritten++
+
+ // Get a querier.
+ querierCreatedBeforeCompaction, err := db.ChunkQuerier(0, math.MaxInt64)
+ require.NoError(t, err)
+
+ // Start OOO head compaction.
+ compactionComplete := atomic.NewBool(false)
+ go func() {
+ defer compactionComplete.Store(true)
+
+ require.NoError(t, db.CompactOOOHead(ctx))
+ require.Equal(t, float64(1), prom_testutil.ToFloat64(db.Head().metrics.chunksRemoved))
+ }()
+
+ // Give CompactOOOHead time to start work.
+ // If it does not wait for querierCreatedBeforeCompaction to be closed, then the query will return incorrect results or fail.
+ time.Sleep(time.Second)
+ require.False(t, compactionComplete.Load(), "compaction completed before reading chunks or closing querier created before compaction")
+
+ // Get another querier. This one should only use the compacted blocks from disk and ignore the chunks that will be garbage collected.
+ querierCreatedAfterCompaction, err := db.ChunkQuerier(0, math.MaxInt64)
+ require.NoError(t, err)
+
+ testQuerier := func(q storage.ChunkQuerier) {
+ // Query back the series.
+ hints := &storage.SelectHints{Start: 0, End: math.MaxInt64, Step: interval}
+ seriesSet := q.Select(ctx, true, hints, labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "test_metric"))
+
+ // Collect the iterator for the series.
+ var iterators []chunks.Iterator
+ for seriesSet.Next() {
+ iterators = append(iterators, seriesSet.At().Iterator(nil))
+ }
+ require.NoError(t, seriesSet.Err())
+ require.Len(t, iterators, 1)
+ iterator := iterators[0]
+
+ // Check that we can still successfully read all samples.
+ samplesRead := 0
+ for iterator.Next() {
+ samplesRead += iterator.At().Chunk.NumSamples()
+ }
+
+ require.NoError(t, iterator.Err())
+ require.Equal(t, samplesWritten, samplesRead)
+ }
+
+ testQuerier(querierCreatedBeforeCompaction)
+
+ require.False(t, compactionComplete.Load(), "compaction completed before closing querier created before compaction")
+ require.NoError(t, querierCreatedBeforeCompaction.Close())
+ require.Eventually(t, compactionComplete.Load, time.Second, 10*time.Millisecond, "compaction should complete after querier created before compaction was closed, and not wait for querier created after compaction")
+
+	// Use the querier created after compaction and confirm it returns the expected results (i.e. from the disk block created from the OOO head, plus the in-order head) without error.
+ testQuerier(querierCreatedAfterCompaction)
+ require.NoError(t, querierCreatedAfterCompaction.Close())
+}
+
+func TestQuerierShouldNotFailIfOOOCompactionOccursAfterSelecting_AppendV2(t *testing.T) {
+ opts := DefaultOptions()
+ opts.OutOfOrderTimeWindow = 3 * DefaultBlockDuration
+ db := newTestDB(t, withOpts(opts))
+
+ // Disable compactions so we can control it.
+ db.DisableCompactions()
+
+ metric := labels.FromStrings(labels.MetricName, "test_metric")
+ ctx := context.Background()
+ interval := int64(15 * time.Second / time.Millisecond)
+ ts := int64(0)
+ samplesWritten := 0
+
+ // Capture the first timestamp - this will be the timestamp of the OOO sample we'll append below.
+ oooTS := ts
+ ts += interval
+
+ // Push samples after the OOO sample we'll write below.
+ for ; ts < 10*interval; ts += interval {
+ app := db.AppenderV2(ctx)
+ _, err := app.Append(0, metric, 0, ts, float64(ts), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ samplesWritten++
+ }
+
+ // Push a single OOO sample.
+ app := db.AppenderV2(ctx)
+ _, err := app.Append(0, metric, 0, oooTS, float64(ts), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ samplesWritten++
+
+ // Get a querier.
+ querier, err := db.ChunkQuerier(0, math.MaxInt64)
+ require.NoError(t, err)
+
+ // Query back the series.
+ hints := &storage.SelectHints{Start: 0, End: math.MaxInt64, Step: interval}
+ seriesSet := querier.Select(ctx, true, hints, labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "test_metric"))
+
+ // Start OOO head compaction.
+ compactionComplete := atomic.NewBool(false)
+ go func() {
+ defer compactionComplete.Store(true)
+
+ require.NoError(t, db.CompactOOOHead(ctx))
+ require.Equal(t, float64(1), prom_testutil.ToFloat64(db.Head().metrics.chunksRemoved))
+ }()
+
+ // Give CompactOOOHead time to start work.
+ // If it does not wait for the querier to be closed, then the query will return incorrect results or fail.
+ time.Sleep(time.Second)
+ require.False(t, compactionComplete.Load(), "compaction completed before reading chunks or closing querier")
+
+ // Collect the iterator for the series.
+ var iterators []chunks.Iterator
+ for seriesSet.Next() {
+ iterators = append(iterators, seriesSet.At().Iterator(nil))
+ }
+ require.NoError(t, seriesSet.Err())
+ require.Len(t, iterators, 1)
+ iterator := iterators[0]
+
+ // Check that we can still successfully read all samples.
+ samplesRead := 0
+ for iterator.Next() {
+ samplesRead += iterator.At().Chunk.NumSamples()
+ }
+
+ require.NoError(t, iterator.Err())
+ require.Equal(t, samplesWritten, samplesRead)
+
+ require.False(t, compactionComplete.Load(), "compaction completed before closing querier")
+ require.NoError(t, querier.Close())
+ require.Eventually(t, compactionComplete.Load, time.Second, 10*time.Millisecond, "compaction should complete after querier was closed")
+}
+
+func TestQuerierShouldNotFailIfOOOCompactionOccursAfterRetrievingIterators_AppendV2(t *testing.T) {
+ opts := DefaultOptions()
+ opts.OutOfOrderTimeWindow = 3 * DefaultBlockDuration
+ db := newTestDB(t, withOpts(opts))
+
+ // Disable compactions so we can control it.
+ db.DisableCompactions()
+
+ metric := labels.FromStrings(labels.MetricName, "test_metric")
+ ctx := context.Background()
+ interval := int64(15 * time.Second / time.Millisecond)
+ ts := int64(0)
+ samplesWritten := 0
+
+ // Capture the first timestamp - this will be the timestamp of the OOO sample we'll append below.
+ oooTS := ts
+ ts += interval
+
+ // Push samples after the OOO sample we'll write below.
+ for ; ts < 10*interval; ts += interval {
+ app := db.AppenderV2(ctx)
+ _, err := app.Append(0, metric, 0, ts, float64(ts), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ samplesWritten++
+ }
+
+ // Push a single OOO sample.
+ app := db.AppenderV2(ctx)
+ _, err := app.Append(0, metric, 0, oooTS, float64(ts), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ samplesWritten++
+
+ // Get a querier.
+ querier, err := db.ChunkQuerier(0, math.MaxInt64)
+ require.NoError(t, err)
+
+ // Query back the series.
+ hints := &storage.SelectHints{Start: 0, End: math.MaxInt64, Step: interval}
+ seriesSet := querier.Select(ctx, true, hints, labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "test_metric"))
+
+ // Collect the iterator for the series.
+ var iterators []chunks.Iterator
+ for seriesSet.Next() {
+ iterators = append(iterators, seriesSet.At().Iterator(nil))
+ }
+ require.NoError(t, seriesSet.Err())
+ require.Len(t, iterators, 1)
+ iterator := iterators[0]
+
+ // Start OOO head compaction.
+ compactionComplete := atomic.NewBool(false)
+ go func() {
+ defer compactionComplete.Store(true)
+
+ require.NoError(t, db.CompactOOOHead(ctx))
+ require.Equal(t, float64(1), prom_testutil.ToFloat64(db.Head().metrics.chunksRemoved))
+ }()
+
+ // Give CompactOOOHead time to start work.
+ // If it does not wait for the querier to be closed, then the query will return incorrect results or fail.
+ time.Sleep(time.Second)
+ require.False(t, compactionComplete.Load(), "compaction completed before reading chunks or closing querier")
+
+ // Check that we can still successfully read all samples.
+ samplesRead := 0
+ for iterator.Next() {
+ samplesRead += iterator.At().Chunk.NumSamples()
+ }
+
+ require.NoError(t, iterator.Err())
+ require.Equal(t, samplesWritten, samplesRead)
+
+ require.False(t, compactionComplete.Load(), "compaction completed before closing querier")
+ require.NoError(t, querier.Close())
+ require.Eventually(t, compactionComplete.Load, time.Second, 10*time.Millisecond, "compaction should complete after querier was closed")
+}
+
+func TestOOOWALWrite_AppendV2(t *testing.T) {
+ minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
+
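+	// Two test series built on a shared symbol table; they appear as refs 1 and 2 in the expected records below.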
+ s := labels.NewSymbolTable()
+ scratchBuilder1 := labels.NewScratchBuilderWithSymbolTable(s, 1)
+ scratchBuilder1.Add("l", "v1")
+ s1 := scratchBuilder1.Labels()
+ scratchBuilder2 := labels.NewScratchBuilderWithSymbolTable(s, 1)
+ scratchBuilder2.Add("l", "v2")
+ s2 := scratchBuilder2.Labels()
+
+ scenarios := map[string]struct {
+ appendSample func(app storage.AppenderV2, l labels.Labels, mins int64) (storage.SeriesRef, error)
+ expectedOOORecords []any
+ expectedInORecords []any
+ }{
+ "float": {
+ appendSample: func(app storage.AppenderV2, l labels.Labels, mins int64) (storage.SeriesRef, error) {
+ seriesRef, err := app.Append(0, l, 0, minutes(mins), float64(mins), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ return seriesRef, nil
+ },
+ expectedOOORecords: []any{
+				// The MmapRef values here are not hand-calculated; they are taken from the test run.
+ // What is important here is the order of records, and that MmapRef increases for each record.
+ []record.RefMmapMarker{
+ {Ref: 1},
+ },
+ []record.RefSample{
+ {Ref: 1, T: minutes(40), V: 40},
+ },
+
+ []record.RefMmapMarker{
+ {Ref: 2},
+ },
+ []record.RefSample{
+ {Ref: 2, T: minutes(42), V: 42},
+ },
+
+ []record.RefSample{
+ {Ref: 2, T: minutes(45), V: 45},
+ {Ref: 1, T: minutes(35), V: 35},
+ },
+ []record.RefMmapMarker{ // 3rd sample, hence m-mapped.
+ {Ref: 1, MmapRef: 0x100000000 + 8},
+ },
+ []record.RefSample{
+ {Ref: 1, T: minutes(36), V: 36},
+ {Ref: 1, T: minutes(37), V: 37},
+ },
+
+ []record.RefMmapMarker{ // 3rd sample, hence m-mapped.
+ {Ref: 1, MmapRef: 0x100000000 + 58},
+ },
+ []record.RefSample{ // Does not contain the in-order sample here.
+ {Ref: 1, T: minutes(50), V: 50},
+ },
+
+ // Single commit but multiple OOO records.
+ []record.RefMmapMarker{
+ {Ref: 2, MmapRef: 0x100000000 + 107},
+ },
+ []record.RefSample{
+ {Ref: 2, T: minutes(50), V: 50},
+ {Ref: 2, T: minutes(51), V: 51},
+ },
+ []record.RefMmapMarker{
+ {Ref: 2, MmapRef: 0x100000000 + 156},
+ },
+ []record.RefSample{
+ {Ref: 2, T: minutes(52), V: 52},
+ {Ref: 2, T: minutes(53), V: 53},
+ },
+ },
+ expectedInORecords: []any{
+ []record.RefSeries{
+ {Ref: 1, Labels: s1},
+ {Ref: 2, Labels: s2},
+ },
+ []record.RefSample{
+ {Ref: 1, T: minutes(60), V: 60},
+ {Ref: 2, T: minutes(60), V: 60},
+ },
+ []record.RefSample{
+ {Ref: 1, T: minutes(40), V: 40},
+ },
+ []record.RefSample{
+ {Ref: 2, T: minutes(42), V: 42},
+ },
+ []record.RefSample{
+ {Ref: 2, T: minutes(45), V: 45},
+ {Ref: 1, T: minutes(35), V: 35},
+ {Ref: 1, T: minutes(36), V: 36},
+ {Ref: 1, T: minutes(37), V: 37},
+ },
+ []record.RefSample{ // Contains both in-order and ooo sample.
+ {Ref: 1, T: minutes(50), V: 50},
+ {Ref: 2, T: minutes(65), V: 65},
+ },
+ []record.RefSample{
+ {Ref: 2, T: minutes(50), V: 50},
+ {Ref: 2, T: minutes(51), V: 51},
+ {Ref: 2, T: minutes(52), V: 52},
+ {Ref: 2, T: minutes(53), V: 53},
+ },
+ },
+ },
+ "integer histogram": {
+ appendSample: func(app storage.AppenderV2, l labels.Labels, mins int64) (storage.SeriesRef, error) {
+ seriesRef, err := app.Append(0, l, 0, minutes(mins), 0, tsdbutil.GenerateTestHistogram(mins), nil, storage.AOptions{})
+ require.NoError(t, err)
+ return seriesRef, nil
+ },
+ expectedOOORecords: []any{
+				// The MmapRef values here are not hand-calculated; they are taken from the test run.
+ // What is important here is the order of records, and that MmapRef increases for each record.
+ []record.RefMmapMarker{
+ {Ref: 1},
+ },
+ []record.RefHistogramSample{
+ {Ref: 1, T: minutes(40), H: tsdbutil.GenerateTestHistogram(40)},
+ },
+
+ []record.RefMmapMarker{
+ {Ref: 2},
+ },
+ []record.RefHistogramSample{
+ {Ref: 2, T: minutes(42), H: tsdbutil.GenerateTestHistogram(42)},
+ },
+
+ []record.RefHistogramSample{
+ {Ref: 2, T: minutes(45), H: tsdbutil.GenerateTestHistogram(45)},
+ {Ref: 1, T: minutes(35), H: tsdbutil.GenerateTestHistogram(35)},
+ },
+ []record.RefMmapMarker{ // 3rd sample, hence m-mapped.
+ {Ref: 1, MmapRef: 0x100000000 + 8},
+ },
+ []record.RefHistogramSample{
+ {Ref: 1, T: minutes(36), H: tsdbutil.GenerateTestHistogram(36)},
+ {Ref: 1, T: minutes(37), H: tsdbutil.GenerateTestHistogram(37)},
+ },
+
+ []record.RefMmapMarker{ // 3rd sample, hence m-mapped.
+ {Ref: 1, MmapRef: 0x100000000 + 89},
+ },
+ []record.RefHistogramSample{ // Does not contain the in-order sample here.
+ {Ref: 1, T: minutes(50), H: tsdbutil.GenerateTestHistogram(50)},
+ },
+
+ // Single commit but multiple OOO records.
+ []record.RefMmapMarker{
+ {Ref: 2, MmapRef: 0x100000000 + 172},
+ },
+ []record.RefHistogramSample{
+ {Ref: 2, T: minutes(50), H: tsdbutil.GenerateTestHistogram(50)},
+ {Ref: 2, T: minutes(51), H: tsdbutil.GenerateTestHistogram(51)},
+ },
+ []record.RefMmapMarker{
+ {Ref: 2, MmapRef: 0x100000000 + 257},
+ },
+ []record.RefHistogramSample{
+ {Ref: 2, T: minutes(52), H: tsdbutil.GenerateTestHistogram(52)},
+ {Ref: 2, T: minutes(53), H: tsdbutil.GenerateTestHistogram(53)},
+ },
+ },
+ expectedInORecords: []any{
+ []record.RefSeries{
+ {Ref: 1, Labels: s1},
+ {Ref: 2, Labels: s2},
+ },
+ []record.RefHistogramSample{
+ {Ref: 1, T: minutes(60), H: tsdbutil.GenerateTestHistogram(60)},
+ {Ref: 2, T: minutes(60), H: tsdbutil.GenerateTestHistogram(60)},
+ },
+ []record.RefHistogramSample{
+ {Ref: 1, T: minutes(40), H: tsdbutil.GenerateTestHistogram(40)},
+ },
+ []record.RefHistogramSample{
+ {Ref: 2, T: minutes(42), H: tsdbutil.GenerateTestHistogram(42)},
+ },
+ []record.RefHistogramSample{
+ {Ref: 2, T: minutes(45), H: tsdbutil.GenerateTestHistogram(45)},
+ {Ref: 1, T: minutes(35), H: tsdbutil.GenerateTestHistogram(35)},
+ {Ref: 1, T: minutes(36), H: tsdbutil.GenerateTestHistogram(36)},
+ {Ref: 1, T: minutes(37), H: tsdbutil.GenerateTestHistogram(37)},
+ },
+ []record.RefHistogramSample{ // Contains both in-order and ooo sample.
+ {Ref: 1, T: minutes(50), H: tsdbutil.GenerateTestHistogram(50)},
+ {Ref: 2, T: minutes(65), H: tsdbutil.GenerateTestHistogram(65)},
+ },
+ []record.RefHistogramSample{
+ {Ref: 2, T: minutes(50), H: tsdbutil.GenerateTestHistogram(50)},
+ {Ref: 2, T: minutes(51), H: tsdbutil.GenerateTestHistogram(51)},
+ {Ref: 2, T: minutes(52), H: tsdbutil.GenerateTestHistogram(52)},
+ {Ref: 2, T: minutes(53), H: tsdbutil.GenerateTestHistogram(53)},
+ },
+ },
+ },
+ "float histogram": {
+ appendSample: func(app storage.AppenderV2, l labels.Labels, mins int64) (storage.SeriesRef, error) {
+ seriesRef, err := app.Append(0, l, 0, minutes(mins), 0, nil, tsdbutil.GenerateTestFloatHistogram(mins), storage.AOptions{})
+ require.NoError(t, err)
+ return seriesRef, nil
+ },
+ expectedOOORecords: []any{
+				// The MmapRef values here are not hand-calculated; they are taken from the test run.
+ // What is important here is the order of records, and that MmapRef increases for each record.
+ []record.RefMmapMarker{
+ {Ref: 1},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 1, T: minutes(40), FH: tsdbutil.GenerateTestFloatHistogram(40)},
+ },
+
+ []record.RefMmapMarker{
+ {Ref: 2},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 2, T: minutes(42), FH: tsdbutil.GenerateTestFloatHistogram(42)},
+ },
+
+ []record.RefFloatHistogramSample{
+ {Ref: 2, T: minutes(45), FH: tsdbutil.GenerateTestFloatHistogram(45)},
+ {Ref: 1, T: minutes(35), FH: tsdbutil.GenerateTestFloatHistogram(35)},
+ },
+ []record.RefMmapMarker{ // 3rd sample, hence m-mapped.
+ {Ref: 1, MmapRef: 0x100000000 + 8},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 1, T: minutes(36), FH: tsdbutil.GenerateTestFloatHistogram(36)},
+ {Ref: 1, T: minutes(37), FH: tsdbutil.GenerateTestFloatHistogram(37)},
+ },
+
+ []record.RefMmapMarker{ // 3rd sample, hence m-mapped.
+ {Ref: 1, MmapRef: 0x100000000 + 177},
+ },
+ []record.RefFloatHistogramSample{ // Does not contain the in-order sample here.
+ {Ref: 1, T: minutes(50), FH: tsdbutil.GenerateTestFloatHistogram(50)},
+ },
+
+ // Single commit but multiple OOO records.
+ []record.RefMmapMarker{
+ {Ref: 2, MmapRef: 0x100000000 + 348},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 2, T: minutes(50), FH: tsdbutil.GenerateTestFloatHistogram(50)},
+ {Ref: 2, T: minutes(51), FH: tsdbutil.GenerateTestFloatHistogram(51)},
+ },
+ []record.RefMmapMarker{
+ {Ref: 2, MmapRef: 0x100000000 + 521},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 2, T: minutes(52), FH: tsdbutil.GenerateTestFloatHistogram(52)},
+ {Ref: 2, T: minutes(53), FH: tsdbutil.GenerateTestFloatHistogram(53)},
+ },
+ },
+ expectedInORecords: []any{
+ []record.RefSeries{
+ {Ref: 1, Labels: s1},
+ {Ref: 2, Labels: s2},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 1, T: minutes(60), FH: tsdbutil.GenerateTestFloatHistogram(60)},
+ {Ref: 2, T: minutes(60), FH: tsdbutil.GenerateTestFloatHistogram(60)},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 1, T: minutes(40), FH: tsdbutil.GenerateTestFloatHistogram(40)},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 2, T: minutes(42), FH: tsdbutil.GenerateTestFloatHistogram(42)},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 2, T: minutes(45), FH: tsdbutil.GenerateTestFloatHistogram(45)},
+ {Ref: 1, T: minutes(35), FH: tsdbutil.GenerateTestFloatHistogram(35)},
+ {Ref: 1, T: minutes(36), FH: tsdbutil.GenerateTestFloatHistogram(36)},
+ {Ref: 1, T: minutes(37), FH: tsdbutil.GenerateTestFloatHistogram(37)},
+ },
+ []record.RefFloatHistogramSample{ // Contains both in-order and ooo sample.
+ {Ref: 1, T: minutes(50), FH: tsdbutil.GenerateTestFloatHistogram(50)},
+ {Ref: 2, T: minutes(65), FH: tsdbutil.GenerateTestFloatHistogram(65)},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 2, T: minutes(50), FH: tsdbutil.GenerateTestFloatHistogram(50)},
+ {Ref: 2, T: minutes(51), FH: tsdbutil.GenerateTestFloatHistogram(51)},
+ {Ref: 2, T: minutes(52), FH: tsdbutil.GenerateTestFloatHistogram(52)},
+ {Ref: 2, T: minutes(53), FH: tsdbutil.GenerateTestFloatHistogram(53)},
+ },
+ },
+ },
+ "custom buckets histogram": {
+ appendSample: func(app storage.AppenderV2, l labels.Labels, mins int64) (storage.SeriesRef, error) {
+ seriesRef, err := app.Append(0, l, 0, minutes(mins), 0, tsdbutil.GenerateTestCustomBucketsHistogram(mins), nil, storage.AOptions{})
+ require.NoError(t, err)
+ return seriesRef, nil
+ },
+ expectedOOORecords: []any{
+				// The MmapRef values here are not hand-calculated; they are taken from the test run.
+ // What is important here is the order of records, and that MmapRef increases for each record.
+ []record.RefMmapMarker{
+ {Ref: 1},
+ },
+ []record.RefHistogramSample{
+ {Ref: 1, T: minutes(40), H: tsdbutil.GenerateTestCustomBucketsHistogram(40)},
+ },
+
+ []record.RefMmapMarker{
+ {Ref: 2},
+ },
+ []record.RefHistogramSample{
+ {Ref: 2, T: minutes(42), H: tsdbutil.GenerateTestCustomBucketsHistogram(42)},
+ },
+
+ []record.RefHistogramSample{
+ {Ref: 2, T: minutes(45), H: tsdbutil.GenerateTestCustomBucketsHistogram(45)},
+ {Ref: 1, T: minutes(35), H: tsdbutil.GenerateTestCustomBucketsHistogram(35)},
+ },
+ []record.RefMmapMarker{ // 3rd sample, hence m-mapped.
+ {Ref: 1, MmapRef: 0x100000000 + 8},
+ },
+ []record.RefHistogramSample{
+ {Ref: 1, T: minutes(36), H: tsdbutil.GenerateTestCustomBucketsHistogram(36)},
+ {Ref: 1, T: minutes(37), H: tsdbutil.GenerateTestCustomBucketsHistogram(37)},
+ },
+
+ []record.RefMmapMarker{ // 3rd sample, hence m-mapped.
+ {Ref: 1, MmapRef: 0x100000000 + 82},
+ },
+ []record.RefHistogramSample{ // Does not contain the in-order sample here.
+ {Ref: 1, T: minutes(50), H: tsdbutil.GenerateTestCustomBucketsHistogram(50)},
+ },
+
+ // Single commit but multiple OOO records.
+ []record.RefMmapMarker{
+ {Ref: 2, MmapRef: 0x100000000 + 160},
+ },
+ []record.RefHistogramSample{
+ {Ref: 2, T: minutes(50), H: tsdbutil.GenerateTestCustomBucketsHistogram(50)},
+ {Ref: 2, T: minutes(51), H: tsdbutil.GenerateTestCustomBucketsHistogram(51)},
+ },
+ []record.RefMmapMarker{
+ {Ref: 2, MmapRef: 0x100000000 + 239},
+ },
+ []record.RefHistogramSample{
+ {Ref: 2, T: minutes(52), H: tsdbutil.GenerateTestCustomBucketsHistogram(52)},
+ {Ref: 2, T: minutes(53), H: tsdbutil.GenerateTestCustomBucketsHistogram(53)},
+ },
+ },
+ expectedInORecords: []any{
+ []record.RefSeries{
+ {Ref: 1, Labels: s1},
+ {Ref: 2, Labels: s2},
+ },
+ []record.RefHistogramSample{
+ {Ref: 1, T: minutes(60), H: tsdbutil.GenerateTestCustomBucketsHistogram(60)},
+ {Ref: 2, T: minutes(60), H: tsdbutil.GenerateTestCustomBucketsHistogram(60)},
+ },
+ []record.RefHistogramSample{
+ {Ref: 1, T: minutes(40), H: tsdbutil.GenerateTestCustomBucketsHistogram(40)},
+ },
+ []record.RefHistogramSample{
+ {Ref: 2, T: minutes(42), H: tsdbutil.GenerateTestCustomBucketsHistogram(42)},
+ },
+ []record.RefHistogramSample{
+ {Ref: 2, T: minutes(45), H: tsdbutil.GenerateTestCustomBucketsHistogram(45)},
+ {Ref: 1, T: minutes(35), H: tsdbutil.GenerateTestCustomBucketsHistogram(35)},
+ {Ref: 1, T: minutes(36), H: tsdbutil.GenerateTestCustomBucketsHistogram(36)},
+ {Ref: 1, T: minutes(37), H: tsdbutil.GenerateTestCustomBucketsHistogram(37)},
+ },
+ []record.RefHistogramSample{ // Contains both in-order and ooo sample.
+ {Ref: 1, T: minutes(50), H: tsdbutil.GenerateTestCustomBucketsHistogram(50)},
+ {Ref: 2, T: minutes(65), H: tsdbutil.GenerateTestCustomBucketsHistogram(65)},
+ },
+ []record.RefHistogramSample{
+ {Ref: 2, T: minutes(50), H: tsdbutil.GenerateTestCustomBucketsHistogram(50)},
+ {Ref: 2, T: minutes(51), H: tsdbutil.GenerateTestCustomBucketsHistogram(51)},
+ {Ref: 2, T: minutes(52), H: tsdbutil.GenerateTestCustomBucketsHistogram(52)},
+ {Ref: 2, T: minutes(53), H: tsdbutil.GenerateTestCustomBucketsHistogram(53)},
+ },
+ },
+ },
+ "custom buckets float histogram": {
+ appendSample: func(app storage.AppenderV2, l labels.Labels, mins int64) (storage.SeriesRef, error) {
+ seriesRef, err := app.Append(0, l, 0, minutes(mins), 0, nil, tsdbutil.GenerateTestCustomBucketsFloatHistogram(mins), storage.AOptions{})
+ require.NoError(t, err)
+ return seriesRef, nil
+ },
+ expectedOOORecords: []any{
+				// The MmapRef values in this list are not hand-calculated; they are taken from a test run.
+ // What is important here is the order of records, and that MmapRef increases for each record.
+ []record.RefMmapMarker{
+ {Ref: 1},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 1, T: minutes(40), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(40)},
+ },
+
+ []record.RefMmapMarker{
+ {Ref: 2},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 2, T: minutes(42), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(42)},
+ },
+
+ []record.RefFloatHistogramSample{
+ {Ref: 2, T: minutes(45), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(45)},
+ {Ref: 1, T: minutes(35), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(35)},
+ },
+ []record.RefMmapMarker{ // 3rd sample, hence m-mapped.
+ {Ref: 1, MmapRef: 0x100000000 + 8},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 1, T: minutes(36), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(36)},
+ {Ref: 1, T: minutes(37), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(37)},
+ },
+
+ []record.RefMmapMarker{ // 3rd sample, hence m-mapped.
+ {Ref: 1, MmapRef: 0x100000000 + 134},
+ },
+ []record.RefFloatHistogramSample{ // Does not contain the in-order sample here.
+ {Ref: 1, T: minutes(50), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(50)},
+ },
+
+ // Single commit but multiple OOO records.
+ []record.RefMmapMarker{
+ {Ref: 2, MmapRef: 0x100000000 + 263},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 2, T: minutes(50), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(50)},
+ {Ref: 2, T: minutes(51), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(51)},
+ },
+ []record.RefMmapMarker{
+ {Ref: 2, MmapRef: 0x100000000 + 393},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 2, T: minutes(52), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(52)},
+ {Ref: 2, T: minutes(53), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(53)},
+ },
+ },
+ expectedInORecords: []any{
+ []record.RefSeries{
+ {Ref: 1, Labels: s1},
+ {Ref: 2, Labels: s2},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 1, T: minutes(60), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(60)},
+ {Ref: 2, T: minutes(60), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(60)},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 1, T: minutes(40), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(40)},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 2, T: minutes(42), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(42)},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 2, T: minutes(45), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(45)},
+ {Ref: 1, T: minutes(35), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(35)},
+ {Ref: 1, T: minutes(36), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(36)},
+ {Ref: 1, T: minutes(37), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(37)},
+ },
+ []record.RefFloatHistogramSample{ // Contains both in-order and ooo sample.
+ {Ref: 1, T: minutes(50), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(50)},
+ {Ref: 2, T: minutes(65), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(65)},
+ },
+ []record.RefFloatHistogramSample{
+ {Ref: 2, T: minutes(50), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(50)},
+ {Ref: 2, T: minutes(51), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(51)},
+ {Ref: 2, T: minutes(52), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(52)},
+ {Ref: 2, T: minutes(53), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(53)},
+ },
+ },
+ },
+ }
+ for name, scenario := range scenarios {
+ t.Run(name, func(t *testing.T) {
+ testOOOWALWriteAppendV2(t, scenario.appendSample, scenario.expectedOOORecords, scenario.expectedInORecords)
+ })
+ }
+}
+
+func testOOOWALWriteAppendV2(t *testing.T,
+ appendSample func(app storage.AppenderV2, l labels.Labels, mins int64) (storage.SeriesRef, error),
+ expectedOOORecords []any,
+ expectedInORecords []any,
+) {
+ opts := DefaultOptions()
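+	// With OutOfOrderCapMax=2 the in-memory OOO chunk holds at most 2 samples per series, so appending a
+	// 3rd OOO sample m-maps the current chunk and starts a new one (see the "m-maps" comments below).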
+ opts.OutOfOrderCapMax = 2
+ opts.OutOfOrderTimeWindow = 30 * time.Minute.Milliseconds()
+ db := newTestDB(t, withOpts(opts))
+
+ s1, s2 := labels.FromStrings("l", "v1"), labels.FromStrings("l", "v2")
+
+ // Ingest sample at 1h.
+ app := db.AppenderV2(context.Background())
+ appendSample(app, s1, 60)
+ appendSample(app, s2, 60)
+ require.NoError(t, app.Commit())
+
+ // OOO for s1.
+ app = db.AppenderV2(context.Background())
+ appendSample(app, s1, 40)
+ require.NoError(t, app.Commit())
+
+ // OOO for s2.
+ app = db.AppenderV2(context.Background())
+ appendSample(app, s2, 42)
+ require.NoError(t, app.Commit())
+
+ // OOO for both s1 and s2 in the same commit.
+ app = db.AppenderV2(context.Background())
+ appendSample(app, s2, 45)
+ appendSample(app, s1, 35)
+ appendSample(app, s1, 36) // m-maps.
+ appendSample(app, s1, 37)
+ require.NoError(t, app.Commit())
+
+ // OOO for s1 but not for s2 in the same commit.
+ app = db.AppenderV2(context.Background())
+ appendSample(app, s1, 50) // m-maps.
+ appendSample(app, s2, 65)
+ require.NoError(t, app.Commit())
+
+ // Single commit has 2 times m-mapping and more samples after m-map.
+ app = db.AppenderV2(context.Background())
+ appendSample(app, s2, 50) // m-maps.
+ appendSample(app, s2, 51)
+ appendSample(app, s2, 52) // m-maps.
+ appendSample(app, s2, 53)
+ require.NoError(t, app.Commit())
+
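+	// getRecords reads all segments in the given WAL/WBL directory and decodes every record into its
+	// typed slice, so the result can be compared against the expected records above.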
+ getRecords := func(walDir string) []any {
+ sr, err := wlog.NewSegmentsReader(walDir)
+ require.NoError(t, err)
+ r := wlog.NewReader(sr)
+ defer func() {
+ require.NoError(t, sr.Close())
+ }()
+
+ var records []any
+ dec := record.NewDecoder(nil, promslog.NewNopLogger())
+ for r.Next() {
+ rec := r.Record()
+ switch typ := dec.Type(rec); typ {
+ case record.Series:
+ series, err := dec.Series(rec, nil)
+ require.NoError(t, err)
+ records = append(records, series)
+ case record.Samples:
+ samples, err := dec.Samples(rec, nil)
+ require.NoError(t, err)
+ records = append(records, samples)
+ case record.MmapMarkers:
+ markers, err := dec.MmapMarkers(rec, nil)
+ require.NoError(t, err)
+ records = append(records, markers)
+ case record.HistogramSamples, record.CustomBucketsHistogramSamples:
+ histogramSamples, err := dec.HistogramSamples(rec, nil)
+ require.NoError(t, err)
+ records = append(records, histogramSamples)
+ case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
+ floatHistogramSamples, err := dec.FloatHistogramSamples(rec, nil)
+ require.NoError(t, err)
+ records = append(records, floatHistogramSamples)
+ default:
+ t.Fatalf("got a WAL record that is not series or samples: %v", typ)
+ }
+ }
+
+ return records
+ }
+
+ // The normal WAL.
+ actRecs := getRecords(path.Join(db.Dir(), "wal"))
+ require.Equal(t, expectedInORecords, actRecs)
+
+ // The WBL.
+ actRecs = getRecords(path.Join(db.Dir(), wlog.WblDirName))
+ require.Equal(t, expectedOOORecords, actRecs)
+}
+
+// Tests https://github.com/prometheus/prometheus/issues/10291#issuecomment-1044373110.
+func TestDBPanicOnMmappingHeadChunk_AppendV2(t *testing.T) {
+ var err error
+ ctx := context.Background()
+
+ db := newTestDB(t)
+ db.DisableCompactions()
+
+ // Choosing scrape interval of 45s to have chunk larger than 1h.
+ itvl := int64(45 * time.Second / time.Millisecond)
+
+ lastTs := int64(0)
+ addSamples := func(numSamples int) {
+ app := db.AppenderV2(context.Background())
+ var ref storage.SeriesRef
+ lbls := labels.FromStrings("__name__", "testing", "foo", "bar")
+ for i := range numSamples {
+ ref, err = app.Append(ref, lbls, 0, lastTs, float64(lastTs), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ lastTs += itvl
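+			// Commit every 10th sample so the head sees many separate append transactions.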
+ if i%10 == 0 {
+ require.NoError(t, app.Commit())
+ app = db.AppenderV2(context.Background())
+ }
+ }
+ require.NoError(t, app.Commit())
+ }
+
+	// Ingest samples up to 2h50m to make the head "about to compact".
+ numSamples := int(170*time.Minute/time.Millisecond) / int(itvl)
+ addSamples(numSamples)
+
+ require.Empty(t, db.Blocks())
+ require.NoError(t, db.Compact(ctx))
+ require.Empty(t, db.Blocks())
+
+ // Restarting.
+ require.NoError(t, db.Close())
+
+ db = newTestDB(t, withDir(db.Dir()))
+ db.DisableCompactions()
+
+	// Ingest samples up to 20m more to make the head compact.
+ numSamples = int(20*time.Minute/time.Millisecond) / int(itvl)
+ addSamples(numSamples)
+
+ require.Empty(t, db.Blocks())
+ require.NoError(t, db.Compact(ctx))
+ require.Len(t, db.Blocks(), 1)
+
+ // More samples to m-map and panic.
+ numSamples = int(120*time.Minute/time.Millisecond) / int(itvl)
+ addSamples(numSamples)
+
+ require.NoError(t, db.Close())
+}
+
+// TODO(bwplotka): Add cases ensuring stale sample appends will skip metadata persisting.
+func TestMetadataInWAL_AppenderV2(t *testing.T) {
+ opts := DefaultOptions()
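+	// With EnableMetadataWALRecords set, metadata passed via storage.AOptions is persisted as metadata records in the WAL.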
+ opts.EnableMetadataWALRecords = true
+ db := newTestDB(t, withOpts(opts))
+ ctx := context.Background()
+
+ // Add some series so we can attach metadata to them.
+ s1 := labels.FromStrings("a", "b")
+ s2 := labels.FromStrings("c", "d")
+ s3 := labels.FromStrings("e", "f")
+ s4 := labels.FromStrings("g", "h")
+
+ // Add a first round of metadata to the first three series.
+ m1 := metadata.Metadata{Type: "gauge", Unit: "unit_1", Help: "help_1"}
+ m2 := metadata.Metadata{Type: "gauge", Unit: "unit_2", Help: "help_2"}
+ m3 := metadata.Metadata{Type: "gauge", Unit: "unit_3", Help: "help_3"}
+
+ app := db.AppenderV2(ctx)
+ ts := int64(0)
+ _, err := app.Append(0, s1, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m1})
+ require.NoError(t, err)
+ _, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m2})
+ require.NoError(t, err)
+ _, err = app.Append(0, s3, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m3})
+ require.NoError(t, err)
+ _, err = app.Append(0, s4, 0, ts, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ // Add a replicated metadata entry to the first series,
+ // a completely new metadata entry for the fourth series,
+ // and a changed metadata entry to the second series.
+ m4 := metadata.Metadata{Type: "counter", Unit: "unit_4", Help: "help_4"}
+ m5 := metadata.Metadata{Type: "counter", Unit: "unit_5", Help: "help_5"}
+ app = db.AppenderV2(ctx)
+ ts++
+ _, err = app.Append(0, s1, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m1})
+ require.NoError(t, err)
+ _, err = app.Append(0, s4, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m4})
+ require.NoError(t, err)
+ _, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m5})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ // Read the WAL to see if the disk storage format is correct.
+ recs := readTestWAL(t, path.Join(db.Dir(), "wal"))
+ var gotMetadataBlocks [][]record.RefMetadata
+ for _, rec := range recs {
+ if mr, ok := rec.([]record.RefMetadata); ok {
+ gotMetadataBlocks = append(gotMetadataBlocks, mr)
+ }
+ }
+
+ expectedMetadata := []record.RefMetadata{
+ {Ref: 1, Type: record.GetMetricType(m1.Type), Unit: m1.Unit, Help: m1.Help},
+ {Ref: 2, Type: record.GetMetricType(m2.Type), Unit: m2.Unit, Help: m2.Help},
+ {Ref: 3, Type: record.GetMetricType(m3.Type), Unit: m3.Unit, Help: m3.Help},
+ {Ref: 4, Type: record.GetMetricType(m4.Type), Unit: m4.Unit, Help: m4.Help},
+ {Ref: 2, Type: record.GetMetricType(m5.Type), Unit: m5.Unit, Help: m5.Help},
+ }
+ require.Len(t, gotMetadataBlocks, 2)
+ require.Equal(t, expectedMetadata[:3], gotMetadataBlocks[0])
+ require.Equal(t, expectedMetadata[3:], gotMetadataBlocks[1])
+}
+
+func TestMetadataCheckpointingOnlyKeepsLatestEntry_AppendV2(t *testing.T) {
+ ctx := context.Background()
+ numSamples := 10000
+ hb, w := newTestHead(t, int64(numSamples)*10, compression.None, false)
+ hb.opts.EnableMetadataWALRecords = true
+
+ // Add some series so we can append metadata to them.
+ s1 := labels.FromStrings("a", "b")
+ s2 := labels.FromStrings("c", "d")
+ s3 := labels.FromStrings("e", "f")
+ s4 := labels.FromStrings("g", "h")
+
+ m1 := metadata.Metadata{Type: "gauge", Unit: "unit_1", Help: "help_1"}
+ m2 := metadata.Metadata{Type: "gauge", Unit: "unit_2", Help: "help_2"}
+ m3 := metadata.Metadata{Type: "gauge", Unit: "unit_3", Help: "help_3"}
+ m4 := metadata.Metadata{Type: "gauge", Unit: "unit_4", Help: "help_4"}
+
+ app := hb.AppenderV2(ctx)
+ ts := int64(0)
+ _, err := app.Append(0, s1, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m1})
+ require.NoError(t, err)
+ _, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m2})
+ require.NoError(t, err)
+ _, err = app.Append(0, s3, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m3})
+ require.NoError(t, err)
+ _, err = app.Append(0, s4, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m4})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ // Update metadata for first series.
+ m5 := metadata.Metadata{Type: "counter", Unit: "unit_5", Help: "help_5"}
+ app = hb.AppenderV2(ctx)
+ ts++
+ _, err = app.Append(0, s1, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m5})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ // Switch back-and-forth metadata for second series.
+ // Since it ended on a new metadata record, we expect a single new entry.
+ m6 := metadata.Metadata{Type: "counter", Unit: "unit_6", Help: "help_6"}
+
+ app = hb.AppenderV2(ctx)
+ ts++
+ _, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m6})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ app = hb.AppenderV2(ctx)
+ ts++
+ _, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m2})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ app = hb.AppenderV2(ctx)
+ ts++
+ _, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m6})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ app = hb.AppenderV2(ctx)
+ ts++
+ _, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m2})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ app = hb.AppenderV2(ctx)
+ ts++
+ _, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m6})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ // Let's create a checkpoint.
+ first, last, err := wlog.Segments(w.Dir())
+ require.NoError(t, err)
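+	// Drop series 3 while checkpointing; its metadata must not survive into the checkpoint.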
+ keep := func(id chunks.HeadSeriesRef) bool {
+ return id != 3
+ }
+ _, err = wlog.Checkpoint(promslog.NewNopLogger(), w, first, last-1, keep, 0)
+ require.NoError(t, err)
+
+ // Confirm there's been a checkpoint.
+ cdir, _, err := wlog.LastCheckpoint(w.Dir())
+ require.NoError(t, err)
+
+ // Read in checkpoint and WAL.
+ recs := readTestWAL(t, cdir)
+ var gotMetadataBlocks [][]record.RefMetadata
+ for _, rec := range recs {
+ if mr, ok := rec.([]record.RefMetadata); ok {
+ gotMetadataBlocks = append(gotMetadataBlocks, mr)
+ }
+ }
+
+ // There should only be 1 metadata block present, with only the latest
+ // metadata kept around.
+ wantMetadata := []record.RefMetadata{
+ {Ref: 1, Type: record.GetMetricType(m5.Type), Unit: m5.Unit, Help: m5.Help},
+ {Ref: 2, Type: record.GetMetricType(m6.Type), Unit: m6.Unit, Help: m6.Help},
+ {Ref: 4, Type: record.GetMetricType(m4.Type), Unit: m4.Unit, Help: m4.Help},
+ }
+ require.Len(t, gotMetadataBlocks, 1)
+ require.Len(t, gotMetadataBlocks[0], 3)
+ gotMetadataBlock := gotMetadataBlocks[0]
+
+ sort.Slice(gotMetadataBlock, func(i, j int) bool { return gotMetadataBlock[i].Ref < gotMetadataBlock[j].Ref })
+ require.Equal(t, wantMetadata, gotMetadataBlock)
+ require.NoError(t, hb.Close())
+}
+
+func TestMetadataAssertInMemoryData_AppendV2(t *testing.T) {
+ opts := DefaultOptions()
+ opts.EnableMetadataWALRecords = true
+ db := newTestDB(t, withOpts(opts))
+ ctx := context.Background()
+
+ // Add some series so we can append metadata to them.
+ s1 := labels.FromStrings("a", "b")
+ s2 := labels.FromStrings("c", "d")
+ s3 := labels.FromStrings("e", "f")
+ s4 := labels.FromStrings("g", "h")
+
+ // Add a first round of metadata to the first three series.
+ // The in-memory data held in the db Head should hold the metadata.
+ m1 := metadata.Metadata{Type: "gauge", Unit: "unit_1", Help: "help_1"}
+ m2 := metadata.Metadata{Type: "gauge", Unit: "unit_2", Help: "help_2"}
+ m3 := metadata.Metadata{Type: "gauge", Unit: "unit_3", Help: "help_3"}
+
+ app := db.AppenderV2(ctx)
+ ts := int64(0)
+ _, err := app.Append(0, s1, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m1})
+ require.NoError(t, err)
+ _, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m2})
+ require.NoError(t, err)
+ _, err = app.Append(0, s3, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m3})
+ require.NoError(t, err)
+ _, err = app.Append(0, s4, 0, ts, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ series1 := db.head.series.getByHash(s1.Hash(), s1)
+ series2 := db.head.series.getByHash(s2.Hash(), s2)
+ series3 := db.head.series.getByHash(s3.Hash(), s3)
+ series4 := db.head.series.getByHash(s4.Hash(), s4)
+ require.Equal(t, *series1.meta, m1)
+ require.Equal(t, *series2.meta, m2)
+ require.Equal(t, *series3.meta, m3)
+ require.Nil(t, series4.meta)
+
+ // Add a replicated metadata entry to the first series,
+ // a changed metadata entry to the second series,
+ // and a completely new metadata entry for the fourth series.
+ // The in-memory data held in the db Head should be correctly updated.
+ m4 := metadata.Metadata{Type: "counter", Unit: "unit_4", Help: "help_4"}
+ m5 := metadata.Metadata{Type: "counter", Unit: "unit_5", Help: "help_5"}
+ app = db.AppenderV2(ctx)
+ ts++
+ _, err = app.Append(0, s1, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m1})
+ require.NoError(t, err)
+ _, err = app.Append(0, s4, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m4})
+ require.NoError(t, err)
+ _, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m5})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ series1 = db.head.series.getByHash(s1.Hash(), s1)
+ series2 = db.head.series.getByHash(s2.Hash(), s2)
+ series3 = db.head.series.getByHash(s3.Hash(), s3)
+ series4 = db.head.series.getByHash(s4.Hash(), s4)
+ require.Equal(t, *series1.meta, m1)
+ require.Equal(t, *series2.meta, m5)
+ require.Equal(t, *series3.meta, m3)
+ require.Equal(t, *series4.meta, m4)
+
+ require.NoError(t, db.Close())
+
+ // Reopen the DB, replaying the WAL. The Head must have been replayed
+ // correctly in memory.
+ reopenDB, err := Open(db.Dir(), nil, nil, nil, nil)
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, reopenDB.Close())
+ })
+
+ _, err = reopenDB.head.wal.Size()
+ require.NoError(t, err)
+
+ require.Equal(t, *reopenDB.head.series.getByHash(s1.Hash(), s1).meta, m1)
+ require.Equal(t, *reopenDB.head.series.getByHash(s2.Hash(), s2).meta, m5)
+ require.Equal(t, *reopenDB.head.series.getByHash(s3.Hash(), s3).meta, m3)
+ require.Equal(t, *reopenDB.head.series.getByHash(s4.Hash(), s4).meta, m4)
+}
+
+// TestMultipleEncodingsCommitOrder_AppendV2 mainly serves to demonstrate what happens when committing a batch of
+// samples for the same series when there are multiple encodings. With issue #15177 fixed, this now all works as expected.
+func TestMultipleEncodingsCommitOrder_AppendV2(t *testing.T) {
+ opts := DefaultOptions()
+ opts.OutOfOrderCapMax = 30
+ opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds()
+
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions()
+
+ series1 := labels.FromStrings("foo", "bar1")
+ addSample := func(app storage.AppenderV2, ts int64, valType chunkenc.ValueType) chunks.Sample {
+ if valType == chunkenc.ValFloat {
+ _, err := app.Append(0, labels.FromStrings("foo", "bar1"), 0, ts, float64(ts), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ return sample{t: ts, f: float64(ts)}
+ }
+ if valType == chunkenc.ValHistogram {
+ h := tsdbutil.GenerateTestHistogram(ts)
+ _, err := app.Append(0, labels.FromStrings("foo", "bar1"), 0, ts, 0, h, nil, storage.AOptions{})
+ require.NoError(t, err)
+ return sample{t: ts, h: h}
+ }
+ fh := tsdbutil.GenerateTestFloatHistogram(ts)
+ _, err := app.Append(0, labels.FromStrings("foo", "bar1"), 0, ts, 0, nil, fh, storage.AOptions{})
+ require.NoError(t, err)
+ return sample{t: ts, fh: fh}
+ }
+
+ verifySamples := func(minT, maxT int64, expSamples []chunks.Sample, oooCount int) {
+ requireEqualOOOSamples(t, oooCount, db)
+
+ // Verify samples querier.
+ querier, err := db.Querier(minT, maxT)
+ require.NoError(t, err)
+ defer querier.Close()
+
+ seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1"))
+ require.Len(t, seriesSet, 1)
+ gotSamples := seriesSet[series1.String()]
+ requireEqualSamples(t, series1.String(), expSamples, gotSamples, requireEqualSamplesIgnoreCounterResets)
+
+ // Verify chunks querier.
+ chunkQuerier, err := db.ChunkQuerier(minT, maxT)
+ require.NoError(t, err)
+ defer chunkQuerier.Close()
+
+ chks := queryChunks(t, chunkQuerier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1"))
+ require.NotNil(t, chks[series1.String()])
+ require.Len(t, chks, 1)
+ var gotChunkSamples []chunks.Sample
+ for _, chunk := range chks[series1.String()] {
+ it := chunk.Chunk.Iterator(nil)
+ smpls, err := storage.ExpandSamples(it, newSample)
+ require.NoError(t, err)
+ gotChunkSamples = append(gotChunkSamples, smpls...)
+ require.NoError(t, it.Err())
+ }
+ requireEqualSamples(t, series1.String(), expSamples, gotChunkSamples, requireEqualSamplesIgnoreCounterResets)
+ }
+
+ var expSamples []chunks.Sample
+
+ // Append samples with different encoding types and then commit them at once.
+ app := db.AppenderV2(context.Background())
+
+ for i := 100; i < 105; i++ {
+ s := addSample(app, int64(i), chunkenc.ValFloat)
+ expSamples = append(expSamples, s)
+ }
+ for i := 110; i < 120; i++ {
+ s := addSample(app, int64(i), chunkenc.ValHistogram)
+ expSamples = append(expSamples, s)
+ }
+ for i := 120; i < 130; i++ {
+ s := addSample(app, int64(i), chunkenc.ValFloatHistogram)
+ expSamples = append(expSamples, s)
+ }
+ for i := 140; i < 150; i++ {
+ s := addSample(app, int64(i), chunkenc.ValFloatHistogram)
+ expSamples = append(expSamples, s)
+ }
+ // These samples will be marked as out-of-order.
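+	// (Within a single commit the samples are applied in append order, so 130-134 land after the already-appended 140-149.)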
+ for i := 130; i < 135; i++ {
+ s := addSample(app, int64(i), chunkenc.ValFloat)
+ expSamples = append(expSamples, s)
+ }
+
+ require.NoError(t, app.Commit())
+
+ sort.Slice(expSamples, func(i, j int) bool {
+ return expSamples[i].T() < expSamples[j].T()
+ })
+
+ // oooCount = 5 for the samples 130 to 134.
+ verifySamples(100, 150, expSamples, 5)
+
+ // Append and commit some in-order histograms by themselves.
+ app = db.AppenderV2(context.Background())
+ for i := 150; i < 160; i++ {
+ s := addSample(app, int64(i), chunkenc.ValHistogram)
+ expSamples = append(expSamples, s)
+ }
+ require.NoError(t, app.Commit())
+
+ // oooCount remains at 5.
+ verifySamples(100, 160, expSamples, 5)
+
+ // Append and commit samples for all encoding types. This time all samples will be treated as OOO because samples
+ // with newer timestamps have already been committed.
+ app = db.AppenderV2(context.Background())
+ for i := 50; i < 55; i++ {
+ s := addSample(app, int64(i), chunkenc.ValFloat)
+ expSamples = append(expSamples, s)
+ }
+ for i := 60; i < 70; i++ {
+ s := addSample(app, int64(i), chunkenc.ValHistogram)
+ expSamples = append(expSamples, s)
+ }
+ for i := 70; i < 75; i++ {
+ s := addSample(app, int64(i), chunkenc.ValFloat)
+ expSamples = append(expSamples, s)
+ }
+ for i := 80; i < 90; i++ {
+ s := addSample(app, int64(i), chunkenc.ValFloatHistogram)
+ expSamples = append(expSamples, s)
+ }
+ require.NoError(t, app.Commit())
+
+ // Sort samples again because OOO samples have been added.
+ sort.Slice(expSamples, func(i, j int) bool {
+ return expSamples[i].T() < expSamples[j].T()
+ })
+
+ // oooCount = 35 as we've added 30 more OOO samples.
+ verifySamples(50, 160, expSamples, 35)
+}
+
+// TODO(codesome): test more samples incoming once compaction has started, to verify that new samples appended
+// after the start are not included in this compaction.
+func TestOOOCompaction_AppendV2(t *testing.T) {
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testOOOCompactionAppenderV2(t, scenario, false)
+ })
+ t.Run(name+"+extra", func(t *testing.T) {
+ testOOOCompactionAppenderV2(t, scenario, true)
+ })
+ }
+}
+
+func testOOOCompactionAppenderV2(t *testing.T, scenario sampleTypeScenario, addExtraSamples bool) {
+ ctx := context.Background()
+
+ opts := DefaultOptions()
+ opts.OutOfOrderCapMax = 30
+ opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions() // We want to manually call it.
+
+ series1 := labels.FromStrings("foo", "bar1")
+ series2 := labels.FromStrings("foo", "bar2")
+
+ addSample := func(fromMins, toMins int64) {
+ app := db.AppenderV2(context.Background())
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ _, _, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), series1, ts, ts)
+ require.NoError(t, err)
+ _, _, err = scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), series2, ts, 2*ts)
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+	// Add in-order samples.
+ addSample(250, 300)
+
+ // Verify that the in-memory ooo chunk is empty.
+ checkEmptyOOOChunk := func(lbls labels.Labels) {
+ ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.Nil(t, ms.ooo)
+ }
+ checkEmptyOOOChunk(series1)
+ checkEmptyOOOChunk(series2)
+
+	// Add ooo samples that create multiple chunks.
+ // 90 to 300 spans across 3 block ranges: [0, 120), [120, 240), [240, 360)
+ addSample(90, 300)
+	// Adding the same samples again to create overlapping chunks.
+ // Since the active chunk won't start at 90 again, all the new
+ // chunks will have different time ranges than the previous chunks.
+ addSample(90, 300)
+
+ var highest int64 = 300
+
+ verifyDBSamples := func() {
+ var series1Samples, series2Samples []chunks.Sample
+ for _, r := range [][2]int64{{90, 119}, {120, 239}, {240, highest}} {
+ fromMins, toMins := r[0], r[1]
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
+ series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
+ }
+ }
+ expRes := map[string][]chunks.Sample{
+ series1.String(): series1Samples,
+ series2.String(): series2Samples,
+ }
+
+ q, err := db.Querier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+
+ actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ requireEqualSeries(t, expRes, actRes, true)
+ }
+
+ verifyDBSamples() // Before any compaction.
+
+ // Verify that the in-memory ooo chunk is not empty.
+ checkNonEmptyOOOChunk := func(lbls labels.Labels) {
+ ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
+ require.Len(t, ms.ooo.oooMmappedChunks, 13) // 7 original, 6 duplicate.
+ }
+ checkNonEmptyOOOChunk(series1)
+ checkNonEmptyOOOChunk(series2)
+
+ // No blocks before compaction.
+ require.Empty(t, db.Blocks())
+
+ // There is a 0th WBL file.
+	require.NoError(t, db.head.wbl.Sync()) // Syncing to make sure the WBL is flushed on Windows.
+ files, err := os.ReadDir(db.head.wbl.Dir())
+ require.NoError(t, err)
+ require.Len(t, files, 1)
+ require.Equal(t, "00000000", files[0].Name())
+ f, err := files[0].Info()
+ require.NoError(t, err)
+ require.Greater(t, f.Size(), int64(100))
+
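+	// compactOOOHeadTestingCallback is a test hook invoked while the OOO head is being compacted; use it
+	// to simulate samples arriving mid-compaction.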
+ if addExtraSamples {
+ compactOOOHeadTestingCallback = func() {
+ addSample(90, 120) // Back in time, to generate a new OOO chunk.
+ addSample(300, 330) // Now some samples after the previous highest timestamp.
+ addSample(300, 330) // Repeat to generate an OOO chunk at these timestamps.
+ }
+ highest = 330
+ }
+
+ // OOO compaction happens here.
+ require.NoError(t, db.CompactOOOHead(ctx))
+
+ // 3 blocks exist now. [0, 120), [120, 240), [240, 360)
+ require.Len(t, db.Blocks(), 3)
+
+ verifyDBSamples() // Blocks created out of OOO head now.
+
+	// The 0th WBL file will be deleted and the 1st will be the only one present.
+ files, err = os.ReadDir(db.head.wbl.Dir())
+ require.NoError(t, err)
+ require.Len(t, files, 1)
+ require.Equal(t, "00000001", files[0].Name())
+ f, err = files[0].Info()
+ require.NoError(t, err)
+
+ if !addExtraSamples {
+ require.Equal(t, int64(0), f.Size())
+ // OOO stuff should not be present in the Head now.
+ checkEmptyOOOChunk(series1)
+ checkEmptyOOOChunk(series2)
+ }
+
+ verifySamples := func(block *Block, fromMins, toMins int64) {
+ series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
+ series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
+ series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
+ }
+ expRes := map[string][]chunks.Sample{
+ series1.String(): series1Samples,
+ series2.String(): series2Samples,
+ }
+
+ q, err := NewBlockQuerier(block, math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+
+ actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ requireEqualSeries(t, expRes, actRes, true)
+ }
+
+ // Checking for expected data in the blocks.
+ verifySamples(db.Blocks()[0], 90, 119)
+ verifySamples(db.Blocks()[1], 120, 239)
+ verifySamples(db.Blocks()[2], 240, 299)
+
+ // There should be a single m-map file.
+ mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot)
+ files, err = os.ReadDir(mmapDir)
+ require.NoError(t, err)
+ require.Len(t, files, 1)
+
+ // Compact the in-order head and expect another block.
+ // Since this is a forced compaction, this block is not aligned with 2h.
+ err = db.CompactHead(NewRangeHead(db.head, 250*time.Minute.Milliseconds(), 350*time.Minute.Milliseconds()))
+ require.NoError(t, err)
+ require.Len(t, db.Blocks(), 4) // [0, 120), [120, 240), [240, 360), [250, 351)
+ verifySamples(db.Blocks()[3], 250, highest)
+
+ verifyDBSamples() // Blocks created out of normal and OOO head now. But not merged.
+
+	// The compaction also clears out the old m-map files, including
+	// the file that has ooo chunks.
+ files, err = os.ReadDir(mmapDir)
+ require.NoError(t, err)
+ require.Len(t, files, 1)
+ require.Equal(t, "000001", files[0].Name())
+
+	// This will merge overlapping blocks.
+ require.NoError(t, db.Compact(ctx))
+
+ require.Len(t, db.Blocks(), 3) // [0, 120), [120, 240), [240, 360)
+ verifySamples(db.Blocks()[0], 90, 119)
+ verifySamples(db.Blocks()[1], 120, 239)
+ verifySamples(db.Blocks()[2], 240, highest) // Merged block.
+
+ verifyDBSamples() // Final state. Blocks from normal and OOO head are merged.
+}
+
+// TestOOOCompactionWithNormalCompaction_AppendV2 tests that OOO compaction is performed
+// when the normal head's compaction is done.
+func TestOOOCompactionWithNormalCompaction_AppendV2(t *testing.T) {
+ t.Parallel()
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testOOOCompactionWithNormalCompactionAppendV2(t, scenario)
+ })
+ }
+}
+
+func testOOOCompactionWithNormalCompactionAppendV2(t *testing.T, scenario sampleTypeScenario) {
+ t.Parallel()
+ ctx := context.Background()
+
+ opts := DefaultOptions()
+ opts.OutOfOrderCapMax = 30
+ opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
+
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions() // We want to manually call it.
+
+ series1 := labels.FromStrings("foo", "bar1")
+ series2 := labels.FromStrings("foo", "bar2")
+
+ addSamples := func(fromMins, toMins int64) {
+ app := db.AppenderV2(context.Background())
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ _, _, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), series1, ts, ts)
+ require.NoError(t, err)
+ _, _, err = scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), series2, ts, 2*ts)
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+	// Add in-order samples.
+ addSamples(250, 350)
+
+	// Add ooo samples that will result in a single block.
+ addSamples(90, 110)
+
+ // Checking that ooo chunk is not empty.
+ for _, lbls := range []labels.Labels{series1, series2} {
+ ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
+ }
+
+ // If the normal Head is not compacted, the OOO head compaction does not take place.
+ require.NoError(t, db.Compact(ctx))
+ require.Empty(t, db.Blocks())
+
+	// Add more in-order samples in the future to trigger the compaction.
+ addSamples(400, 450)
+
+ // No blocks before compaction.
+ require.Empty(t, db.Blocks())
+
+ // Compacts normal and OOO head.
+ require.NoError(t, db.Compact(ctx))
+
+ // 2 blocks exist now. [0, 120), [250, 360)
+ require.Len(t, db.Blocks(), 2)
+ require.Equal(t, int64(0), db.Blocks()[0].MinTime())
+ require.Equal(t, 120*time.Minute.Milliseconds(), db.Blocks()[0].MaxTime())
+ require.Equal(t, 250*time.Minute.Milliseconds(), db.Blocks()[1].MinTime())
+ require.Equal(t, 360*time.Minute.Milliseconds(), db.Blocks()[1].MaxTime())
+
+ // Checking that ooo chunk is empty.
+ for _, lbls := range []labels.Labels{series1, series2} {
+ ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.Nil(t, ms.ooo)
+ }
+
+ verifySamples := func(block *Block, fromMins, toMins int64) {
+ series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
+ series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
+ series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
+ }
+ expRes := map[string][]chunks.Sample{
+ series1.String(): series1Samples,
+ series2.String(): series2Samples,
+ }
+
+ q, err := NewBlockQuerier(block, math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+
+ actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ requireEqualSeries(t, expRes, actRes, true)
+ }
+
+ // Checking for expected data in the blocks.
+ verifySamples(db.Blocks()[0], 90, 110)
+ verifySamples(db.Blocks()[1], 250, 350)
+}
+
+// TestOOOCompactionWithDisabledWriteLog_AppendV2 tests the scenario where the TSDB is
+// configured without a WAL and WBL but is still able to compact both the in-order
+// and out-of-order head.
+func TestOOOCompactionWithDisabledWriteLog_AppendV2(t *testing.T) {
+ t.Parallel()
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+			testOOOCompactionWithDisabledWriteLogAppendV2(t, scenario)
+ })
+ }
+}
+
+func testOOOCompactionWithDisabledWriteLogAppendV2(t *testing.T, scenario sampleTypeScenario) {
+ t.Parallel()
+ ctx := context.Background()
+
+ opts := DefaultOptions()
+ opts.OutOfOrderCapMax = 30
+ opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
+ opts.WALSegmentSize = -1 // disabled WAL and WBL
+
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions() // We want to manually call it.
+
+ series1 := labels.FromStrings("foo", "bar1")
+ series2 := labels.FromStrings("foo", "bar2")
+
+ addSamples := func(fromMins, toMins int64) {
+ app := db.AppenderV2(context.Background())
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ _, _, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), series1, ts, ts)
+ require.NoError(t, err)
+ _, _, err = scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), series2, ts, 2*ts)
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+	// Add in-order samples.
+ addSamples(250, 350)
+
+	// Add ooo samples that will result in a single block.
+ addSamples(90, 110)
+
+ // Checking that ooo chunk is not empty.
+ for _, lbls := range []labels.Labels{series1, series2} {
+ ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
+ }
+
+ // If the normal Head is not compacted, the OOO head compaction does not take place.
+ require.NoError(t, db.Compact(ctx))
+ require.Empty(t, db.Blocks())
+
+	// Add more in-order samples in the future to trigger the compaction.
+ addSamples(400, 450)
+
+ // No blocks before compaction.
+ require.Empty(t, db.Blocks())
+
+ // Compacts normal and OOO head.
+ require.NoError(t, db.Compact(ctx))
+
+ // 2 blocks exist now. [0, 120), [250, 360)
+ require.Len(t, db.Blocks(), 2)
+ require.Equal(t, int64(0), db.Blocks()[0].MinTime())
+ require.Equal(t, 120*time.Minute.Milliseconds(), db.Blocks()[0].MaxTime())
+ require.Equal(t, 250*time.Minute.Milliseconds(), db.Blocks()[1].MinTime())
+ require.Equal(t, 360*time.Minute.Milliseconds(), db.Blocks()[1].MaxTime())
+
+ // Checking that ooo chunk is empty.
+ for _, lbls := range []labels.Labels{series1, series2} {
+ ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.Nil(t, ms.ooo)
+ }
+
+ verifySamples := func(block *Block, fromMins, toMins int64) {
+ series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
+ series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
+ series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
+ }
+ expRes := map[string][]chunks.Sample{
+ series1.String(): series1Samples,
+ series2.String(): series2Samples,
+ }
+
+ q, err := NewBlockQuerier(block, math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+
+ actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ requireEqualSeries(t, expRes, actRes, true)
+ }
+
+ // Checking for expected data in the blocks.
+ verifySamples(db.Blocks()[0], 90, 110)
+ verifySamples(db.Blocks()[1], 250, 350)
+}
+
+// TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL_AppendV2 tests the scenario where the WBL goes
+// missing after a restart while snapshots were enabled, but the query still returns the right
+// data from the mmap chunks.
+func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL_AppendV2(t *testing.T) {
+ t.Parallel()
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testOOOQueryAfterRestartWithSnapshotAndRemovedWBLAppendV2(t, scenario)
+ })
+ }
+}
+
+func testOOOQueryAfterRestartWithSnapshotAndRemovedWBLAppendV2(t *testing.T, scenario sampleTypeScenario) {
+ ctx := context.Background()
+
+ opts := DefaultOptions()
+ opts.OutOfOrderCapMax = 10
+ opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
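+	// With memory snapshots enabled, the head is restored from the snapshot and the m-mapped chunks on
+	// restart instead of a full WAL replay.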
+ opts.EnableMemorySnapshotOnShutdown = true
+
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions() // We want to manually call it.
+
+ series1 := labels.FromStrings("foo", "bar1")
+ series2 := labels.FromStrings("foo", "bar2")
+
+ addSamples := func(fromMins, toMins int64) {
+ app := db.AppenderV2(context.Background())
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ _, _, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), series1, ts, ts)
+ require.NoError(t, err)
+ _, _, err = scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), series2, ts, 2*ts)
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+	// Add in-order samples.
+ addSamples(250, 350)
+
+	// Add ooo samples that will result in a single block.
+ addSamples(90, 110) // The sample 110 will not be in m-map chunks.
+
+ // Checking that there are some ooo m-map chunks.
+ for _, lbls := range []labels.Labels{series1, series2} {
+ ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.Len(t, ms.ooo.oooMmappedChunks, 2)
+ require.NotNil(t, ms.ooo.oooHeadChunk)
+ }
+
+ // Restart DB.
+ require.NoError(t, db.Close())
+
+	// Simulate the WBL going missing.
+ require.NoError(t, os.RemoveAll(path.Join(db.Dir(), "wbl")))
+
+ db = newTestDB(t, withDir(db.Dir()))
+ db.DisableCompactions() // We want to manually call it.
+
+ // Check ooo m-map chunks again.
+ for _, lbls := range []labels.Labels{series1, series2} {
+ ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.Len(t, ms.ooo.oooMmappedChunks, 2)
+ require.Equal(t, 109*time.Minute.Milliseconds(), ms.ooo.oooMmappedChunks[1].maxTime)
+ require.Nil(t, ms.ooo.oooHeadChunk) // Because of missing wbl.
+ }
+
+ verifySamples := func(fromMins, toMins int64) {
+ series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
+ series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
+ series2Samples = append(series2Samples, scenario.sampleFunc(ts, ts*2))
+ }
+ expRes := map[string][]chunks.Sample{
+ series1.String(): series1Samples,
+ series2.String(): series2Samples,
+ }
+
+ q, err := db.Querier(fromMins*time.Minute.Milliseconds(), toMins*time.Minute.Milliseconds())
+ require.NoError(t, err)
+
+ actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ requireEqualSeries(t, expRes, actRes, true)
+ }
+
+ // Checking for expected ooo data from mmap chunks.
+ verifySamples(90, 109)
+
+ // Compaction should also work fine.
+ require.Empty(t, db.Blocks())
+ require.NoError(t, db.CompactOOOHead(ctx))
+ require.Len(t, db.Blocks(), 1) // One block from OOO data.
+ require.Equal(t, int64(0), db.Blocks()[0].MinTime())
+ require.Equal(t, 120*time.Minute.Milliseconds(), db.Blocks()[0].MaxTime())
+
+ // Checking that ooo chunk is empty in Head.
+ for _, lbls := range []labels.Labels{series1, series2} {
+ ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.Nil(t, ms.ooo)
+ }
+
+ verifySamples(90, 109)
+}
+
+func TestQuerierOOOQuery_AppendV2(t *testing.T) {
+ scenarios := map[string]struct {
+ appendFunc func(app storage.AppenderV2, ts int64, counterReset bool) (storage.SeriesRef, error)
+ sampleFunc func(ts int64) chunks.Sample
+ }{
+ "float": {
+ appendFunc: func(app storage.AppenderV2, ts int64, _ bool) (storage.SeriesRef, error) {
+ return app.Append(0, labels.FromStrings("foo", "bar1"), 0, ts, float64(ts), nil, nil, storage.AOptions{})
+ },
+ sampleFunc: func(ts int64) chunks.Sample {
+ return sample{t: ts, f: float64(ts)}
+ },
+ },
+ "integer histogram": {
+ appendFunc: func(app storage.AppenderV2, ts int64, counterReset bool) (storage.SeriesRef, error) {
+ h := tsdbutil.GenerateTestHistogram(ts)
+ if counterReset {
+ h.CounterResetHint = histogram.CounterReset
+ }
+ return app.Append(0, labels.FromStrings("foo", "bar1"), 0, ts, 0, h, nil, storage.AOptions{})
+ },
+ sampleFunc: func(ts int64) chunks.Sample {
+ return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)}
+ },
+ },
+ "float histogram": {
+ appendFunc: func(app storage.AppenderV2, ts int64, counterReset bool) (storage.SeriesRef, error) {
+ fh := tsdbutil.GenerateTestFloatHistogram(ts)
+ if counterReset {
+ fh.CounterResetHint = histogram.CounterReset
+ }
+ return app.Append(0, labels.FromStrings("foo", "bar1"), 0, ts, 0, nil, fh, storage.AOptions{})
+ },
+ sampleFunc: func(ts int64) chunks.Sample {
+ return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(ts)}
+ },
+ },
+ "integer histogram counter resets": {
+ // Adding counter reset to all histograms means each histogram will have its own chunk.
+ appendFunc: func(app storage.AppenderV2, ts int64, _ bool) (storage.SeriesRef, error) {
+ h := tsdbutil.GenerateTestHistogram(ts)
+ h.CounterResetHint = histogram.CounterReset // For this scenario, ignore the counterReset argument.
+ return app.Append(0, labels.FromStrings("foo", "bar1"), 0, ts, 0, h, nil, storage.AOptions{})
+ },
+ sampleFunc: func(ts int64) chunks.Sample {
+ return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)}
+ },
+ },
+ }
+
+ for name, scenario := range scenarios {
+ t.Run(name, func(t *testing.T) {
+ testQuerierOOOQueryAppendV2(t, scenario.appendFunc, scenario.sampleFunc)
+ })
+ }
+}
+
+func testQuerierOOOQueryAppendV2(t *testing.T,
+ appendFunc func(app storage.AppenderV2, ts int64, counterReset bool) (storage.SeriesRef, error),
+ sampleFunc func(ts int64) chunks.Sample,
+) {
+ opts := DefaultOptions()
+ opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds()
+
+ series1 := labels.FromStrings("foo", "bar1")
+
+ type filterFunc func(t int64) bool
+ defaultFilterFunc := func(int64) bool { return true }
+
+ minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
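+	// addSample appends one sample per minute over [fromMins, toMins] (despite the names, the arguments are
+	// millisecond timestamps) and collects the samples falling inside the query range as expected samples.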
+ addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample, filter filterFunc, counterReset bool) ([]chunks.Sample, int) {
+ app := db.AppenderV2(context.Background())
+ totalAppended := 0
+ for m := fromMins; m <= toMins; m += time.Minute.Milliseconds() {
+ if !filter(m / time.Minute.Milliseconds()) {
+ continue
+ }
+ _, err := appendFunc(app, m, counterReset)
+ if m >= queryMinT && m <= queryMaxT {
+ expSamples = append(expSamples, sampleFunc(m))
+ }
+ require.NoError(t, err)
+ totalAppended++
+ }
+ require.NoError(t, app.Commit())
+		require.Positive(t, totalAppended) // Sanity check that filter is not too zealous.
+ return expSamples, totalAppended
+ }
+
+ type sampleBatch struct {
+ minT int64
+ maxT int64
+ filter filterFunc
+ counterReset bool
+ isOOO bool
+ }
+
+ tests := []struct {
+ name string
+ oooCap int64
+ queryMinT int64
+ queryMaxT int64
+ batches []sampleBatch
+ }{
+ {
+ name: "query interval covering ooomint and inordermaxt returns all ingested samples",
+ oooCap: 30,
+ queryMinT: minutes(0),
+ queryMaxT: minutes(200),
+ batches: []sampleBatch{
+ {
+ minT: minutes(100),
+ maxT: minutes(200),
+ filter: defaultFilterFunc,
+ },
+ {
+ minT: minutes(0),
+ maxT: minutes(99),
+ filter: defaultFilterFunc,
+ isOOO: true,
+ },
+ },
+ },
+ {
+ name: "partial query interval returns only samples within interval",
+ oooCap: 30,
+ queryMinT: minutes(20),
+ queryMaxT: minutes(180),
+ batches: []sampleBatch{
+ {
+ minT: minutes(100),
+ maxT: minutes(200),
+ filter: defaultFilterFunc,
+ },
+ {
+ minT: minutes(0),
+ maxT: minutes(99),
+ filter: defaultFilterFunc,
+ isOOO: true,
+ },
+ },
+ },
+ {
+			name:      "alternating OOO batches", // In order: 100-200 normal. Out of order first pass: 0, 2, 4, ... 98 (no counter reset), second pass: 1, 3, 5, ... 99 (with counter reset).
+ queryMinT: minutes(0),
+ queryMaxT: minutes(200),
+ batches: []sampleBatch{
+ {
+ minT: minutes(100),
+ maxT: minutes(200),
+ filter: defaultFilterFunc,
+ },
+ {
+ minT: minutes(0),
+ maxT: minutes(99),
+ filter: func(t int64) bool { return t%2 == 0 },
+ isOOO: true,
+ },
+ {
+ minT: minutes(0),
+ maxT: minutes(99),
+ filter: func(t int64) bool { return t%2 == 1 },
+ counterReset: true,
+ isOOO: true,
+ },
+ },
+ },
+ {
+ name: "query overlapping inorder and ooo samples returns all ingested samples at the end of the interval",
+ oooCap: 30,
+ queryMinT: minutes(0),
+ queryMaxT: minutes(200),
+ batches: []sampleBatch{
+ {
+ minT: minutes(100),
+ maxT: minutes(200),
+ filter: func(t int64) bool { return t%2 == 0 },
+ isOOO: false,
+ },
+ {
+ minT: minutes(170),
+ maxT: minutes(180),
+ filter: func(t int64) bool { return t%2 == 1 },
+ isOOO: true,
+ },
+ },
+ },
+ {
+ name: "query overlapping inorder and ooo in-memory samples returns all ingested samples at the beginning of the interval",
+ oooCap: 30,
+ queryMinT: minutes(0),
+ queryMaxT: minutes(200),
+ batches: []sampleBatch{
+ {
+ minT: minutes(100),
+ maxT: minutes(200),
+ filter: func(t int64) bool { return t%2 == 0 },
+ isOOO: false,
+ },
+ {
+ minT: minutes(100),
+ maxT: minutes(110),
+ filter: func(t int64) bool { return t%2 == 1 },
+ isOOO: true,
+ },
+ },
+ },
+ {
+ name: "query inorder contain ooo mmapped samples returns all ingested samples at the beginning of the interval",
+ oooCap: 5,
+ queryMinT: minutes(0),
+ queryMaxT: minutes(200),
+ batches: []sampleBatch{
+ {
+ minT: minutes(100),
+ maxT: minutes(200),
+ filter: func(t int64) bool { return t%2 == 0 },
+ isOOO: false,
+ },
+ {
+ minT: minutes(101),
+ maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmapped OOO chunk and fit inside the first in-order mmapped chunk.
+ filter: func(t int64) bool { return t%2 == 1 },
+ isOOO: true,
+ },
+ {
+ minT: minutes(191),
+					maxT:   minutes(193), // Append some more OOO samples to trigger mapping the OOO chunk.
+ filter: func(t int64) bool { return t%2 == 1 },
+ isOOO: true,
+ },
+ },
+ },
+ {
+ name: "query overlapping inorder and ooo mmapped samples returns all ingested samples at the beginning of the interval",
+ oooCap: 30,
+ queryMinT: minutes(0),
+ queryMaxT: minutes(200),
+ batches: []sampleBatch{
+ {
+ minT: minutes(100),
+ maxT: minutes(200),
+ filter: func(t int64) bool { return t%2 == 0 },
+ isOOO: false,
+ },
+ {
+ minT: minutes(101),
+ maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmapped OOO chunk and overlap the first in-order mmapped chunk.
+ filter: func(t int64) bool { return t%2 == 1 },
+ isOOO: true,
+ },
+ {
+ minT: minutes(191),
+					maxT:   minutes(193), // Append some more OOO samples to trigger mapping the OOO chunk.
+ filter: func(t int64) bool { return t%2 == 1 },
+ isOOO: true,
+ },
+ },
+ },
+ }
+ for _, tc := range tests {
+ t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) {
+ opts.OutOfOrderCapMax = tc.oooCap
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions()
+
+ var expSamples []chunks.Sample
+ var oooSamples, appendedCount int
+
+ for _, batch := range tc.batches {
+ expSamples, appendedCount = addSample(db, batch.minT, batch.maxT, tc.queryMinT, tc.queryMaxT, expSamples, batch.filter, batch.counterReset)
+ if batch.isOOO {
+ oooSamples += appendedCount
+ }
+ }
+
+ sort.Slice(expSamples, func(i, j int) bool {
+ return expSamples[i].T() < expSamples[j].T()
+ })
+
+ querier, err := db.Querier(tc.queryMinT, tc.queryMaxT)
+ require.NoError(t, err)
+ defer querier.Close()
+
+ seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1"))
+ gotSamples := seriesSet[series1.String()]
+ require.NotNil(t, gotSamples)
+ require.Len(t, seriesSet, 1)
+ requireEqualSamples(t, series1.String(), expSamples, gotSamples, requireEqualSamplesIgnoreCounterResets)
+ requireEqualOOOSamples(t, oooSamples, db)
+ })
+ }
+}
+
+func TestChunkQuerierOOOQuery_AppendV2(t *testing.T) {
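+	// nBucketHistogram builds a histogram with n positive buckets; the growing bucket count across samples
+	// is what forces chunk recoding in the "with recode" scenario below.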
+ nBucketHistogram := func(n int64) *histogram.Histogram {
+ h := &histogram.Histogram{
+ Count: uint64(n),
+ Sum: float64(n),
+ }
+ if n == 0 {
+ h.PositiveSpans = []histogram.Span{}
+ h.PositiveBuckets = []int64{}
+ return h
+ }
+ h.PositiveSpans = []histogram.Span{{Offset: 0, Length: uint32(n)}}
+ h.PositiveBuckets = make([]int64, n)
+ h.PositiveBuckets[0] = 1
+ return h
+ }
+
+ scenarios := map[string]struct {
+ appendFunc func(app storage.AppenderV2, ts int64, counterReset bool) (storage.SeriesRef, error)
+ sampleFunc func(ts int64) chunks.Sample
+ checkInUseBucket bool
+ }{
+ "float": {
+ appendFunc: func(app storage.AppenderV2, ts int64, _ bool) (storage.SeriesRef, error) {
+ return app.Append(0, labels.FromStrings("foo", "bar1"), 0, ts, float64(ts), nil, nil, storage.AOptions{})
+ },
+ sampleFunc: func(ts int64) chunks.Sample {
+ return sample{t: ts, f: float64(ts)}
+ },
+ },
+ "integer histogram": {
+ appendFunc: func(app storage.AppenderV2, ts int64, counterReset bool) (storage.SeriesRef, error) {
+ h := tsdbutil.GenerateTestHistogram(ts)
+ if counterReset {
+ h.CounterResetHint = histogram.CounterReset
+ }
+ return app.Append(0, labels.FromStrings("foo", "bar1"), 0, ts, 0, h, nil, storage.AOptions{})
+ },
+ sampleFunc: func(ts int64) chunks.Sample {
+ return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)}
+ },
+ },
+ "float histogram": {
+ appendFunc: func(app storage.AppenderV2, ts int64, counterReset bool) (storage.SeriesRef, error) {
+ fh := tsdbutil.GenerateTestFloatHistogram(ts)
+ if counterReset {
+ fh.CounterResetHint = histogram.CounterReset
+ }
+ return app.Append(0, labels.FromStrings("foo", "bar1"), 0, ts, 0, nil, fh, storage.AOptions{})
+ },
+ sampleFunc: func(ts int64) chunks.Sample {
+ return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(ts)}
+ },
+ },
+ "integer histogram counter resets": {
+ // Adding counter reset to all histograms means each histogram will have its own chunk.
+ appendFunc: func(app storage.AppenderV2, ts int64, _ bool) (storage.SeriesRef, error) {
+ h := tsdbutil.GenerateTestHistogram(ts)
+ h.CounterResetHint = histogram.CounterReset // For this scenario, ignore the counterReset argument.
+ return app.Append(0, labels.FromStrings("foo", "bar1"), 0, ts, 0, h, nil, storage.AOptions{})
+ },
+ sampleFunc: func(ts int64) chunks.Sample {
+ return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)}
+ },
+ },
+ "integer histogram with recode": {
+			// Histograms have an increasing number of buckets, so their chunks are recoded.
+ appendFunc: func(app storage.AppenderV2, ts int64, _ bool) (storage.SeriesRef, error) {
+ n := ts / time.Minute.Milliseconds()
+ return app.Append(0, labels.FromStrings("foo", "bar1"), 0, ts, 0, nBucketHistogram(n), nil, storage.AOptions{})
+ },
+ sampleFunc: func(ts int64) chunks.Sample {
+ n := ts / time.Minute.Milliseconds()
+ return sample{t: ts, h: nBucketHistogram(n)}
+ },
+ // Only check in-use buckets for this scenario.
+ // Recoding adds empty buckets.
+ checkInUseBucket: true,
+ },
+ }
+ for name, scenario := range scenarios {
+ t.Run(name, func(t *testing.T) {
+ testChunkQuerierOOOQueryAppendV2(t, scenario.appendFunc, scenario.sampleFunc, scenario.checkInUseBucket)
+ })
+ }
+}
+
+func testChunkQuerierOOOQueryAppendV2(t *testing.T,
+ appendFunc func(app storage.AppenderV2, ts int64, counterReset bool) (storage.SeriesRef, error),
+ sampleFunc func(ts int64) chunks.Sample,
+ checkInUseBuckets bool,
+) {
+ opts := DefaultOptions()
+ opts.OutOfOrderCapMax = 30
+ opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds()
+
+ series1 := labels.FromStrings("foo", "bar1")
+
+ type filterFunc func(t int64) bool
+ defaultFilterFunc := func(int64) bool { return true }
+
+ minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
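+	// addSample appends one sample per minute over [fromMins, toMins] (despite the names, the arguments are
+	// millisecond timestamps) and collects the samples falling inside the query range as expected samples.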
+ addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample, filter filterFunc, counterReset bool) ([]chunks.Sample, int) {
+ app := db.AppenderV2(context.Background())
+ totalAppended := 0
+ for m := fromMins; m <= toMins; m += time.Minute.Milliseconds() {
+ if !filter(m / time.Minute.Milliseconds()) {
+ continue
+ }
+ _, err := appendFunc(app, m, counterReset)
+ if m >= queryMinT && m <= queryMaxT {
+ expSamples = append(expSamples, sampleFunc(m))
+ }
+ require.NoError(t, err)
+ totalAppended++
+ }
+ require.NoError(t, app.Commit())
+ require.Positive(t, totalAppended) // Sanity check that filter is not too zealous.
+ return expSamples, totalAppended
+ }
+
+ type sampleBatch struct {
+ minT int64
+ maxT int64
+ filter filterFunc
+ counterReset bool
+ isOOO bool
+ }
+
+ tests := []struct {
+ name string
+ oooCap int64
+ queryMinT int64
+ queryMaxT int64
+ batches []sampleBatch
+ }{
+ {
+ name: "query interval covering ooomint and inordermaxt returns all ingested samples",
+ oooCap: 30,
+ queryMinT: minutes(0),
+ queryMaxT: minutes(200),
+ batches: []sampleBatch{
+ {
+ minT: minutes(100),
+ maxT: minutes(200),
+ filter: defaultFilterFunc,
+ },
+ {
+ minT: minutes(0),
+ maxT: minutes(99),
+ filter: defaultFilterFunc,
+ isOOO: true,
+ },
+ },
+ },
+ {
+ name: "partial query interval returns only samples within interval",
+ oooCap: 30,
+ queryMinT: minutes(20),
+ queryMaxT: minutes(180),
+ batches: []sampleBatch{
+ {
+ minT: minutes(100),
+ maxT: minutes(200),
+ filter: defaultFilterFunc,
+ },
+ {
+ minT: minutes(0),
+ maxT: minutes(99),
+ filter: defaultFilterFunc,
+ isOOO: true,
+ },
+ },
+ },
+ {
+ name: "alternating OOO batches", // In order: 100-200 normal. Out-of-order first pass: 0, 2, 4, ... 98 (no counter reset); second pass: 1, 3, 5, ... 99 (with counter reset).
+ queryMinT: minutes(0),
+ queryMaxT: minutes(200),
+ batches: []sampleBatch{
+ {
+ minT: minutes(100),
+ maxT: minutes(200),
+ filter: defaultFilterFunc,
+ },
+ {
+ minT: minutes(0),
+ maxT: minutes(99),
+ filter: func(t int64) bool { return t%2 == 0 },
+ isOOO: true,
+ },
+ {
+ minT: minutes(0),
+ maxT: minutes(99),
+ filter: func(t int64) bool { return t%2 == 1 },
+ counterReset: true,
+ isOOO: true,
+ },
+ },
+ },
+ {
+ name: "query overlapping inorder and ooo samples returns all ingested samples at the end of the interval",
+ oooCap: 30,
+ queryMinT: minutes(0),
+ queryMaxT: minutes(200),
+ batches: []sampleBatch{
+ {
+ minT: minutes(100),
+ maxT: minutes(200),
+ filter: func(t int64) bool { return t%2 == 0 },
+ isOOO: false,
+ },
+ {
+ minT: minutes(170),
+ maxT: minutes(180),
+ filter: func(t int64) bool { return t%2 == 1 },
+ isOOO: true,
+ },
+ },
+ },
+ {
+ name: "query overlapping inorder and ooo in-memory samples returns all ingested samples at the beginning of the interval",
+ oooCap: 30,
+ queryMinT: minutes(0),
+ queryMaxT: minutes(200),
+ batches: []sampleBatch{
+ {
+ minT: minutes(100),
+ maxT: minutes(200),
+ filter: func(t int64) bool { return t%2 == 0 },
+ isOOO: false,
+ },
+ {
+ minT: minutes(100),
+ maxT: minutes(110),
+ filter: func(t int64) bool { return t%2 == 1 },
+ isOOO: true,
+ },
+ },
+ },
+ {
+ name: "query inorder contain ooo mmapped samples returns all ingested samples at the beginning of the interval",
+ oooCap: 5,
+ queryMinT: minutes(0),
+ queryMaxT: minutes(200),
+ batches: []sampleBatch{
+ {
+ minT: minutes(100),
+ maxT: minutes(200),
+ filter: func(t int64) bool { return t%2 == 0 },
+ isOOO: false,
+ },
+ {
+ minT: minutes(101),
+ maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmapped OOO chunk and fit inside the first in-order mmapped chunk.
+ filter: func(t int64) bool { return t%2 == 1 },
+ isOOO: true,
+ },
+ {
+ minT: minutes(191),
+ maxT: minutes(193), // Append some more OOO samples to trigger mapping the OOO chunk; the odd-minute timestamps avoid colliding with the even-minute in-order samples.
+ filter: func(t int64) bool { return t%2 == 1 },
+ isOOO: true,
+ },
+ },
+ },
+ {
+ name: "query overlapping inorder and ooo mmapped samples returns all ingested samples at the beginning of the interval",
+ oooCap: 30,
+ queryMinT: minutes(0),
+ queryMaxT: minutes(200),
+ batches: []sampleBatch{
+ {
+ minT: minutes(100),
+ maxT: minutes(200),
+ filter: func(t int64) bool { return t%2 == 0 },
+ isOOO: false,
+ },
+ {
+ minT: minutes(101),
+ maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmapped OOO chunk and overlap the first in-order mmapped chunk.
+ filter: func(t int64) bool { return t%2 == 1 },
+ isOOO: true,
+ },
+ {
+ minT: minutes(191),
+ maxT: minutes(193), // Append some more OOO samples to trigger mapping the OOO chunk; the odd-minute timestamps avoid colliding with the even-minute in-order samples.
+ filter: func(t int64) bool { return t%2 == 1 },
+ isOOO: true,
+ },
+ },
+ },
+ }
+ for _, tc := range tests {
+ t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) {
+ opts.OutOfOrderCapMax = tc.oooCap
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions()
+
+ var expSamples []chunks.Sample
+ var oooSamples, appendedCount int
+
+ for _, batch := range tc.batches {
+ expSamples, appendedCount = addSample(db, batch.minT, batch.maxT, tc.queryMinT, tc.queryMaxT, expSamples, batch.filter, batch.counterReset)
+ if batch.isOOO {
+ oooSamples += appendedCount
+ }
+ }
+
+ sort.Slice(expSamples, func(i, j int) bool {
+ return expSamples[i].T() < expSamples[j].T()
+ })
+
+ querier, err := db.ChunkQuerier(tc.queryMinT, tc.queryMaxT)
+ require.NoError(t, err)
+ defer querier.Close()
+
+ chks := queryChunks(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1"))
+ require.NotNil(t, chks[series1.String()])
+ require.Len(t, chks, 1)
+ requireEqualOOOSamples(t, oooSamples, db)
+ var gotSamples []chunks.Sample
+ for _, chunk := range chks[series1.String()] {
+ it := chunk.Chunk.Iterator(nil)
+ smpls, err := storage.ExpandSamples(it, newSample)
+ require.NoError(t, err)
+
+ // Verify that no sample is outside the chunk's time range.
+ for i, s := range smpls {
+ switch i {
+ case 0:
+ require.Equal(t, chunk.MinTime, s.T(), "first sample %v not at chunk min time %v", s, chunk.MinTime)
+ case len(smpls) - 1:
+ require.Equal(t, chunk.MaxTime, s.T(), "last sample %v not at chunk max time %v", s, chunk.MaxTime)
+ default:
+ require.GreaterOrEqual(t, s.T(), chunk.MinTime, "sample %v before chunk min time %v", s, chunk.MinTime)
+ require.LessOrEqual(t, s.T(), chunk.MaxTime, "sample %v after chunk max time %v", s, chunk.MaxTime)
+ }
+ }
+
+ gotSamples = append(gotSamples, smpls...)
+ require.NoError(t, it.Err())
+ }
+ if checkInUseBuckets {
+ requireEqualSamples(t, series1.String(), expSamples, gotSamples, requireEqualSamplesIgnoreCounterResets, requireEqualSamplesInUseBucketCompare)
+ } else {
+ requireEqualSamples(t, series1.String(), expSamples, gotSamples, requireEqualSamplesIgnoreCounterResets)
+ }
+ })
+ }
+}
+
+// TestOOONativeHistogramsWithCounterResets verifies the counter reset headers for in-order and out-of-order samples
+// upon ingestion. Note that when the counter reset(s) occur in OOO samples, the header is set to UnknownCounterReset
+// rather than CounterReset. This is because with OOO native histogram samples, it cannot be definitively
+// determined whether a counter reset occurred, since the samples are not consecutive, and another sample
+// could potentially come in that would change the status of the header. In this case, the UnknownCounterReset
+// headers would be re-checked at query time and updated as needed. However, this test is checking the counter
+// reset headers at the time of storage.
+func TestOOONativeHistogramsWithCounterResets_AppendV2(t *testing.T) {
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ if name == intHistogram || name == floatHistogram {
+ testOOONativeHistogramsWithCounterResetsAppendV2(t, scenario)
+ }
+ })
+ }
+}
+
+func testOOONativeHistogramsWithCounterResetsAppendV2(t *testing.T, scenario sampleTypeScenario) {
+ opts := DefaultOptions()
+ opts.OutOfOrderCapMax = 30
+ opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds()
+
+ type resetFunc func(v int64) bool
+ defaultResetFunc := func(int64) bool { return false }
+
+ lbls := labels.FromStrings("foo", "bar1")
+ minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
+
+ type sampleBatch struct {
+ from int64
+ until int64
+ shouldReset resetFunc
+ expCounterResetHints []histogram.CounterResetHint
+ }
+
+ tests := []struct {
+ name string
+ queryMin int64
+ queryMax int64
+ batches []sampleBatch
+ expectedSamples []chunks.Sample
+ }{
+ {
+ name: "Counter reset within in-order samples",
+ queryMin: minutes(40),
+ queryMax: minutes(55),
+ batches: []sampleBatch{
+ // In-order samples
+ {
+ from: 40,
+ until: 50,
+ shouldReset: func(v int64) bool {
+ return v == 45
+ },
+ expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset},
+ },
+ },
+ },
+ {
+ name: "Counter reset right at beginning of OOO samples",
+ queryMin: minutes(40),
+ queryMax: minutes(55),
+ batches: []sampleBatch{
+ // In-order samples
+ {
+ from: 40,
+ until: 45,
+ shouldReset: defaultResetFunc,
+ expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset},
+ },
+ {
+ from: 50,
+ until: 55,
+ shouldReset: defaultResetFunc,
+ expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset},
+ },
+ // OOO samples
+ {
+ from: 45,
+ until: 50,
+ shouldReset: func(v int64) bool {
+ return v == 45
+ },
+ expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset},
+ },
+ },
+ },
+ {
+ name: "Counter resets in both in-order and OOO samples",
+ queryMin: minutes(40),
+ queryMax: minutes(55),
+ batches: []sampleBatch{
+ // In-order samples
+ {
+ from: 40,
+ until: 45,
+ shouldReset: func(v int64) bool {
+ return v == 44
+ },
+ expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.UnknownCounterReset},
+ },
+ {
+ from: 50,
+ until: 55,
+ shouldReset: defaultResetFunc,
+ expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset},
+ },
+ // OOO samples
+ {
+ from: 45,
+ until: 50,
+ shouldReset: func(v int64) bool {
+ return v == 49
+ },
+ expCounterResetHints: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.NotCounterReset, histogram.UnknownCounterReset},
+ },
+ },
+ },
+ }
+ for _, tc := range tests {
+ t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) {
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions()
+
+ app := db.AppenderV2(context.Background())
+
+ expSamples := make(map[string][]chunks.Sample)
+
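+ // Within each batch, j is the counter value being appended; it starts at batch.from and restarts at 0 whenever shouldReset fires, simulating a counter reset.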
+ for _, batch := range tc.batches {
+ j := batch.from
+ smplIdx := 0
+ for i := batch.from; i < batch.until; i++ {
+ resetCount := batch.shouldReset(i)
+ if resetCount {
+ j = 0
+ }
+ _, s, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), lbls, minutes(i), j)
+ require.NoError(t, err)
+ if s.Type() == chunkenc.ValHistogram {
+ s.H().CounterResetHint = batch.expCounterResetHints[smplIdx]
+ } else if s.Type() == chunkenc.ValFloatHistogram {
+ s.FH().CounterResetHint = batch.expCounterResetHints[smplIdx]
+ }
+ expSamples[lbls.String()] = append(expSamples[lbls.String()], s)
+ j++
+ smplIdx++
+ }
+ }
+
+ require.NoError(t, app.Commit())
+
+ for k, v := range expSamples {
+ sort.Slice(v, func(i, j int) bool {
+ return v[i].T() < v[j].T()
+ })
+ expSamples[k] = v
+ }
+
+ querier, err := db.Querier(tc.queryMin, tc.queryMax)
+ require.NoError(t, err)
+ defer querier.Close()
+
+ seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1"))
+ require.NotNil(t, seriesSet[lbls.String()])
+ require.Len(t, seriesSet, 1)
+ requireEqualSeries(t, expSamples, seriesSet, false)
+ })
+ }
+}
+
+func TestOOOInterleavedImplicitCounterResets_AppendV2(t *testing.T) {
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testOOOInterleavedImplicitCounterResetsV2(t, name, scenario)
+ })
+ }
+}
+
+func testOOOInterleavedImplicitCounterResetsV2(t *testing.T, name string, scenario sampleTypeScenario) {
+ var appendFunc func(app storage.AppenderV2, ts, v int64) error
+
+ if scenario.sampleType != sampleMetricTypeHistogram {
+ return
+ }
+
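+ // Pick an appendFunc that encodes the integer counter value v as a single-bucket histogram of the scenario's type; gauge scenarios are skipped.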
+ switch name {
+ case intHistogram:
+ appendFunc = func(app storage.AppenderV2, ts, v int64) error {
+ h := &histogram.Histogram{
+ Count: uint64(v),
+ Sum: float64(v),
+ PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
+ PositiveBuckets: []int64{v},
+ }
+ _, err := app.Append(0, labels.FromStrings("foo", "bar1"), 0, ts, 0, h, nil, storage.AOptions{})
+ return err
+ }
+ case floatHistogram:
+ appendFunc = func(app storage.AppenderV2, ts, v int64) error {
+ fh := &histogram.FloatHistogram{
+ Count: float64(v),
+ Sum: float64(v),
+ PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
+ PositiveBuckets: []float64{float64(v)},
+ }
+ _, err := app.Append(0, labels.FromStrings("foo", "bar1"), 0, ts, 0, nil, fh, storage.AOptions{})
+ return err
+ }
+ case customBucketsIntHistogram:
+ appendFunc = func(app storage.AppenderV2, ts, v int64) error {
+ h := &histogram.Histogram{
+ Schema: -53,
+ Count: uint64(v),
+ Sum: float64(v),
+ PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
+ PositiveBuckets: []int64{v},
+ CustomValues: []float64{float64(1), float64(2), float64(3)},
+ }
+ _, err := app.Append(0, labels.FromStrings("foo", "bar1"), 0, ts, 0, h, nil, storage.AOptions{})
+ return err
+ }
+ case customBucketsFloatHistogram:
+ appendFunc = func(app storage.AppenderV2, ts, v int64) error {
+ fh := &histogram.FloatHistogram{
+ Schema: -53,
+ Count: float64(v),
+ Sum: float64(v),
+ PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
+ PositiveBuckets: []float64{float64(v)},
+ CustomValues: []float64{float64(1), float64(2), float64(3)},
+ }
+ _, err := app.Append(0, labels.FromStrings("foo", "bar1"), 0, ts, 0, nil, fh, storage.AOptions{})
+ return err
+ }
+ case gaugeIntHistogram, gaugeFloatHistogram:
+ return
+ }
+
+ // Not a sample; we're encoding an integer counter that we convert to a
+ // histogram with a single bucket.
+ type tsValue struct {
+ ts int64
+ v int64
+ }
+
+ type expectedTsValue struct {
+ ts int64
+ v int64
+ hint histogram.CounterResetHint
+ }
+
+ type expectedChunk struct {
+ hint histogram.CounterResetHint
+ size int
+ }
+
+ cases := map[string]struct {
+ samples []tsValue
+ oooCap int64
+ // The expected samples with counter reset.
+ expectedSamples []expectedTsValue
+ // The expected counter reset hint for each chunk.
+ expectedChunks []expectedChunk
+ }{
+ "counter reset in-order cleared by in-memory OOO chunk": {
+ samples: []tsValue{
+ {1, 40}, // New in In-order. I1.
+ {4, 30}, // In-order counter reset. I2.
+ {2, 40}, // New in OOO. O1.
+ {3, 10}, // OOO counter reset. O2.
+ },
+ oooCap: 30,
+ // Expect all to be set to UnknownCounterReset because we switch between
+ // in-order and out-of-order samples.
+ expectedSamples: []expectedTsValue{
+ {1, 40, histogram.UnknownCounterReset}, // I1.
+ {2, 40, histogram.UnknownCounterReset}, // O1.
+ {3, 10, histogram.UnknownCounterReset}, // O2.
+ {4, 30, histogram.UnknownCounterReset}, // I2. Counter reset cleared by iterator change.
+ },
+ expectedChunks: []expectedChunk{
+ {histogram.UnknownCounterReset, 1}, // I1.
+ {histogram.UnknownCounterReset, 1}, // O1.
+ {histogram.UnknownCounterReset, 1}, // O2.
+ {histogram.UnknownCounterReset, 1}, // I2.
+ },
+ },
+ "counter reset in OOO mmapped chunk cleared by in-memory ooo chunk": {
+ samples: []tsValue{
+ {8, 30}, // In-order, new chunk. I1.
+ {1, 10}, // OOO, new chunk (will be mmapped). MO1.
+ {2, 20}, // OOO, no reset (will be mmapped). MO1.
+ {3, 30}, // OOO, no reset (will be mmapped). MO1.
+ {5, 20}, // OOO, reset (will be mmapped). MO2.
+ {6, 10}, // OOO, reset (will be mmapped). MO3.
+ {7, 20}, // OOO, no reset (will be mmapped). MO3.
+ {4, 10}, // OOO, inserted into memory, triggers mmap. O1.
+ },
+ oooCap: 6,
+ expectedSamples: []expectedTsValue{
+ {1, 10, histogram.UnknownCounterReset}, // MO1.
+ {2, 20, histogram.NotCounterReset}, // MO1.
+ {3, 30, histogram.NotCounterReset}, // MO1.
+ {4, 10, histogram.UnknownCounterReset}, // O1. Counter reset cleared by iterator change.
+ {5, 20, histogram.UnknownCounterReset}, // MO2.
+ {6, 10, histogram.UnknownCounterReset}, // MO3.
+ {7, 20, histogram.NotCounterReset}, // MO3.
+ {8, 30, histogram.UnknownCounterReset}, // I1.
+ },
+ expectedChunks: []expectedChunk{
+ {histogram.UnknownCounterReset, 3}, // MO1.
+ {histogram.UnknownCounterReset, 1}, // O1.
+ {histogram.UnknownCounterReset, 1}, // MO2.
+ {histogram.UnknownCounterReset, 2}, // MO3.
+ {histogram.UnknownCounterReset, 1}, // I1.
+ },
+ },
+ "counter reset in OOO mmapped chunk cleared by another OOO mmapped chunk": {
+ samples: []tsValue{
+ {8, 100}, // In-order, new chunk. I1.
+ {1, 50}, // OOO, new chunk (will be mmapped). MO1.
+ {5, 40}, // OOO, reset (will be mmapped). MO2.
+ {6, 50}, // OOO, no reset (will be mmapped). MO2.
+ {2, 10}, // OOO, new chunk no reset (will be mmapped). MO3.
+ {3, 20}, // OOO, no reset (will be mmapped). MO3.
+ {4, 30}, // OOO, no reset (will be mmapped). MO3.
+ {7, 60}, // OOO, no reset in memory. O1.
+ },
+ oooCap: 3,
+ expectedSamples: []expectedTsValue{
+ {1, 50, histogram.UnknownCounterReset}, // MO1.
+ {2, 10, histogram.UnknownCounterReset}, // MO3.
+ {3, 20, histogram.NotCounterReset}, // MO3.
+ {4, 30, histogram.NotCounterReset}, // MO3.
+ {5, 40, histogram.UnknownCounterReset}, // MO2.
+ {6, 50, histogram.NotCounterReset}, // MO2.
+ {7, 60, histogram.UnknownCounterReset}, // O1.
+ {8, 100, histogram.UnknownCounterReset}, // I1.
+ },
+ expectedChunks: []expectedChunk{
+ {histogram.UnknownCounterReset, 1}, // MO1.
+ {histogram.UnknownCounterReset, 3}, // MO3.
+ {histogram.UnknownCounterReset, 2}, // MO2.
+ {histogram.UnknownCounterReset, 1}, // O1.
+ {histogram.UnknownCounterReset, 1}, // I1.
+ },
+ },
+ }
+
+ for tcName, tc := range cases {
+ t.Run(tcName, func(t *testing.T) {
+ opts := DefaultOptions()
+ opts.OutOfOrderCapMax = tc.oooCap
+ opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds()
+
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions()
+
+ app := db.AppenderV2(context.Background())
+ for _, s := range tc.samples {
+ require.NoError(t, appendFunc(app, s.ts, s.v))
+ }
+ require.NoError(t, app.Commit())
+
+ t.Run("querier", func(t *testing.T) {
+ querier, err := db.Querier(0, 10)
+ require.NoError(t, err)
+ defer querier.Close()
+
+ seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1"))
+ require.Len(t, seriesSet, 1)
+ samples, ok := seriesSet["{foo=\"bar1\"}"]
+ require.True(t, ok)
+ require.Len(t, samples, len(tc.samples))
+ require.Len(t, samples, len(tc.expectedSamples))
+
+ // The counter reset hint is cleared (UnknownCounterReset) whenever the iterator switches
+ // between in-order and out-of-order chunks; within a chunk, subsequent samples keep NotCounterReset.
+ for i, s := range samples {
+ switch name {
+ case intHistogram:
+ require.Equal(t, tc.expectedSamples[i].hint, s.H().CounterResetHint, "sample %d", i)
+ require.Equal(t, tc.expectedSamples[i].v, int64(s.H().Count), "sample %d", i)
+ case floatHistogram:
+ require.Equal(t, tc.expectedSamples[i].hint, s.FH().CounterResetHint, "sample %d", i)
+ require.Equal(t, tc.expectedSamples[i].v, int64(s.FH().Count), "sample %d", i)
+ case customBucketsIntHistogram:
+ require.Equal(t, tc.expectedSamples[i].hint, s.H().CounterResetHint, "sample %d", i)
+ require.Equal(t, tc.expectedSamples[i].v, int64(s.H().Count), "sample %d", i)
+ case customBucketsFloatHistogram:
+ require.Equal(t, tc.expectedSamples[i].hint, s.FH().CounterResetHint, "sample %d", i)
+ require.Equal(t, tc.expectedSamples[i].v, int64(s.FH().Count), "sample %d", i)
+ default:
+ t.Fatalf("unexpected sample type %s", name)
+ }
+ }
+ })
+
+ t.Run("chunk-querier", func(t *testing.T) {
+ querier, err := db.ChunkQuerier(0, 10)
+ require.NoError(t, err)
+ defer querier.Close()
+
+ chunkSet := queryAndExpandChunks(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1"))
+ require.Len(t, chunkSet, 1)
+ chunks, ok := chunkSet["{foo=\"bar1\"}"]
+ require.True(t, ok)
+ require.Len(t, chunks, len(tc.expectedChunks))
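+ // idx walks tc.expectedSamples across all chunks, since samples are verified chunk by chunk.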
+ idx := 0
+ for i, samples := range chunks {
+ require.Len(t, samples, tc.expectedChunks[i].size)
+ for j, s := range samples {
+ expectHint := tc.expectedChunks[i].hint
+ if j > 0 {
+ expectHint = histogram.NotCounterReset
+ }
+ switch name {
+ case intHistogram:
+ require.Equal(t, expectHint, s.H().CounterResetHint, "sample %d", idx)
+ require.Equal(t, tc.expectedSamples[idx].v, int64(s.H().Count), "sample %d", idx)
+ case floatHistogram:
+ require.Equal(t, expectHint, s.FH().CounterResetHint, "sample %d", idx)
+ require.Equal(t, tc.expectedSamples[idx].v, int64(s.FH().Count), "sample %d", idx)
+ case customBucketsIntHistogram:
+ require.Equal(t, expectHint, s.H().CounterResetHint, "sample %d", idx)
+ require.Equal(t, tc.expectedSamples[idx].v, int64(s.H().Count), "sample %d", idx)
+ case customBucketsFloatHistogram:
+ require.Equal(t, expectHint, s.FH().CounterResetHint, "sample %d", idx)
+ require.Equal(t, tc.expectedSamples[idx].v, int64(s.FH().Count), "sample %d", idx)
+ default:
+ t.Fatalf("unexpected sample type %s", name)
+ }
+ idx++
+ }
+ }
+ })
+ })
+ }
+}
+
+func TestOOOAppendAndQuery_AppendV2(t *testing.T) {
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testOOOAppendAndQueryAppendV2(t, scenario)
+ })
+ }
+}
+
+func testOOOAppendAndQueryAppendV2(t *testing.T, scenario sampleTypeScenario) {
+ opts := DefaultOptions()
+ opts.OutOfOrderCapMax = 30
+ opts.OutOfOrderTimeWindow = 4 * time.Hour.Milliseconds()
+
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions()
+
+ s1 := labels.FromStrings("foo", "bar1")
+ s2 := labels.FromStrings("foo", "bar2")
+
+ minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
+ appendedSamples := make(map[string][]chunks.Sample)
+ totalSamples := 0
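+ // addSample appends one random-valued sample per minute in [fromMins, toMins]; when faceError is set, every append is expected to fail and the batch is rolled back.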
+ addSample := func(lbls labels.Labels, fromMins, toMins int64, faceError bool) {
+ app := db.AppenderV2(context.Background())
+ key := lbls.String()
+ from, to := minutes(fromMins), minutes(toMins)
+ for m := from; m <= to; m += time.Minute.Milliseconds() {
+ val := rand.Intn(1000)
+ _, s, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), lbls, m, int64(val))
+ if faceError {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ appendedSamples[key] = append(appendedSamples[key], s)
+ totalSamples++
+ }
+ }
+ if faceError {
+ require.NoError(t, app.Rollback())
+ } else {
+ require.NoError(t, app.Commit())
+ }
+ }
+
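+ // testQuery queries [from, to] over both series and compares the result against the samples appended so far that fall within that range.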
+ testQuery := func(from, to int64) {
+ querier, err := db.Querier(from, to)
+ require.NoError(t, err)
+
+ seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar."))
+
+ for k, v := range appendedSamples {
+ sort.Slice(v, func(i, j int) bool {
+ return v[i].T() < v[j].T()
+ })
+ appendedSamples[k] = v
+ }
+
+ expSamples := make(map[string][]chunks.Sample)
+ for k, samples := range appendedSamples {
+ for _, s := range samples {
+ if s.T() < from {
+ continue
+ }
+ if s.T() > to {
+ continue
+ }
+ expSamples[k] = append(expSamples[k], s)
+ }
+ }
+ requireEqualSeries(t, expSamples, seriesSet, true)
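+ // Everything appended after the two initial in-order samples (one per series) is out of order, hence totalSamples-2.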
+ requireEqualOOOSamples(t, totalSamples-2, db)
+ }
+
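+ // verifyOOOMinMaxTimes checks the head's tracked OOO min/max times (given in minutes).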
+ verifyOOOMinMaxTimes := func(expMin, expMax int64) {
+ require.Equal(t, minutes(expMin), db.head.MinOOOTime())
+ require.Equal(t, minutes(expMax), db.head.MaxOOOTime())
+ }
+
+ // In-order samples.
+ addSample(s1, 300, 300, false)
+ addSample(s2, 290, 290, false)
+ require.Equal(t, float64(2), prom_testutil.ToFloat64(db.head.metrics.chunksCreated))
+ testQuery(math.MinInt64, math.MaxInt64)
+
+ // Some ooo samples.
+ addSample(s1, 250, 260, false)
+ addSample(s2, 255, 265, false)
+ verifyOOOMinMaxTimes(250, 265)
+ testQuery(math.MinInt64, math.MaxInt64)
+ testQuery(minutes(250), minutes(265)) // Test querying ooo data time range.
+ testQuery(minutes(290), minutes(300)) // Test querying in-order data time range.
+ testQuery(minutes(250), minutes(300)) // Test querying the entire range.
+
+ // Out of time window.
+ addSample(s1, 59, 59, true)
+ addSample(s2, 49, 49, true)
+ verifyOOOMinMaxTimes(250, 265)
+ testQuery(math.MinInt64, math.MaxInt64)
+
+ // At the edge of the time window; it would also be "out of bound" without OOO support.
+ addSample(s1, 60, 65, false)
+ verifyOOOMinMaxTimes(60, 265)
+ testQuery(math.MinInt64, math.MaxInt64)
+
+ // This sample is not within the time window w.r.t. the head's maxt, but it is within the window
+ // w.r.t. the series' maxt. But we consider only the head's maxt.
+ addSample(s2, 59, 59, true)
+ verifyOOOMinMaxTimes(60, 265)
+ testQuery(math.MinInt64, math.MaxInt64)
+
+ // Now the sample is within time window w.r.t. the head's maxt.
+ addSample(s2, 60, 65, false)
+ verifyOOOMinMaxTimes(60, 265)
+ testQuery(math.MinInt64, math.MaxInt64)
+
+ // Out of time window again.
+ addSample(s1, 59, 59, true)
+ addSample(s2, 49, 49, true)
+ testQuery(math.MinInt64, math.MaxInt64)
+
+ // Generating some m-map chunks. The m-map chunks here are created such
+ // that when sorted w.r.t. mint, the last chunk's maxt is not the overall maxt
+ // of the merged chunk. This tests a bug fixed in https://github.com/grafana/mimir-prometheus/pull/238/.
+ require.Equal(t, float64(4), prom_testutil.ToFloat64(db.head.metrics.chunksCreated))
+ addSample(s1, 180, 249, false)
+ require.Equal(t, float64(6), prom_testutil.ToFloat64(db.head.metrics.chunksCreated))
+ verifyOOOMinMaxTimes(60, 265)
+ testQuery(math.MinInt64, math.MaxInt64)
+}
+
+func TestOOODisabled_AppendV2(t *testing.T) {
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testOOODisabledAppendV2(t, scenario)
+ })
+ }
+}
+
+func testOOODisabledAppendV2(t *testing.T, scenario sampleTypeScenario) {
+ opts := DefaultOptions()
+ opts.OutOfOrderTimeWindow = 0
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions()
+
+ s1 := labels.FromStrings("foo", "bar1")
+ minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
+ expSamples := make(map[string][]chunks.Sample)
+ totalSamples := 0
+ failedSamples := 0
+
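+ // addSample appends one sample per minute in [fromMins, toMins]; with OOO disabled, appends flagged with faceError are expected to fail and the batch is rolled back.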
+ addSample := func(db *DB, lbls labels.Labels, fromMins, toMins int64, faceError bool) {
+ app := db.AppenderV2(context.Background())
+ key := lbls.String()
+ from, to := minutes(fromMins), minutes(toMins)
+ for m := from; m <= to; m += time.Minute.Milliseconds() {
+ _, _, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), lbls, m, m)
+ if faceError {
+ require.Error(t, err)
+ failedSamples++
+ } else {
+ require.NoError(t, err)
+ expSamples[key] = append(expSamples[key], scenario.sampleFunc(m, m))
+ totalSamples++
+ }
+ }
+ if faceError {
+ require.NoError(t, app.Rollback())
+ } else {
+ require.NoError(t, app.Commit())
+ }
+ }
+
+ addSample(db, s1, 300, 300, false) // In-order samples.
+ addSample(db, s1, 250, 260, true) // Some ooo samples.
+ addSample(db, s1, 59, 59, true) // Out of time window.
+ addSample(db, s1, 60, 65, true) // At the edge of the time window; it would also be "out of bound" without OOO support.
+ addSample(db, s1, 59, 59, true) // Out of time window again.
+ addSample(db, s1, 301, 310, false) // More in-order samples.
+
+ querier, err := db.Querier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+
+ seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar."))
+ requireEqualSeries(t, expSamples, seriesSet, true)
+ requireEqualOOOSamples(t, 0, db)
+ require.Equal(t, float64(failedSamples),
+ prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType))+prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples.WithLabelValues(scenario.sampleType)),
+ "number of ooo/oob samples mismatch")
+
+ // Verifying that no OOO artifacts were generated.
+ _, err = os.ReadDir(path.Join(db.Dir(), wlog.WblDirName))
+ require.True(t, os.IsNotExist(err))
+
+ ms, created, err := db.head.getOrCreate(s1.Hash(), s1, false)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.NotNil(t, ms)
+ require.Nil(t, ms.ooo)
+}
+
+func TestWBLAndMmapReplay_AppendV2(t *testing.T) {
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testWBLAndMmapReplayAppendV2(t, scenario)
+ })
+ }
+}
+
+func testWBLAndMmapReplayAppendV2(t *testing.T, scenario sampleTypeScenario) {
+ opts := DefaultOptions()
+ opts.OutOfOrderCapMax = 30
+ opts.OutOfOrderTimeWindow = 4 * time.Hour.Milliseconds()
+
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions()
+
+ s1 := labels.FromStrings("foo", "bar1")
+
+ minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
+ expSamples := make(map[string][]chunks.Sample)
+ totalSamples := 0
+ addSample := func(lbls labels.Labels, fromMins, toMins int64) {
+ app := db.AppenderV2(context.Background())
+ key := lbls.String()
+ from, to := minutes(fromMins), minutes(toMins)
+ for m := from; m <= to; m += time.Minute.Milliseconds() {
+ val := rand.Intn(1000)
+ _, s, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), lbls, m, int64(val))
+ require.NoError(t, err)
+ expSamples[key] = append(expSamples[key], s)
+ totalSamples++
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ testQuery := func(exp map[string][]chunks.Sample) {
+ querier, err := db.Querier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+
+ seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar."))
+
+ for k, v := range exp {
+ sort.Slice(v, func(i, j int) bool {
+ return v[i].T() < v[j].T()
+ })
+ exp[k] = v
+ }
+ requireEqualSeries(t, exp, seriesSet, true)
+ }
+
+ // In-order samples.
+ addSample(s1, 300, 300)
+ require.Equal(t, float64(1), prom_testutil.ToFloat64(db.head.metrics.chunksCreated))
+
+ // Some ooo samples.
+ addSample(s1, 250, 260)
+ addSample(s1, 195, 249) // This creates some m-map chunks.
+ require.Equal(t, float64(4), prom_testutil.ToFloat64(db.head.metrics.chunksCreated))
+ testQuery(expSamples)
+ oooMint, oooMaxt := minutes(195), minutes(260)
+
+ // Collect the samples only present in the ooo m-map chunks.
+ ms, created, err := db.head.getOrCreate(s1.Hash(), s1, false)
+ require.False(t, created)
+ require.NoError(t, err)
+ var s1MmapSamples []chunks.Sample
+ for _, mc := range ms.ooo.oooMmappedChunks {
+ chk, err := db.head.chunkDiskMapper.Chunk(mc.ref)
+ require.NoError(t, err)
+ it := chk.Iterator(nil)
+ smpls, err := storage.ExpandSamples(it, newSample)
+ require.NoError(t, err)
+ s1MmapSamples = append(s1MmapSamples, smpls...)
+ }
+ require.NotEmpty(t, s1MmapSamples)
+
+ require.NoError(t, db.Close())
+
+ // Making a copy of original state of WBL and Mmap files to use it later.
+ mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot)
+ wblDir := db.head.wbl.Dir()
+ originalWblDir := filepath.Join(t.TempDir(), "original_wbl")
+ originalMmapDir := filepath.Join(t.TempDir(), "original_mmap")
+ require.NoError(t, fileutil.CopyDirs(wblDir, originalWblDir))
+ require.NoError(t, fileutil.CopyDirs(mmapDir, originalMmapDir))
+ resetWBLToOriginal := func() {
+ require.NoError(t, os.RemoveAll(wblDir))
+ require.NoError(t, fileutil.CopyDirs(originalWblDir, wblDir))
+ }
+ resetMmapToOriginal := func() {
+ require.NoError(t, os.RemoveAll(mmapDir))
+ require.NoError(t, fileutil.CopyDirs(originalMmapDir, mmapDir))
+ }
+
+ t.Run("Restart DB with both WBL and M-map files for ooo data", func(t *testing.T) {
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
+ require.Equal(t, oooMint, db.head.MinOOOTime())
+ require.Equal(t, oooMaxt, db.head.MaxOOOTime())
+ testQuery(expSamples)
+ })
+
+ t.Run("Restart DB with only WBL for ooo data", func(t *testing.T) {
+ require.NoError(t, os.RemoveAll(mmapDir))
+
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
+ require.Equal(t, oooMint, db.head.MinOOOTime())
+ require.Equal(t, oooMaxt, db.head.MaxOOOTime())
+ testQuery(expSamples)
+ })
+
+ t.Run("Restart DB with only M-map files for ooo data", func(t *testing.T) {
+ require.NoError(t, os.RemoveAll(wblDir))
+ resetMmapToOriginal()
+
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
+ require.Equal(t, oooMint, db.head.MinOOOTime())
+ require.Equal(t, oooMaxt, db.head.MaxOOOTime())
+ inOrderSample := expSamples[s1.String()][len(expSamples[s1.String()])-1]
+ testQuery(map[string][]chunks.Sample{
+ s1.String(): append(s1MmapSamples, inOrderSample),
+ })
+ })
+
+ t.Run("Restart DB with WBL+Mmap while increasing the OOOCapMax", func(t *testing.T) {
+ resetWBLToOriginal()
+ resetMmapToOriginal()
+
+ opts.OutOfOrderCapMax = 60
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
+ require.NoError(t, err)
+ require.Equal(t, oooMint, db.head.MinOOOTime())
+ require.Equal(t, oooMaxt, db.head.MaxOOOTime())
+ testQuery(expSamples)
+ })
+
+ t.Run("Restart DB with WBL+Mmap while decreasing the OOOCapMax", func(t *testing.T) {
+ resetMmapToOriginal() // We need to reset because new duplicate chunks can be written above.
+
+ opts.OutOfOrderCapMax = 10
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
+ require.NoError(t, err)
+ require.Equal(t, oooMint, db.head.MinOOOTime())
+ require.Equal(t, oooMaxt, db.head.MaxOOOTime())
+ testQuery(expSamples)
+ })
+
+ t.Run("Restart DB with WBL+Mmap while having no m-map markers in WBL", func(t *testing.T) {
+ resetMmapToOriginal() // We need to reset because new duplicate chunks can be written above.
+
+ // Removing m-map markers in WBL by rewriting it.
+ newWbl, err := wlog.New(promslog.NewNopLogger(), nil, filepath.Join(t.TempDir(), "new_wbl"), compression.None)
+ require.NoError(t, err)
+ sr, err := wlog.NewSegmentsReader(originalWblDir)
+ require.NoError(t, err)
+ dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
+ r, markers, addedRecs := wlog.NewReader(sr), 0, 0
+ for r.Next() {
+ rec := r.Record()
+ if dec.Type(rec) == record.MmapMarkers {
+ markers++
+ continue
+ }
+ addedRecs++
+ require.NoError(t, newWbl.Log(rec))
+ }
+ require.Positive(t, markers)
+ require.Positive(t, addedRecs)
+ require.NoError(t, newWbl.Close())
+ require.NoError(t, sr.Close())
+ require.NoError(t, os.RemoveAll(wblDir))
+ require.NoError(t, os.Rename(newWbl.Dir(), wblDir))
+
+ opts.OutOfOrderCapMax = 30
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
+ require.NoError(t, err)
+ require.Equal(t, oooMint, db.head.MinOOOTime())
+ require.Equal(t, oooMaxt, db.head.MaxOOOTime())
+ testQuery(expSamples)
+ })
+}
+
+func TestOOOHistogramCompactionWithCounterResets_AppendV2(t *testing.T) {
+ for _, floatHistogram := range []bool{false, true} {
+ ctx := context.Background()
+
+ opts := DefaultOptions()
+ opts.OutOfOrderCapMax = 30
+ opts.OutOfOrderTimeWindow = 500 * time.Minute.Milliseconds()
+
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions() // We want to manually call it.
+
+ series1 := labels.FromStrings("foo", "bar1")
+ series2 := labels.FromStrings("foo", "bar2")
+
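+ // The *PreCompact slices hold the samples (and counter reset hints) expected when querying the head before OOO compaction; the *PostCompact slices hold what is expected after compaction into blocks.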
+ var series1ExpSamplesPreCompact, series2ExpSamplesPreCompact, series1ExpSamplesPostCompact, series2ExpSamplesPostCompact []chunks.Sample
+
+ addSample := func(ts int64, l labels.Labels, val int, hint histogram.CounterResetHint) sample {
+ app := db.AppenderV2(context.Background())
+ tsMs := ts * time.Minute.Milliseconds()
+ if floatHistogram {
+ h := tsdbutil.GenerateTestFloatHistogram(int64(val))
+ h.CounterResetHint = hint
+ _, err := app.Append(0, l, 0, tsMs, 0, nil, h, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ return sample{t: tsMs, fh: h.Copy()}
+ }
+
+ h := tsdbutil.GenerateTestHistogram(int64(val))
+ h.CounterResetHint = hint
+ _, err := app.Append(0, l, 0, tsMs, 0, h, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ return sample{t: tsMs, h: h.Copy()}
+ }
+
+ // Add an in-order sample to each series.
+ s := addSample(520, series1, 1000000, histogram.UnknownCounterReset)
+ series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s)
+ series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, s)
+
+ s = addSample(520, series2, 1000000, histogram.UnknownCounterReset)
+ series2ExpSamplesPreCompact = append(series2ExpSamplesPreCompact, s)
+ series2ExpSamplesPostCompact = append(series2ExpSamplesPostCompact, s)
+
+ // Verify that the in-memory ooo chunk is empty.
+ checkEmptyOOOChunk := func(lbls labels.Labels) {
+ ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.Nil(t, ms.ooo)
+ }
+
+ checkEmptyOOOChunk(series1)
+ checkEmptyOOOChunk(series2)
+
+ // Add samples for series1. There are three head chunks that will be created:
+ // Chunk 1 - Samples between 100 - 440. One explicit counter reset at ts 250.
+ // Chunk 2 - Samples between 105 - 395. Overlaps with Chunk 1. One detected counter reset at ts 165.
+ // Chunk 3 - Samples between 480 - 509. All within one block boundary. One detected counter reset at 490.
+
+ // Chunk 1.
+ // First add 10 samples.
+ for i := 100; i < 200; i += 10 {
+ s = addSample(int64(i), series1, 100000+i, histogram.UnknownCounterReset)
+ // Before compaction, all the samples have UnknownCounterReset even though they've been added to the same
+ // chunk. This is because they overlap with the samples from chunk two and when merging two chunks on read,
+ // the header is set as unknown when the next sample is not in the same chunk as the previous one.
+ series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s)
+ // After compaction, samples from multiple mmapped chunks will be merged, so there won't be any overlapping
+ // chunks. Therefore, most samples will have the NotCounterReset header.
+ // 100 is the first sample in the first chunk in the blocks, so is still set to UnknownCounterReset.
+ // 120 is a block boundary - after compaction, 120 will be the first sample in a chunk, so is still set to
+ // UnknownCounterReset.
+ if i > 100 && i != 120 {
+ s = copyWithCounterReset(s, histogram.NotCounterReset)
+ }
+ series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, s)
+ }
+ // Explicit counter reset - the counter reset header is set to CounterReset but the value is higher
+ // than for the previous timestamp. Explicit counter reset headers are actually ignored though, so when reading
+ // the sample back you actually get unknown/not counter reset. This is because the chainSampleIterator ignores
+ // existing headers and sets the header as UnknownCounterReset if the next sample is not in the same chunk as
+ // the previous one, and counter resets always create a new chunk.
+ // This case has been added to document what's happening, though it might not be the ideal behavior.
+ s = addSample(250, series1, 100000+250, histogram.CounterReset)
+ series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, copyWithCounterReset(s, histogram.UnknownCounterReset))
+ series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, copyWithCounterReset(s, histogram.NotCounterReset))
+
+ // Add 19 more samples to complete a chunk.
+ for i := 260; i < 450; i += 10 {
+ s = addSample(int64(i), series1, 100000+i, histogram.UnknownCounterReset)
+ // The samples with timestamp less than 410 overlap with the samples from chunk 2, so before compaction,
+ // they're all UnknownCounterReset. Samples greater than or equal to 410 don't overlap with other chunks
+ // so they're always detected as NotCounterReset pre and post compaction.
+ if i >= 410 {
+ s = copyWithCounterReset(s, histogram.NotCounterReset)
+ }
+ series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s)
+ // 360 is a block boundary, so after compaction its header is still UnknownCounterReset.
+ if i != 360 {
+ s = copyWithCounterReset(s, histogram.NotCounterReset)
+ }
+ series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, s)
+ }
+
+ // Chunk 2.
+ // Add six OOO samples.
+ for i := 105; i < 165; i += 10 {
+ s = addSample(int64(i), series1, 100000+i, histogram.UnknownCounterReset)
+ // Samples overlap with chunk 1 so before compaction all headers are UnknownCounterReset.
+ series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s)
+ series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, copyWithCounterReset(s, histogram.NotCounterReset))
+ }
+
+ // Add sample that will be detected as a counter reset.
+ s = addSample(165, series1, 100000, histogram.UnknownCounterReset)
+ // Before compaction, sample has an UnknownCounterReset header due to the chainSampleIterator.
+ series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s)
+ // After compaction, the sample's counter reset is still UnknownCounterReset as we cannot trust CounterReset
+ // headers in chunks at the moment, so when reading the first sample in a chunk, its hint is set to
+ // UnknownCounterReset.
+ series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, s)
+
+ // Add 23 more samples to complete a chunk.
+ for i := 175; i < 405; i += 10 {
+ s = addSample(int64(i), series1, 100000+i, histogram.UnknownCounterReset)
+ // Samples between 205-255 overlap with chunk 1 so before compaction those samples will have the
+ // UnknownCounterReset header.
+ if i >= 205 && i < 255 {
+ s = copyWithCounterReset(s, histogram.NotCounterReset)
+ }
+ series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s)
+ // 245 is the first sample >= the block boundary at 240, so it's still UnknownCounterReset after compaction.
+ if i != 245 {
+ s = copyWithCounterReset(s, histogram.NotCounterReset)
+ } else {
+ s = copyWithCounterReset(s, histogram.UnknownCounterReset)
+ }
+ series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, s)
+ }
+
+ // Chunk 3.
+ for i := 480; i < 490; i++ {
+ s = addSample(int64(i), series1, 100000+i, histogram.UnknownCounterReset)
+ // No overlapping samples in other chunks, so all other samples will already be detected as NotCounterReset
+ // before compaction.
+ if i > 480 {
+ s = copyWithCounterReset(s, histogram.NotCounterReset)
+ }
+ series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s)
+ // 480 is a block boundary.
+ if i == 480 {
+ s = copyWithCounterReset(s, histogram.UnknownCounterReset)
+ }
+ series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, s)
+ }
+ // Counter reset.
+ s = addSample(int64(490), series1, 100000, histogram.UnknownCounterReset)
+ series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s)
+ series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, s)
+ // Add some more samples after the counter reset.
+ for i := 491; i < 510; i++ {
+ s = addSample(int64(i), series1, 100000+i, histogram.UnknownCounterReset)
+ s = copyWithCounterReset(s, histogram.NotCounterReset)
+ series1ExpSamplesPreCompact = append(series1ExpSamplesPreCompact, s)
+ series1ExpSamplesPostCompact = append(series1ExpSamplesPostCompact, s)
+ }
+
+ // Add samples for series2 - one chunk with one detected counter reset at 300.
+ for i := 200; i < 300; i += 10 {
+ s = addSample(int64(i), series2, 100000+i, histogram.UnknownCounterReset)
+ if i > 200 {
+ s = copyWithCounterReset(s, histogram.NotCounterReset)
+ }
+ series2ExpSamplesPreCompact = append(series2ExpSamplesPreCompact, s)
+ if i == 240 {
+ s = copyWithCounterReset(s, histogram.UnknownCounterReset)
+ }
+ series2ExpSamplesPostCompact = append(series2ExpSamplesPostCompact, s)
+ }
+ // Counter reset.
+ s = addSample(int64(300), series2, 100000, histogram.UnknownCounterReset)
+ series2ExpSamplesPreCompact = append(series2ExpSamplesPreCompact, s)
+ series2ExpSamplesPostCompact = append(series2ExpSamplesPostCompact, s)
+ // Add some more samples after the counter reset.
+ for i := 310; i < 500; i += 10 {
+ s := addSample(int64(i), series2, 100000+i, histogram.UnknownCounterReset)
+ s = copyWithCounterReset(s, histogram.NotCounterReset)
+ series2ExpSamplesPreCompact = append(series2ExpSamplesPreCompact, s)
+ // 360 and 480 are block boundaries.
+ if i == 360 || i == 480 {
+ s = copyWithCounterReset(s, histogram.UnknownCounterReset)
+ }
+ series2ExpSamplesPostCompact = append(series2ExpSamplesPostCompact, s)
+ }
+
+ // Sort samples (as OOO samples not added in time-order).
+ sort.Slice(series1ExpSamplesPreCompact, func(i, j int) bool {
+ return series1ExpSamplesPreCompact[i].T() < series1ExpSamplesPreCompact[j].T()
+ })
+ sort.Slice(series1ExpSamplesPostCompact, func(i, j int) bool {
+ return series1ExpSamplesPostCompact[i].T() < series1ExpSamplesPostCompact[j].T()
+ })
+ sort.Slice(series2ExpSamplesPreCompact, func(i, j int) bool {
+ return series2ExpSamplesPreCompact[i].T() < series2ExpSamplesPreCompact[j].T()
+ })
+ sort.Slice(series2ExpSamplesPostCompact, func(i, j int) bool {
+ return series2ExpSamplesPostCompact[i].T() < series2ExpSamplesPostCompact[j].T()
+ })
+
+ verifyDBSamples := func(s1Samples, s2Samples []chunks.Sample) {
+ expRes := map[string][]chunks.Sample{
+ series1.String(): s1Samples,
+ series2.String(): s2Samples,
+ }
+
+ q, err := db.Querier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+ actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ requireEqualSeries(t, expRes, actRes, false)
+ }
+
+ // Verify DB samples before compaction.
+ verifyDBSamples(series1ExpSamplesPreCompact, series2ExpSamplesPreCompact)
+
+ // Verify that the in-memory ooo chunk is not empty.
+ checkNonEmptyOOOChunk := func(lbls labels.Labels) {
+ ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls, false)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
+ }
+
+ checkNonEmptyOOOChunk(series1)
+ checkNonEmptyOOOChunk(series2)
+
+ // No blocks before compaction.
+ require.Empty(t, db.Blocks())
+
+ // There is a 0th WBL file.
+ require.NoError(t, db.head.wbl.Sync()) // Syncing to make sure the WBL is flushed on Windows.
+ files, err := os.ReadDir(db.head.wbl.Dir())
+ require.NoError(t, err)
+ require.Len(t, files, 1)
+ require.Equal(t, "00000000", files[0].Name())
+ f, err := files[0].Info()
+ require.NoError(t, err)
+ require.Greater(t, f.Size(), int64(100))
+
+ // OOO compaction happens here.
+ require.NoError(t, db.CompactOOOHead(ctx))
+
+ // Check that blocks are created after compaction.
+ require.Len(t, db.Blocks(), 5)
+
+ // Check samples after compaction.
+ verifyDBSamples(series1ExpSamplesPostCompact, series2ExpSamplesPostCompact)
+
+ // 0th WBL file will be deleted and 1st will be the only present.
+ files, err = os.ReadDir(db.head.wbl.Dir())
+ require.NoError(t, err)
+ require.Len(t, files, 1)
+ require.Equal(t, "00000001", files[0].Name())
+ f, err = files[0].Info()
+ require.NoError(t, err)
+ require.Equal(t, int64(0), f.Size())
+
+ // OOO stuff should not be present in the Head now.
+ checkEmptyOOOChunk(series1)
+ checkEmptyOOOChunk(series2)
+
+ verifyBlockSamples := func(block *Block, fromMins, toMins int64) {
+ var series1Samples, series2Samples []chunks.Sample
+
+ for _, s := range series1ExpSamplesPostCompact {
+ if s.T() >= fromMins*time.Minute.Milliseconds() {
+ // Samples should be sorted, so break out of loop when we reach a timestamp that's too big.
+ if s.T() > toMins*time.Minute.Milliseconds() {
+ break
+ }
+ series1Samples = append(series1Samples, s)
+ }
+ }
+ for _, s := range series2ExpSamplesPostCompact {
+ if s.T() >= fromMins*time.Minute.Milliseconds() {
+ // Samples should be sorted, so break out of loop when we reach a timestamp that's too big.
+ if s.T() > toMins*time.Minute.Milliseconds() {
+ break
+ }
+ series2Samples = append(series2Samples, s)
+ }
+ }
+
+ expRes := map[string][]chunks.Sample{}
+ if len(series1Samples) != 0 {
+ expRes[series1.String()] = series1Samples
+ }
+ if len(series2Samples) != 0 {
+ expRes[series2.String()] = series2Samples
+ }
+
+ q, err := NewBlockQuerier(block, math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+
+ actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ requireEqualSeries(t, expRes, actRes, false)
+ }
+
+ // Checking for expected data in the blocks.
+ verifyBlockSamples(db.Blocks()[0], 100, 119)
+ verifyBlockSamples(db.Blocks()[1], 120, 239)
+ verifyBlockSamples(db.Blocks()[2], 240, 359)
+ verifyBlockSamples(db.Blocks()[3], 360, 479)
+ verifyBlockSamples(db.Blocks()[4], 480, 509)
+
+ // There should be a single m-map file.
+ mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot)
+ files, err = os.ReadDir(mmapDir)
+ require.NoError(t, err)
+ require.Len(t, files, 1)
+
+ // Compact the in-order head and expect another block.
+ // Since this is a forced compaction, this block is not aligned with 2h.
+ err = db.CompactHead(NewRangeHead(db.head, 500*time.Minute.Milliseconds(), 550*time.Minute.Milliseconds()))
+ require.NoError(t, err)
+ require.Len(t, db.Blocks(), 6)
+ verifyBlockSamples(db.Blocks()[5], 520, 520)
+
+ // Blocks created out of normal and OOO head now. But not merged.
+ verifyDBSamples(series1ExpSamplesPostCompact, series2ExpSamplesPostCompact)
+
+ // The compaction also clears out the old m-map files. Including
+ // the file that has ooo chunks.
+ files, err = os.ReadDir(mmapDir)
+ require.NoError(t, err)
+ require.Len(t, files, 1)
+ require.Equal(t, "000001", files[0].Name())
+
+ // This will merge overlapping block.
+ require.NoError(t, db.Compact(ctx))
+
+ require.Len(t, db.Blocks(), 5)
+ verifyBlockSamples(db.Blocks()[0], 100, 119)
+ verifyBlockSamples(db.Blocks()[1], 120, 239)
+ verifyBlockSamples(db.Blocks()[2], 240, 359)
+ verifyBlockSamples(db.Blocks()[3], 360, 479)
+ verifyBlockSamples(db.Blocks()[4], 480, 520) // Merged block.
+
+ // Final state. Blocks from normal and OOO head are merged.
+ verifyDBSamples(series1ExpSamplesPostCompact, series2ExpSamplesPostCompact)
+ }
+}
+
+func TestInterleavedInOrderAndOOOHistogramCompactionWithCounterResets_AppendV2(t *testing.T) {
+ for _, floatHistogram := range []bool{false, true} {
+ ctx := context.Background()
+
+ opts := DefaultOptions()
+ opts.OutOfOrderCapMax = 30
+ opts.OutOfOrderTimeWindow = 500 * time.Minute.Milliseconds()
+
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions() // We want to manually call it.
+
+ series1 := labels.FromStrings("foo", "bar1")
+
+ addSample := func(ts int64, l labels.Labels, val int) sample {
+ app := db.AppenderV2(context.Background())
+ tsMs := ts
+ if floatHistogram {
+ h := tsdbutil.GenerateTestFloatHistogram(int64(val))
+ _, err := app.Append(0, l, 0, tsMs, 0, nil, h, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ return sample{t: tsMs, fh: h.Copy()}
+ }
+
+ h := tsdbutil.GenerateTestHistogram(int64(val))
+ _, err := app.Append(0, l, 0, tsMs, 0, h, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ return sample{t: tsMs, h: h.Copy()}
+ }
+
+ var expSamples []chunks.Sample
+
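+ // Append in-order samples at ts 0, 1 and 3, then an OOO sample at ts 2; the hints expected below account for the iterator switching between in-order and OOO chunks.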
+ s := addSample(0, series1, 0)
+ expSamples = append(expSamples, s)
+ s = addSample(1, series1, 10)
+ expSamples = append(expSamples, copyWithCounterReset(s, histogram.NotCounterReset))
+ s = addSample(3, series1, 3)
+ expSamples = append(expSamples, copyWithCounterReset(s, histogram.UnknownCounterReset))
+ s = addSample(2, series1, 0)
+ expSamples = append(expSamples, copyWithCounterReset(s, histogram.UnknownCounterReset))
+
+ // Sort samples (as OOO samples not added in time-order).
+ sort.Slice(expSamples, func(i, j int) bool {
+ return expSamples[i].T() < expSamples[j].T()
+ })
+
+ verifyDBSamples := func(s1Samples []chunks.Sample) {
+ t.Helper()
+ expRes := map[string][]chunks.Sample{
+ series1.String(): s1Samples,
+ }
+
+ q, err := db.Querier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+ actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ requireEqualSeries(t, expRes, actRes, false)
+ }
+
+ // Verify DB samples before compaction.
+ verifyDBSamples(expSamples)
+
+ require.NoError(t, db.CompactOOOHead(ctx))
+
+ // Check samples after OOO compaction.
+ verifyDBSamples(expSamples)
+
+ // Checking for expected data in the blocks.
+ // Check that blocks are created after compaction.
+ require.Len(t, db.Blocks(), 1)
+
+ // Compact the in-order head and expect another block.
+ // Since this is a forced compaction, this block is not aligned with 2h.
+ err := db.CompactHead(NewRangeHead(db.head, 0, 3))
+ require.NoError(t, err)
+ require.Len(t, db.Blocks(), 2)
+
+ // Blocks created out of normal and OOO head now. But not merged.
+ verifyDBSamples(expSamples)
+
+ // This will merge overlapping block.
+ require.NoError(t, db.Compact(ctx))
+
+ require.Len(t, db.Blocks(), 1)
+
+ // Final state. Blocks from normal and OOO head are merged.
+ verifyDBSamples(expSamples)
+ }
+}
+
+func TestOOOCompactionFailure_AppendV2(t *testing.T) {
+ t.Parallel()
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testOOOCompactionFailureAppendV2(t, scenario)
+ })
+ }
+}
+
+func testOOOCompactionFailureAppendV2(t *testing.T, scenario sampleTypeScenario) {
+ ctx := context.Background()
+
+ opts := DefaultOptions()
+ opts.OutOfOrderCapMax = 30
+ opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions() // We want to manually call it.
+ t.Cleanup(func() {
+ require.NoError(t, db.Close())
+ })
+
+ series1 := labels.FromStrings("foo", "bar1")
+
+ addSample := func(fromMins, toMins int64) {
+ app := db.AppenderV2(context.Background())
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ _, _, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), series1, ts, ts)
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ // Add in-order samples.
+ addSample(250, 350)
+
+ // Add OOO samples that create multiple chunks.
+ addSample(90, 310)
+
+ // No blocks before compaction.
+ require.Empty(t, db.Blocks())
+
+ // There is a 0th WBL file.
+ verifyFirstWBLFileIs0 := func(count int) {
+ require.NoError(t, db.head.wbl.Sync()) // Syncing to make sure the WBL is flushed on Windows.
+ files, err := os.ReadDir(db.head.wbl.Dir())
+ require.NoError(t, err)
+ require.Len(t, files, count)
+ require.Equal(t, "00000000", files[0].Name())
+ f, err := files[0].Info()
+ require.NoError(t, err)
+ require.Greater(t, f.Size(), int64(100))
+ }
+ verifyFirstWBLFileIs0(1)
+
+ verifyMmapFiles := func(exp ...string) {
+ mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot)
+ files, err := os.ReadDir(mmapDir)
+ require.NoError(t, err)
+ require.Len(t, files, len(exp))
+ for i, f := range files {
+ require.Equal(t, exp[i], f.Name())
+ }
+ }
+
+ verifyMmapFiles("000001")
+
+ // OOO compaction fails 5 times.
+ originalCompactor := db.compactor
+ db.compactor = &mockCompactorFailing{t: t}
+ for range 5 {
+ require.Error(t, db.CompactOOOHead(ctx))
+ }
+ require.Empty(t, db.Blocks())
+
+ // M-map files don't change after failed compaction.
+ verifyMmapFiles("000001")
+
+ // Because of 5 compaction attempts, there are 6 files now.
+ verifyFirstWBLFileIs0(6)
+
+ db.compactor = originalCompactor
+ require.NoError(t, db.CompactOOOHead(ctx))
+ oldBlocks := db.Blocks()
+ require.Len(t, db.Blocks(), 3)
+
+ // Check that the ooo chunks were removed.
+ ms, created, err := db.head.getOrCreate(series1.Hash(), series1, false)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.Nil(t, ms.ooo)
+
+ // The failed compaction should not have left the ooo Head corrupted.
+ // Hence, expect no new blocks with another OOO compaction call.
+ require.NoError(t, db.CompactOOOHead(ctx))
+ require.Len(t, db.Blocks(), 3)
+ require.Equal(t, oldBlocks, db.Blocks())
+
+ // There should be a single m-map file.
+ verifyMmapFiles("000001")
+
+ // All but last WBL file will be deleted.
+ // 8 files in total (starting at 0) because of 7 compaction calls.
+ files, err := os.ReadDir(db.head.wbl.Dir())
+ require.NoError(t, err)
+ require.Len(t, files, 1)
+ require.Equal(t, "00000007", files[0].Name())
+ f, err := files[0].Info()
+ require.NoError(t, err)
+ require.Equal(t, int64(0), f.Size())
+
+ verifySamples := func(block *Block, fromMins, toMins int64) {
+ series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
+ }
+ expRes := map[string][]chunks.Sample{
+ series1.String(): series1Samples,
+ }
+
+ q, err := NewBlockQuerier(block, math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+ actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ requireEqualSeries(t, expRes, actRes, true)
+ }
+
+ // Checking for expected data in the blocks.
+ verifySamples(db.Blocks()[0], 90, 119)
+ verifySamples(db.Blocks()[1], 120, 239)
+ verifySamples(db.Blocks()[2], 240, 310)
+
+ // Compact the in-order head and expect another block.
+ // Since this is a forced compaction, this block is not aligned with 2h.
+ err = db.CompactHead(NewRangeHead(db.head, 250*time.Minute.Milliseconds(), 350*time.Minute.Milliseconds()))
+ require.NoError(t, err)
+ require.Len(t, db.Blocks(), 4) // [0, 120), [120, 240), [240, 360), [250, 351)
+ verifySamples(db.Blocks()[3], 250, 350)
+
+ // The compaction also clears out the old m-map files. Including
+ // the file that has ooo chunks.
+ verifyMmapFiles("000001")
+}
+
+func TestWBLCorruption_AppendV2(t *testing.T) {
+ opts := DefaultOptions()
+ opts.OutOfOrderCapMax = 30
+ opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
+
+ db := newTestDB(t, withOpts(opts))
+
+ series1 := labels.FromStrings("foo", "bar1")
+ var allSamples, expAfterRestart []chunks.Sample
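+ // addSamples appends one float sample per minute in [fromMins, toMins]; samples flagged afterRestart are expected to survive the WBL corruption and replay.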
+ addSamples := func(fromMins, toMins int64, afterRestart bool) {
+ app := db.AppenderV2(context.Background())
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ _, err := app.Append(0, series1, 0, ts, float64(ts), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ allSamples = append(allSamples, sample{t: ts, f: float64(ts)})
+ if afterRestart {
+ expAfterRestart = append(expAfterRestart, sample{t: ts, f: float64(ts)})
+ }
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ // Add in-order samples.
+ addSamples(340, 350, true)
+
+ // OOO samples.
+ addSamples(90, 99, true)
+ addSamples(100, 119, true)
+ addSamples(120, 130, true)
+
+ // Moving on to the second file.
+ _, err := db.head.wbl.NextSegment()
+ require.NoError(t, err)
+
+ // More OOO samples.
+ addSamples(200, 230, true)
+ addSamples(240, 255, true)
+
+ // We corrupt the WBL after the sample at 255, so everything added later
+ // should be deleted after replay.
+
+ // Record where we will corrupt it.
+ require.NoError(t, db.head.wbl.Sync()) // Syncing to make sure the WBL is flushed on Windows.
+ files, err := os.ReadDir(db.head.wbl.Dir())
+ require.NoError(t, err)
+ require.Len(t, files, 2)
+ f1, err := files[1].Info()
+ require.NoError(t, err)
+ corruptIndex := f1.Size()
+ corruptFilePath := path.Join(db.head.wbl.Dir(), files[1].Name())
+
+ // Corrupt the WBL by adding a malformed record.
+ require.NoError(t, db.head.wbl.Log([]byte{byte(record.Samples), 99, 9, 99, 9, 99, 9, 99}))
+
+ // More samples after the corruption point.
+ addSamples(260, 280, false)
+ addSamples(290, 300, false)
+
+ // Another file.
+ _, err = db.head.wbl.NextSegment()
+ require.NoError(t, err)
+
+ addSamples(310, 320, false)
+
+ // Verifying that we have data after the corruption point.
+ require.NoError(t, db.head.wbl.Sync()) // Syncing to make sure the WBL is flushed on Windows.
+ files, err = os.ReadDir(db.head.wbl.Dir())
+ require.NoError(t, err)
+ require.Len(t, files, 3)
+ f1, err = files[1].Info()
+ require.NoError(t, err)
+ require.Greater(t, f1.Size(), corruptIndex)
+ f0, err := files[0].Info()
+ require.NoError(t, err)
+ require.Greater(t, f0.Size(), int64(100))
+ f2, err := files[2].Info()
+ require.NoError(t, err)
+ require.Greater(t, f2.Size(), int64(100))
+
+ verifySamples := func(expSamples []chunks.Sample) {
+ sort.Slice(expSamples, func(i, j int) bool {
+ return expSamples[i].T() < expSamples[j].T()
+ })
+
+ expRes := map[string][]chunks.Sample{
+ series1.String(): expSamples,
+ }
+
+ q, err := db.Querier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+
+ actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ require.Equal(t, expRes, actRes)
+ }
+
+ verifySamples(allSamples)
+
+ require.NoError(t, db.Close())
+
+ // We want everything to be replayed from the WBL. So we delete the m-map files.
+ require.NoError(t, os.RemoveAll(mmappedChunksDir(db.head.opts.ChunkDirRoot)))
+
+ // Restart does the replay and repair.
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
+ require.NoError(t, err)
+ require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.walCorruptionsTotal))
+ require.Less(t, len(expAfterRestart), len(allSamples))
+ verifySamples(expAfterRestart)
+
+ // Verify that it did the repair on disk.
+ files, err = os.ReadDir(db.head.wbl.Dir())
+ require.NoError(t, err)
+ require.Len(t, files, 3)
+ f0, err = files[0].Info()
+ require.NoError(t, err)
+ require.Greater(t, f0.Size(), int64(100))
+ f2, err = files[2].Info()
+ require.NoError(t, err)
+ require.Equal(t, int64(0), f2.Size())
+ require.Equal(t, corruptFilePath, path.Join(db.head.wbl.Dir(), files[1].Name()))
+
+ // Verifying that everything after the corruption point is set to 0.
+ b, err := os.ReadFile(corruptFilePath)
+ require.NoError(t, err)
+ sum := 0
+ for _, val := range b[corruptIndex:] {
+ sum += int(val)
+ }
+ require.Equal(t, 0, sum)
+
+ // Another restart, everything normal with no repair.
+ require.NoError(t, db.Close())
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
+ require.NoError(t, err)
+ require.Equal(t, 0.0, prom_testutil.ToFloat64(db.head.metrics.walCorruptionsTotal))
+ verifySamples(expAfterRestart)
+}
+
+func TestOOOMmapCorruption_AppendV2(t *testing.T) {
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testOOOMmapCorruptionAppendV2(t, scenario)
+ })
+ }
+}
+
+func testOOOMmapCorruptionAppendV2(t *testing.T, scenario sampleTypeScenario) {
+ opts := DefaultOptions()
+ opts.OutOfOrderCapMax = 10
+ opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
+
+ db := newTestDB(t, withOpts(opts))
+
+ series1 := labels.FromStrings("foo", "bar1")
+ var allSamples, expInMmapChunks []chunks.Sample
+ addSamples := func(fromMins, toMins int64, inMmapAfterCorruption bool) {
+ app := db.AppenderV2(context.Background())
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ _, s, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), series1, ts, ts)
+ require.NoError(t, err)
+ allSamples = append(allSamples, s)
+ if inMmapAfterCorruption {
+ expInMmapChunks = append(expInMmapChunks, s)
+ }
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ // Add in-order samples.
+ addSamples(340, 350, true)
+
+ // OOO samples.
+ addSamples(90, 99, true)
+ addSamples(100, 109, true)
+ // Appending this sample m-maps the previous chunk, but 120 itself goes into a new chunk.
+ addSamples(120, 120, false)
+
+ // Second m-map file. We will corrupt this file. Sample 120 goes into this new file.
+ db.head.chunkDiskMapper.CutNewFile()
+
+ // More OOO samples.
+ addSamples(200, 230, false)
+ addSamples(240, 255, false)
+
+ db.head.chunkDiskMapper.CutNewFile()
+ addSamples(260, 290, false)
+
+ verifySamples := func(expSamples []chunks.Sample) {
+ sort.Slice(expSamples, func(i, j int) bool {
+ return expSamples[i].T() < expSamples[j].T()
+ })
+
+ expRes := map[string][]chunks.Sample{
+ series1.String(): expSamples,
+ }
+
+ q, err := db.Querier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+
+ actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ requireEqualSeries(t, expRes, actRes, true)
+ }
+
+ verifySamples(allSamples)
+
+ // Verifying existing files.
+ mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot)
+ files, err := os.ReadDir(mmapDir)
+ require.NoError(t, err)
+ require.Len(t, files, 3)
+
+ // Corrupting the 2nd file.
+ f, err := os.OpenFile(path.Join(mmapDir, files[1].Name()), os.O_RDWR, 0o666)
+ require.NoError(t, err)
+ _, err = f.WriteAt([]byte{99, 9, 99, 9, 99}, 20)
+ require.NoError(t, err)
+ require.NoError(t, f.Close())
+ firstFileName := files[0].Name()
+
+ require.NoError(t, db.Close())
+
+ // Move the OOO WBL aside so we can use it later.
+ wblDir := db.head.wbl.Dir()
+ wblDirTmp := path.Join(t.TempDir(), "wbl_tmp")
+ require.NoError(t, os.Rename(wblDir, wblDirTmp))
+
+ // Restart does the replay and repair of m-map files.
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
+ require.NoError(t, err)
+ require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.mmapChunkCorruptionTotal))
+ require.Less(t, len(expInMmapChunks), len(allSamples))
+
+ // Since there is no WBL, only samples from the m-map chunks come back in the query.
+ verifySamples(expInMmapChunks)
+
+ // Verify that it did the repair on disk. All files from the point of corruption
+ // should be deleted.
+ files, err = os.ReadDir(mmapDir)
+ require.NoError(t, err)
+ require.Len(t, files, 1)
+ f0, err := files[0].Info()
+ require.NoError(t, err)
+ require.Greater(t, f0.Size(), int64(100))
+ require.Equal(t, firstFileName, files[0].Name())
+
+ // Another restart, everything normal with no repair.
+ require.NoError(t, db.Close())
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
+ require.NoError(t, err)
+ require.Equal(t, 0.0, prom_testutil.ToFloat64(db.head.metrics.mmapChunkCorruptionTotal))
+ verifySamples(expInMmapChunks)
+
+ // Restart again with the WBL, all samples should be present now.
+ require.NoError(t, db.Close())
+ require.NoError(t, os.RemoveAll(wblDir))
+ require.NoError(t, os.Rename(wblDirTmp, wblDir))
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
+ require.NoError(t, err)
+ verifySamples(allSamples)
+}
+
+func TestOutOfOrderRuntimeConfig_AppendV2(t *testing.T) {
+ t.Parallel()
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testOutOfOrderRuntimeConfigAppendV2(t, scenario)
+ })
+ }
+}
+
+func testOutOfOrderRuntimeConfigAppendV2(t *testing.T, scenario sampleTypeScenario) {
+ ctx := context.Background()
+
+ getDB := func(oooTimeWindow int64) *DB {
+ opts := DefaultOptions()
+ opts.OutOfOrderTimeWindow = oooTimeWindow
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions()
+ return db
+ }
+
+ makeConfig := func(oooTimeWindow int) *config.Config {
+ return &config.Config{
+ StorageConfig: config.StorageConfig{
+ TSDBConfig: &config.TSDBConfig{
+ OutOfOrderTimeWindow: int64(oooTimeWindow) * time.Minute.Milliseconds(),
+ },
+ },
+ }
+ }
+
+ series1 := labels.FromStrings("foo", "bar1")
+ addSamples := func(t *testing.T, db *DB, fromMins, toMins int64, success bool, allSamples []chunks.Sample) []chunks.Sample {
+ app := db.AppenderV2(context.Background())
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ _, s, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), series1, ts, ts)
+ if success {
+ require.NoError(t, err)
+ allSamples = append(allSamples, s)
+ } else {
+ require.Error(t, err)
+ }
+ }
+ require.NoError(t, app.Commit())
+ return allSamples
+ }
+
+ verifySamples := func(t *testing.T, db *DB, expSamples []chunks.Sample) {
+ sort.Slice(expSamples, func(i, j int) bool {
+ return expSamples[i].T() < expSamples[j].T()
+ })
+
+ expRes := map[string][]chunks.Sample{
+ series1.String(): expSamples,
+ }
+
+ q, err := db.Querier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+
+ actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ requireEqualSeries(t, expRes, actRes, true)
+ }
+
+ doOOOCompaction := func(t *testing.T, db *DB) {
+ // WBL is not empty.
+ size, err := db.head.wbl.Size()
+ require.NoError(t, err)
+ require.Positive(t, size)
+
+ require.Empty(t, db.Blocks())
+ require.NoError(t, db.compactOOOHead(ctx))
+ require.NotEmpty(t, db.Blocks())
+
+ // WBL is empty.
+ size, err = db.head.wbl.Size()
+ require.NoError(t, err)
+ require.Equal(t, int64(0), size)
+ }
+
+ t.Run("increase time window", func(t *testing.T) {
+ var allSamples []chunks.Sample
+ db := getDB(30 * time.Minute.Milliseconds())
+
+ // In-order.
+ allSamples = addSamples(t, db, 300, 310, true, allSamples)
+
+ // OOO samples up to 30m old succeed.
+ allSamples = addSamples(t, db, 281, 290, true, allSamples)
+
+ // OOO samples 59m old fail.
+ s := addSamples(t, db, 251, 260, false, nil)
+ require.Empty(t, s)
+ verifySamples(t, db, allSamples)
+
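+ // Capture the WBL pointer so we can verify below that ApplyConfig keeps using the same WBL instance.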
+ oldWblPtr := fmt.Sprintf("%p", db.head.wbl)
+
+ // Increase time window and try adding again.
+ err := db.ApplyConfig(makeConfig(60))
+ require.NoError(t, err)
+ allSamples = addSamples(t, db, 251, 260, true, allSamples)
+
+ // WBL does not change.
+ newWblPtr := fmt.Sprintf("%p", db.head.wbl)
+ require.Equal(t, oldWblPtr, newWblPtr)
+
+ doOOOCompaction(t, db)
+ verifySamples(t, db, allSamples)
+ })
+
+ t.Run("decrease time window and increase again", func(t *testing.T) {
+ var allSamples []chunks.Sample
+ db := getDB(60 * time.Minute.Milliseconds())
+
+ // In-order.
+ allSamples = addSamples(t, db, 300, 310, true, allSamples)
+
+ // OOO samples up to 59m old succeed.
+ allSamples = addSamples(t, db, 251, 260, true, allSamples)
+
+ oldWblPtr := fmt.Sprintf("%p", db.head.wbl)
+ // Decrease time window.
+ err := db.ApplyConfig(makeConfig(30))
+ require.NoError(t, err)
+
+ // OOO samples 49m old fail.
+ s := addSamples(t, db, 261, 270, false, nil)
+ require.Empty(t, s)
+
+ // WBL does not change.
+ newWblPtr := fmt.Sprintf("%p", db.head.wbl)
+ require.Equal(t, oldWblPtr, newWblPtr)
+
+ verifySamples(t, db, allSamples)
+
+ // Increase the time window again and check.
+ err = db.ApplyConfig(makeConfig(60))
+ require.NoError(t, err)
+ allSamples = addSamples(t, db, 261, 270, true, allSamples)
+ verifySamples(t, db, allSamples)
+
+ // WBL does not change.
+ newWblPtr = fmt.Sprintf("%p", db.head.wbl)
+ require.Equal(t, oldWblPtr, newWblPtr)
+
+ doOOOCompaction(t, db)
+ verifySamples(t, db, allSamples)
+ })
+
+ t.Run("disabled to enabled", func(t *testing.T) {
+ var allSamples []chunks.Sample
+ db := getDB(0)
+
+ // In-order.
+ allSamples = addSamples(t, db, 300, 310, true, allSamples)
+
+ // OOO fails.
+ s := addSamples(t, db, 251, 260, false, nil)
+ require.Empty(t, s)
+ verifySamples(t, db, allSamples)
+
+ require.Nil(t, db.head.wbl)
+
+ // Increase time window and try adding again.
+ err := db.ApplyConfig(makeConfig(60))
+ require.NoError(t, err)
+ allSamples = addSamples(t, db, 251, 260, true, allSamples)
+
+ // WBL gets created.
+ require.NotNil(t, db.head.wbl)
+
+ verifySamples(t, db, allSamples)
+
+ // OOO compaction works now.
+ doOOOCompaction(t, db)
+ verifySamples(t, db, allSamples)
+ })
+
+ t.Run("enabled to disabled", func(t *testing.T) {
+ var allSamples []chunks.Sample
+ db := getDB(60 * time.Minute.Milliseconds())
+
+ // In-order.
+ allSamples = addSamples(t, db, 300, 310, true, allSamples)
+
+ // OOO samples up to 59m old succeed.
+ allSamples = addSamples(t, db, 251, 260, true, allSamples)
+
+ oldWblPtr := fmt.Sprintf("%p", db.head.wbl)
+ // Set the time window to 0, hence disabling OOO.
+ err := db.ApplyConfig(makeConfig(0))
+ require.NoError(t, err)
+
+ // OOO within old time window fails.
+ s := addSamples(t, db, 290, 309, false, nil)
+ require.Empty(t, s)
+
+ // WBL does not change and is not removed.
+ newWblPtr := fmt.Sprintf("%p", db.head.wbl)
+ require.Equal(t, oldWblPtr, newWblPtr)
+
+ verifySamples(t, db, allSamples)
+
+ // Compaction still works after disabling with WBL cleanup.
+ doOOOCompaction(t, db)
+ verifySamples(t, db, allSamples)
+ })
+
+ t.Run("disabled to disabled", func(t *testing.T) {
+ var allSamples []chunks.Sample
+ db := getDB(0)
+
+ // In-order.
+ allSamples = addSamples(t, db, 300, 310, true, allSamples)
+
+ // OOO fails.
+ s := addSamples(t, db, 290, 309, false, nil)
+ require.Empty(t, s)
+ verifySamples(t, db, allSamples)
+ require.Nil(t, db.head.wbl)
+
+ // Time window to 0.
+ err := db.ApplyConfig(makeConfig(0))
+ require.NoError(t, err)
+
+ // OOO still fails.
+ s = addSamples(t, db, 290, 309, false, nil)
+ require.Empty(t, s)
+ verifySamples(t, db, allSamples)
+ require.Nil(t, db.head.wbl)
+ })
+}
+
+func TestNoGapAfterRestartWithOOO_AppendV2(t *testing.T) {
+ t.Parallel()
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testNoGapAfterRestartWithOOOAppendV2(t, scenario)
+ })
+ }
+}
+
+func testNoGapAfterRestartWithOOOAppendV2(t *testing.T, scenario sampleTypeScenario) {
+ series1 := labels.FromStrings("foo", "bar1")
+ addSamples := func(t *testing.T, db *DB, fromMins, toMins int64, success bool) {
+ app := db.AppenderV2(context.Background())
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ _, _, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), series1, ts, ts)
+ if success {
+ require.NoError(t, err)
+ } else {
+ require.Error(t, err)
+ }
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ verifySamples := func(t *testing.T, db *DB, fromMins, toMins int64) {
+ var expSamples []chunks.Sample
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ expSamples = append(expSamples, scenario.sampleFunc(ts, ts))
+ }
+
+ expRes := map[string][]chunks.Sample{
+ series1.String(): expSamples,
+ }
+
+ q, err := db.Querier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+
+ actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ requireEqualSeries(t, expRes, actRes, true)
+ }
+
+ cases := []struct {
+ inOrderMint, inOrderMaxt int64
+ oooMint, oooMaxt int64
+ // After compaction.
+ blockRanges [][2]int64
+ headMint, headMaxt int64
+ }{
+ {
+ 300, 490,
+ 489, 489,
+ [][2]int64{{300, 360}, {480, 600}},
+ 360, 490,
+ },
+ {
+ 300, 490,
+ 479, 479,
+ [][2]int64{{300, 360}, {360, 480}},
+ 360, 490,
+ },
+ }
+
+ for i, c := range cases {
+ t.Run(fmt.Sprintf("case=%d", i), func(t *testing.T) {
+ ctx := context.Background()
+
+ opts := DefaultOptions()
+ opts.OutOfOrderTimeWindow = 30 * time.Minute.Milliseconds()
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions()
+
+ // 3h10m = 190m worth of in-order data.
+ addSamples(t, db, c.inOrderMint, c.inOrderMaxt, true)
+ verifySamples(t, db, c.inOrderMint, c.inOrderMaxt)
+
+ // One OOO sample.
+ addSamples(t, db, c.oooMint, c.oooMaxt, true)
+ verifySamples(t, db, c.inOrderMint, c.inOrderMaxt)
+
+ // We get 2 blocks. 1 from OOO, 1 from in-order.
+ require.NoError(t, db.Compact(ctx))
+ verifyBlockRanges := func() {
+ blocks := db.Blocks()
+ require.Len(t, blocks, len(c.blockRanges))
+ for j, br := range c.blockRanges {
+ require.Equal(t, br[0]*time.Minute.Milliseconds(), blocks[j].MinTime())
+ require.Equal(t, br[1]*time.Minute.Milliseconds(), blocks[j].MaxTime())
+ }
+ }
+ verifyBlockRanges()
+ require.Equal(t, c.headMint*time.Minute.Milliseconds(), db.head.MinTime())
+ require.Equal(t, c.headMaxt*time.Minute.Milliseconds(), db.head.MaxTime())
+
+ // Restart and expect all samples to be present.
+ require.NoError(t, db.Close())
+
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
+ db.DisableCompactions()
+
+ verifyBlockRanges()
+ require.Equal(t, c.headMint*time.Minute.Milliseconds(), db.head.MinTime())
+ require.Equal(t, c.headMaxt*time.Minute.Milliseconds(), db.head.MaxTime())
+ verifySamples(t, db, c.inOrderMint, c.inOrderMaxt)
+ })
+ }
+}
+
+func TestWblReplayAfterOOODisableAndRestart_AppendV2(t *testing.T) {
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testWblReplayAfterOOODisableAndRestartAppendV2(t, scenario)
+ })
+ }
+}
+
+func testWblReplayAfterOOODisableAndRestartAppendV2(t *testing.T, scenario sampleTypeScenario) {
+ opts := DefaultOptions()
+ opts.OutOfOrderTimeWindow = 60 * time.Minute.Milliseconds()
+
+ db := newTestDB(t, withOpts(opts))
+
+ series1 := labels.FromStrings("foo", "bar1")
+ var allSamples []chunks.Sample
+ addSamples := func(fromMins, toMins int64) {
+ app := db.AppenderV2(context.Background())
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ _, s, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), series1, ts, ts)
+ require.NoError(t, err)
+ allSamples = append(allSamples, s)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ // In-order samples.
+ addSamples(290, 300)
+ // OOO samples.
+ addSamples(250, 260)
+
+ verifySamples := func(expSamples []chunks.Sample) {
+ sort.Slice(expSamples, func(i, j int) bool {
+ return expSamples[i].T() < expSamples[j].T()
+ })
+
+ expRes := map[string][]chunks.Sample{
+ series1.String(): expSamples,
+ }
+
+ q, err := db.Querier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+
+ actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ requireEqualSeries(t, expRes, actRes, true)
+ }
+
+ verifySamples(allSamples)
+
+ // Restart DB with OOO disabled.
+ require.NoError(t, db.Close())
+
+ opts.OutOfOrderTimeWindow = 0
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
+
+ // Even though OOO is now disabled, the OOO samples already in the WBL are replayed and remain queryable.
+ verifySamples(allSamples)
+}
+
+func TestPanicOnApplyConfig_AppendV2(t *testing.T) {
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testPanicOnApplyConfigAppendV2(t, scenario)
+ })
+ }
+}
+
+func testPanicOnApplyConfigAppendV2(t *testing.T, scenario sampleTypeScenario) {
+ opts := DefaultOptions()
+ opts.OutOfOrderTimeWindow = 60 * time.Minute.Milliseconds()
+
+ db := newTestDB(t, withOpts(opts))
+
+ series1 := labels.FromStrings("foo", "bar1")
+ var allSamples []chunks.Sample
+ addSamples := func(fromMins, toMins int64) {
+ app := db.AppenderV2(context.Background())
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ _, s, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), series1, ts, ts)
+ require.NoError(t, err)
+ allSamples = append(allSamples, s)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ // In-order samples.
+ addSamples(290, 300)
+ // OOO samples.
+ addSamples(250, 260)
+
+ // Restart DB with OOO disabled.
+ require.NoError(t, db.Close())
+
+ opts.OutOfOrderTimeWindow = 0
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
+
+ // ApplyConfig with OOO enabled and expect no panic.
+ err := db.ApplyConfig(&config.Config{
+ StorageConfig: config.StorageConfig{
+ TSDBConfig: &config.TSDBConfig{
+ OutOfOrderTimeWindow: 60 * time.Minute.Milliseconds(),
+ },
+ },
+ })
+ require.NoError(t, err)
+}
+
+func TestDiskFillingUpAfterDisablingOOO_AppendV2(t *testing.T) {
+ t.Parallel()
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testDiskFillingUpAfterDisablingOOOAppenderV2(t, scenario)
+ })
+ }
+}
+
+func testDiskFillingUpAfterDisablingOOOAppenderV2(t *testing.T, scenario sampleTypeScenario) {
+ t.Parallel()
+ ctx := context.Background()
+
+ opts := DefaultOptions()
+ opts.OutOfOrderTimeWindow = 60 * time.Minute.Milliseconds()
+
+ db := newTestDB(t, withOpts(opts))
+ db.DisableCompactions()
+
+ series1 := labels.FromStrings("foo", "bar1")
+ var allSamples []chunks.Sample
+ addSamples := func(fromMins, toMins int64) {
+ app := db.AppenderV2(context.Background())
+ for m := fromMins; m <= toMins; m++ {
+ ts := m * time.Minute.Milliseconds()
+ _, s, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), series1, ts, ts)
+ require.NoError(t, err)
+ allSamples = append(allSamples, s)
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ // In-order samples.
+ addSamples(290, 300)
+ // OOO samples.
+ addSamples(250, 299)
+
+ // Restart DB with OOO disabled.
+ require.NoError(t, db.Close())
+
+ opts.OutOfOrderTimeWindow = 0
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
+ db.DisableCompactions()
+
+ ms := db.head.series.getByHash(series1.Hash(), series1)
+ require.NotEmpty(t, ms.ooo.oooMmappedChunks, "OOO mmap chunk was not replayed")
+
+ checkMmapFileContents := func(contains, notContains []string) {
+ mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot)
+ files, err := os.ReadDir(mmapDir)
+ require.NoError(t, err)
+
+ fnames := make([]string, 0, len(files))
+ for _, f := range files {
+ fnames = append(fnames, f.Name())
+ }
+
+ for _, f := range contains {
+ require.Contains(t, fnames, f)
+ }
+ for _, f := range notContains {
+ require.NotContains(t, fnames, f)
+ }
+ }
+
+ // Add in-order samples until the head is ready for compaction.
+ addSamples(301, 500)
+
+ // Check that m-map files get deleted properly after compactions.
+
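+ // Write out (m-map) the completed head chunks so the file checks below see them on disk.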
+ db.head.mmapHeadChunks()
+ checkMmapFileContents([]string{"000001", "000002"}, nil)
+ require.NoError(t, db.Compact(ctx))
+ checkMmapFileContents([]string{"000002"}, []string{"000001"})
+ require.Nil(t, ms.ooo, "OOO mmap chunk was not compacted")
+
+ addSamples(501, 650)
+ db.head.mmapHeadChunks()
+ checkMmapFileContents([]string{"000002", "000003"}, []string{"000001"})
+ require.NoError(t, db.Compact(ctx))
+ checkMmapFileContents(nil, []string{"000001", "000002", "000003"})
+
+ // Verify that WBL is empty.
+ files, err := os.ReadDir(db.head.wbl.Dir())
+ require.NoError(t, err)
+ require.Len(t, files, 1) // Last empty file after compaction.
+ finfo, err := files[0].Info()
+ require.NoError(t, err)
+ require.Equal(t, int64(0), finfo.Size())
+}
+
+func TestHistogramAppendAndQuery_AppendV2(t *testing.T) {
+ t.Run("integer histograms", func(t *testing.T) {
+ testHistogramAppendAndQueryHelperAppendV2(t, false)
+ })
+ t.Run("float histograms", func(t *testing.T) {
+ testHistogramAppendAndQueryHelperAppendV2(t, true)
+ })
+}
+
+func testHistogramAppendAndQueryHelperAppendV2(t *testing.T, floatHistogram bool) {
+ t.Helper()
+ db := newTestDB(t)
+ minute := func(m int) int64 { return int64(m) * time.Minute.Milliseconds() }
+ t.Cleanup(func() {
+ require.NoError(t, db.Close())
+ })
+
+ ctx := context.Background()
+ appendHistogram := func(t *testing.T,
+ lbls labels.Labels, tsMinute int, h *histogram.Histogram,
+ exp *[]chunks.Sample, expCRH histogram.CounterResetHint,
+ ) {
+ t.Helper()
+ var err error
+ app := db.AppenderV2(ctx)
+ if floatHistogram {
+ _, err = app.Append(0, lbls, 0, minute(tsMinute), 0, nil, h.ToFloat(nil), storage.AOptions{})
+ efh := h.ToFloat(nil)
+ efh.CounterResetHint = expCRH
+ *exp = append(*exp, sample{t: minute(tsMinute), fh: efh})
+ } else {
+ _, err = app.Append(0, lbls, 0, minute(tsMinute), 0, h.Copy(), nil, storage.AOptions{})
+ eh := h.Copy()
+ eh.CounterResetHint = expCRH
+ *exp = append(*exp, sample{t: minute(tsMinute), h: eh})
+ }
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ }
+ appendFloat := func(t *testing.T, lbls labels.Labels, tsMinute int, val float64, exp *[]chunks.Sample) {
+ t.Helper()
+ app := db.AppenderV2(ctx)
+ _, err := app.Append(0, lbls, 0, minute(tsMinute), val, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ *exp = append(*exp, sample{t: minute(tsMinute), f: val})
+ }
+
+ testQuery := func(t *testing.T, name, value string, exp map[string][]chunks.Sample) {
+ t.Helper()
+ q, err := db.Querier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+ act := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, name, value))
+ require.Equal(t, exp, act)
+ }
+
+ baseH := &histogram.Histogram{
+ Count: 15,
+ ZeroCount: 4,
+ ZeroThreshold: 0.001,
+ Sum: 35.5,
+ Schema: 1,
+ PositiveSpans: []histogram.Span{
+ {Offset: 0, Length: 2},
+ {Offset: 2, Length: 2},
+ },
+ PositiveBuckets: []int64{1, 1, -1, 0},
+ NegativeSpans: []histogram.Span{
+ {Offset: 0, Length: 1},
+ {Offset: 1, Length: 2},
+ },
+ NegativeBuckets: []int64{1, 2, -1},
+ }
+
+ var (
+ series1 = labels.FromStrings("foo", "bar1")
+ series2 = labels.FromStrings("foo", "bar2")
+ series3 = labels.FromStrings("foo", "bar3")
+ series4 = labels.FromStrings("foo", "bar4")
+ exp1, exp2, exp3, exp4 []chunks.Sample
+ )
+
+ // TODO(codesome): test everything for negative buckets as well.
+ t.Run("series with only histograms", func(t *testing.T) {
+ h := baseH.Copy() // This is shared across all sub tests.
+
+ appendHistogram(t, series1, 100, h, &exp1, histogram.UnknownCounterReset)
+ testQuery(t, "foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1})
+
+ h.PositiveBuckets[0]++
+ h.NegativeBuckets[0] += 2
+ h.Count += 10
+ appendHistogram(t, series1, 101, h, &exp1, histogram.NotCounterReset)
+ testQuery(t, "foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1})
+
+ t.Run("changing schema", func(t *testing.T) {
+ h.Schema = 2
+ appendHistogram(t, series1, 102, h, &exp1, histogram.UnknownCounterReset)
+ testQuery(t, "foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1})
+
+ // Schema back to old.
+ h.Schema = 1
+ appendHistogram(t, series1, 103, h, &exp1, histogram.UnknownCounterReset)
+ testQuery(t, "foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1})
+ })
+
+ t.Run("new buckets incoming", func(t *testing.T) {
+ // In the previous subtest, the last histogram append changed the schema,
+ // which created a new chunk. The next append below will change the layout
+ // of that last histogram because the chunk gets re-encoded. So we have to
+ // modify the last histogram in exp1 so that, when we query, we get the
+ // expected results.
+ if floatHistogram {
+ lh := exp1[len(exp1)-1].FH().Copy()
+ lh.PositiveSpans[1].Length++
+ lh.PositiveBuckets = append(lh.PositiveBuckets, 0)
+ exp1[len(exp1)-1] = sample{t: exp1[len(exp1)-1].T(), fh: lh}
+ } else {
+ lh := exp1[len(exp1)-1].H().Copy()
+ lh.PositiveSpans[1].Length++
+ lh.PositiveBuckets = append(lh.PositiveBuckets, -2) // -2 makes the last bucket 0.
+ exp1[len(exp1)-1] = sample{t: exp1[len(exp1)-1].T(), h: lh}
+ }
+
+ // This histogram, with a new bucket at the end, causes the previous histogram
+ // to be re-encoded into the new layout. But the query returns the histogram
+ // from the in-memory buffer, so we don't see the recode here yet.
+ h.PositiveSpans[1].Length++
+ h.PositiveBuckets = append(h.PositiveBuckets, 1)
+ h.Count += 3
+ appendHistogram(t, series1, 104, h, &exp1, histogram.NotCounterReset)
+ testQuery(t, "foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1})
+
+ // Because the previous two histograms are in the active chunk and the next
+ // append only adds a new bucket, the active chunk will be re-encoded to the
+ // new layout.
+ if floatHistogram {
+ lh := exp1[len(exp1)-2].FH().Copy()
+ lh.PositiveSpans[0].Length++
+ lh.PositiveSpans[1].Offset--
+ lh.PositiveBuckets = []float64{2, 3, 0, 2, 2, 0}
+ exp1[len(exp1)-2] = sample{t: exp1[len(exp1)-2].T(), fh: lh}
+
+ lh = exp1[len(exp1)-1].FH().Copy()
+ lh.PositiveSpans[0].Length++
+ lh.PositiveSpans[1].Offset--
+ lh.PositiveBuckets = []float64{2, 3, 0, 2, 2, 3}
+ exp1[len(exp1)-1] = sample{t: exp1[len(exp1)-1].T(), fh: lh}
+ } else {
+ lh := exp1[len(exp1)-2].H().Copy()
+ lh.PositiveSpans[0].Length++
+ lh.PositiveSpans[1].Offset--
+ lh.PositiveBuckets = []int64{2, 1, -3, 2, 0, -2}
+ exp1[len(exp1)-2] = sample{t: exp1[len(exp1)-2].T(), h: lh}
+
+ lh = exp1[len(exp1)-1].H().Copy()
+ lh.PositiveSpans[0].Length++
+ lh.PositiveSpans[1].Offset--
+ lh.PositiveBuckets = []int64{2, 1, -3, 2, 0, 1}
+ exp1[len(exp1)-1] = sample{t: exp1[len(exp1)-1].T(), h: lh}
+ }
+
+ // Now we add the new buckets in between. The empty bucket is again not present for the old histogram.
+ h.PositiveSpans[0].Length++
+ h.PositiveSpans[1].Offset--
+ h.Count += 3
+ // {2, 1, -1, 0, 1} -> {2, 1, 0, -1, 0, 1}
+ h.PositiveBuckets = append(h.PositiveBuckets[:2], append([]int64{0}, h.PositiveBuckets[2:]...)...)
+ appendHistogram(t, series1, 105, h, &exp1, histogram.NotCounterReset)
+ testQuery(t, "foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1})
+
+ // We add 4 more histograms to clear out the buffer and see the re-encoded histograms.
+ appendHistogram(t, series1, 106, h, &exp1, histogram.NotCounterReset)
+ appendHistogram(t, series1, 107, h, &exp1, histogram.NotCounterReset)
+ appendHistogram(t, series1, 108, h, &exp1, histogram.NotCounterReset)
+ appendHistogram(t, series1, 109, h, &exp1, histogram.NotCounterReset)
+
+ // Update the expected histograms to reflect the re-encoding.
+ if floatHistogram {
+ l := len(exp1)
+ h7 := exp1[l-7].FH()
+ h7.PositiveSpans = exp1[l-1].FH().PositiveSpans
+ h7.PositiveBuckets = []float64{2, 3, 0, 2, 2, 0}
+ exp1[l-7] = sample{t: exp1[l-7].T(), fh: h7}
+
+ h6 := exp1[l-6].FH()
+ h6.PositiveSpans = exp1[l-1].FH().PositiveSpans
+ h6.PositiveBuckets = []float64{2, 3, 0, 2, 2, 3}
+ exp1[l-6] = sample{t: exp1[l-6].T(), fh: h6}
+ } else {
+ l := len(exp1)
+ h7 := exp1[l-7].H()
+ h7.PositiveSpans = exp1[l-1].H().PositiveSpans
+ h7.PositiveBuckets = []int64{2, 1, -3, 2, 0, -2} // -3 and -2 are the empty buckets.
+ exp1[l-7] = sample{t: exp1[l-7].T(), h: h7}
+
+ h6 := exp1[l-6].H()
+ h6.PositiveSpans = exp1[l-1].H().PositiveSpans
+ h6.PositiveBuckets = []int64{2, 1, -3, 2, 0, 1} // -3 is the empty bucket.
+ exp1[l-6] = sample{t: exp1[l-6].T(), h: h6}
+ }
+
+ testQuery(t, "foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1})
+ })
+
+ t.Run("buckets disappearing", func(t *testing.T) {
+ h.PositiveSpans[1].Length--
+ h.PositiveBuckets = h.PositiveBuckets[:len(h.PositiveBuckets)-1]
+ h.Count -= 3
+ appendHistogram(t, series1, 110, h, &exp1, histogram.UnknownCounterReset)
+ testQuery(t, "foo", "bar1", map[string][]chunks.Sample{series1.String(): exp1})
+ })
+ })
+
+ t.Run("series starting with float and then getting histograms", func(t *testing.T) {
+ appendFloat(t, series2, 100, 100, &exp2)
+ appendFloat(t, series2, 101, 101, &exp2)
+ appendFloat(t, series2, 102, 102, &exp2)
+ testQuery(t, "foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2})
+
+ h := baseH.Copy()
+ appendHistogram(t, series2, 103, h, &exp2, histogram.UnknownCounterReset)
+ appendHistogram(t, series2, 104, h, &exp2, histogram.NotCounterReset)
+ appendHistogram(t, series2, 105, h, &exp2, histogram.NotCounterReset)
+ testQuery(t, "foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2})
+
+ // Switching between float and histograms again.
+ appendFloat(t, series2, 106, 106, &exp2)
+ appendFloat(t, series2, 107, 107, &exp2)
+ testQuery(t, "foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2})
+
+ appendHistogram(t, series2, 108, h, &exp2, histogram.UnknownCounterReset)
+ appendHistogram(t, series2, 109, h, &exp2, histogram.NotCounterReset)
+ testQuery(t, "foo", "bar2", map[string][]chunks.Sample{series2.String(): exp2})
+ })
+
+ t.Run("series starting with histogram and then getting float", func(t *testing.T) {
+ h := baseH.Copy()
+ appendHistogram(t, series3, 101, h, &exp3, histogram.UnknownCounterReset)
+ appendHistogram(t, series3, 102, h, &exp3, histogram.NotCounterReset)
+ appendHistogram(t, series3, 103, h, &exp3, histogram.NotCounterReset)
+ testQuery(t, "foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3})
+
+ appendFloat(t, series3, 104, 100, &exp3)
+ appendFloat(t, series3, 105, 101, &exp3)
+ appendFloat(t, series3, 106, 102, &exp3)
+ testQuery(t, "foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3})
+
+ // Switching between histogram and float again.
+ appendHistogram(t, series3, 107, h, &exp3, histogram.UnknownCounterReset)
+ appendHistogram(t, series3, 108, h, &exp3, histogram.NotCounterReset)
+ testQuery(t, "foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3})
+
+ appendFloat(t, series3, 109, 106, &exp3)
+ appendFloat(t, series3, 110, 107, &exp3)
+ testQuery(t, "foo", "bar3", map[string][]chunks.Sample{series3.String(): exp3})
+ })
+
+ t.Run("query mix of histogram and float series", func(t *testing.T) {
+ // A float only series.
+ appendFloat(t, series4, 100, 100, &exp4)
+ appendFloat(t, series4, 101, 101, &exp4)
+ appendFloat(t, series4, 102, 102, &exp4)
+
+ testQuery(t, "foo", "bar.*", map[string][]chunks.Sample{
+ series1.String(): exp1,
+ series2.String(): exp2,
+ series3.String(): exp3,
+ series4.String(): exp4,
+ })
+ })
+}
+
+func TestOOONativeHistogramsSettings_AppendV2(t *testing.T) {
+ h := &histogram.Histogram{
+ Count: 9,
+ ZeroCount: 4,
+ ZeroThreshold: 0.001,
+ Sum: 35.5,
+ Schema: 1,
+ PositiveSpans: []histogram.Span{
+ {Offset: 0, Length: 2},
+ {Offset: 2, Length: 2},
+ },
+ PositiveBuckets: []int64{1, 1, -1, 0},
+ }
+
+ l := labels.FromStrings("foo", "bar")
+
+ t.Run("Test OOO native histograms if OOO is disabled and Native Histograms is enabled", func(t *testing.T) {
+ opts := DefaultOptions()
+ opts.OutOfOrderTimeWindow = 0
+ db := newTestDB(t, withOpts(opts), withRngs(100))
+
+ app := db.AppenderV2(context.Background())
+ _, err := app.Append(0, l, 0, 100, 0, h, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ _, err = app.Append(0, l, 0, 50, 0, h, nil, storage.AOptions{})
+ require.NoError(t, err) // The OOO sample is not detected until it is committed, so no error is returned.
+
+ require.NoError(t, app.Commit())
+
+ q, err := db.Querier(math.MinInt, math.MaxInt64)
+ require.NoError(t, err)
+ act := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+ require.Equal(t, map[string][]chunks.Sample{
+ l.String(): {sample{t: 100, h: h}},
+ }, act)
+ })
+ t.Run("Test OOO native histograms when both OOO and Native Histograms are enabled", func(t *testing.T) {
+ opts := DefaultOptions()
+ opts.OutOfOrderTimeWindow = 100
+ db := newTestDB(t, withOpts(opts), withRngs(100))
+
+ // Add in-order samples
+ app := db.AppenderV2(context.Background())
+ _, err := app.Append(0, l, 0, 200, 0, h, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ // Add OOO samples
+ _, err = app.Append(0, l, 0, 100, 0, h, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, l, 0, 150, 0, h, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ require.NoError(t, app.Commit())
+
+ q, err := db.Querier(math.MinInt, math.MaxInt64)
+ require.NoError(t, err)
+ act := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+ requireEqualSeries(t, map[string][]chunks.Sample{
+ l.String(): {sample{t: 100, h: h}, sample{t: 150, h: h}, sample{t: 200, h: h}},
+ }, act, true)
+ })
+}
+
+// TestChunkQuerierReadWriteRace_AppendV2 looks for any possible race between appending
+// samples and reading chunks because the head chunk that is being appended to
+// can be read in parallel and we should be able to make a copy of the chunk without
+// worrying about the parallel write.
+func TestChunkQuerierReadWriteRace_AppendV2(t *testing.T) {
+ t.Parallel()
+ db := newTestDB(t)
+
+ lbls := labels.FromStrings("foo", "bar")
+
+ writer := func() error {
+ <-time.After(5 * time.Millisecond) // Initial pause while readers start.
+ ts := 0
+ for range 500 {
+ app := db.AppenderV2(context.Background())
+ for range 10 {
+ ts++
+ _, err := app.Append(0, lbls, 0, int64(ts), float64(ts*100), nil, nil, storage.AOptions{})
+ if err != nil {
+ return err
+ }
+ }
+ err := app.Commit()
+ if err != nil {
+ return err
+ }
+ <-time.After(time.Millisecond)
+ }
+ return nil
+ }
+
+ reader := func() {
+ querier, err := db.ChunkQuerier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+ defer func(q storage.ChunkQuerier) {
+ require.NoError(t, q.Close())
+ }(querier)
+ ss := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+ for ss.Next() {
+ cs := ss.At()
+ it := cs.Iterator(nil)
+ for it.Next() {
+ m := it.At()
+ b := m.Chunk.Bytes()
+ bb := make([]byte, len(b))
+ copy(bb, b) // This copying of chunk bytes detects any race.
+ }
+ }
+ require.NoError(t, ss.Err())
+ }
+
+ ch := make(chan struct{})
+ var writerErr error
+ go func() {
+ defer close(ch)
+ writerErr = writer()
+ }()
+
+Outer:
+ for {
+ reader()
+ select {
+ case <-ch:
+ break Outer
+ default:
+ }
+ }
+
+ require.NoError(t, writerErr)
+}
+
+// Regression test for https://github.com/prometheus/prometheus/pull/13754
+func TestAbortBlockCompactions_AppendV2(t *testing.T) {
+ // Create a test DB
+ db := newTestDB(t)
+ // It should NOT be compactable at the beginning of the test
+ require.False(t, db.head.compactable(), "head should NOT be compactable")
+
+ // Track the number of compactions run inside db.compactBlocks()
+ var compactions int
+
+ // Use a mock compactor with custom Plan() implementation
+ db.compactor = &mockCompactorFn{
+ planFn: func() ([]string, error) {
+ // On every Plan() run, increment compactions. After 4 compactions,
+ // update the head to make it compactable and force an exit from the db.compactBlocks() loop.
+ compactions++
+ if compactions > 3 {
+ chunkRange := db.head.chunkRange.Load()
+ db.head.minTime.Store(0)
+ db.head.maxTime.Store(chunkRange * 2)
+ require.True(t, db.head.compactable(), "head should be compactable")
+ }
+ // Our custom Plan() will always return something to compact.
+ return []string{"1", "2", "3"}, nil
+ },
+ compactFn: func() ([]ulid.ULID, error) {
+ return []ulid.ULID{}, nil
+ },
+ writeFn: func() ([]ulid.ULID, error) {
+ return []ulid.ULID{}, nil
+ },
+ }
+
+ err := db.Compact(context.Background())
+ require.NoError(t, err)
+ require.True(t, db.head.compactable(), "head should be compactable")
+ require.Equal(t, 4, compactions, "expected 4 compactions to be completed")
+}
+
+func TestNewCompactorFunc_AppendV2(t *testing.T) {
+ opts := DefaultOptions()
+ block1 := ulid.MustNew(1, nil)
+ block2 := ulid.MustNew(2, nil)
+ opts.NewCompactorFunc = func(context.Context, prometheus.Registerer, *slog.Logger, []int64, chunkenc.Pool, *Options) (Compactor, error) {
+ return &mockCompactorFn{
+ planFn: func() ([]string, error) {
+ return []string{block1.String(), block2.String()}, nil
+ },
+ compactFn: func() ([]ulid.ULID, error) {
+ return []ulid.ULID{block1}, nil
+ },
+ writeFn: func() ([]ulid.ULID, error) {
+ return []ulid.ULID{block2}, nil
+ },
+ }, nil
+ }
+ db := newTestDB(t, withOpts(opts))
+
+ plans, err := db.compactor.Plan("")
+ require.NoError(t, err)
+ require.Equal(t, []string{block1.String(), block2.String()}, plans)
+ ulids, err := db.compactor.Compact("", nil, nil)
+ require.NoError(t, err)
+ require.Len(t, ulids, 1)
+ require.Equal(t, block1, ulids[0])
+ ulids, err = db.compactor.Write("", nil, 0, 1, nil)
+ require.NoError(t, err)
+ require.Len(t, ulids, 1)
+ require.Equal(t, block2, ulids[0])
+}
diff --git a/tsdb/db_test.go b/tsdb/db_test.go
index 440d1e328d..a55264c24e 100644
--- a/tsdb/db_test.go
+++ b/tsdb/db_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -81,26 +81,69 @@ func TestMain(m *testing.M) {
goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"))
}
-func openTestDB(t testing.TB, opts *Options, rngs []int64) (db *DB) {
- tmpdir := t.TempDir()
- var err error
+type testDBOptions struct {
+ dir string
+ opts *Options
+ rngs []int64
+}
+type testDBOpt func(o *testDBOptions)
- if opts == nil {
- opts = DefaultOptions()
+func withDir(dir string) testDBOpt {
+ return func(o *testDBOptions) {
+ o.dir = dir
+ }
+}
+
+func withOpts(opts *Options) testDBOpt {
+ return func(o *testDBOptions) {
+ o.opts = opts
+ }
+}
+
+func withRngs(rngs ...int64) testDBOpt {
+ return func(o *testDBOptions) {
+ o.rngs = rngs
+ }
+}
+
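+// newTestDB opens a DB for testing in a fresh temp dir (or the dir passed via withDir)
+// and registers a t.Cleanup that closes it.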
+func newTestDB(t testing.TB, opts ...testDBOpt) (db *DB) {
+ var o testDBOptions
+ for _, opt := range opts {
+ opt(&o)
+ }
+ if o.opts == nil {
+ o.opts = DefaultOptions()
+ }
+ if o.dir == "" {
+ o.dir = t.TempDir()
}
- if len(rngs) == 0 {
- db, err = Open(tmpdir, nil, nil, opts, nil)
+ var err error
+ if len(o.rngs) == 0 {
+ db, err = Open(o.dir, nil, nil, o.opts, nil)
} else {
- opts, rngs = validateOpts(opts, rngs)
- db, err = open(tmpdir, nil, nil, opts, rngs, nil)
+ o.opts, o.rngs = validateOpts(o.opts, o.rngs)
+ db, err = open(o.dir, nil, nil, o.opts, o.rngs, nil)
}
require.NoError(t, err)
-
- // Do not Close() the test database by default as it will deadlock on test failures.
+ t.Cleanup(func() {
+ // Always close. DB is safe for close-after-close.
+ require.NoError(t, db.Close())
+ })
return db
}
+func TestDBClose_AfterClose(t *testing.T) {
+ db := newTestDB(t)
+ require.NoError(t, db.Close())
+ require.NoError(t, db.Close())
+
+ // Double-check that we are closing the correct DB after reuse.
+ db = newTestDB(t)
+ require.NoError(t, db.Close())
+ require.NoError(t, db.Close())
+}
+
// queryHelper runs a matcher query against the querier and fully expands its data.
func queryHelper(t testing.TB, q storage.Querier, withNaNReplacement bool, matchers ...*labels.Matcher) map[string][]chunks.Sample {
ss := q.Select(context.Background(), false, nil, matchers...)
@@ -199,10 +242,7 @@ func queryChunks(t testing.TB, q storage.ChunkQuerier, matchers ...*labels.Match
// Ensure that blocks are held in memory in their time order
// and not in ULID order as they are read from the directory.
func TestDB_reloadOrder(t *testing.T) {
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
metas := []BlockMeta{
{MinTime: 90, MaxTime: 100},
@@ -225,10 +265,7 @@ func TestDB_reloadOrder(t *testing.T) {
}
func TestDataAvailableOnlyAfterCommit(t *testing.T) {
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
@@ -256,7 +293,7 @@ func TestDataAvailableOnlyAfterCommit(t *testing.T) {
// TestNoPanicAfterWALCorruption ensures that querying the db after a WAL corruption doesn't cause a panic.
// https://github.com/prometheus/prometheus/issues/7548
func TestNoPanicAfterWALCorruption(t *testing.T) {
- db := openTestDB(t, &Options{WALSegmentSize: 32 * 1024}, nil)
+ db := newTestDB(t, withOpts(&Options{WALSegmentSize: 32 * 1024}))
// Append until the first mmapped head chunk.
// This is to ensure that all samples can be read from the mmapped chunks when the WAL is corrupted.
@@ -295,11 +332,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) {
// Query the data.
{
- db, err := Open(db.Dir(), nil, nil, nil, nil)
- require.NoError(t, err)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t, withDir(db.Dir()))
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.walCorruptionsTotal), "WAL corruption count mismatch")
querier, err := db.Querier(0, maxt)
@@ -311,10 +344,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) {
}
func TestDataNotAvailableAfterRollback(t *testing.T) {
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
app := db.Appender(context.Background())
_, err := app.Append(0, labels.FromStrings("type", "float"), 0, 0)
@@ -401,10 +431,7 @@ func TestDataNotAvailableAfterRollback(t *testing.T) {
}
func TestDBAppenderAddRef(t *testing.T) {
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
ctx := context.Background()
app1 := db.Appender(ctx)
@@ -459,10 +486,7 @@ func TestDBAppenderAddRef(t *testing.T) {
}
func TestAppendEmptyLabelsIgnored(t *testing.T) {
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
ctx := context.Background()
app1 := db.Appender(ctx)
@@ -512,10 +536,7 @@ func TestDeleteSimple(t *testing.T) {
for _, c := range cases {
t.Run("", func(t *testing.T) {
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
@@ -573,10 +594,7 @@ func TestDeleteSimple(t *testing.T) {
}
func TestAmendHistogramDatapointCausesError(t *testing.T) {
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
@@ -634,10 +652,7 @@ func TestAmendHistogramDatapointCausesError(t *testing.T) {
}
func TestDuplicateNaNDatapointNoAmendError(t *testing.T) {
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
@@ -651,10 +666,7 @@ func TestDuplicateNaNDatapointNoAmendError(t *testing.T) {
}
func TestNonDuplicateNaNDatapointsCausesAmendError(t *testing.T) {
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
@@ -668,10 +680,7 @@ func TestNonDuplicateNaNDatapointsCausesAmendError(t *testing.T) {
}
func TestEmptyLabelsetCausesError(t *testing.T) {
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
@@ -681,10 +690,7 @@ func TestEmptyLabelsetCausesError(t *testing.T) {
}
func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
// Append AmendedValue.
ctx := context.Background()
@@ -724,7 +730,7 @@ func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
}
func TestDB_Snapshot(t *testing.T) {
- db := openTestDB(t, nil, nil)
+ db := newTestDB(t)
// append data
ctx := context.Background()
@@ -742,9 +748,7 @@ func TestDB_Snapshot(t *testing.T) {
require.NoError(t, db.Close())
// reopen DB from snapshot
- db, err := Open(snap, nil, nil, nil, nil)
- require.NoError(t, err)
- defer func() { require.NoError(t, db.Close()) }()
+ db = newTestDB(t, withDir(snap))
querier, err := db.Querier(mint, mint+1000)
require.NoError(t, err)
@@ -771,7 +775,7 @@ func TestDB_Snapshot(t *testing.T) {
// that are outside the set block time range.
// See https://github.com/prometheus/prometheus/issues/5105
func TestDB_Snapshot_ChunksOutsideOfCompactedRange(t *testing.T) {
- db := openTestDB(t, nil, nil)
+ db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
@@ -790,10 +794,8 @@ func TestDB_Snapshot_ChunksOutsideOfCompactedRange(t *testing.T) {
require.NoError(t, db.Snapshot(snap, true))
require.NoError(t, db.Close())
- // Reopen DB from snapshot.
- db, err := Open(snap, nil, nil, nil, nil)
- require.NoError(t, err)
- defer func() { require.NoError(t, db.Close()) }()
+ // reopen DB from snapshot
+ db = newTestDB(t, withDir(snap))
querier, err := db.Querier(mint, mint+1000)
require.NoError(t, err)
@@ -821,8 +823,7 @@ func TestDB_Snapshot_ChunksOutsideOfCompactedRange(t *testing.T) {
func TestDB_SnapshotWithDelete(t *testing.T) {
const numSamples int64 = 10
- db := openTestDB(t, nil, nil)
- defer func() { require.NoError(t, db.Close()) }()
+ db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
@@ -858,12 +859,10 @@ func TestDB_SnapshotWithDelete(t *testing.T) {
require.NoError(t, db.Snapshot(snap, true))
// reopen DB from snapshot
- newDB, err := Open(snap, nil, nil, nil, nil)
- require.NoError(t, err)
- defer func() { require.NoError(t, newDB.Close()) }()
+ db := newTestDB(t, withDir(snap))
// Compare the result.
- q, err := newDB.Querier(0, numSamples)
+ q, err := db.Querier(0, numSamples)
require.NoError(t, err)
defer func() { require.NoError(t, q.Close()) }()
@@ -961,10 +960,7 @@ func TestDB_e2e(t *testing.T) {
seriesMap[labels.New(l...).String()] = []chunks.Sample{}
}
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
@@ -1066,9 +1062,7 @@ func TestDB_e2e(t *testing.T) {
}
func TestWALFlushedOnDBClose(t *testing.T) {
- db := openTestDB(t, nil, nil)
-
- dirDb := db.Dir()
+ db := newTestDB(t)
lbls := labels.FromStrings("labelname", "labelvalue")
@@ -1080,9 +1074,7 @@ func TestWALFlushedOnDBClose(t *testing.T) {
require.NoError(t, db.Close())
- db, err = Open(dirDb, nil, nil, nil, nil)
- require.NoError(t, err)
- defer func() { require.NoError(t, db.Close()) }()
+ db = newTestDB(t, withDir(db.Dir()))
q, err := db.Querier(0, 1)
require.NoError(t, err)
@@ -1148,7 +1140,7 @@ func TestWALSegmentSizeOptions(t *testing.T) {
t.Run(fmt.Sprintf("WALSegmentSize %d test", segmentSize), func(t *testing.T) {
opts := DefaultOptions()
opts.WALSegmentSize = segmentSize
- db := openTestDB(t, opts, nil)
+ db := newTestDB(t, withOpts(opts))
for i := range int64(155) {
app := db.Appender(context.Background())
@@ -1161,9 +1153,8 @@ func TestWALSegmentSizeOptions(t *testing.T) {
require.NoError(t, app.Commit())
}
- dbDir := db.Dir()
require.NoError(t, db.Close())
- testFunc(dbDir, opts.WALSegmentSize)
+ testFunc(db.Dir(), opts.WALSegmentSize)
})
}
}
@@ -1190,7 +1181,7 @@ func TestWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T) {
func testWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation int) {
const numSeries = 1000
- db := openTestDB(t, nil, nil)
+ db := newTestDB(t)
db.DisableCompactions()
for seriesRef := 1; seriesRef <= numSeries; seriesRef++ {
@@ -1223,14 +1214,10 @@ func testWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T, numSamplesBefore
require.NoError(t, db.Close())
// Reopen the DB, replaying the WAL.
- reopenDB, err := Open(db.Dir(), promslog.New(&promslog.Config{}), nil, nil, nil)
- require.NoError(t, err)
- t.Cleanup(func() {
- require.NoError(t, reopenDB.Close())
- })
+ db = newTestDB(t, withDir(db.Dir()))
// Query back chunks for all series.
- q, err := reopenDB.ChunkQuerier(math.MinInt64, math.MaxInt64)
+ q, err := db.ChunkQuerier(math.MinInt64, math.MaxInt64)
require.NoError(t, err)
set := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "series_id", ".+"))
@@ -1259,7 +1246,7 @@ func TestTombstoneClean(t *testing.T) {
t.Parallel()
const numSamples int64 = 10
- db := openTestDB(t, nil, nil)
+ db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
@@ -1290,9 +1277,7 @@ func TestTombstoneClean(t *testing.T) {
require.NoError(t, db.Close())
// Reopen DB from snapshot.
- db, err := Open(snap, nil, nil, nil, nil)
- require.NoError(t, err)
- defer db.Close()
+ db := newTestDB(t, withDir(snap))
for _, r := range c.intervals {
require.NoError(t, db.Delete(ctx, r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
@@ -1354,7 +1339,7 @@ func TestTombstoneCleanResultEmptyBlock(t *testing.T) {
t.Parallel()
numSamples := int64(10)
- db := openTestDB(t, nil, nil)
+ db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
@@ -1375,9 +1360,7 @@ func TestTombstoneCleanResultEmptyBlock(t *testing.T) {
require.NoError(t, db.Close())
// Reopen DB from snapshot.
- db, err := Open(snap, nil, nil, nil, nil)
- require.NoError(t, err)
- defer db.Close()
+ db = newTestDB(t, withDir(snap))
// Create tombstones by deleting all samples.
for _, r := range intervals {
@@ -1387,7 +1370,7 @@ func TestTombstoneCleanResultEmptyBlock(t *testing.T) {
require.NoError(t, db.CleanTombstones())
// After cleaning tombstones that covers the entire block, no blocks should be left behind.
- actualBlockDirs, err := blockDirs(db.dir)
+ actualBlockDirs, err := blockDirs(db.Dir())
require.NoError(t, err)
require.Empty(t, actualBlockDirs)
}
@@ -1397,10 +1380,7 @@ func TestTombstoneCleanResultEmptyBlock(t *testing.T) {
// if TombstoneClean leaves any blocks behind these will overlap.
func TestTombstoneCleanFail(t *testing.T) {
t.Parallel()
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
var oldBlockDirs []string
@@ -1432,7 +1412,7 @@ func TestTombstoneCleanFail(t *testing.T) {
require.Error(t, db.CleanTombstones())
// Now check that the CleanTombstones replaced the old block even after a failure.
- actualBlockDirs, err := blockDirs(db.dir)
+ actualBlockDirs, err := blockDirs(db.Dir())
require.NoError(t, err)
// Only one block should have been replaced by a new block.
require.Len(t, actualBlockDirs, len(oldBlockDirs))
@@ -1533,10 +1513,7 @@ func TestTimeRetention(t *testing.T) {
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
- db := openTestDB(t, nil, []int64{1000})
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t, withRngs(1000))
for _, m := range tc.blocks {
createBlock(t, db.Dir(), genSeries(10, 10, m.MinTime, m.MaxTime))
@@ -1562,12 +1539,9 @@ func TestTimeRetention(t *testing.T) {
}
func TestRetentionDurationMetric(t *testing.T) {
- db := openTestDB(t, &Options{
+ db := newTestDB(t, withOpts(&Options{
RetentionDuration: 1000,
- }, []int64{100})
- defer func() {
- require.NoError(t, db.Close())
- }()
+ }), withRngs(100))
expRetentionDuration := 1.0
actRetentionDuration := prom_testutil.ToFloat64(db.metrics.retentionDuration)
@@ -1578,10 +1552,7 @@ func TestSizeRetention(t *testing.T) {
t.Parallel()
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 100
- db := openTestDB(t, opts, []int64{100})
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t, withOpts(opts), withRngs(100))
blocks := []*BlockMeta{
{MinTime: 100, MaxTime: 200}, // Oldest block
@@ -1725,12 +1696,9 @@ func TestSizeRetentionMetric(t *testing.T) {
}
for _, c := range cases {
- db := openTestDB(t, &Options{
+ db := newTestDB(t, withOpts(&Options{
MaxBytes: c.maxBytes,
- }, []int64{100})
- defer func() {
- require.NoError(t, db.Close())
- }()
+ }), withRngs(100))
actMaxBytes := int64(prom_testutil.ToFloat64(db.metrics.maxBytes))
require.Equal(t, c.expMaxBytes, actMaxBytes, "metric retention limit bytes mismatch")
@@ -1747,12 +1715,9 @@ func TestRuntimeRetentionConfigChange(t *testing.T) {
shorterRetentionDuration = int64(1 * time.Hour / time.Millisecond) // 1 hour
)
- db := openTestDB(t, &Options{
+ db := newTestDB(t, withOpts(&Options{
RetentionDuration: initialRetentionDuration,
- }, []int64{100})
- defer func() {
- require.NoError(t, db.Close())
- }()
+ }), withRngs(100))
nineHoursMs := int64(9 * time.Hour / time.Millisecond)
nineAndHalfHoursMs := int64((9*time.Hour + 30*time.Minute) / time.Millisecond)
@@ -1807,10 +1772,7 @@ func TestRuntimeRetentionConfigChange(t *testing.T) {
}
func TestNotMatcherSelectsLabelsUnsetSeries(t *testing.T) {
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
labelpairs := []labels.Labels{
labels.FromStrings("a", "abcd", "b", "abcde"),
@@ -1995,10 +1957,7 @@ func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) {
// Regression test for https://github.com/prometheus/tsdb/issues/347
func TestChunkAtBlockBoundary(t *testing.T) {
t.Parallel()
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
@@ -2052,10 +2011,7 @@ func TestChunkAtBlockBoundary(t *testing.T) {
func TestQuerierWithBoundaryChunks(t *testing.T) {
t.Parallel()
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
@@ -2098,11 +2054,7 @@ func TestQuerierWithBoundaryChunks(t *testing.T) {
func TestInitializeHeadTimestamp(t *testing.T) {
t.Parallel()
t.Run("clean", func(t *testing.T) {
- dir := t.TempDir()
-
- db, err := Open(dir, nil, nil, nil, nil)
- require.NoError(t, err)
- defer db.Close()
+ db := newTestDB(t)
// Should be set to init values if no WAL or blocks exist so far.
require.Equal(t, int64(math.MaxInt64), db.head.MinTime())
@@ -2112,7 +2064,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
// First added sample initializes the writable range.
ctx := context.Background()
app := db.Appender(ctx)
- _, err = app.Append(0, labels.FromStrings("a", "b"), 1000, 1)
+ _, err := app.Append(0, labels.FromStrings("a", "b"), 1000, 1)
require.NoError(t, err)
require.Equal(t, int64(1000), db.head.MinTime())
@@ -2140,9 +2092,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
require.NoError(t, err)
require.NoError(t, w.Close())
- db, err := Open(dir, nil, nil, nil, nil)
- require.NoError(t, err)
- defer db.Close()
+ db := newTestDB(t, withDir(dir))
require.Equal(t, int64(5000), db.head.MinTime())
require.Equal(t, int64(15000), db.head.MaxTime())
@@ -2153,9 +2103,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
createBlock(t, dir, genSeries(1, 1, 1000, 2000))
- db, err := Open(dir, nil, nil, nil, nil)
- require.NoError(t, err)
- defer db.Close()
+ db := newTestDB(t, withDir(dir))
require.Equal(t, int64(2000), db.head.MinTime())
require.Equal(t, int64(2000), db.head.MaxTime())
@@ -2184,11 +2132,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
require.NoError(t, err)
require.NoError(t, w.Close())
- r := prometheus.NewRegistry()
-
- db, err := Open(dir, nil, r, nil, nil)
- require.NoError(t, err)
- defer db.Close()
+ db := newTestDB(t, withDir(dir))
require.Equal(t, int64(6000), db.head.MinTime())
require.Equal(t, int64(15000), db.head.MaxTime())
@@ -2200,11 +2144,9 @@ func TestInitializeHeadTimestamp(t *testing.T) {
func TestNoEmptyBlocks(t *testing.T) {
t.Parallel()
- db := openTestDB(t, nil, []int64{100})
+ db := newTestDB(t, withRngs(100))
ctx := context.Background()
- defer func() {
- require.NoError(t, db.Close())
- }()
+
db.DisableCompactions()
rangeToTriggerCompaction := db.compactor.(*LeveledCompactor).ranges[0]/2*3 - 1
@@ -2361,10 +2303,7 @@ func TestDB_LabelNames(t *testing.T) {
for _, tst := range tests {
t.Run("", func(t *testing.T) {
ctx := context.Background()
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
appendSamples(db, 0, 4, tst.sampleLabels1)
@@ -2409,10 +2348,7 @@ func TestDB_LabelNames(t *testing.T) {
func TestCorrectNumTombstones(t *testing.T) {
t.Parallel()
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
blockRange := db.compactor.(*LeveledCompactor).ranges[0]
name, value := "foo", "bar"
@@ -2545,8 +2481,7 @@ func TestBlockRanges(t *testing.T) {
func TestDBReadOnly(t *testing.T) {
t.Parallel()
var (
- dbDir string
- logger = promslog.New(&promslog.Config{})
+ dbDir = t.TempDir()
expBlocks []*Block
expBlock *Block
expSeries map[string][]chunks.Sample
@@ -2558,8 +2493,6 @@ func TestDBReadOnly(t *testing.T) {
// Bootstrap the db.
{
- dbDir = t.TempDir()
-
dbBlocks := []*BlockMeta{
// Create three 2-sample blocks.
{MinTime: 10, MaxTime: 12},
@@ -2572,7 +2505,7 @@ func TestDBReadOnly(t *testing.T) {
}
// Add head to test DBReadOnly WAL reading capabilities.
- w, err := wlog.New(logger, nil, filepath.Join(dbDir, "wal"), compression.Snappy)
+ w, err := wlog.New(nil, nil, filepath.Join(dbDir, "wal"), compression.Snappy)
require.NoError(t, err)
h := createHead(t, w, genSeries(1, 1, 16, 18), dbDir)
require.NoError(t, h.Close())
@@ -2580,8 +2513,7 @@ func TestDBReadOnly(t *testing.T) {
// Open a normal db to use for a comparison.
{
- dbWritable, err := Open(dbDir, logger, nil, nil, nil)
- require.NoError(t, err)
+ dbWritable := newTestDB(t, withDir(dbDir))
dbWritable.DisableCompactions()
dbSizeBeforeAppend, err := fileutil.DirSize(dbWritable.Dir())
@@ -2609,7 +2541,7 @@ func TestDBReadOnly(t *testing.T) {
}
// Open a read only db and ensure that the API returns the same result as the normal DB.
- dbReadOnly, err := OpenDBReadOnly(dbDir, "", logger)
+ dbReadOnly, err := OpenDBReadOnly(dbDir, "", nil)
require.NoError(t, err)
defer func() { require.NoError(t, dbReadOnly.Close()) }()
@@ -2682,24 +2614,20 @@ func TestDBReadOnlyClosing(t *testing.T) {
func TestDBReadOnly_FlushWAL(t *testing.T) {
t.Parallel()
var (
- dbDir string
- logger = promslog.New(&promslog.Config{})
- err error
- maxt int
- ctx = context.Background()
+ dbDir = t.TempDir()
+ err error
+ maxt int
+ ctx = context.Background()
)
// Bootstrap the db.
{
- dbDir = t.TempDir()
-
// Append data to the WAL.
- db, err := Open(dbDir, logger, nil, nil, nil)
- require.NoError(t, err)
+ db := newTestDB(t, withDir(dbDir))
db.DisableCompactions()
app := db.Appender(ctx)
maxt = 1000
- for i := 0; i < maxt; i++ {
+ for i := range maxt {
_, err := app.Append(0, labels.FromStrings(defaultLabelName, "flush"), int64(i), 1.0)
require.NoError(t, err)
}
@@ -2708,7 +2636,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
}
// Flush WAL.
- db, err := OpenDBReadOnly(dbDir, "", logger)
+ db, err := OpenDBReadOnly(dbDir, "", nil)
require.NoError(t, err)
flush := t.TempDir()
@@ -2716,7 +2644,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
require.NoError(t, db.Close())
// Reopen the DB from the flushed WAL block.
- db, err = OpenDBReadOnly(flush, "", logger)
+ db, err = OpenDBReadOnly(flush, "", nil)
require.NoError(t, err)
defer func() { require.NoError(t, db.Close()) }()
blocks, err := db.Blocks()
@@ -2777,10 +2705,7 @@ func TestDBReadOnly_Querier_NoAlteration(t *testing.T) {
}
t.Run("doesn't cut chunks while replaying WAL", func(t *testing.T) {
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
// Append until the first mmapped head chunk.
for i := range 121 {
@@ -2790,33 +2715,31 @@ func TestDBReadOnly_Querier_NoAlteration(t *testing.T) {
require.NoError(t, app.Commit())
}
- spinUpQuerierAndCheck(db.dir, t.TempDir(), 0)
+ spinUpQuerierAndCheck(db.Dir(), t.TempDir(), 0)
// The RW Head should have no problem cutting its own chunk,
// this also proves that a chunk needed to be cut.
require.NotPanics(t, func() { db.ForceHeadMMap() })
- require.Equal(t, 1, countChunks(db.dir))
+ require.Equal(t, 1, countChunks(db.Dir()))
})
t.Run("doesn't truncate corrupted chunks", func(t *testing.T) {
- db := openTestDB(t, nil, nil)
+ db := newTestDB(t)
require.NoError(t, db.Close())
// Simulate a corrupted chunk: without a header.
- chunk, err := os.Create(path.Join(mmappedChunksDir(db.dir), "000001"))
+ chunk, err := os.Create(path.Join(mmappedChunksDir(db.Dir()), "000001"))
require.NoError(t, err)
require.NoError(t, chunk.Close())
- spinUpQuerierAndCheck(db.dir, t.TempDir(), 1)
+ spinUpQuerierAndCheck(db.Dir(), t.TempDir(), 1)
// The RW Head should have no problem truncating its corrupted file:
// this proves that the chunk needed to be truncated.
- db, err = Open(db.dir, nil, nil, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db = newTestDB(t, withDir(db.Dir()))
+
require.NoError(t, err)
- require.Equal(t, 0, countChunks(db.dir))
+ require.Equal(t, 0, countChunks(db.Dir()))
})
}
@@ -2825,11 +2748,7 @@ func TestDBCannotSeePartialCommits(t *testing.T) {
t.Skip("skipping test since tsdb isolation is disabled")
}
- tmpdir := t.TempDir()
-
- db, err := Open(tmpdir, nil, nil, nil, nil)
- require.NoError(t, err)
- defer db.Close()
+ db := newTestDB(t)
stop := make(chan struct{})
firstInsert := make(chan struct{})
@@ -2845,8 +2764,7 @@ func TestDBCannotSeePartialCommits(t *testing.T) {
_, err := app.Append(0, labels.FromStrings("foo", "bar", "a", strconv.Itoa(j)), int64(iter), float64(iter))
require.NoError(t, err)
}
- err = app.Commit()
- require.NoError(t, err)
+ require.NoError(t, app.Commit())
if iter == 0 {
close(firstInsert)
@@ -2896,12 +2814,7 @@ func TestDBQueryDoesntSeeAppendsAfterCreation(t *testing.T) {
t.Skip("skipping test since tsdb isolation is disabled")
}
- tmpdir := t.TempDir()
-
- db, err := Open(tmpdir, nil, nil, nil, nil)
- require.NoError(t, err)
- defer db.Close()
-
+ db := newTestDB(t)
querierBeforeAdd, err := db.Querier(0, 1000000)
require.NoError(t, err)
defer querierBeforeAdd.Close()
@@ -3219,19 +3132,16 @@ func TestChunkReader_ConcurrentReads(t *testing.T) {
// * queries the db to ensure the samples are present from the compacted head.
func TestCompactHead(t *testing.T) {
t.Parallel()
- dbDir := t.TempDir()
// Open a DB and append data to the WAL.
- tsdbCfg := &Options{
+ opts := &Options{
RetentionDuration: int64(time.Hour * 24 * 15 / time.Millisecond),
NoLockfile: true,
MinBlockDuration: int64(time.Hour * 2 / time.Millisecond),
MaxBlockDuration: int64(time.Hour * 2 / time.Millisecond),
WALCompression: compression.Snappy,
}
-
- db, err := Open(dbDir, promslog.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil)
- require.NoError(t, err)
+ db := newTestDB(t, withOpts(opts))
ctx := context.Background()
app := db.Appender(ctx)
var expSamples []sample
@@ -3251,8 +3161,7 @@ func TestCompactHead(t *testing.T) {
// Delete everything but the new block and
// reopen the db to query it to ensure it includes the head data.
require.NoError(t, deleteNonBlocks(db.Dir()))
- db, err = Open(dbDir, promslog.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil)
- require.NoError(t, err)
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
require.Len(t, db.Blocks(), 1)
require.Equal(t, int64(maxt), db.Head().MinTime())
defer func() { require.NoError(t, db.Close()) }()
@@ -3278,13 +3187,12 @@ func TestCompactHead(t *testing.T) {
// TestCompactHeadWithDeletion tests https://github.com/prometheus/prometheus/issues/11585.
func TestCompactHeadWithDeletion(t *testing.T) {
- db, err := Open(t.TempDir(), promslog.NewNopLogger(), prometheus.NewRegistry(), nil, nil)
- require.NoError(t, err)
+ db := newTestDB(t)
ctx := context.Background()
app := db.Appender(ctx)
- _, err = app.Append(0, labels.FromStrings("a", "b"), 10, rand.Float64())
+ _, err := app.Append(0, labels.FromStrings("a", "b"), 10, rand.Float64())
require.NoError(t, err)
require.NoError(t, app.Commit())
@@ -3293,7 +3201,6 @@ func TestCompactHeadWithDeletion(t *testing.T) {
// This recreates the bug.
require.NoError(t, db.CompactHead(NewRangeHead(db.Head(), 0, 100)))
- require.NoError(t, db.Close())
}
func deleteNonBlocks(dbDir string) error {
@@ -3403,9 +3310,7 @@ func TestOpen_VariousBlockStates(t *testing.T) {
opts := DefaultOptions()
opts.RetentionDuration = 0
- db, err := Open(tmpDir, promslog.New(&promslog.Config{}), nil, opts, nil)
- require.NoError(t, err)
-
+ db := newTestDB(t, withDir(tmpDir), withOpts(opts))
loadedBlocks := db.Blocks()
var loaded int
@@ -3438,21 +3343,16 @@ func TestOpen_VariousBlockStates(t *testing.T) {
func TestOneCheckpointPerCompactCall(t *testing.T) {
t.Parallel()
blockRange := int64(1000)
- tsdbCfg := &Options{
+ opts := &Options{
RetentionDuration: blockRange * 1000,
NoLockfile: true,
MinBlockDuration: blockRange,
MaxBlockDuration: blockRange,
}
- tmpDir := t.TempDir()
ctx := context.Background()
- db, err := Open(tmpDir, promslog.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil)
- require.NoError(t, err)
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions()
	// Case 1: Lots of uncompacted data in Head.
@@ -3508,10 +3408,9 @@ func TestOneCheckpointPerCompactCall(t *testing.T) {
newBlockMaxt := db.Head().MaxTime() + 1
require.NoError(t, db.Close())
- createBlock(t, db.dir, genSeries(1, 1, newBlockMint, newBlockMaxt))
+ createBlock(t, db.Dir(), genSeries(1, 1, newBlockMint, newBlockMaxt))
- db, err = Open(db.dir, promslog.NewNopLogger(), prometheus.NewRegistry(), tsdbCfg, nil)
- require.NoError(t, err)
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
db.DisableCompactions()
// 1 block more.
@@ -3604,10 +3503,7 @@ func testQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t
maxStressAllocationBytes = 512 * 1024
)
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
// Disable compactions so we can control it.
db.DisableCompactions()
@@ -3740,10 +3636,7 @@ func testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChun
maxStressAllocationBytes = 512 * 1024
)
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
// Disable compactions so we can control it.
db.DisableCompactions()
@@ -3845,10 +3738,7 @@ func testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChun
func TestQuerierShouldNotFailIfOOOCompactionOccursAfterRetrievingQuerier(t *testing.T) {
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 3 * DefaultBlockDuration
- db := openTestDB(t, opts, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t, withOpts(opts))
// Disable compactions so we can control it.
db.DisableCompactions()
@@ -3939,10 +3829,7 @@ func TestQuerierShouldNotFailIfOOOCompactionOccursAfterRetrievingQuerier(t *test
func TestQuerierShouldNotFailIfOOOCompactionOccursAfterSelecting(t *testing.T) {
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 3 * DefaultBlockDuration
- db := openTestDB(t, opts, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t, withOpts(opts))
// Disable compactions so we can control it.
db.DisableCompactions()
@@ -4021,10 +3908,7 @@ func TestQuerierShouldNotFailIfOOOCompactionOccursAfterSelecting(t *testing.T) {
func TestQuerierShouldNotFailIfOOOCompactionOccursAfterRetrievingIterators(t *testing.T) {
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 3 * DefaultBlockDuration
- db := openTestDB(t, opts, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t, withOpts(opts))
// Disable compactions so we can control it.
db.DisableCompactions()
@@ -4100,17 +3984,6 @@ func TestQuerierShouldNotFailIfOOOCompactionOccursAfterRetrievingIterators(t *te
require.Eventually(t, compactionComplete.Load, time.Second, 10*time.Millisecond, "compaction should complete after querier was closed")
}
-func newTestDB(t *testing.T) *DB {
- dir := t.TempDir()
-
- db, err := Open(dir, nil, nil, DefaultOptions(), nil)
- require.NoError(t, err)
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
- return db
-}
-
func TestOOOWALWrite(t *testing.T) {
minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
@@ -4595,18 +4468,10 @@ func testOOOWALWrite(t *testing.T,
expectedOOORecords []any,
expectedInORecords []any,
) {
- dir := t.TempDir()
-
opts := DefaultOptions()
opts.OutOfOrderCapMax = 2
opts.OutOfOrderTimeWindow = 30 * time.Minute.Milliseconds()
-
- db, err := Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
-
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
+ db := newTestDB(t, withOpts(opts))
s1, s2 := labels.FromStrings("l", "v1"), labels.FromStrings("l", "v2")
@@ -4690,21 +4555,20 @@ func testOOOWALWrite(t *testing.T,
}
// The normal WAL.
- actRecs := getRecords(path.Join(dir, "wal"))
+ actRecs := getRecords(path.Join(db.Dir(), "wal"))
require.Equal(t, expectedInORecords, actRecs)
// The WBL.
- actRecs = getRecords(path.Join(dir, wlog.WblDirName))
+ actRecs = getRecords(path.Join(db.Dir(), wlog.WblDirName))
require.Equal(t, expectedOOORecords, actRecs)
}
// Tests https://github.com/prometheus/prometheus/issues/10291#issuecomment-1044373110.
func TestDBPanicOnMmappingHeadChunk(t *testing.T) {
- dir := t.TempDir()
+ var err error
ctx := context.Background()
- db, err := Open(dir, nil, nil, DefaultOptions(), nil)
- require.NoError(t, err)
+ db := newTestDB(t)
db.DisableCompactions()
// Choosing scrape interval of 45s to have chunk larger than 1h.
@@ -4738,8 +4602,7 @@ func TestDBPanicOnMmappingHeadChunk(t *testing.T) {
// Restarting.
require.NoError(t, db.Close())
- db, err = Open(dir, nil, nil, DefaultOptions(), nil)
- require.NoError(t, err)
+ db = newTestDB(t, withDir(db.Dir()))
db.DisableCompactions()
	// Ingest samples up to 20m more to make the head compact.
@@ -4932,7 +4795,7 @@ func TestMetadataAssertInMemoryData(t *testing.T) {
require.NoError(t, err)
}
- db := openTestDB(t, nil, nil)
+ db := newTestDB(t)
ctx := context.Background()
// Add some series so we can append metadata to them.
@@ -4993,19 +4856,14 @@ func TestMetadataAssertInMemoryData(t *testing.T) {
// Reopen the DB, replaying the WAL. The Head must have been replayed
// correctly in memory.
- reopenDB, err := Open(db.Dir(), nil, nil, nil, nil)
- require.NoError(t, err)
- t.Cleanup(func() {
- require.NoError(t, reopenDB.Close())
- })
-
- _, err = reopenDB.head.wal.Size()
+ db = newTestDB(t, withDir(db.Dir()))
+ _, err := db.head.wal.Size()
require.NoError(t, err)
- require.Equal(t, *reopenDB.head.series.getByHash(s1.Hash(), s1).meta, m1)
- require.Equal(t, *reopenDB.head.series.getByHash(s2.Hash(), s2).meta, m5)
- require.Equal(t, *reopenDB.head.series.getByHash(s3.Hash(), s3).meta, m3)
- require.Equal(t, *reopenDB.head.series.getByHash(s4.Hash(), s4).meta, m4)
+ require.Equal(t, *db.head.series.getByHash(s1.Hash(), s1).meta, m1)
+ require.Equal(t, *db.head.series.getByHash(s2.Hash(), s2).meta, m5)
+ require.Equal(t, *db.head.series.getByHash(s3.Hash(), s3).meta, m3)
+ require.Equal(t, *db.head.series.getByHash(s4.Hash(), s4).meta, m4)
}
// TestMultipleEncodingsCommitOrder mainly serves to demonstrate what happens when committing a batch of samples for the
@@ -5015,14 +4873,10 @@ func TestMultipleEncodingsCommitOrder(t *testing.T) {
opts.OutOfOrderCapMax = 30
opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds()
- series1 := labels.FromStrings("foo", "bar1")
-
- db := openTestDB(t, opts, nil)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions()
- defer func() {
- require.NoError(t, db.Close())
- }()
+ series1 := labels.FromStrings("foo", "bar1")
addSample := func(app storage.Appender, ts int64, valType chunkenc.ValueType) chunks.Sample {
if valType == chunkenc.ValFloat {
_, err := app.Append(0, labels.FromStrings("foo", "bar1"), ts, float64(ts))
@@ -5165,19 +5019,13 @@ func TestOOOCompaction(t *testing.T) {
}
func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSamples bool) {
- dir := t.TempDir()
ctx := context.Background()
opts := DefaultOptions()
opts.OutOfOrderCapMax = 30
opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
-
- db, err := Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions() // We want to manually call it.
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
series1 := labels.FromStrings("foo", "bar1")
series2 := labels.FromStrings("foo", "bar2")
@@ -5368,19 +5216,14 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) {
func testOOOCompactionWithNormalCompaction(t *testing.T, scenario sampleTypeScenario) {
t.Parallel()
- dir := t.TempDir()
ctx := context.Background()
opts := DefaultOptions()
opts.OutOfOrderCapMax = 30
opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
- db, err := Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions() // We want to manually call it.
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
series1 := labels.FromStrings("foo", "bar1")
series2 := labels.FromStrings("foo", "bar2")
@@ -5478,7 +5321,6 @@ func TestOOOCompactionWithDisabledWriteLog(t *testing.T) {
func testOOOCompactionWithDisabledWriteLog(t *testing.T, scenario sampleTypeScenario) {
t.Parallel()
- dir := t.TempDir()
ctx := context.Background()
opts := DefaultOptions()
@@ -5486,12 +5328,8 @@ func testOOOCompactionWithDisabledWriteLog(t *testing.T, scenario sampleTypeScen
opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
opts.WALSegmentSize = -1 // disabled WAL and WBL
- db, err := Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions() // We want to manually call it.
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
series1 := labels.FromStrings("foo", "bar1")
series2 := labels.FromStrings("foo", "bar2")
@@ -5588,7 +5426,6 @@ func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) {
}
func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sampleTypeScenario) {
- dir := t.TempDir()
ctx := context.Background()
opts := DefaultOptions()
@@ -5596,12 +5433,8 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa
opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
opts.EnableMemorySnapshotOnShutdown = true
- db, err := Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions() // We want to manually call it.
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
series1 := labels.FromStrings("foo", "bar1")
series2 := labels.FromStrings("foo", "bar2")
@@ -5637,10 +5470,9 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa
require.NoError(t, db.Close())
// For some reason wbl goes missing.
- require.NoError(t, os.RemoveAll(path.Join(dir, "wbl")))
+ require.NoError(t, os.RemoveAll(path.Join(db.Dir(), "wbl")))
- db, err = Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
+ db = newTestDB(t, withDir(db.Dir()))
db.DisableCompactions() // We want to manually call it.
// Check ooo m-map chunks again.
@@ -5957,11 +5789,8 @@ func testQuerierOOOQuery(t *testing.T,
for _, tc := range tests {
t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) {
opts.OutOfOrderCapMax = tc.oooCap
- db := openTestDB(t, opts, nil)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions()
- defer func() {
- require.NoError(t, db.Close())
- }()
var expSamples []chunks.Sample
var oooSamples, appendedCount int
@@ -6286,11 +6115,8 @@ func testChunkQuerierOOOQuery(t *testing.T,
for _, tc := range tests {
t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) {
opts.OutOfOrderCapMax = tc.oooCap
- db := openTestDB(t, opts, nil)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions()
- defer func() {
- require.NoError(t, db.Close())
- }()
var expSamples []chunks.Sample
var oooSamples, appendedCount int
@@ -6466,11 +6292,8 @@ func testOOONativeHistogramsWithCounterResets(t *testing.T, scenario sampleTypeS
}
for _, tc := range tests {
t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) {
- db := openTestDB(t, opts, nil)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions()
- defer func() {
- require.NoError(t, db.Close())
- }()
app := db.Appender(context.Background())
@@ -6703,11 +6526,8 @@ func testOOOInterleavedImplicitCounterResets(t *testing.T, name string, scenario
opts.OutOfOrderCapMax = tc.oooCap
opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds()
- db := openTestDB(t, opts, nil)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions()
- defer func() {
- require.NoError(t, db.Close())
- }()
app := db.Appender(context.Background())
for _, s := range tc.samples {
@@ -6804,11 +6624,8 @@ func testOOOAppendAndQuery(t *testing.T, scenario sampleTypeScenario) {
opts.OutOfOrderCapMax = 30
opts.OutOfOrderTimeWindow = 4 * time.Hour.Milliseconds()
- db := openTestDB(t, opts, nil)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions()
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
s1 := labels.FromStrings("foo", "bar1")
s2 := labels.FromStrings("foo", "bar2")
@@ -6935,11 +6752,8 @@ func TestOOODisabled(t *testing.T) {
func testOOODisabled(t *testing.T, scenario sampleTypeScenario) {
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 0
- db := openTestDB(t, opts, nil)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions()
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
s1 := labels.FromStrings("foo", "bar1")
minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
@@ -7010,11 +6824,8 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) {
opts.OutOfOrderCapMax = 30
opts.OutOfOrderTimeWindow = 4 * time.Hour.Milliseconds()
- db := openTestDB(t, opts, nil)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions()
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
s1 := labels.FromStrings("foo", "bar1")
@@ -7095,38 +6906,32 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) {
}
t.Run("Restart DB with both WBL and M-map files for ooo data", func(t *testing.T) {
- db, err = Open(db.dir, nil, nil, opts, nil)
- require.NoError(t, err)
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
require.Equal(t, oooMint, db.head.MinOOOTime())
require.Equal(t, oooMaxt, db.head.MaxOOOTime())
testQuery(expSamples)
- require.NoError(t, db.Close())
})
t.Run("Restart DB with only WBL for ooo data", func(t *testing.T) {
require.NoError(t, os.RemoveAll(mmapDir))
- db, err = Open(db.dir, nil, nil, opts, nil)
- require.NoError(t, err)
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
require.Equal(t, oooMint, db.head.MinOOOTime())
require.Equal(t, oooMaxt, db.head.MaxOOOTime())
testQuery(expSamples)
- require.NoError(t, db.Close())
})
t.Run("Restart DB with only M-map files for ooo data", func(t *testing.T) {
require.NoError(t, os.RemoveAll(wblDir))
resetMmapToOriginal()
- db, err = Open(db.dir, nil, nil, opts, nil)
- require.NoError(t, err)
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
require.Equal(t, oooMint, db.head.MinOOOTime())
require.Equal(t, oooMaxt, db.head.MaxOOOTime())
inOrderSample := expSamples[s1.String()][len(expSamples[s1.String()])-1]
testQuery(map[string][]chunks.Sample{
s1.String(): append(s1MmapSamples, inOrderSample),
})
- require.NoError(t, db.Close())
})
t.Run("Restart DB with WBL+Mmap while increasing the OOOCapMax", func(t *testing.T) {
@@ -7134,24 +6939,22 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) {
resetMmapToOriginal()
opts.OutOfOrderCapMax = 60
- db, err = Open(db.dir, nil, nil, opts, nil)
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
require.NoError(t, err)
require.Equal(t, oooMint, db.head.MinOOOTime())
require.Equal(t, oooMaxt, db.head.MaxOOOTime())
testQuery(expSamples)
- require.NoError(t, db.Close())
})
t.Run("Restart DB with WBL+Mmap while decreasing the OOOCapMax", func(t *testing.T) {
resetMmapToOriginal() // We need to reset because new duplicate chunks can be written above.
opts.OutOfOrderCapMax = 10
- db, err = Open(db.dir, nil, nil, opts, nil)
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
require.NoError(t, err)
require.Equal(t, oooMint, db.head.MinOOOTime())
require.Equal(t, oooMaxt, db.head.MaxOOOTime())
testQuery(expSamples)
- require.NoError(t, db.Close())
})
t.Run("Restart DB with WBL+Mmap while having no m-map markers in WBL", func(t *testing.T) {
@@ -7181,7 +6984,7 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) {
require.NoError(t, os.Rename(newWbl.Dir(), wblDir))
opts.OutOfOrderCapMax = 30
- db, err = Open(db.dir, nil, nil, opts, nil)
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
require.NoError(t, err)
require.Equal(t, oooMint, db.head.MinOOOTime())
require.Equal(t, oooMaxt, db.head.MaxOOOTime())
@@ -7191,19 +6994,14 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) {
func TestOOOHistogramCompactionWithCounterResets(t *testing.T) {
for _, floatHistogram := range []bool{false, true} {
- dir := t.TempDir()
ctx := context.Background()
opts := DefaultOptions()
opts.OutOfOrderCapMax = 30
opts.OutOfOrderTimeWindow = 500 * time.Minute.Milliseconds()
- db, err := Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions() // We want to manually call it.
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
series1 := labels.FromStrings("foo", "bar1")
series2 := labels.FromStrings("foo", "bar2")
@@ -7216,7 +7014,7 @@ func TestOOOHistogramCompactionWithCounterResets(t *testing.T) {
if floatHistogram {
h := tsdbutil.GenerateTestFloatHistogram(int64(val))
h.CounterResetHint = hint
- _, err = app.AppendHistogram(0, l, tsMs, nil, h)
+ _, err := app.AppendHistogram(0, l, tsMs, nil, h)
require.NoError(t, err)
require.NoError(t, app.Commit())
return sample{t: tsMs, fh: h.Copy()}
@@ -7224,7 +7022,7 @@ func TestOOOHistogramCompactionWithCounterResets(t *testing.T) {
h := tsdbutil.GenerateTestHistogram(int64(val))
h.CounterResetHint = hint
- _, err = app.AppendHistogram(0, l, tsMs, h, nil)
+ _, err := app.AppendHistogram(0, l, tsMs, h, nil)
require.NoError(t, err)
require.NoError(t, app.Commit())
return sample{t: tsMs, h: h.Copy()}
@@ -7551,19 +7349,14 @@ func TestOOOHistogramCompactionWithCounterResets(t *testing.T) {
func TestInterleavedInOrderAndOOOHistogramCompactionWithCounterResets(t *testing.T) {
for _, floatHistogram := range []bool{false, true} {
- dir := t.TempDir()
ctx := context.Background()
opts := DefaultOptions()
opts.OutOfOrderCapMax = 30
opts.OutOfOrderTimeWindow = 500 * time.Minute.Milliseconds()
- db, err := Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions() // We want to manually call it.
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
series1 := labels.FromStrings("foo", "bar1")
@@ -7572,14 +7365,14 @@ func TestInterleavedInOrderAndOOOHistogramCompactionWithCounterResets(t *testing
tsMs := ts
if floatHistogram {
h := tsdbutil.GenerateTestFloatHistogram(int64(val))
- _, err = app.AppendHistogram(0, l, tsMs, nil, h)
+ _, err := app.AppendHistogram(0, l, tsMs, nil, h)
require.NoError(t, err)
require.NoError(t, app.Commit())
return sample{t: tsMs, fh: h.Copy()}
}
h := tsdbutil.GenerateTestHistogram(int64(val))
- _, err = app.AppendHistogram(0, l, tsMs, h, nil)
+ _, err := app.AppendHistogram(0, l, tsMs, h, nil)
require.NoError(t, err)
require.NoError(t, app.Commit())
return sample{t: tsMs, h: h.Copy()}
@@ -7627,8 +7420,7 @@ func TestInterleavedInOrderAndOOOHistogramCompactionWithCounterResets(t *testing
// Compact the in-order head and expect another block.
// Since this is a forced compaction, this block is not aligned with 2h.
- err = db.CompactHead(NewRangeHead(db.head, 0, 3))
- require.NoError(t, err)
+ require.NoError(t, db.CompactHead(NewRangeHead(db.head, 0, 3)))
require.Len(t, db.Blocks(), 2)
// Blocks created out of normal and OOO head now. But not merged.
@@ -7666,19 +7458,13 @@ func TestOOOCompactionFailure(t *testing.T) {
}
func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) {
- dir := t.TempDir()
ctx := context.Background()
opts := DefaultOptions()
opts.OutOfOrderCapMax = 30
opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
-
- db, err := Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions() // We want to manually call it.
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
series1 := labels.FromStrings("foo", "bar1")
@@ -7804,18 +7590,11 @@ func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) {
}
func TestWBLCorruption(t *testing.T) {
- dir := t.TempDir()
-
opts := DefaultOptions()
opts.OutOfOrderCapMax = 30
opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
- db, err := Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
- db.DisableCompactions()
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
+ db := newTestDB(t, withOpts(opts))
series1 := labels.FromStrings("foo", "bar1")
var allSamples, expAfterRestart []chunks.Sample
@@ -7842,7 +7621,7 @@ func TestWBLCorruption(t *testing.T) {
addSamples(120, 130, true)
// Moving onto the second file.
- _, err = db.head.wbl.NextSegment()
+ _, err := db.head.wbl.NextSegment()
require.NoError(t, err)
// More OOO samples.
@@ -7914,7 +7693,7 @@ func TestWBLCorruption(t *testing.T) {
require.NoError(t, os.RemoveAll(mmappedChunksDir(db.head.opts.ChunkDirRoot)))
// Restart does the replay and repair.
- db, err = Open(db.dir, nil, nil, opts, nil)
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
require.NoError(t, err)
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.walCorruptionsTotal))
require.Less(t, len(expAfterRestart), len(allSamples))
@@ -7943,7 +7722,7 @@ func TestWBLCorruption(t *testing.T) {
// Another restart, everything normal with no repair.
require.NoError(t, db.Close())
- db, err = Open(db.dir, nil, nil, opts, nil)
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
require.NoError(t, err)
require.Equal(t, 0.0, prom_testutil.ToFloat64(db.head.metrics.walCorruptionsTotal))
verifySamples(expAfterRestart)
@@ -7958,18 +7737,11 @@ func TestOOOMmapCorruption(t *testing.T) {
}
func testOOOMmapCorruption(t *testing.T, scenario sampleTypeScenario) {
- dir := t.TempDir()
-
opts := DefaultOptions()
opts.OutOfOrderCapMax = 10
opts.OutOfOrderTimeWindow = 300 * time.Minute.Milliseconds()
- db, err := Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
- db.DisableCompactions()
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
+ db := newTestDB(t, withOpts(opts))
series1 := labels.FromStrings("foo", "bar1")
var allSamples, expInMmapChunks []chunks.Sample
@@ -8046,7 +7818,7 @@ func testOOOMmapCorruption(t *testing.T, scenario sampleTypeScenario) {
require.NoError(t, os.Rename(wblDir, wblDirTmp))
// Restart does the replay and repair of m-map files.
- db, err = Open(db.dir, nil, nil, opts, nil)
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
require.NoError(t, err)
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.mmapChunkCorruptionTotal))
require.Less(t, len(expInMmapChunks), len(allSamples))
@@ -8066,7 +7838,7 @@ func testOOOMmapCorruption(t *testing.T, scenario sampleTypeScenario) {
// Another restart, everything normal with no repair.
require.NoError(t, db.Close())
- db, err = Open(db.dir, nil, nil, opts, nil)
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
require.NoError(t, err)
require.Equal(t, 0.0, prom_testutil.ToFloat64(db.head.metrics.mmapChunkCorruptionTotal))
verifySamples(expInMmapChunks)
@@ -8075,7 +7847,7 @@ func testOOOMmapCorruption(t *testing.T, scenario sampleTypeScenario) {
require.NoError(t, db.Close())
require.NoError(t, os.RemoveAll(wblDir))
require.NoError(t, os.Rename(wblDirTmp, wblDir))
- db, err = Open(db.dir, nil, nil, opts, nil)
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
require.NoError(t, err)
verifySamples(allSamples)
}
@@ -8093,18 +7865,10 @@ func testOutOfOrderRuntimeConfig(t *testing.T, scenario sampleTypeScenario) {
ctx := context.Background()
getDB := func(oooTimeWindow int64) *DB {
- dir := t.TempDir()
-
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = oooTimeWindow
-
- db, err := Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions()
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
-
return db
}
@@ -8386,18 +8150,12 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) {
for i, c := range cases {
t.Run(fmt.Sprintf("case=%d", i), func(t *testing.T) {
- dir := t.TempDir()
ctx := context.Background()
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 30 * time.Minute.Milliseconds()
-
- db, err := Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions()
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
// 3h10m=190m worth in-order data.
addSamples(t, db, c.inOrderMint, c.inOrderMaxt, true)
@@ -8424,8 +8182,7 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) {
// Restart and expect all samples to be present.
require.NoError(t, db.Close())
- db, err = Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
db.DisableCompactions()
verifyBlockRanges()
@@ -8445,17 +8202,10 @@ func TestWblReplayAfterOOODisableAndRestart(t *testing.T) {
}
func testWblReplayAfterOOODisableAndRestart(t *testing.T, scenario sampleTypeScenario) {
- dir := t.TempDir()
-
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 60 * time.Minute.Milliseconds()
- db, err := Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
- db.DisableCompactions()
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
+ db := newTestDB(t, withOpts(opts))
series1 := labels.FromStrings("foo", "bar1")
var allSamples []chunks.Sample
@@ -8495,9 +8245,9 @@ func testWblReplayAfterOOODisableAndRestart(t *testing.T, scenario sampleTypeSce
// Restart DB with OOO disabled.
require.NoError(t, db.Close())
+
opts.OutOfOrderTimeWindow = 0
- db, err = Open(db.dir, nil, nil, opts, nil)
- require.NoError(t, err)
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
// We can still query OOO samples when OOO is disabled.
verifySamples(allSamples)
@@ -8512,17 +8262,10 @@ func TestPanicOnApplyConfig(t *testing.T) {
}
func testPanicOnApplyConfig(t *testing.T, scenario sampleTypeScenario) {
- dir := t.TempDir()
-
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 60 * time.Minute.Milliseconds()
- db, err := Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
- db.DisableCompactions()
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
+ db := newTestDB(t, withOpts(opts))
series1 := labels.FromStrings("foo", "bar1")
var allSamples []chunks.Sample
@@ -8544,12 +8287,12 @@ func testPanicOnApplyConfig(t *testing.T, scenario sampleTypeScenario) {
// Restart DB with OOO disabled.
require.NoError(t, db.Close())
+
opts.OutOfOrderTimeWindow = 0
- db, err = Open(db.dir, nil, prometheus.NewRegistry(), opts, nil)
- require.NoError(t, err)
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
// ApplyConfig with OOO enabled and expect no panic.
- err = db.ApplyConfig(&config.Config{
+ err := db.ApplyConfig(&config.Config{
StorageConfig: config.StorageConfig{
TSDBConfig: &config.TSDBConfig{
OutOfOrderTimeWindow: 60 * time.Minute.Milliseconds(),
@@ -8570,18 +8313,13 @@ func TestDiskFillingUpAfterDisablingOOO(t *testing.T) {
func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenario) {
t.Parallel()
- dir := t.TempDir()
ctx := context.Background()
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 60 * time.Minute.Milliseconds()
- db, err := Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions()
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
series1 := labels.FromStrings("foo", "bar1")
var allSamples []chunks.Sample
@@ -8603,9 +8341,9 @@ func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenari
// Restart DB with OOO disabled.
require.NoError(t, db.Close())
+
opts.OutOfOrderTimeWindow = 0
- db, err = Open(db.dir, nil, prometheus.NewRegistry(), opts, nil)
- require.NoError(t, err)
+ db = newTestDB(t, withDir(db.Dir()), withOpts(opts))
db.DisableCompactions()
ms := db.head.series.getByHash(series1.Hash(), series1)
@@ -8666,11 +8404,8 @@ func TestHistogramAppendAndQuery(t *testing.T) {
func testHistogramAppendAndQueryHelper(t *testing.T, floatHistogram bool) {
t.Helper()
- db := openTestDB(t, nil, nil)
+ db := newTestDB(t)
minute := func(m int) int64 { return int64(m) * time.Minute.Milliseconds() }
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
ctx := context.Background()
appendHistogram := func(t *testing.T,
@@ -8937,10 +8672,7 @@ func TestQueryHistogramFromBlocksWithCompaction(t *testing.T) {
t.Helper()
opts := DefaultOptions()
- db := openTestDB(t, opts, nil)
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
+ db := newTestDB(t, withOpts(opts))
var it chunkenc.Iterator
exp := make(map[string][]chunks.Sample)
@@ -9083,10 +8815,7 @@ func TestOOONativeHistogramsSettings(t *testing.T) {
t.Run("Test OOO native histograms if OOO is disabled and Native Histograms is enabled", func(t *testing.T) {
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 0
- db := openTestDB(t, opts, []int64{100})
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t, withOpts(opts), withRngs(100))
app := db.Appender(context.Background())
_, err := app.AppendHistogram(0, l, 100, h, nil)
@@ -9107,10 +8836,7 @@ func TestOOONativeHistogramsSettings(t *testing.T) {
t.Run("Test OOO native histograms when both OOO and Native Histograms are enabled", func(t *testing.T) {
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 100
- db := openTestDB(t, opts, []int64{100})
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t, withOpts(opts), withRngs(100))
// Add in-order samples
app := db.Appender(context.Background())
@@ -9200,10 +8926,7 @@ func compareSeries(t require.TestingT, expected, actual map[string][]chunks.Samp
// worrying about the parallel write.
func TestChunkQuerierReadWriteRace(t *testing.T) {
t.Parallel()
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
lbls := labels.FromStrings("foo", "bar")
@@ -9289,10 +9012,7 @@ func (c *mockCompactorFn) Write(string, BlockReader, int64, int64, *BlockMeta) (
// Regression test for https://github.com/prometheus/prometheus/pull/13754
func TestAbortBlockCompactions(t *testing.T) {
// Create a test DB
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t)
// It should NOT be compactable at the beginning of the test
require.False(t, db.head.compactable(), "head should NOT be compactable")
@@ -9345,10 +9065,8 @@ func TestNewCompactorFunc(t *testing.T) {
},
}, nil
}
- db := openTestDB(t, opts, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t, withOpts(opts))
+
plans, err := db.compactor.Plan("")
require.NoError(t, err)
require.Equal(t, []string{block1.String(), block2.String()}, plans)
@@ -9379,10 +9097,7 @@ func TestBlockQuerierAndBlockChunkQuerier(t *testing.T) {
return storage.NoopChunkedQuerier(), nil
}
- db := openTestDB(t, opts, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t, withOpts(opts))
metas := []BlockMeta{
{Compaction: BlockMetaCompaction{Hints: []string{"test-hint"}}},
@@ -9467,10 +9182,8 @@ func TestGenerateCompactionDelay(t *testing.T) {
for _, c := range cases {
opts.CompactionDelayMaxPercent = c.compactionDelayPercent
- db := openTestDB(t, opts, []int64{60000})
- defer func() {
- require.NoError(t, db.Close())
- }()
+ db := newTestDB(t, withOpts(opts), withRngs(60000))
+
// The offset is generated and changed while opening.
assertDelay(db.opts.CompactionDelay, c.compactionDelayPercent)
@@ -9513,15 +9226,17 @@ func TestBlockClosingBlockedDuringRemoteRead(t *testing.T) {
dir := t.TempDir()
createBlock(t, dir, genSeries(2, 1, 0, 10))
+
+	// Not using newTestDB here since db.Close is expected to return an error.
db, err := Open(dir, nil, nil, nil, nil)
require.NoError(t, err)
- // No error checking as manually closing the block is supposed to make this fail.
defer db.Close()
- readAPI := remote.NewReadHandler(nil, nil, db, func() config.Config {
- return config.Config{}
- },
- 0, 1, 0,
+ readAPI := remote.NewReadHandler(
+ nil, nil, db,
+ func() config.Config {
+ return config.Config{}
+ }, 0, 1, 0,
)
matcher, err := labels.NewMatcher(labels.MatchRegexp, "__name__", ".*")
@@ -9587,11 +9302,50 @@ func TestBlockClosingBlockedDuringRemoteRead(t *testing.T) {
}
}
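+// TestBlockReloadInterval verifies that BlockReloadInterval is clamped to a
+// one-second minimum and that blocks keep being reloaded on that interval,
+// observed via the reloads metric.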
+func TestBlockReloadInterval(t *testing.T) {
+ t.Parallel()
+
+ cases := []struct {
+ name string
+ reloadInterval time.Duration
+ expectedReloads float64
+ }{
+ {
+ name: "extremely small interval",
+ reloadInterval: 1 * time.Millisecond,
+ expectedReloads: 5,
+ },
+ {
+ name: "one second interval",
+ reloadInterval: 1 * time.Second,
+ expectedReloads: 5,
+ },
+ }
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ t.Parallel()
+ db := newTestDB(t, withOpts(&Options{
+ BlockReloadInterval: c.reloadInterval,
+ }))
+ if c.reloadInterval < 1*time.Second {
+				require.Equal(t, 1*time.Second, db.opts.BlockReloadInterval, "interval should be clamped to a minimum of 1 second")
+ }
+ require.Equal(t, float64(1), prom_testutil.ToFloat64(db.metrics.reloads), "there should be one initial reload")
+ require.Eventually(t, func() bool {
+ return prom_testutil.ToFloat64(db.metrics.reloads) == c.expectedReloads
+ },
+ 5*time.Second,
+ 100*time.Millisecond,
+ )
+ })
+ }
+}
+
func TestStaleSeriesCompaction(t *testing.T) {
opts := DefaultOptions()
opts.MinBlockDuration = 1000
opts.MaxBlockDuration = 1000
- db := openTestDB(t, opts, nil)
+ db := newTestDB(t, withOpts(opts))
db.DisableCompactions()
t.Cleanup(func() {
require.NoError(t, db.Close())
@@ -9604,7 +9358,7 @@ func TestStaleSeriesCompaction(t *testing.T) {
staleSeriesCrossingBoundary, staleHistCrossingBoundary, staleFHistCrossingBoundary []labels.Labels
numSeriesPerCategory = 1
)
- for i := 0; i < numSeriesPerCategory; i++ {
+ for i := range numSeriesPerCategory {
nonStaleSeries = append(nonStaleSeries, labels.FromStrings("name", fmt.Sprintf("series%d", 1000+i)))
nonStaleHist = append(nonStaleHist, labels.FromStrings("name", fmt.Sprintf("series%d", 2000+i)))
nonStaleFHist = append(nonStaleFHist, labels.FromStrings("name", fmt.Sprintf("series%d", 3000+i)))
@@ -9629,7 +9383,7 @@ func TestStaleSeriesCompaction(t *testing.T) {
addNormalSamples := func(ts int64, floatSeries, histSeries, floatHistSeries []labels.Labels) {
app := db.Appender(context.Background())
- for i := 0; i < len(floatSeries); i++ {
+ for i := range len(floatSeries) {
_, err := app.Append(0, floatSeries[i], ts, v)
require.NoError(t, err)
_, err = app.AppendHistogram(0, histSeries[i], ts, h, nil)
@@ -9641,7 +9395,7 @@ func TestStaleSeriesCompaction(t *testing.T) {
}
addStaleSamples := func(ts int64, floatSeries, histSeries, floatHistSeries []labels.Labels) {
app := db.Appender(context.Background())
- for i := 0; i < len(floatSeries); i++ {
+ for i := range len(floatSeries) {
_, err := app.Append(0, floatSeries[i], ts, staleV)
require.NoError(t, err)
_, err = app.AppendHistogram(0, histSeries[i], ts, staleH, nil)
@@ -9701,7 +9455,7 @@ func TestStaleSeriesCompaction(t *testing.T) {
require.Equal(t, uint64(0), db.head.NumStaleSeries())
expHeadQuery := make(map[string][]chunks.Sample)
- for i := 0; i < numSeriesPerCategory; i++ {
+ for i := range numSeriesPerCategory {
expHeadQuery[fmt.Sprintf(`{name="%s"}`, nonStaleSeries[i].Get("name"))] = []chunks.Sample{
sample{t: 100, f: v}, sample{t: 200, f: v}, sample{t: 300, f: v},
}
@@ -9727,7 +9481,7 @@ func TestStaleSeriesCompaction(t *testing.T) {
// Verify blocks from stale series.
{
expBlockQuery := make(map[string][]chunks.Sample)
- for i := 0; i < numSeriesPerCategory; i++ {
+ for i := range numSeriesPerCategory {
expBlockQuery[fmt.Sprintf(`{name="%s"}`, staleSeries[i].Get("name"))] = []chunks.Sample{
sample{t: 100, f: v}, sample{t: 200, f: staleV},
}
@@ -9772,14 +9526,14 @@ func TestStaleSeriesCompaction(t *testing.T) {
staleSeries, staleHist, staleFHist,
staleSeriesCrossingBoundary, staleHistCrossingBoundary, staleFHistCrossingBoundary,
} {
- for i := 0; i < numSeriesPerCategory; i++ {
+ for i := range numSeriesPerCategory {
seriesKey := fmt.Sprintf(`{name="%s"}`, category[i].Get("name"))
samples := expBlockQuery[seriesKey]
actSamples, exists := seriesSet[seriesKey]
require.Truef(t, exists, "series not found in result %s", seriesKey)
require.Len(t, actSamples, len(samples))
- for i := 0; i < len(samples)-1; i++ {
+ for i := range len(samples) - 1 {
require.Equal(t, samples[i], actSamples[i])
}
diff --git a/tsdb/encoding/encoding.go b/tsdb/encoding/encoding.go
index cc7d0990f6..a6d6fe4d44 100644
--- a/tsdb/encoding/encoding.go
+++ b/tsdb/encoding/encoding.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/errors/errors.go b/tsdb/errors/errors.go
index ded4ae3a27..138b38a8d2 100644
--- a/tsdb/errors/errors.go
+++ b/tsdb/errors/errors.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The etcd Authors
+// Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/errors/errors_test.go b/tsdb/errors/errors_test.go
index 146c66bf00..acffdea261 100644
--- a/tsdb/errors/errors_test.go
+++ b/tsdb/errors/errors_test.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/example_test.go b/tsdb/example_test.go
index 46deae5198..88632b69f9 100644
--- a/tsdb/example_test.go
+++ b/tsdb/example_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go
index cdbcd5cde6..b58976c911 100644
--- a/tsdb/exemplar.go
+++ b/tsdb/exemplar.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -36,10 +36,11 @@ const (
)
type CircularExemplarStorage struct {
- lock sync.RWMutex
- exemplars []circularBufferEntry
- nextIndex int
- metrics *ExemplarMetrics
+ lock sync.RWMutex
+ exemplars []circularBufferEntry
+ nextIndex int
+ metrics *ExemplarMetrics
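+	// oooTimeWindowMillis is how far behind the newest exemplar of a series an
+	// exemplar may be and still be accepted; a value <= 0 disables out-of-order exemplars.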
+ oooTimeWindowMillis int64
// Map of series labels as a string to index entry, which points to the first
// and last exemplar for the series in the exemplars circular buffer.
@@ -55,6 +56,7 @@ type indexEntry struct {
type circularBufferEntry struct {
exemplar exemplar.Exemplar
next int
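+	// prev links an entry back to the previous exemplar of the same series,
+	// turning the per-series chain into a doubly linked list.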
+ prev int
ref *indexEntry
}
@@ -115,15 +117,19 @@ func NewExemplarMetrics(reg prometheus.Registerer) *ExemplarMetrics {
// If we assume the average case 95 bytes per exemplar we can fit 5651272 exemplars in
// 1GB of extra memory, accounting for the fact that this is heap allocated space.
// If len <= 0, then the exemplar storage is essentially a noop storage but can later be
-// resized to store exemplars.
-func NewCircularExemplarStorage(length int64, m *ExemplarMetrics) (ExemplarStorage, error) {
+// resized to store exemplars. If oooTimeWindowMillis <= 0, out-of-order exemplars are disabled.
+func NewCircularExemplarStorage(length int64, m *ExemplarMetrics, oooTimeWindowMillis int64) (ExemplarStorage, error) {
if length < 0 {
length = 0
}
+ if oooTimeWindowMillis < 0 {
+ oooTimeWindowMillis = 0
+ }
c := &CircularExemplarStorage{
- exemplars: make([]circularBufferEntry, length),
- index: make(map[string]*indexEntry, length/estimatedExemplarsPerSeries),
- metrics: m,
+ exemplars: make([]circularBufferEntry, length),
+ index: make(map[string]*indexEntry, length/estimatedExemplarsPerSeries),
+ metrics: m,
+ oooTimeWindowMillis: oooTimeWindowMillis,
}
c.metrics.maxExemplars.Set(float64(length))
@@ -171,6 +177,9 @@ func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*label
}
se.SeriesLabels = idx.seriesLabels
+ // TODO: Since we maintain a doubly-linked-list, we can also iterate from head to tail
+ // which might be more performant if the selected interval is skewed to the head.
+
// Loop through all exemplars in the circular buffer for the current series.
for e.exemplar.Ts <= end {
if e.exemplar.Ts >= start {
@@ -253,16 +262,12 @@ func (ce *CircularExemplarStorage) validateExemplar(idx *indexEntry, e exemplar.
return storage.ErrDuplicateExemplar
}
- // Since during the scrape the exemplars are sorted first by timestamp, then value, then labels,
- // if any of these conditions are true, we know that the exemplar is either a duplicate
- // of a previous one (but not the most recent one as that is checked above) or out of order.
- // We now allow exemplars with duplicate timestamps as long as they have different values and/or labels
- // since that can happen for different buckets of a native histogram.
- // We do not distinguish between duplicates and out of order as iterating through the exemplars
- // to check for that would be expensive (versus just comparing with the most recent one) especially
- // since this is run under a lock, and not worth it as we just need to return an error so we do not
- // append the exemplar.
- if e.Ts < newestExemplar.Ts ||
+ // Reject exemplars older than the OOO time window relative to the newest exemplar.
+ // Exemplars with the same timestamp are ordered by value then label hash to detect
+ // duplicates without iterating through all stored exemplars, which would be too
+ // expensive under lock. Exemplars with equal timestamps but different values or
+ // labels are allowed to support multiple buckets of native histograms.
+ if (e.Ts < newestExemplar.Ts && e.Ts <= newestExemplar.Ts-ce.oooTimeWindowMillis) ||
(e.Ts == newestExemplar.Ts && e.Value < newestExemplar.Value) ||
(e.Ts == newestExemplar.Ts && e.Value == newestExemplar.Value && e.Labels.Hash() < newestExemplar.Labels.Hash()) {
if appended {
@@ -273,8 +278,19 @@ func (ce *CircularExemplarStorage) validateExemplar(idx *indexEntry, e exemplar.
return nil
}
-// Resize changes the size of exemplar buffer by allocating a new buffer and migrating data to it.
-// Exemplars are kept when possible. Shrinking will discard oldest data (in order of ingest) as needed.
+// SetOutOfOrderTimeWindow sets the out-of-order time window for exemplars in
+// milliseconds. Exemplars older than this window, relative to the newest
+// exemplar of the same series, are not added to the circular exemplar buffer.
+func (ce *CircularExemplarStorage) SetOutOfOrderTimeWindow(d int64) {
+ ce.lock.Lock()
+ defer ce.lock.Unlock()
+ ce.oooTimeWindowMillis = d
+}
+
+// Resize changes the size of the exemplar buffer by allocating a new buffer and
+// migrating data to it. Exemplars are kept when possible. Shrinking discards the
+// oldest data (in order of ingestion) as needed. Returns the number of migrated
+// exemplars.
func (ce *CircularExemplarStorage) Resize(l int64) int {
// Accept negative values as just 0 size.
if l <= 0 {
@@ -284,65 +300,83 @@ func (ce *CircularExemplarStorage) Resize(l int64) int {
ce.lock.Lock()
defer ce.lock.Unlock()
- if l == int64(len(ce.exemplars)) {
- return 0
- }
-
- oldBuffer := ce.exemplars
- oldNextIndex := int64(ce.nextIndex)
-
- ce.exemplars = make([]circularBufferEntry, l)
- ce.index = make(map[string]*indexEntry, l/estimatedExemplarsPerSeries)
- ce.nextIndex = 0
-
- // Replay as many entries as needed, starting with oldest first.
- count := min(l, int64(len(oldBuffer)))
-
+ oldSize := int64(len(ce.exemplars))
migrated := 0
-
- if l > 0 && len(oldBuffer) > 0 {
- // Rewind previous next index by count with wrap-around.
- // This math is essentially looking at nextIndex, where we would write the next exemplar to,
- // and find the index in the old exemplar buffer that we should start migrating exemplars from.
- // This way we don't migrate exemplars that would just be overwritten when migrating later exemplars.
- startIndex := (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer))
-
- var buf [1024]byte
- for i := range count {
- idx := (startIndex + i) % int64(len(oldBuffer))
- if oldBuffer[idx].ref != nil {
- ce.migrate(&oldBuffer[idx], buf[:])
- migrated++
- }
- }
+ switch {
+ case l == oldSize:
+ // NOOP.
+ return migrated
+ case l > oldSize:
+ migrated = ce.grow(l)
+ case l < oldSize:
+ migrated = ce.shrink(l)
}
ce.computeMetrics()
ce.metrics.maxExemplars.Set(float64(l))
-
return migrated
}
-// migrate is like AddExemplar but reuses existing structs. Expected to be called in batch and requires
-// external lock and does not compute metrics.
-func (ce *CircularExemplarStorage) migrate(entry *circularBufferEntry, buf []byte) {
- seriesLabels := entry.ref.seriesLabels.Bytes(buf[:0])
-
- idx, ok := ce.index[string(seriesLabels)]
- if !ok {
- idx = entry.ref
- idx.oldest = ce.nextIndex
- ce.index[string(seriesLabels)] = idx
- } else {
- entry.ref = idx
- ce.exemplars[idx.newest].next = ce.nextIndex
+// grow the circular buffer to have size l by allocating a new slice and copying
+// the old data to it. After growing, ce.nextIndex points to the next free entry
+// in the buffer. This function must be called with the lock acquired.
+func (ce *CircularExemplarStorage) grow(l int64) int {
+ oldSize := len(ce.exemplars)
+ newSlice := make([]circularBufferEntry, l)
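+ // Copy the old entries in ring order, starting at ce.nextIndex and wrapping
+ // around to the front, so ingestion order is preserved.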
+ ranges := []intRange{
+ {from: ce.nextIndex, to: oldSize},
+ {from: 0, to: ce.nextIndex},
}
- idx.newest = ce.nextIndex
+ ce.nextIndex = copyExemplarRanges(ce.index, newSlice, ce.exemplars, ranges)
+ ce.exemplars = newSlice
+ return oldSize
+}
- entry.next = noExemplar
- ce.exemplars[ce.nextIndex] = *entry
+// shrink the circular buffer by either trimming from the right or deleting the
+// oldest exemplars (in order of ingestion) to accommodate the new size l. This
+// function must be called with the lock acquired.
+func (ce *CircularExemplarStorage) shrink(l int64) (migrated int) {
+ oldSize := len(ce.exemplars)
+ diff := int(int64(oldSize) - l)
+ deleteStart := ce.nextIndex
+ deleteEnd := (deleteStart + diff) % oldSize
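+ // deleteEnd is exclusive and wraps around when the deleted span crosses the end
+ // of the buffer; when it equals deleteStart the whole buffer is deleted.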
- ce.nextIndex = (ce.nextIndex + 1) % len(ce.exemplars)
+ // Remove items from the buffer starting from ce.nextIndex. This drops older
+ // entries first in the order of ingestion.
+ for i := range diff {
+ idx := (deleteStart + i) % oldSize
+ ref := ce.exemplars[idx].ref
+ if ce.removeExemplar(&ce.exemplars[idx]) {
+ ce.removeIndex(ref)
+ }
+ }
+
+ newSlice := make([]circularBufferEntry, int(l))
+
+ switch {
+ case deleteStart == deleteEnd:
+ // The entire buffer was cleared (shrink to zero). Note that we don't have to
+ // touch the index here since the deletion loop above already removed its entries.
+ // Simply drop all elements and reset tracking pointers.
+ ce.exemplars = newSlice
+ ce.nextIndex = 0
+ return 0
+ case deleteStart < deleteEnd:
+ // We delete an "inner" section of the circular buffer.
+ migrated = copyExemplarRanges(ce.index, newSlice, ce.exemplars, []intRange{
+ {from: deleteEnd, to: oldSize},
+ {from: 0, to: deleteStart},
+ })
+ case deleteStart > deleteEnd:
+ // We keep an "inner" section of the circular buffer.
+ migrated = copyExemplarRanges(ce.index, newSlice, ce.exemplars, []intRange{
+ {from: deleteEnd, to: deleteStart},
+ })
+ }
+
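+ // The surviving entries now occupy the front of the new buffer; continue writing
+ // after them, wrapping around when the new buffer is exactly full.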
+ ce.nextIndex = migrated % int(l)
+ ce.exemplars = newSlice
+ return migrated
}
func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error {
@@ -358,7 +392,7 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp
var buf [1024]byte
seriesLabels := l.Bytes(buf[:])
- idx, ok := ce.index[string(seriesLabels)]
+ idx, indexExists := ce.index[string(seriesLabels)]
err := ce.validateExemplar(idx, e, true)
if err != nil {
if errors.Is(err, storage.ErrDuplicateExemplar) {
@@ -368,32 +402,77 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp
return err
}
- if !ok {
- idx = &indexEntry{oldest: ce.nextIndex, seriesLabels: l}
- ce.index[string(seriesLabels)] = idx
- } else {
- ce.exemplars[idx.newest].next = ce.nextIndex
- }
-
- if prev := &ce.exemplars[ce.nextIndex]; prev.ref != nil {
- // There exists an exemplar already on this ce.nextIndex entry,
- // drop it, to make place for others.
- if prev.next == noExemplar {
- // Last item for this series, remove index entry.
- var buf [1024]byte
- prevLabels := prev.ref.seriesLabels.Bytes(buf[:])
- delete(ce.index, string(prevLabels))
- } else {
- prev.ref.oldest = prev.next
+ // If we insert an out-of-order exemplar, we preemptively find the insertion
+ // index to check for duplicates.
+ var insertionIndex int
+ if indexExists {
+ outOfOrder := e.Ts >= ce.exemplars[idx.oldest].exemplar.Ts && e.Ts < ce.exemplars[idx.newest].exemplar.Ts
+ if outOfOrder {
+ insertionIndex = ce.findInsertionIndex(e, idx)
+ if ce.exemplars[insertionIndex].exemplar.Ts == e.Ts {
+ // Assume duplicate exemplar, noop.
+ // Native histograms will exercise this code path a lot due to
+ // having multiple exemplars per series so checking the
+ // value and labels would be too expensive.
+ return nil
+ }
}
}
- // Default the next value to -1 (which we use to detect that we've iterated through all exemplars for a series in Select)
- // since this is the first exemplar stored for this series.
- ce.exemplars[ce.nextIndex].next = noExemplar
+ // If the index didn't exist (new series), create one.
+ if !indexExists {
+ idx = &indexEntry{seriesLabels: l}
+ ce.index[string(seriesLabels)] = idx
+ }
+
+ // Evict the entry at ce.nextIndex if it is occupied (i.e. the buffer is full).
+ // Note that this doesn't invalidate the insertion index since out-of-order
+ // exemplars cannot be the oldest exemplar.
+ if prev := &ce.exemplars[ce.nextIndex]; prev.ref != nil {
+ prevRef := prev.ref
+ if ce.removeExemplar(prev) {
+ if prevRef == idx {
+ // Do not delete the indexEntry we're inserting to.
+ indexExists = false
+ } else {
+ ce.removeIndex(prevRef)
+ }
+ }
+ }
+
+ // We create a new entry in the linked list.
ce.exemplars[ce.nextIndex].exemplar = e
ce.exemplars[ce.nextIndex].ref = idx
- idx.newest = ce.nextIndex
+
+ switch {
+ case !indexExists:
+ // Add the first and only exemplar to the list.
+ idx.oldest = ce.nextIndex
+ idx.newest = ce.nextIndex
+ ce.exemplars[ce.nextIndex].prev = noExemplar
+ ce.exemplars[ce.nextIndex].next = noExemplar
+ case e.Ts >= ce.exemplars[idx.newest].exemplar.Ts:
+ // Add the exemplar at the tip (after newest).
+ ce.exemplars[idx.newest].next = ce.nextIndex
+ ce.exemplars[ce.nextIndex].prev = idx.newest
+ ce.exemplars[ce.nextIndex].next = noExemplar
+ idx.newest = ce.nextIndex
+ case e.Ts < ce.exemplars[idx.oldest].exemplar.Ts:
+ // Add the exemplar at the tail (before oldest).
+ ce.exemplars[idx.oldest].prev = ce.nextIndex
+ ce.exemplars[ce.nextIndex].prev = noExemplar
+ ce.exemplars[ce.nextIndex].next = idx.oldest
+ idx.oldest = ce.nextIndex
+ default:
+ // Insert the exemplar into the list after insertionIndex, the most recent
+ // exemplar that is not newer than it.
+ nextExemplar := ce.exemplars[insertionIndex].next
+ ce.exemplars[ce.nextIndex].prev = insertionIndex
+ ce.exemplars[ce.nextIndex].next = nextExemplar
+ ce.exemplars[insertionIndex].next = ce.nextIndex
+ if nextExemplar != noExemplar {
+ ce.exemplars[nextExemplar].prev = ce.nextIndex
+ }
+ }
ce.nextIndex = (ce.nextIndex + 1) % len(ce.exemplars)
@@ -402,6 +481,56 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp
return nil
}
+// removeExemplar removes the given entry from the circular buffer. Returns true
+// iff the deleted entry was the last exemplar of its series (so the series'
+// index entry can be removed).
+// This function must be called with the lock acquired.
+func (ce *CircularExemplarStorage) removeExemplar(entry *circularBufferEntry) bool {
+ ref := entry.ref
+ if ref == nil {
+ return false
+ }
+
+ if entry.prev != noExemplar {
+ ce.exemplars[entry.prev].next = entry.next
+ } else {
+ ref.oldest = entry.next
+ }
+
+ if entry.next != noExemplar {
+ ce.exemplars[entry.next].prev = entry.prev
+ } else {
+ ref.newest = entry.prev
+ }
+
+ // Mark this item as deleted.
+ entry.ref = nil
+
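+ // If both ends of the series' list now point to noExemplar, this was the last
+ // exemplar of the series and its index entry can be dropped by the caller.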
+ return ref.oldest == noExemplar && ref.newest == noExemplar
+}
+
+// removeIndex removes an indexEntry from the circular exemplar storage.
+// This function must be called with the lock acquired.
+func (ce *CircularExemplarStorage) removeIndex(ref *indexEntry) {
+ var buf [1024]byte
+ entryLabels := ref.seriesLabels.Bytes(buf[:])
+ delete(ce.index, string(entryLabels))
+}
+
+// findInsertionIndex finds the position at which e should be placed in the
+// doubly-linked list by traversing the linked list from idx.newest to idx.oldest
+// and following back links. Since out-of-order exemplars commonly lie close to
+// the newest entry, traversing from newest to oldest is usually faster.
+func (ce *CircularExemplarStorage) findInsertionIndex(e exemplar.Exemplar, idx *indexEntry) int {
+ for i := idx.newest; i != noExemplar; {
+ current := ce.exemplars[i]
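+ // The first entry (walking back from newest) that is not newer than e is the
+ // predecessor after which e will be linked.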
+ if current.exemplar.Ts <= e.Ts {
+ return i
+ }
+ i = current.prev
+ }
+ return idx.oldest
+}
+
func (ce *CircularExemplarStorage) computeMetrics() {
ce.metrics.seriesWithExemplarsInStorage.Set(float64(len(ce.index)))
@@ -443,3 +572,64 @@ func (ce *CircularExemplarStorage) IterateExemplars(f func(seriesLabels labels.L
}
return nil
}
+
+type intRange struct {
+ from, to int
+}
+
+func (e intRange) contains(i int) bool {
+ return i >= e.from && i < e.to
+}
+
+// copyExemplarRanges copies non-overlapping ranges from src into dest and
+// adjusts list pointers in dest and index accordingly. Returns the number of
+// copied items.
+func copyExemplarRanges(
+ index map[string]*indexEntry,
+ dest, src []circularBufferEntry,
+ ranges []intRange,
+) int {
+ offsets := make([]int, len(ranges))
+ n := 0
+ for i, rng := range ranges {
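+ // Entries copied from ranges[i] move by offsets[i] positions in dest; remember
+ // the offset so prev/next and index pointers into this range can be translated below.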
+ offsets[i] = n - rng.from
+ n += copy(dest[n:], src[rng.from:rng.to])
+ }
+ migratedEntries := n
+ for di := range n {
+ e := &dest[di]
+ if e.ref == nil {
+ // We potentially copied empty entries. Subtract them now to correctly show the
+ // number of "migrated" items.
+ migratedEntries--
+ continue
+ }
+ for i, rng := range ranges {
+ if rng.contains(e.prev) {
+ e.prev += offsets[i]
+ break
+ }
+ }
+ for i, rng := range ranges {
+ if rng.contains(e.next) {
+ e.next += offsets[i]
+ break
+ }
+ }
+ }
+ for _, idx := range index {
+ for i, rng := range ranges {
+ if rng.contains(idx.oldest) {
+ idx.oldest += offsets[i]
+ break
+ }
+ }
+ for i, rng := range ranges {
+ if rng.contains(idx.newest) {
+ idx.newest += offsets[i]
+ break
+ }
+ }
+ }
+ return migratedEntries
+}
diff --git a/tsdb/exemplar_test.go b/tsdb/exemplar_test.go
index bf6ad2fabb..01ffeb9541 100644
--- a/tsdb/exemplar_test.go
+++ b/tsdb/exemplar_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -18,6 +18,7 @@ import (
"fmt"
"math"
"reflect"
+ "sort"
"strconv"
"strings"
"sync"
@@ -35,7 +36,7 @@ var eMetrics = NewExemplarMetrics(prometheus.DefaultRegisterer)
// Tests the same exemplar cases as AddExemplar, but specifically the ValidateExemplar function so it can be relied on externally.
func TestValidateExemplar(t *testing.T) {
- exs, err := NewCircularExemplarStorage(2, eMetrics)
+ exs, err := NewCircularExemplarStorage(2, eMetrics, 0)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
@@ -76,54 +77,624 @@ func TestValidateExemplar(t *testing.T) {
require.Equal(t, storage.ErrExemplarLabelLength, es.ValidateExemplar(l, e4))
}
-func TestAddExemplar(t *testing.T) {
- exs, err := NewCircularExemplarStorage(2, eMetrics)
- require.NoError(t, err)
- es := exs.(*CircularExemplarStorage)
+func TestCircularExemplarStorage_AddExemplar(t *testing.T) {
+ series1 := labels.FromStrings("trace_id", "foo")
+ series2 := labels.FromStrings("trace_id", "bar")
- l := labels.FromStrings("service", "asdf")
- e := exemplar.Exemplar{
- Labels: labels.FromStrings("trace_id", "qwerty"),
- Value: 0.1,
- Ts: 1,
+ series1Matcher := []*labels.Matcher{{
+ Type: labels.MatchEqual,
+ Name: "trace_id",
+ Value: series1.Get("trace_id"),
+ }}
+
+ series2Matcher := []*labels.Matcher{{
+ Type: labels.MatchEqual,
+ Name: "trace_id",
+ Value: series2.Get("trace_id"),
+ }}
+
+ testCases := []struct {
+ name string
+ size int64
+ exemplars []exemplar.Exemplar
+ wantExemplars []exemplar.Exemplar
+ matcher []*labels.Matcher
+ wantError error
+ }{
+ {
+ name: "insert after newest",
+ size: 3,
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ },
+ matcher: series1Matcher,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ },
+ },
+ {
+ name: "insert before oldest",
+ size: 3,
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 2},
+ {Labels: series1, Value: 0.2, Ts: 1},
+ },
+ matcher: series1Matcher,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.2, Ts: 1},
+ {Labels: series1, Value: 0.1, Ts: 2},
+ },
+ },
+ {
+ name: "insert in between",
+ size: 3,
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 3},
+ {Labels: series1, Value: 0.3, Ts: 2},
+ },
+ matcher: series1Matcher,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.3, Ts: 2},
+ {Labels: series1, Value: 0.2, Ts: 3},
+ },
+ },
+ {
+ name: "insert after newest with overflow",
+ size: 3,
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ {Labels: series1, Value: 0.4, Ts: 4},
+ },
+ matcher: series1Matcher,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ {Labels: series1, Value: 0.4, Ts: 4},
+ },
+ },
+ {
+ name: "insert before oldest with overflow",
+ size: 3,
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ {Labels: series1, Value: 0.4, Ts: 0},
+ },
+ matcher: series1Matcher,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.4, Ts: 0},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ },
+ },
+ {
+ name: "insert between with overflow",
+ size: 3,
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 3},
+ {Labels: series1, Value: 0.3, Ts: 4},
+ {Labels: series1, Value: 0.4, Ts: 2},
+ },
+ matcher: series1Matcher,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.4, Ts: 2},
+ {Labels: series1, Value: 0.2, Ts: 3},
+ {Labels: series1, Value: 0.3, Ts: 4},
+ },
+ },
+ {
+ name: "insert out of the OOO window",
+ size: 3,
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 200},
+ {Labels: series1, Value: 0.2, Ts: 1},
+ },
+ wantError: storage.ErrOutOfOrderExemplar,
+ },
+ {
+ name: "insert multiple series",
+ size: 3,
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 3},
+ {Labels: series2, Value: 0.3, Ts: 4},
+ },
+ matcher: series1Matcher,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 3},
+ },
+ },
+ {
+ name: "insert multiple series with overflow",
+ size: 3,
+ exemplars: []exemplar.Exemplar{
+ {Labels: series2, Value: 0.1, Ts: 1},
+ {Labels: series2, Value: 0.2, Ts: 2},
+ {Labels: series2, Value: 0.3, Ts: 3},
+ {Labels: series1, Value: 0.4, Ts: 4},
+ },
+ matcher: series2Matcher,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series2, Value: 0.2, Ts: 2},
+ {Labels: series2, Value: 0.3, Ts: 3},
+ },
+ },
+ {
+ name: "series1 overflows series2 out-of-order",
+ size: 3,
+ exemplars: []exemplar.Exemplar{
+ {Labels: series2, Value: 0.1, Ts: 3},
+ {Labels: series2, Value: 0.2, Ts: 2},
+ {Labels: series2, Value: 0.3, Ts: 4},
+ {Labels: series1, Value: 0.4, Ts: 4},
+ {Labels: series1, Value: 0.5, Ts: 1},
+ },
+ matcher: series2Matcher,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series2, Value: 0.3, Ts: 4},
+ },
+ },
+ {
+ name: "ignore duplicate exemplars",
+ size: 3,
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 3},
+ {Labels: series1, Value: 0.1, Ts: 3},
+ },
+ matcher: series1Matcher,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 3},
+ },
+ },
+ {
+ name: "ignore duplicate exemplars when buffer is full",
+ size: 3,
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 3},
+ {Labels: series1, Value: 0.2, Ts: 4},
+ {Labels: series1, Value: 0.3, Ts: 5},
+ {Labels: series1, Value: 0.3, Ts: 5},
+ },
+ matcher: series1Matcher,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 3},
+ {Labels: series1, Value: 0.2, Ts: 4},
+ {Labels: series1, Value: 0.3, Ts: 5},
+ },
+ },
+ {
+ name: "empty timestamps are valid",
+ size: 3,
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 0},
+ {Labels: series1, Value: 0.2, Ts: 0},
+ },
+ matcher: series1Matcher,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 0},
+ {Labels: series1, Value: 0.2, Ts: 0},
+ },
+ },
+ {
+ name: "exemplar label length exceeds maximum",
+ size: 3,
+ exemplars: []exemplar.Exemplar{
+ {Labels: labels.FromStrings("a", strings.Repeat("b", exemplar.ExemplarMaxLabelSetLength)), Value: 0.1, Ts: 2},
+ },
+ wantError: storage.ErrExemplarLabelLength,
+ },
+ {
+ name: "native histograms",
+ size: 6,
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ },
+ matcher: series1Matcher,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ },
+ },
+ {
+ name: "evict only exemplar for series then re-add",
+ size: 2,
+ exemplars: []exemplar.Exemplar{
+ // series1 at index 0, series2 at index 1, then series1 evicts its own only exemplar
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series2, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ },
+ matcher: series1Matcher,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.3, Ts: 3},
+ },
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ exs, err := NewCircularExemplarStorage(tc.size, eMetrics, 100)
+ require.NoError(t, err)
+ es := exs.(*CircularExemplarStorage)
+
+ // Add exemplars and compare tc.wantError against the first failing exemplar.
+ var addError error
+ for i, ex := range tc.exemplars {
+ addError = es.AddExemplar(ex.Labels, ex)
+ if addError != nil {
+ break
+ }
+ if testing.Verbose() {
+ t.Logf("Buffer[%d]:\n%s", i, debugCircularBuffer(es))
+ }
+ }
+ if tc.wantError == nil {
+ require.NoError(t, addError)
+ } else {
+ require.ErrorIs(t, addError, tc.wantError)
+ }
+ if addError != nil {
+ return
+ }
+
+ // Ensure exemplars are returned correctly and in-order.
+ gotExemplars, err := es.Select(0, 1000, tc.matcher)
+ require.NoError(t, err)
+ if len(tc.wantExemplars) == 0 {
+ require.Empty(t, gotExemplars)
+ } else {
+ require.Len(t, gotExemplars, 1)
+ require.Equal(t, tc.wantExemplars, gotExemplars[0].Exemplars)
+ }
+ })
+ }
+}
+
+func TestCircularExemplarStorage_Resize(t *testing.T) {
+ series1 := labels.FromStrings("trace_id", "foo")
+ series2 := labels.FromStrings("trace_id", "bar")
+ matcher1 := []*labels.Matcher{
+ labels.MustNewMatcher(labels.MatchRegexp, "trace_id", "(foo|bar)"),
}
- require.NoError(t, es.AddExemplar(l, e))
- require.Equal(t, 0, es.index[string(l.Bytes(nil))].newest, "exemplar was not stored correctly")
-
- e2 := exemplar.Exemplar{
- Labels: labels.FromStrings("trace_id", "zxcvb"),
- Value: 0.1,
- Ts: 2,
+ testCases := []struct {
+ name string
+ exemplars []exemplar.Exemplar
+ resize int64
+ wantExemplars []exemplar.Exemplar
+ wantNextIndex int
+ wantError error
+ }{
+ {
+ name: "in-order, grow",
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ },
+ resize: 10,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ },
+ wantNextIndex: 2,
+ },
+ {
+ name: "in-order, shrink",
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ },
+ resize: 2,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ },
+ wantNextIndex: 0,
+ },
+ {
+ name: "out-of-order, shrink",
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.3, Ts: 3},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.1, Ts: 1},
+ },
+ resize: 2,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ },
+ wantNextIndex: 0,
+ },
+ {
+ name: "out-of-order, grow",
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.3, Ts: 3},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ },
+ resize: 5,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ },
+ wantNextIndex: 2,
+ },
+ {
+ name: "duplicate timestamps",
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 1},
+ {Labels: series1, Value: 0.3, Ts: 2},
+ },
+ resize: 3,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 1},
+ {Labels: series1, Value: 0.3, Ts: 2},
+ },
+ },
+ {
+ name: "empty input, grow",
+ exemplars: []exemplar.Exemplar{},
+ resize: 10,
+ wantExemplars: []exemplar.Exemplar{},
+ wantNextIndex: 0,
+ },
+ {
+ name: "empty input, shrink",
+ exemplars: []exemplar.Exemplar{},
+ resize: 1,
+ wantExemplars: []exemplar.Exemplar{},
+ wantNextIndex: 0,
+ },
+ {
+ name: "shrink to zero",
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ },
+ resize: 0,
+ wantExemplars: []exemplar.Exemplar{},
+ wantNextIndex: 0,
+ },
+ {
+ name: "multiple series, shrink",
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series2, Value: 1.1, Ts: 2},
+ {Labels: series1, Value: 0.2, Ts: 3},
+ {Labels: series2, Value: 1.2, Ts: 4},
+ },
+ resize: 2,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.2, Ts: 3},
+ {Labels: series2, Value: 1.2, Ts: 4},
+ },
+ wantNextIndex: 0,
+ },
+ {
+ name: "shrink to one",
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ },
+ resize: 1,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.2, Ts: 2},
+ },
+ wantNextIndex: 0,
+ },
+ {
+ name: "shrink to two",
+ exemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ },
+ resize: 2,
+ wantExemplars: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ },
+ wantNextIndex: 1,
+ },
}
- require.NoError(t, es.AddExemplar(l, e2))
- require.Equal(t, 1, es.index[string(l.Bytes(nil))].newest, "exemplar was not stored correctly, location of newest exemplar for series in index did not update")
- require.True(t, es.exemplars[es.index[string(l.Bytes(nil))].newest].exemplar.Equals(e2), "exemplar was not stored correctly, expected %+v got: %+v", e2, es.exemplars[es.index[string(l.Bytes(nil))].newest].exemplar)
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ exs, err := NewCircularExemplarStorage(3, eMetrics, 100)
+ require.NoError(t, err)
+ es := exs.(*CircularExemplarStorage)
- require.NoError(t, es.AddExemplar(l, e2), "no error is expected attempting to add duplicate exemplar")
+ for _, ex := range tc.exemplars {
+ require.NoError(t, es.AddExemplar(ex.Labels, ex))
+ }
- e3 := e2
- e3.Ts = 3
- require.NoError(t, es.AddExemplar(l, e3), "no error is expected when attempting to add duplicate exemplar, even with different timestamp")
+ // Resize the circular buffer.
+ if testing.Verbose() {
+ t.Logf("Buffer[before-resize]:\n%s", debugCircularBuffer(es))
+ }
+ es.Resize(tc.resize)
+ if testing.Verbose() {
+ t.Logf("Buffer[after-resize]:\n%s", debugCircularBuffer(es))
+ }
- e3.Ts = 1
- e3.Value = 0.3
- require.Equal(t, storage.ErrOutOfOrderExemplar, es.AddExemplar(l, e3))
-
- e4 := exemplar.Exemplar{
- Labels: labels.FromStrings("a", strings.Repeat("b", exemplar.ExemplarMaxLabelSetLength)),
- Value: 0.1,
- Ts: 2,
+ // Ensure exemplars are returned correctly and in-order.
+ gotExemplars, err := es.Select(0, 1000, matcher1)
+ require.NoError(t, err)
+ flat := make([]exemplar.Exemplar, 0)
+ for _, group := range gotExemplars {
+ flat = append(flat, group.Exemplars...)
+ }
+ sort.Slice(flat, func(i, j int) bool {
+ return flat[i].Ts < flat[j].Ts
+ })
+ require.Equal(t, tc.wantExemplars, flat, "exemplar mismatch")
+ require.Equal(t, tc.wantNextIndex, es.nextIndex, "next index mismatch")
+ })
+ }
+
+ resizeTwiceCases := []struct {
+ name string
+ addExemplars1 []exemplar.Exemplar
+ resize1 int64
+ wantExemplars1 []exemplar.Exemplar
+ resize2 int64
+ addExemplars2 []exemplar.Exemplar
+ wantExemplars2 []exemplar.Exemplar
+ }{
+ {
+ name: "shrink then grow ordered",
+ addExemplars1: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ {Labels: series1, Value: 0.4, Ts: 4},
+ },
+ resize1: 2,
+ wantExemplars1: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.3, Ts: 3},
+ {Labels: series1, Value: 0.4, Ts: 4},
+ },
+ resize2: 5,
+ addExemplars2: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.5, Ts: 5},
+ {Labels: series1, Value: 0.6, Ts: 6},
+ {Labels: series1, Value: 0.7, Ts: 7},
+ },
+ wantExemplars2: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.3, Ts: 3},
+ {Labels: series1, Value: 0.4, Ts: 4},
+ {Labels: series1, Value: 0.5, Ts: 5},
+ {Labels: series1, Value: 0.6, Ts: 6},
+ {Labels: series1, Value: 0.7, Ts: 7},
+ },
+ },
+ {
+ name: "shrink then grow out-of-order",
+ addExemplars1: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.4, Ts: 4},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ },
+ resize1: 2,
+ wantExemplars1: []exemplar.Exemplar{
+ // We delete in the order of ingestion, not temporally.
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ },
+ resize2: 5,
+ addExemplars2: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.7, Ts: 7},
+ {Labels: series1, Value: 0.6, Ts: 6},
+ {Labels: series1, Value: 0.5, Ts: 5},
+ },
+ wantExemplars2: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ {Labels: series1, Value: 0.5, Ts: 5},
+ {Labels: series1, Value: 0.6, Ts: 6},
+ {Labels: series1, Value: 0.7, Ts: 7},
+ },
+ },
+ {
+ name: "grow then shrink ordered",
+ addExemplars1: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ {Labels: series1, Value: 0.4, Ts: 4},
+ },
+ resize1: 5,
+ wantExemplars1: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ {Labels: series1, Value: 0.4, Ts: 4},
+ },
+ resize2: 2,
+ addExemplars2: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.5, Ts: 5},
+ {Labels: series1, Value: 0.6, Ts: 6},
+ {Labels: series1, Value: 0.7, Ts: 7},
+ },
+ wantExemplars2: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.6, Ts: 6},
+ {Labels: series1, Value: 0.7, Ts: 7},
+ },
+ },
+ {
+ name: "grow then shrink out-of-order",
+ addExemplars1: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.1, Ts: 1},
+ {Labels: series1, Value: 0.4, Ts: 4},
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ },
+ resize1: 5,
+ wantExemplars1: []exemplar.Exemplar{
+ // We delete in the order of ingestion, not temporally.
+ {Labels: series1, Value: 0.2, Ts: 2},
+ {Labels: series1, Value: 0.3, Ts: 3},
+ {Labels: series1, Value: 0.4, Ts: 4},
+ },
+ resize2: 2,
+ addExemplars2: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.7, Ts: 7},
+ {Labels: series1, Value: 0.5, Ts: 5},
+ {Labels: series1, Value: 0.6, Ts: 6},
+ },
+ wantExemplars2: []exemplar.Exemplar{
+ {Labels: series1, Value: 0.5, Ts: 5},
+ {Labels: series1, Value: 0.6, Ts: 6},
+ },
+ },
+ }
+
+ for _, tc := range resizeTwiceCases {
+ t.Run(tc.name, func(t *testing.T) {
+ exs, err := NewCircularExemplarStorage(3, eMetrics, 100)
+ require.NoError(t, err)
+ es := exs.(*CircularExemplarStorage)
+ for _, ex := range tc.addExemplars1 {
+ require.NoError(t, es.AddExemplar(ex.Labels, ex))
+ }
+ es.Resize(tc.resize1)
+ gotExemplars, err := es.Select(0, 1000, matcher1)
+ require.NoError(t, err)
+ require.Len(t, gotExemplars, 1)
+ require.Equal(t, tc.wantExemplars1, gotExemplars[0].Exemplars)
+ es.Resize(tc.resize2)
+ for _, ex := range tc.addExemplars2 {
+ require.NoError(t, es.AddExemplar(ex.Labels, ex))
+ }
+ if testing.Verbose() {
+ t.Logf("Buffer[after-resize2]:\n%s", debugCircularBuffer(es))
+ }
+ gotExemplars, err = es.Select(0, 1000, matcher1)
+ require.NoError(t, err)
+ require.Len(t, gotExemplars, 1)
+ require.Equal(t, tc.wantExemplars2, gotExemplars[0].Exemplars)
+ })
}
- require.Equal(t, storage.ErrExemplarLabelLength, es.AddExemplar(l, e4))
}
func TestStorageOverflow(t *testing.T) {
// Test that circular buffer index and assignment
// works properly, adding more exemplars than can
// be stored and then querying for them.
- exs, err := NewCircularExemplarStorage(5, eMetrics)
+ exs, err := NewCircularExemplarStorage(5, eMetrics, 0)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
@@ -152,7 +723,7 @@ func TestStorageOverflow(t *testing.T) {
}
func TestSelectExemplar(t *testing.T) {
- exs, err := NewCircularExemplarStorage(5, eMetrics)
+ exs, err := NewCircularExemplarStorage(5, eMetrics, 0)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
@@ -179,7 +750,7 @@ func TestSelectExemplar(t *testing.T) {
}
func TestSelectExemplar_MultiSeries(t *testing.T) {
- exs, err := NewCircularExemplarStorage(5, eMetrics)
+ exs, err := NewCircularExemplarStorage(5, eMetrics, 0)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
@@ -223,7 +794,7 @@ func TestSelectExemplar_MultiSeries(t *testing.T) {
func TestSelectExemplar_TimeRange(t *testing.T) {
var lenEs int64 = 5
- exs, err := NewCircularExemplarStorage(lenEs, eMetrics)
+ exs, err := NewCircularExemplarStorage(lenEs, eMetrics, 0)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
@@ -251,7 +822,7 @@ func TestSelectExemplar_TimeRange(t *testing.T) {
// Test to ensure that even though a series matches more than one matcher from the
// query that it's exemplars are only included in the result a single time.
func TestSelectExemplar_DuplicateSeries(t *testing.T) {
- exs, err := NewCircularExemplarStorage(4, eMetrics)
+ exs, err := NewCircularExemplarStorage(4, eMetrics, 0)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
@@ -286,7 +857,7 @@ func TestSelectExemplar_DuplicateSeries(t *testing.T) {
}
func TestIndexOverwrite(t *testing.T) {
- exs, err := NewCircularExemplarStorage(2, eMetrics)
+ exs, err := NewCircularExemplarStorage(2, eMetrics, 0)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
@@ -374,7 +945,7 @@ func TestResize(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
- exs, err := NewCircularExemplarStorage(tc.startSize, eMetrics)
+ exs, err := NewCircularExemplarStorage(tc.startSize, eMetrics, 0)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
@@ -386,7 +957,14 @@ func TestResize(t *testing.T) {
require.NoError(t, err)
}
+ if testing.Verbose() {
+ t.Logf("Buffer[before-resize]:\n%s", debugCircularBuffer(es))
+ }
resized := es.Resize(tc.newCount)
+ if testing.Verbose() {
+ t.Logf("Buffer[after-resize]:\n%s", debugCircularBuffer(es))
+ }
+
require.Equal(t, tc.expectedMigrated, resized)
q, err := es.Querier(context.TODO())
@@ -421,7 +999,7 @@ func BenchmarkAddExemplar(b *testing.B) {
b.Run(fmt.Sprintf("%d/%d", n, capacity), func(b *testing.B) {
for b.Loop() {
b.StopTimer()
- exs, err := NewCircularExemplarStorage(int64(capacity), eMetrics)
+ exs, err := NewCircularExemplarStorage(int64(capacity), eMetrics, 0)
require.NoError(b, err)
es := exs.(*CircularExemplarStorage)
var l labels.Labels
@@ -442,6 +1020,91 @@ func BenchmarkAddExemplar(b *testing.B) {
}
}
+func BenchmarkAddExemplar_OutOfOrder(b *testing.B) {
+ // We need to include these labels since we do length calculation
+ // before adding.
+ exLabels := labels.FromStrings("trace_id", "89620921")
+
+ const (
+ capacity = 5000
+ )
+
+ fillOneSeries := func(es *CircularExemplarStorage) {
+ for i := range capacity {
+ e := exemplar.Exemplar{Value: float64(i), Ts: int64(i), Labels: exLabels}
+ if err := es.AddExemplar(exLabels, e); err != nil {
+ panic(err)
+ }
+ }
+ }
+
+ fillMultipleSeries := func(es *CircularExemplarStorage) {
+ for i := range capacity {
+ l := labels.FromStrings("service", strconv.Itoa(i))
+ e := exemplar.Exemplar{Value: float64(i), Ts: int64(i), Labels: l}
+ if err := es.AddExemplar(l, e); err != nil {
+ panic(err)
+ }
+ }
+ }
+
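+ // outOfOrder keeps every third timestamp in order and reflects the remaining
+ // ones around the capacity (one variant shifted by 100) to mix in-order and
+ // out-of-order appends.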
+ outOfOrder := func(ts *int64, _ *labels.Labels) {
+ switch *ts % 3 {
+ case 0:
+ return
+ case 1:
+ *ts = capacity - *ts
+ case 2:
+ *ts = (capacity - *ts) + 100
+ }
+ }
+
+ reverseOrder := func(ts *int64, _ *labels.Labels) {
+ *ts = capacity - *ts
+ }
+
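+ // multipleSeries wraps a timestamp mutator so that each exemplar is also
+ // assigned a series label derived from its (mutated) timestamp, exercising the
+ // multi-series code paths.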
+ multipleSeries := func(f func(*int64, *labels.Labels)) func(*int64, *labels.Labels) {
+ return func(ts *int64, l *labels.Labels) {
+ f(ts, l)
+ *l = labels.FromStrings("service", strconv.Itoa(int(*ts)))
+ }
+ }
+
+ for fillName, setup := range map[string]func(es *CircularExemplarStorage){
+ "empty": func(*CircularExemplarStorage) {},
+ "full-one": fillOneSeries,
+ "full-multiple": fillMultipleSeries,
+ } {
+ for orderName, forEach := range map[string]func(ts *int64, l *labels.Labels){
+ "in-order": func(*int64, *labels.Labels) {},
+ "reverse": reverseOrder,
+ "out-of-order": outOfOrder,
+ "multi-in-order": multipleSeries(func(*int64, *labels.Labels) {}),
+ "multi-reverse": multipleSeries(reverseOrder),
+ "multi-out-of-order": multipleSeries(outOfOrder),
+ } {
+ b.Run(fmt.Sprintf("%s/%s", fillName, orderName), func(b *testing.B) {
+ exs, err := NewCircularExemplarStorage(int64(capacity), eMetrics, 100000)
+ require.NoError(b, err)
+ es := exs.(*CircularExemplarStorage)
+ l := labels.FromStrings("service", "0")
+ setup(es)
+ b.ResetTimer()
+ for b.Loop() {
+ for i := range capacity {
+ ts := int64(i)
+ forEach(&ts, &l)
+ err = es.AddExemplar(l, exemplar.Exemplar{Value: float64(i), Ts: ts, Labels: l})
+ if err != nil {
+ b.Fatalf("Failed to insert item %d %s: %v", i, l, err)
+ }
+ }
+ }
+ })
+ }
+ }
+}
+
func BenchmarkResizeExemplars(b *testing.B) {
testCases := []struct {
name string
@@ -479,7 +1142,7 @@ func BenchmarkResizeExemplars(b *testing.B) {
b.Run(fmt.Sprintf("%s-%d-to-%d", tc.name, tc.startSize, tc.endSize), func(b *testing.B) {
for b.Loop() {
b.StopTimer()
- exs, err := NewCircularExemplarStorage(tc.startSize, eMetrics)
+ exs, err := NewCircularExemplarStorage(tc.startSize, eMetrics, 0)
require.NoError(b, err)
es := exs.(*CircularExemplarStorage)
@@ -504,7 +1167,7 @@ func BenchmarkResizeExemplars(b *testing.B) {
// TestCircularExemplarStorage_Concurrent_AddExemplar_Resize tries to provoke a data race between AddExemplar and Resize.
// Run with race detection enabled.
func TestCircularExemplarStorage_Concurrent_AddExemplar_Resize(t *testing.T) {
- exs, err := NewCircularExemplarStorage(0, eMetrics)
+ exs, err := NewCircularExemplarStorage(0, eMetrics, 0)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
@@ -537,3 +1200,30 @@ func TestCircularExemplarStorage_Concurrent_AddExemplar_Resize(t *testing.T) {
}
}
}
+
+// debugCircularBuffer iterates over all exemplars in the circular exemplar
+// storage and returns them as a string. The textual representation contains
+// index pointers and helps when debugging the exemplar storage.
+func debugCircularBuffer(ce *CircularExemplarStorage) string {
+ var sb strings.Builder
+ for i, e := range ce.exemplars {
+ if e.ref == nil {
+ continue
+ }
+ sb.WriteString(fmt.Sprintf(
+ "i: %d, ts: %d, next: %d, prev: %d",
+ i, e.exemplar.Ts, e.next, e.prev,
+ ))
+ for _, idx := range ce.index {
+ if i == idx.newest {
+ sb.WriteString(" <- newest " + idx.seriesLabels.String())
+ }
+ if i == idx.oldest {
+ sb.WriteString(" <- oldest " + idx.seriesLabels.String())
+ }
+ }
+ sb.WriteString("\n")
+ }
+ sb.WriteString(fmt.Sprintf("Next index: %d\n", ce.nextIndex))
+ return sb.String()
+}
diff --git a/tsdb/fileutil/dir.go b/tsdb/fileutil/dir.go
index ad039d2231..795c9f221b 100644
--- a/tsdb/fileutil/dir.go
+++ b/tsdb/fileutil/dir.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/dir_unix.go b/tsdb/fileutil/dir_unix.go
index 2afb2aeaba..05c24893cd 100644
--- a/tsdb/fileutil/dir_unix.go
+++ b/tsdb/fileutil/dir_unix.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The etcd Authors
+// Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/fileutil/dir_windows.go b/tsdb/fileutil/dir_windows.go
index 307077ebc3..cfd55291d5 100644
--- a/tsdb/fileutil/dir_windows.go
+++ b/tsdb/fileutil/dir_windows.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The etcd Authors
+// Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/fileutil/direct_io.go b/tsdb/fileutil/direct_io.go
index ad306776ca..76815de6b1 100644
--- a/tsdb/fileutil/direct_io.go
+++ b/tsdb/fileutil/direct_io.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/direct_io_force.go b/tsdb/fileutil/direct_io_force.go
index bb65403911..8ae4ef4fd7 100644
--- a/tsdb/fileutil/direct_io_force.go
+++ b/tsdb/fileutil/direct_io_force.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/direct_io_linux.go b/tsdb/fileutil/direct_io_linux.go
index a1d5f9577d..0640b503f6 100644
--- a/tsdb/fileutil/direct_io_linux.go
+++ b/tsdb/fileutil/direct_io_linux.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/direct_io_unsupported.go b/tsdb/fileutil/direct_io_unsupported.go
index a03782fe42..f17c68705f 100644
--- a/tsdb/fileutil/direct_io_unsupported.go
+++ b/tsdb/fileutil/direct_io_unsupported.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/direct_io_writer.go b/tsdb/fileutil/direct_io_writer.go
index 793d081481..3eeb2aa225 100644
--- a/tsdb/fileutil/direct_io_writer.go
+++ b/tsdb/fileutil/direct_io_writer.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/direct_io_writer_test.go b/tsdb/fileutil/direct_io_writer_test.go
index e60df1f3bc..367b7fa6aa 100644
--- a/tsdb/fileutil/direct_io_writer_test.go
+++ b/tsdb/fileutil/direct_io_writer_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/fileutil.go b/tsdb/fileutil/fileutil.go
index 523f99292c..0aa67e113a 100644
--- a/tsdb/fileutil/fileutil.go
+++ b/tsdb/fileutil/fileutil.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/flock.go b/tsdb/fileutil/flock.go
index e0082e2f2c..345581cc92 100644
--- a/tsdb/fileutil/flock.go
+++ b/tsdb/fileutil/flock.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/flock_js.go b/tsdb/fileutil/flock_js.go
index 6029cdf4d8..025e678a1d 100644
--- a/tsdb/fileutil/flock_js.go
+++ b/tsdb/fileutil/flock_js.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/flock_plan9.go b/tsdb/fileutil/flock_plan9.go
index 3b9550e7f2..543195e066 100644
--- a/tsdb/fileutil/flock_plan9.go
+++ b/tsdb/fileutil/flock_plan9.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/flock_solaris.go b/tsdb/fileutil/flock_solaris.go
index 8ca919f3b0..b7a69d9063 100644
--- a/tsdb/fileutil/flock_solaris.go
+++ b/tsdb/fileutil/flock_solaris.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/flock_test.go b/tsdb/fileutil/flock_test.go
index 7aff789a26..dec7d4e98d 100644
--- a/tsdb/fileutil/flock_test.go
+++ b/tsdb/fileutil/flock_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/flock_unix.go b/tsdb/fileutil/flock_unix.go
index 25de0ffb22..eddf427e7e 100644
--- a/tsdb/fileutil/flock_unix.go
+++ b/tsdb/fileutil/flock_unix.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/flock_windows.go b/tsdb/fileutil/flock_windows.go
index 1c17ff4ea3..64ce827324 100644
--- a/tsdb/fileutil/flock_windows.go
+++ b/tsdb/fileutil/flock_windows.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/mmap.go b/tsdb/fileutil/mmap.go
index 782ff27ec9..9893d1014b 100644
--- a/tsdb/fileutil/mmap.go
+++ b/tsdb/fileutil/mmap.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/mmap_386.go b/tsdb/fileutil/mmap_386.go
index 85c0cce096..01e4333a42 100644
--- a/tsdb/fileutil/mmap_386.go
+++ b/tsdb/fileutil/mmap_386.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/mmap_amd64.go b/tsdb/fileutil/mmap_amd64.go
index 71fc568bd5..6d426f1866 100644
--- a/tsdb/fileutil/mmap_amd64.go
+++ b/tsdb/fileutil/mmap_amd64.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/mmap_arm64.go b/tsdb/fileutil/mmap_arm64.go
index 71fc568bd5..6d426f1866 100644
--- a/tsdb/fileutil/mmap_arm64.go
+++ b/tsdb/fileutil/mmap_arm64.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/mmap_js.go b/tsdb/fileutil/mmap_js.go
index f29106fc1e..59e1fcf877 100644
--- a/tsdb/fileutil/mmap_js.go
+++ b/tsdb/fileutil/mmap_js.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/mmap_unix.go b/tsdb/fileutil/mmap_unix.go
index 3d15e1a8c1..b35352fef9 100644
--- a/tsdb/fileutil/mmap_unix.go
+++ b/tsdb/fileutil/mmap_unix.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/fileutil/mmap_windows.go b/tsdb/fileutil/mmap_windows.go
index b942264123..8322f68971 100644
--- a/tsdb/fileutil/mmap_windows.go
+++ b/tsdb/fileutil/mmap_windows.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -27,14 +27,15 @@ func mmap(f *os.File, size int) ([]byte, error) {
}
addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(size))
- if addr == 0 {
- return nil, os.NewSyscallError("MapViewOfFile", errno)
- }
if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
return nil, os.NewSyscallError("CloseHandle", err)
}
+ if addr == 0 {
+ return nil, os.NewSyscallError("MapViewOfFile", errno)
+ }
+
return (*[maxMapSize]byte)(unsafe.Pointer(addr))[:size], nil
}
diff --git a/tsdb/fileutil/preallocate.go b/tsdb/fileutil/preallocate.go
index c747b7cf81..e9a587b2bd 100644
--- a/tsdb/fileutil/preallocate.go
+++ b/tsdb/fileutil/preallocate.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The etcd Authors
+// Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/fileutil/preallocate_darwin.go b/tsdb/fileutil/preallocate_darwin.go
index 1d9eb806d1..58f83c5ba5 100644
--- a/tsdb/fileutil/preallocate_darwin.go
+++ b/tsdb/fileutil/preallocate_darwin.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The etcd Authors
+// Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/fileutil/preallocate_linux.go b/tsdb/fileutil/preallocate_linux.go
index 026c69b354..1271c48928 100644
--- a/tsdb/fileutil/preallocate_linux.go
+++ b/tsdb/fileutil/preallocate_linux.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The etcd Authors
+// Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/fileutil/preallocate_other.go b/tsdb/fileutil/preallocate_other.go
index e7fd937a43..55a44c7636 100644
--- a/tsdb/fileutil/preallocate_other.go
+++ b/tsdb/fileutil/preallocate_other.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The etcd Authors
+// Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/fileutil/sync.go b/tsdb/fileutil/sync.go
index e1a4a7fd3d..9390b044a5 100644
--- a/tsdb/fileutil/sync.go
+++ b/tsdb/fileutil/sync.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The etcd Authors
+// Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/fileutil/sync_darwin.go b/tsdb/fileutil/sync_darwin.go
index d698b896af..3dc42fc57a 100644
--- a/tsdb/fileutil/sync_darwin.go
+++ b/tsdb/fileutil/sync_darwin.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The etcd Authors
+// Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/fileutil/sync_linux.go b/tsdb/fileutil/sync_linux.go
index 2b4c620bb0..138bbee1e5 100644
--- a/tsdb/fileutil/sync_linux.go
+++ b/tsdb/fileutil/sync_linux.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The etcd Authors
+// Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/goversion/goversion.go b/tsdb/goversion/goversion.go
index ec23d25f2e..050ced875d 100644
--- a/tsdb/goversion/goversion.go
+++ b/tsdb/goversion/goversion.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/goversion/goversion_test.go b/tsdb/goversion/goversion_test.go
index 853844fb93..1e52b9655c 100644
--- a/tsdb/goversion/goversion_test.go
+++ b/tsdb/goversion/goversion_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/goversion/init.go b/tsdb/goversion/init.go
index dd15e1f7af..eb97bf7637 100644
--- a/tsdb/goversion/init.go
+++ b/tsdb/goversion/init.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/head.go b/tsdb/head.go
index 0a646d9e3b..5ab5d9dc34 100644
--- a/tsdb/head.go
+++ b/tsdb/head.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -187,6 +187,20 @@ type HeadOptions struct {
// EnableSharding enables ShardedPostings() support in the Head.
EnableSharding bool
+
+ // EnableSTAsZeroSample represents the 'created-timestamp-zero-ingestion' feature flag.
+ // If true, the ST, if non-empty and earlier than the sample timestamp, will be
+ // stored as a zero sample before the actual sample.
+ //
+ // The zero sample is best-effort; only a debug log is emitted on failure.
+ // NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
+ // is implemented.
+ EnableSTAsZeroSample bool
+
+ // EnableMetadataWALRecords represents the 'metadata-wal-records' feature flag.
+ // NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
+ // is implemented.
+ EnableMetadataWALRecords bool
}
const (
@@ -313,7 +327,7 @@ func (h *Head) resetInMemoryState() error {
if em == nil {
em = NewExemplarMetrics(h.reg)
}
- es, err := NewCircularExemplarStorage(h.opts.MaxExemplars.Load(), em)
+ es, err := NewCircularExemplarStorage(h.opts.MaxExemplars.Load(), em, h.opts.OutOfOrderTimeWindow.Load())
if err != nil {
return err
}
@@ -1023,6 +1037,8 @@ func (h *Head) ApplyConfig(cfg *config.Config, wbl *wlog.WL) {
return
}
+ h.exemplars.(*CircularExemplarStorage).SetOutOfOrderTimeWindow(oooTimeWindow)
+
// Head uses opts.MaxExemplars in combination with opts.EnableExemplarStorage
// to decide if it should pass exemplars along to its exemplar storage, so we
// need to update opts.MaxExemplars here.
diff --git a/tsdb/head_append.go b/tsdb/head_append.go
index 942c3ce974..fceb80bd34 100644
--- a/tsdb/head_append.go
+++ b/tsdb/head_append.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -165,17 +165,19 @@ func (h *Head) appender() *headAppender {
minValidTime := h.appendableMinValidTime()
appendID, cleanupAppendIDsBelow := h.iso.newAppendID(minValidTime) // Every appender gets an ID that is cleared upon commit/rollback.
return &headAppender{
- head: h,
- minValidTime: minValidTime,
- mint: math.MaxInt64,
- maxt: math.MinInt64,
- headMaxt: h.MaxTime(),
- oooTimeWindow: h.opts.OutOfOrderTimeWindow.Load(),
- seriesRefs: h.getRefSeriesBuffer(),
- series: h.getSeriesBuffer(),
- typesInBatch: h.getTypeMap(),
- appendID: appendID,
- cleanupAppendIDsBelow: cleanupAppendIDsBelow,
+ headAppenderBase: headAppenderBase{
+ head: h,
+ minValidTime: minValidTime,
+ mint: math.MaxInt64,
+ maxt: math.MinInt64,
+ headMaxt: h.MaxTime(),
+ oooTimeWindow: h.opts.OutOfOrderTimeWindow.Load(),
+ seriesRefs: h.getRefSeriesBuffer(),
+ series: h.getSeriesBuffer(),
+ typesInBatch: h.getTypeMap(),
+ appendID: appendID,
+ cleanupAppendIDsBelow: cleanupAppendIDsBelow,
+ },
}
}
@@ -382,7 +384,7 @@ func (b *appendBatch) close(h *Head) {
b.exemplars = nil
}
-type headAppender struct {
+type headAppenderBase struct {
head *Head
minValidTime int64 // No samples below this timestamp are allowed.
mint, maxt int64
@@ -397,7 +399,10 @@ type headAppender struct {
appendID, cleanupAppendIDsBelow uint64
closed bool
- hints *storage.AppendOptions
+}
+type headAppender struct {
+ headAppenderBase
+ hints *storage.AppendOptions
}
func (a *headAppender) SetOptions(opts *storage.AppendOptions) {
@@ -525,7 +530,7 @@ func (a *headAppender) AppendSTZeroSample(ref storage.SeriesRef, lset labels.Lab
return storage.SeriesRef(s.ref), nil
}
-func (a *headAppender) getOrCreate(lset labels.Labels) (s *memSeries, created bool, err error) {
+func (a *headAppenderBase) getOrCreate(lset labels.Labels) (s *memSeries, created bool, err error) {
// Ensure no empty labels have gotten through.
lset = lset.WithoutEmpty()
if lset.IsEmpty() {
@@ -550,7 +555,7 @@ func (a *headAppender) getOrCreate(lset labels.Labels) (s *memSeries, created bo
// getCurrentBatch returns the current batch if it fits the provided sampleType
// for the provided series. Otherwise, it adds a new batch and returns it.
-func (a *headAppender) getCurrentBatch(st sampleType, s chunks.HeadSeriesRef) *appendBatch {
+func (a *headAppenderBase) getCurrentBatch(st sampleType, s chunks.HeadSeriesRef) *appendBatch {
h := a.head
newBatch := func() *appendBatch {
@@ -1043,7 +1048,7 @@ func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels,
var _ storage.GetRef = &headAppender{}
-func (a *headAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) {
+func (a *headAppenderBase) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) {
s := a.head.series.getByHash(hash, lset)
if s == nil {
return 0, labels.EmptyLabels()
@@ -1053,7 +1058,7 @@ func (a *headAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRe
}
// log writes all headAppender's data to the WAL.
-func (a *headAppender) log() error {
+func (a *headAppenderBase) log() error {
if a.head.wal == nil {
return nil
}
@@ -1185,7 +1190,7 @@ type appenderCommitContext struct {
}
// commitExemplars adds all exemplars from the provided batch to the head's exemplar storage.
-func (a *headAppender) commitExemplars(b *appendBatch) {
+func (a *headAppenderBase) commitExemplars(b *appendBatch) {
// No errors logging to WAL, so pass the exemplars along to the in memory storage.
for _, e := range b.exemplars {
s := a.head.series.getByID(chunks.HeadSeriesRef(e.ref))
@@ -1205,7 +1210,7 @@ func (a *headAppender) commitExemplars(b *appendBatch) {
}
}
-func (acc *appenderCommitContext) collectOOORecords(a *headAppender) {
+func (acc *appenderCommitContext) collectOOORecords(a *headAppenderBase) {
if a.head.wbl == nil {
// WBL is not enabled. So no need to collect.
acc.wblSamples = nil
@@ -1310,7 +1315,7 @@ func handleAppendableError(err error, appended, oooRejected, oobRejected, tooOld
// operations on the series after appending the samples.
//
// There are also specific functions to commit histograms and float histograms.
-func (a *headAppender) commitFloats(b *appendBatch, acc *appenderCommitContext) {
+func (a *headAppenderBase) commitFloats(b *appendBatch, acc *appenderCommitContext) {
var ok, chunkCreated bool
var series *memSeries
@@ -1466,7 +1471,7 @@ func (a *headAppender) commitFloats(b *appendBatch, acc *appenderCommitContext)
}
// For details on the commitHistograms function, see the commitFloats docs.
-func (a *headAppender) commitHistograms(b *appendBatch, acc *appenderCommitContext) {
+func (a *headAppenderBase) commitHistograms(b *appendBatch, acc *appenderCommitContext) {
var ok, chunkCreated bool
var series *memSeries
@@ -1575,7 +1580,7 @@ func (a *headAppender) commitHistograms(b *appendBatch, acc *appenderCommitConte
}
// For details on the commitFloatHistograms function, see the commitFloats docs.
-func (a *headAppender) commitFloatHistograms(b *appendBatch, acc *appenderCommitContext) {
+func (a *headAppenderBase) commitFloatHistograms(b *appendBatch, acc *appenderCommitContext) {
var ok, chunkCreated bool
var series *memSeries
@@ -1697,7 +1702,7 @@ func commitMetadata(b *appendBatch) {
}
}
-func (a *headAppender) unmarkCreatedSeriesAsPendingCommit() {
+func (a *headAppenderBase) unmarkCreatedSeriesAsPendingCommit() {
for _, s := range a.series {
s.Lock()
s.pendingCommit = false
@@ -1707,7 +1712,7 @@ func (a *headAppender) unmarkCreatedSeriesAsPendingCommit() {
// Commit writes to the WAL and adds the data to the Head.
// TODO(codesome): Refactor this method to reduce indentation and make it more readable.
-func (a *headAppender) Commit() (err error) {
+func (a *headAppenderBase) Commit() (err error) {
if a.closed {
return ErrAppenderClosed
}
@@ -2238,7 +2243,7 @@ func handleChunkWriteError(err error) {
}
// Rollback removes the samples and exemplars from headAppender and writes any series to WAL.
-func (a *headAppender) Rollback() (err error) {
+func (a *headAppenderBase) Rollback() (err error) {
if a.closed {
return ErrAppenderClosed
}
diff --git a/tsdb/head_append_v2.go b/tsdb/head_append_v2.go
new file mode 100644
index 0000000000..241fb42e97
--- /dev/null
+++ b/tsdb/head_append_v2.go
@@ -0,0 +1,398 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdb
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/value"
+ "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/tsdb/chunks"
+ "github.com/prometheus/prometheus/tsdb/record"
+)
+
+// initAppenderV2 is a helper to initialize the time bounds of the head
+// upon the first sample it receives.
+type initAppenderV2 struct {
+ app storage.AppenderV2
+ head *Head
+}
+
+var _ storage.GetRef = &initAppenderV2{}
+
+func (a *initAppenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
+ if a.app == nil {
+ a.head.initTime(t)
+ a.app = a.head.appenderV2()
+ }
+ return a.app.Append(ref, ls, st, t, v, h, fh, opts)
+}
+
+func (a *initAppenderV2) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) {
+ if g, ok := a.app.(storage.GetRef); ok {
+ return g.GetRef(lset, hash)
+ }
+ return 0, labels.EmptyLabels()
+}
+
+func (a *initAppenderV2) Commit() error {
+ if a.app == nil {
+ a.head.metrics.activeAppenders.Dec()
+ return nil
+ }
+ return a.app.Commit()
+}
+
+func (a *initAppenderV2) Rollback() error {
+ if a.app == nil {
+ a.head.metrics.activeAppenders.Dec()
+ return nil
+ }
+ return a.app.Rollback()
+}
+
+// AppenderV2 returns a new AppenderV2 on the database.
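+//
+// A minimal usage sketch (illustrative only; lset, t and v are placeholder variables):
+//
+//	app := h.AppenderV2(ctx)
+//	ref, err := app.Append(0, lset, 0, t, v, nil, nil, storage.AOptions{})
+//	if err != nil {
+//		_ = app.Rollback()
+//	} else {
+//		err = app.Commit()
+//	}
+//	// The returned ref can be reused for subsequent appends to the same series.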
+func (h *Head) AppenderV2(context.Context) storage.AppenderV2 {
+ h.metrics.activeAppenders.Inc()
+
+ // The head cache might not have a starting point yet. The init appender
+ // picks up the first appended timestamp as the base.
+ if !h.initialized() {
+ return &initAppenderV2{
+ head: h,
+ }
+ }
+ return h.appenderV2()
+}
+
+func (h *Head) appenderV2() *headAppenderV2 {
+ minValidTime := h.appendableMinValidTime()
+ appendID, cleanupAppendIDsBelow := h.iso.newAppendID(minValidTime) // Every appender gets an ID that is cleared upon commit/rollback.
+ return &headAppenderV2{
+ headAppenderBase: headAppenderBase{
+ head: h,
+ minValidTime: minValidTime,
+ mint: math.MaxInt64,
+ maxt: math.MinInt64,
+ headMaxt: h.MaxTime(),
+ oooTimeWindow: h.opts.OutOfOrderTimeWindow.Load(),
+ seriesRefs: h.getRefSeriesBuffer(),
+ series: h.getSeriesBuffer(),
+ typesInBatch: h.getTypeMap(),
+ appendID: appendID,
+ cleanupAppendIDsBelow: cleanupAppendIDsBelow,
+ },
+ }
+}
+
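+// headAppenderV2 implements storage.AppenderV2 by embedding the same
+// headAppenderBase state that backs the V1 headAppender.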
+type headAppenderV2 struct {
+ headAppenderBase
+}
+
+func (a *headAppenderV2) Append(ref storage.SeriesRef, ls labels.Labels, st, t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, opts storage.AOptions) (storage.SeriesRef, error) {
+ var (
+ // Avoid shadowing err variables for reliability.
+ valErr, appErr, partialErr error
+ sampleMetricType = sampleMetricTypeFloat
+ isStale bool
+ )
+ // Fail fast on incorrect histograms.
+ switch {
+ case fh != nil:
+ sampleMetricType = sampleMetricTypeHistogram
+ valErr = fh.Validate()
+ case h != nil:
+ sampleMetricType = sampleMetricTypeHistogram
+ valErr = h.Validate()
+ }
+ if valErr != nil {
+ return 0, valErr
+ }
+
+ // Fail fast if OOO is disabled and the sample is out of bounds.
+ // Otherwise, a full check will be done later to decide if the sample is in-order or out-of-order.
+ if a.oooTimeWindow == 0 && t < a.minValidTime {
+ a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricType).Inc()
+ return 0, storage.ErrOutOfBounds
+ }
+
+ s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
+ if s == nil {
+ var err error
+ s, _, err = a.getOrCreate(ls)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ // TODO(bwplotka): Handle ST natively (as per PROM-60).
+ if a.head.opts.EnableSTAsZeroSample && st != 0 {
+ a.bestEffortAppendSTZeroSample(s, ls, st, t, h, fh)
+ }
+
+ switch {
+ case fh != nil:
+ isStale = value.IsStaleNaN(fh.Sum)
+ appErr = a.appendFloatHistogram(s, t, fh, opts.RejectOutOfOrder)
+ case h != nil:
+ isStale = value.IsStaleNaN(h.Sum)
+ appErr = a.appendHistogram(s, t, h, opts.RejectOutOfOrder)
+ default:
+ isStale = value.IsStaleNaN(v)
+ if isStale {
+ // If we have added a sample before with this same appender, we
+ // can check the previously used type and turn a stale float
+ // sample into a stale histogram sample or stale float histogram
+ // sample as appropriate. This prevents an unnecessary creation
+ // of a new batch. However, since other appenders might append
+ // to the same series concurrently, this is not perfect but just
+ // an optimization for the more likely case.
+ switch a.typesInBatch[s.ref] {
+ case stHistogram, stCustomBucketHistogram:
+ return a.Append(storage.SeriesRef(s.ref), ls, st, t, 0, &histogram.Histogram{Sum: v}, nil, storage.AOptions{
+ RejectOutOfOrder: opts.RejectOutOfOrder,
+ })
+ case stFloatHistogram, stCustomBucketFloatHistogram:
+ return a.Append(storage.SeriesRef(s.ref), ls, st, t, 0, nil, &histogram.FloatHistogram{Sum: v}, storage.AOptions{
+ RejectOutOfOrder: opts.RejectOutOfOrder,
+ })
+ }
+ // Note that a series reference not yet in the map will come out
+ // as stNone, but since we do not handle that case separately,
+ // we do not need to check for the difference between "unknown
+ // series" and "known series with stNone".
+ }
+ appErr = a.appendFloat(s, t, v, opts.RejectOutOfOrder)
+ }
+ // Handle append error, if any.
+ if appErr != nil {
+ switch {
+ case errors.Is(appErr, storage.ErrOutOfOrderSample):
+ a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricType).Inc()
+ case errors.Is(appErr, storage.ErrTooOldSample):
+ a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricType).Inc()
+ }
+ return 0, appErr
+ }
+
+ if t < a.mint {
+ a.mint = t
+ }
+ if t > a.maxt {
+ a.maxt = t
+ }
+
+ if isStale {
+ // For stale values we never attempt to process metadata/exemplars; claim success.
+ return storage.SeriesRef(s.ref), nil
+ }
+
+ // Append exemplars if any and if storage was configured for it.
+ if len(opts.Exemplars) > 0 && a.head.opts.EnableExemplarStorage && a.head.opts.MaxExemplars.Load() > 0 {
+ // Currently only exemplars can return partial errors.
+ partialErr = a.appendExemplars(s, opts.Exemplars)
+ }
+
+ // TODO(bwplotka): Move/reuse metadata tests from scrape, once scrape adopts AppenderV2.
+ // Currently tsdb package does not test metadata.
+ if a.head.opts.EnableMetadataWALRecords && !opts.Metadata.IsEmpty() {
+ s.Lock()
+ metaChanged := s.meta == nil || !s.meta.Equals(opts.Metadata)
+ s.Unlock()
+ if metaChanged {
+ b := a.getCurrentBatch(stNone, s.ref)
+ b.metadata = append(b.metadata, record.RefMetadata{
+ Ref: s.ref,
+ Type: record.GetMetricType(opts.Metadata.Type),
+ Unit: opts.Metadata.Unit,
+ Help: opts.Metadata.Help,
+ })
+ b.metadataSeries = append(b.metadataSeries, s)
+ }
+ }
+ return storage.SeriesRef(s.ref), partialErr
+}
+
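+// appendFloat checks whether the float sample is appendable for the series
+// and, if so, stages it in the current float batch. With fastRejectOOO set,
+// out-of-order samples are rejected immediately with storage.ErrOutOfOrderSample.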
+func (a *headAppenderV2) appendFloat(s *memSeries, t int64, v float64, fastRejectOOO bool) error {
+ s.Lock()
+ // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
+ // to skip that sample from the WAL and write only in the WBL.
+ isOOO, delta, err := s.appendable(t, v, a.headMaxt, a.minValidTime, a.oooTimeWindow)
+ if isOOO && fastRejectOOO {
+ s.Unlock()
+ return storage.ErrOutOfOrderSample
+ }
+ if err == nil {
+ s.pendingCommit = true
+ }
+ s.Unlock()
+ if delta > 0 {
+ a.head.metrics.oooHistogram.Observe(float64(delta) / 1000)
+ }
+ if err != nil {
+ return err
+ }
+
+ b := a.getCurrentBatch(stFloat, s.ref)
+ b.floats = append(b.floats, record.RefSample{Ref: s.ref, T: t, V: v})
+ b.floatSeries = append(b.floatSeries, s)
+ return nil
+}
+
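+// appendHistogram is the integer histogram counterpart of appendFloat; samples
+// using custom buckets are staged under a separate batch sample type.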
+func (a *headAppenderV2) appendHistogram(s *memSeries, t int64, h *histogram.Histogram, fastRejectOOO bool) error {
+ s.Lock()
+ // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
+ // to skip that sample from the WAL and write only in the WBL.
+ isOOO, delta, err := s.appendableHistogram(t, h, a.headMaxt, a.minValidTime, a.oooTimeWindow)
+ if isOOO && fastRejectOOO {
+ s.Unlock()
+ return storage.ErrOutOfOrderSample
+ }
+ if err == nil {
+ s.pendingCommit = true
+ }
+ s.Unlock()
+ if delta > 0 {
+ a.head.metrics.oooHistogram.Observe(float64(delta) / 1000)
+ }
+ if err != nil {
+ return err
+ }
+ st := stHistogram
+ if h.UsesCustomBuckets() {
+ st = stCustomBucketHistogram
+ }
+ b := a.getCurrentBatch(st, s.ref)
+ b.histograms = append(b.histograms, record.RefHistogramSample{Ref: s.ref, T: t, H: h})
+ b.histogramSeries = append(b.histogramSeries, s)
+ return nil
+}
+
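+// appendFloatHistogram is the float histogram counterpart of appendFloat; samples
+// using custom buckets are staged under a separate batch sample type.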
+func (a *headAppenderV2) appendFloatHistogram(s *memSeries, t int64, fh *histogram.FloatHistogram, fastRejectOOO bool) error {
+ s.Lock()
+ // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
+ // to skip that sample from the WAL and write only in the WBL.
+ isOOO, delta, err := s.appendableFloatHistogram(t, fh, a.headMaxt, a.minValidTime, a.oooTimeWindow)
+ if isOOO && fastRejectOOO {
+ s.Unlock()
+ return storage.ErrOutOfOrderSample
+ }
+ if err == nil {
+ s.pendingCommit = true
+ }
+ s.Unlock()
+ if delta > 0 {
+ a.head.metrics.oooHistogram.Observe(float64(delta) / 1000)
+ }
+ if err != nil {
+ return err
+ }
+ st := stFloatHistogram
+ if fh.UsesCustomBuckets() {
+ st = stCustomBucketFloatHistogram
+ }
+ b := a.getCurrentBatch(st, s.ref)
+ b.floatHistograms = append(b.floatHistograms, record.RefFloatHistogramSample{Ref: s.ref, T: t, FH: fh})
+ b.floatHistogramSeries = append(b.floatHistogramSeries, s)
+ return nil
+}
+
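+// appendExemplars validates the given exemplars and stages the valid ones in
+// the current batch. Duplicate and disabled-storage errors are skipped; other
+// validation errors are collected and returned as a storage.AppendPartialError.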
+func (a *headAppenderV2) appendExemplars(s *memSeries, exemplar []exemplar.Exemplar) error {
+ var errs []error
+ for _, e := range exemplar {
+ // Ensure no empty labels have gotten through.
+ e.Labels = e.Labels.WithoutEmpty()
+ if err := a.head.exemplars.ValidateExemplar(s.labels(), e); err != nil {
+ if !errors.Is(err, storage.ErrDuplicateExemplar) && !errors.Is(err, storage.ErrExemplarsDisabled) {
+ // Collect errors other than duplicates and disabled exemplar storage as partial errors.
+ errs = append(errs, err)
+ continue
+ }
+ if !errors.Is(err, storage.ErrOutOfOrderExemplar) {
+ a.head.logger.Debug("Error while adding an exemplar on AppendSample", "exemplars", fmt.Sprintf("%+v", e), "err", err)
+ }
+ continue
+ }
+ b := a.getCurrentBatch(stNone, s.ref)
+ b.exemplars = append(b.exemplars, exemplarWithSeriesRef{storage.SeriesRef(s.ref), e})
+ }
+ if len(errs) > 0 {
+ return &storage.AppendPartialError{ExemplarErrors: errs}
+ }
+ return nil
+}
+
+// NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
+// is implemented.
+//
+// ST is an experimental feature; we don't fail the append on errors, we just debug-log them.
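+//
+// bestEffortAppendSTZeroSample appends a synthetic zero sample (or a zero
+// histogram matching the incoming sample's schema) at the start timestamp st,
+// marked as a counter reset. Any error is only debug-logged, never returned.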
+func (a *headAppenderV2) bestEffortAppendSTZeroSample(s *memSeries, ls labels.Labels, st, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) {
+ // NOTE: Use ls instead of s.lset to avoid locking memSeries. Using s.ref is acceptable without locking.
+ if st >= t {
+ a.head.logger.Debug("Error when appending ST", "series", ls.String(), "st", st, "t", t, "err", storage.ErrSTNewerThanSample)
+ return
+ }
+ if st < a.minValidTime {
+ a.head.logger.Debug("Error when appending ST", "series", ls.String(), "st", st, "t", t, "err", storage.ErrOutOfBounds)
+ return
+ }
+
+ var err error
+ switch {
+ case fh != nil:
+ zeroFloatHistogram := &histogram.FloatHistogram{
+ // The STZeroSample represents a counter reset by definition.
+ CounterResetHint: histogram.CounterReset,
+ // Replicate other fields to avoid needless chunk creation.
+ Schema: fh.Schema,
+ ZeroThreshold: fh.ZeroThreshold,
+ CustomValues: fh.CustomValues,
+ }
+ err = a.appendFloatHistogram(s, st, zeroFloatHistogram, true)
+ case h != nil:
+ zeroHistogram := &histogram.Histogram{
+ // The STZeroSample represents a counter reset by definition.
+ CounterResetHint: histogram.CounterReset,
+ // Replicate other fields to avoid needless chunk creation.
+ Schema: h.Schema,
+ ZeroThreshold: h.ZeroThreshold,
+ CustomValues: h.CustomValues,
+ }
+ err = a.appendHistogram(s, st, zeroHistogram, true)
+ default:
+ err = a.appendFloat(s, st, 0, true)
+ }
+
+ if err != nil {
+ if errors.Is(err, storage.ErrOutOfOrderSample) {
+ // OOO errors are common and expected (cumulative). Explicitly ignored.
+ return
+ }
+ a.head.logger.Debug("Error when appending ST", "series", s.lset.String(), "st", st, "t", t, "err", err)
+ return
+ }
+
+ if st > a.maxt {
+ a.maxt = st
+ }
+}
+
+var _ storage.GetRef = &headAppenderV2{}
diff --git a/tsdb/head_append_v2_test.go b/tsdb/head_append_v2_test.go
new file mode 100644
index 0000000000..33bc3aec38
--- /dev/null
+++ b/tsdb/head_append_v2_test.go
@@ -0,0 +1,4724 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tsdb
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "math"
+ "math/rand"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "slices"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/prometheus/client_golang/prometheus"
+ prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/atomic"
+ "golang.org/x/sync/errgroup"
+
+ "github.com/prometheus/prometheus/config"
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/value"
+ "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/tsdb/chunkenc"
+ "github.com/prometheus/prometheus/tsdb/chunks"
+ "github.com/prometheus/prometheus/tsdb/record"
+ "github.com/prometheus/prometheus/tsdb/tombstones"
+ "github.com/prometheus/prometheus/tsdb/tsdbutil"
+ "github.com/prometheus/prometheus/tsdb/wlog"
+ "github.com/prometheus/prometheus/util/compression"
+ "github.com/prometheus/prometheus/util/testutil"
+)
+
+// TODO(bwplotka): Ensure non-ported tests are not deleted from db_test.go when removing AppenderV1 flow (#17632),
+// for example:
+// * TestChunkNotFoundHeadGCRace
+// * TestHeadSeriesChunkRace
+// * TestHeadLabelValuesWithMatchers
+// * TestHeadLabelNamesWithMatchers
+// * TestHeadShardedPostings
+
+// TestHeadAppenderV2_HighConcurrencyReadAndWrite generates 1000 series with a step of 15s and fills a whole block with samples.
+// This means it generates 4000 chunks in total, because with a step of 15s there are 4 chunks per block per series.
+// While appending the samples to the head, it concurrently queries them from multiple goroutines and verifies that the
+// returned results are correct.
+func TestHeadAppenderV2_HighConcurrencyReadAndWrite(t *testing.T) {
+ head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
+ defer func() {
+ require.NoError(t, head.Close())
+ }()
+
+ seriesCnt := 1000
+ readConcurrency := 2
+ writeConcurrency := 10
+ startTs := uint64(DefaultBlockDuration) // start at the second block relative to the unix epoch.
+ qryRange := uint64(5 * time.Minute.Milliseconds())
+ step := uint64(15 * time.Second / time.Millisecond)
+ endTs := startTs + uint64(DefaultBlockDuration)
+
+ labelSets := make([]labels.Labels, seriesCnt)
+ for i := range seriesCnt {
+ labelSets[i] = labels.FromStrings("seriesId", strconv.Itoa(i))
+ }
+
+ head.Init(0)
+
+ g, ctx := errgroup.WithContext(context.Background())
+ whileNotCanceled := func(f func() (bool, error)) error {
+ for ctx.Err() == nil {
+ cont, err := f()
+ if err != nil {
+ return err
+ }
+ if !cont {
+ return nil
+ }
+ }
+ return nil
+ }
+
+ // Create one channel for each write worker; the channels will be used by the coordinator
+ // goroutine to coordinate which timestamps each write worker has to write.
+ writerTsCh := make([]chan uint64, writeConcurrency)
+ for writerTsChIdx := range writerTsCh {
+ writerTsCh[writerTsChIdx] = make(chan uint64)
+ }
+
+ // workerReadyWg is used to synchronize the start of the test,
+ // we only start the test once all workers signal that they're ready.
+ var workerReadyWg sync.WaitGroup
+ workerReadyWg.Add(writeConcurrency + readConcurrency)
+
+ // Start the write workers.
+ for wid := range writeConcurrency {
+ // Create copy of workerID to be used by worker routine.
+ workerID := wid
+
+ g.Go(func() error {
+ // The label sets which this worker will write.
+ workerLabelSets := labelSets[(seriesCnt/writeConcurrency)*workerID : (seriesCnt/writeConcurrency)*(workerID+1)]
+
+ // Signal that this worker is ready.
+ workerReadyWg.Done()
+
+ return whileNotCanceled(func() (bool, error) {
+ ts, ok := <-writerTsCh[workerID]
+ if !ok {
+ return false, nil
+ }
+
+ app := head.AppenderV2(ctx)
+ for i := range workerLabelSets {
+ // We also use the timestamp as the sample value.
+ _, err := app.Append(0, workerLabelSets[i], 0, int64(ts), float64(ts), nil, nil, storage.AOptions{})
+ if err != nil {
+ return false, fmt.Errorf("Error when appending to head: %w", err)
+ }
+ }
+
+ return true, app.Commit()
+ })
+ })
+ }
+
+ // queryHead is a helper to query the head for a given time range and labelset.
+ queryHead := func(mint, maxt uint64, label labels.Label) (map[string][]chunks.Sample, error) {
+ q, err := NewBlockQuerier(head, int64(mint), int64(maxt))
+ if err != nil {
+ return nil, err
+ }
+ return query(t, q, labels.MustNewMatcher(labels.MatchEqual, label.Name, label.Value)), nil
+ }
+
+ // readerTsCh will be used by the coordinator goroutine to coordinate which timestamps the reader should read.
+ readerTsCh := make(chan uint64)
+
+ // Start the read workers.
+ for wid := range readConcurrency {
+ // Create copy of workerID to be used by worker routine.
+ workerID := wid
+
+ g.Go(func() error {
+ querySeriesRef := (seriesCnt / readConcurrency) * workerID
+
+ // Signal that this worker is ready.
+ workerReadyWg.Done()
+
+ return whileNotCanceled(func() (bool, error) {
+ ts, ok := <-readerTsCh
+ if !ok {
+ return false, nil
+ }
+
+ querySeriesRef = (querySeriesRef + 1) % seriesCnt
+ lbls := labelSets[querySeriesRef]
+ // lbls has a single entry; extract it so we can run a query.
+ var lbl labels.Label
+ lbls.Range(func(l labels.Label) {
+ lbl = l
+ })
+ samples, err := queryHead(ts-qryRange, ts, lbl)
+ if err != nil {
+ return false, err
+ }
+
+ if len(samples) != 1 {
+ return false, fmt.Errorf("expected 1 series, got %d", len(samples))
+ }
+
+ series := lbls.String()
+ expectSampleCnt := qryRange/step + 1
+ if expectSampleCnt != uint64(len(samples[series])) {
+ return false, fmt.Errorf("expected %d samples, got %d", expectSampleCnt, len(samples[series]))
+ }
+
+ for sampleIdx, sample := range samples[series] {
+ expectedValue := ts - qryRange + (uint64(sampleIdx) * step)
+ if sample.T() != int64(expectedValue) {
+ return false, fmt.Errorf("expected sample %d to have ts %d, got %d", sampleIdx, expectedValue, sample.T())
+ }
+ if sample.F() != float64(expectedValue) {
+ return false, fmt.Errorf("expected sample %d to have value %d, got %f", sampleIdx, expectedValue, sample.F())
+ }
+ }
+
+ return true, nil
+ })
+ })
+ }
+
+ // Start the coordinator goroutine.
+ g.Go(func() error {
+ currTs := startTs
+
+ defer func() {
+ // End of the test, close all channels to stop the workers.
+ for _, ch := range writerTsCh {
+ close(ch)
+ }
+ close(readerTsCh)
+ }()
+
+ // Wait until all workers are ready to start the test.
+ workerReadyWg.Wait()
+ return whileNotCanceled(func() (bool, error) {
+ // Send the current timestamp to each of the writers.
+ for _, ch := range writerTsCh {
+ select {
+ case ch <- currTs:
+ case <-ctx.Done():
+ return false, nil
+ }
+ }
+
+ // Once data for at least the query range has been ingested, send the current timestamp to the readers.
+ if currTs > startTs+qryRange {
+ select {
+ case readerTsCh <- currTs - step:
+ case <-ctx.Done():
+ return false, nil
+ }
+ }
+
+ currTs += step
+ if currTs > endTs {
+ return false, nil
+ }
+
+ return true, nil
+ })
+ })
+
+ require.NoError(t, g.Wait())
+}
+
+func TestHeadAppenderV2_WALMultiRef(t *testing.T) {
+ head, w := newTestHead(t, 1000, compression.None, false)
+
+ require.NoError(t, head.Init(0))
+
+ app := head.AppenderV2(context.Background())
+ ref1, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 100, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))
+
+ // Add another sample outside chunk range to mmap a chunk.
+ app = head.AppenderV2(context.Background())
+ _, err = app.Append(0, labels.FromStrings("foo", "bar"), 0, 1500, 2, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Equal(t, 2.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))
+
+ require.NoError(t, head.Truncate(1600))
+
+ app = head.AppenderV2(context.Background())
+ ref2, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 1700, 3, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Equal(t, 3.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))
+
+ // Add another sample outside chunk range to mmap a chunk.
+ app = head.AppenderV2(context.Background())
+ _, err = app.Append(0, labels.FromStrings("foo", "bar"), 0, 2000, 4, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Equal(t, 4.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))
+
+ require.NotEqual(t, ref1, ref2, "Refs are the same")
+ require.NoError(t, head.Close())
+
+ w, err = wlog.New(nil, nil, w.Dir(), compression.None)
+ require.NoError(t, err)
+
+ opts := DefaultHeadOptions()
+ opts.ChunkRange = 1000
+ opts.ChunkDirRoot = head.opts.ChunkDirRoot
+ head, err = NewHead(nil, nil, w, nil, opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, head.Init(0))
+ defer func() {
+ require.NoError(t, head.Close())
+ }()
+
+ q, err := NewBlockQuerier(head, 0, 2100)
+ require.NoError(t, err)
+ series := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+ // The samples before the new ref should be discarded since Head truncation
+ // happens only after compacting the Head.
+ require.Equal(t, map[string][]chunks.Sample{`{foo="bar"}`: {
+ sample{1700, 3, nil, nil},
+ sample{2000, 4, nil, nil},
+ }}, series)
+}
+
+func TestHeadAppenderV2_ActiveAppenders(t *testing.T) {
+ head, _ := newTestHead(t, 1000, compression.None, false)
+ defer head.Close()
+
+ require.NoError(t, head.Init(0))
+
+ // First rollback with no samples.
+ app := head.AppenderV2(context.Background())
+ require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
+ require.NoError(t, app.Rollback())
+ require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
+
+ // Then commit with no samples.
+ app = head.AppenderV2(context.Background())
+ require.NoError(t, app.Commit())
+ require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
+
+ // Now rollback with one sample.
+ app = head.AppenderV2(context.Background())
+ _, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 100, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
+ require.NoError(t, app.Rollback())
+ require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
+
+ // Now commit with one sample.
+ app = head.AppenderV2(context.Background())
+ _, err = app.Append(0, labels.FromStrings("foo", "bar"), 0, 100, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.activeAppenders))
+}
+
+func TestHeadAppenderV2_RaceBetweenSeriesCreationAndGC(t *testing.T) {
+ head, _ := newTestHead(t, 1000, compression.None, false)
+ t.Cleanup(func() { _ = head.Close() })
+ require.NoError(t, head.Init(0))
+
+ const totalSeries = 100_000
+ series := make([]labels.Labels, totalSeries)
+ for i := range totalSeries {
+ series[i] = labels.FromStrings("foo", strconv.Itoa(i))
+ }
+ done := atomic.NewBool(false)
+
+ go func() {
+ defer done.Store(true)
+ app := head.AppenderV2(context.Background())
+ defer func() {
+ if err := app.Commit(); err != nil {
+ t.Errorf("Failed to commit: %v", err)
+ }
+ }()
+ for i := range totalSeries {
+ _, err := app.Append(0, series[i], 0, 100, 1, nil, nil, storage.AOptions{})
+ if err != nil {
+ t.Errorf("Failed to append: %v", err)
+ return
+ }
+ }
+ }()
+
+ // Don't check the atomic.Bool on all iterations in order to perform more gc iterations and make the race condition more likely.
+ for i := 1; i%128 != 0 || !done.Load(); i++ {
+ head.gc()
+ }
+
+ require.Equal(t, totalSeries, int(head.NumSeries()))
+}
+
+func TestHeadAppenderV2_CanGCSeriesCreatedWithoutSamples(t *testing.T) {
+ for op, finishTxn := range map[string]func(app storage.AppenderTransaction) error{
+ "after commit": func(app storage.AppenderTransaction) error { return app.Commit() },
+ "after rollback": func(app storage.AppenderTransaction) error { return app.Rollback() },
+ } {
+ t.Run(op, func(t *testing.T) {
+ chunkRange := time.Hour.Milliseconds()
+ head, _ := newTestHead(t, chunkRange, compression.None, true)
+ t.Cleanup(func() { _ = head.Close() })
+
+ require.NoError(t, head.Init(0))
+
+ firstSampleTime := 10 * chunkRange
+ {
+ // Append the first sample; it should init the head max time to firstSampleTime.
+ app := head.AppenderV2(context.Background())
+ _, err := app.Append(0, labels.FromStrings("lbl", "ok"), 0, firstSampleTime, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Equal(t, 1, int(head.NumSeries()))
+ }
+
+ // Append a sample in a time range that is not covered by the chunk range:
+ // the series is created first, but no sample ends up being appended.
+ app := head.AppenderV2(context.Background())
+ invalidSampleTime := firstSampleTime - chunkRange
+ _, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, invalidSampleTime, 2, nil, nil, storage.AOptions{})
+ require.Error(t, err)
+ // These are our assumptions: we're not testing them, we're just checking them to make debugging a failed
+ // test easier if someone refactors the code and breaks these assumptions.
+ // If these assumptions fail after a refactor, feel free to remove them but make sure that the test is still what we intended to test.
+ require.NotErrorIs(t, err, storage.ErrOutOfBounds, "Failed to append sample shouldn't take the shortcut that returns storage.ErrOutOfBounds")
+ require.ErrorIs(t, err, storage.ErrTooOldSample, "Failed to append sample should return storage.ErrTooOldSample, because OOO window was enabled but this sample doesn't fall into it.")
+ // Do commit or rollback, depending on what we're testing.
+ require.NoError(t, finishTxn(app))
+
+ // Garbage-collect; since we finished the transaction and the series has no samples, it should be collectable.
+ head.gc()
+ require.Equal(t, 1, int(head.NumSeries()))
+ })
+ }
+}
+
+func TestHeadAppenderV2_DeleteSimple(t *testing.T) {
+ buildSmpls := func(s []int64) []sample {
+ ss := make([]sample, 0, len(s))
+ for _, t := range s {
+ ss = append(ss, sample{t: t, f: float64(t)})
+ }
+ return ss
+ }
+ smplsAll := buildSmpls([]int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
+ lblDefault := labels.Label{Name: "a", Value: "b"}
+ lblsDefault := labels.FromStrings("a", "b")
+
+ cases := []struct {
+ dranges tombstones.Intervals
+ addSamples []sample // Samples to add after delete.
+ smplsExp []sample
+ }{
+ {
+ dranges: tombstones.Intervals{{Mint: 0, Maxt: 3}},
+ smplsExp: buildSmpls([]int64{4, 5, 6, 7, 8, 9}),
+ },
+ {
+ dranges: tombstones.Intervals{{Mint: 1, Maxt: 3}},
+ smplsExp: buildSmpls([]int64{0, 4, 5, 6, 7, 8, 9}),
+ },
+ {
+ dranges: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 7}},
+ smplsExp: buildSmpls([]int64{0, 8, 9}),
+ },
+ {
+ dranges: tombstones.Intervals{{Mint: 1, Maxt: 3}, {Mint: 4, Maxt: 700}},
+ smplsExp: buildSmpls([]int64{0}),
+ },
+ { // This case is to ensure that labels and symbols are deleted.
+ dranges: tombstones.Intervals{{Mint: 0, Maxt: 9}},
+ smplsExp: buildSmpls([]int64{}),
+ },
+ {
+ dranges: tombstones.Intervals{{Mint: 1, Maxt: 3}},
+ addSamples: buildSmpls([]int64{11, 13, 15}),
+ smplsExp: buildSmpls([]int64{0, 4, 5, 6, 7, 8, 9, 11, 13, 15}),
+ },
+ {
+ // After delete, the appended samples in the deleted range should be visible
+ // as the tombstones are clamped to head min/max time.
+ dranges: tombstones.Intervals{{Mint: 7, Maxt: 20}},
+ addSamples: buildSmpls([]int64{11, 13, 15}),
+ smplsExp: buildSmpls([]int64{0, 1, 2, 3, 4, 5, 6, 11, 13, 15}),
+ },
+ }
+
+ for _, compress := range []compression.Type{compression.None, compression.Snappy, compression.Zstd} {
+ t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
+ for _, c := range cases {
+ head, w := newTestHead(t, 1000, compress, false)
+ require.NoError(t, head.Init(0))
+
+ app := head.AppenderV2(context.Background())
+ for _, smpl := range smplsAll {
+ _, err := app.Append(0, lblsDefault, 0, smpl.t, smpl.f, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+
+ // Delete the ranges.
+ for _, r := range c.dranges {
+ require.NoError(t, head.Delete(context.Background(), r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, lblDefault.Name, lblDefault.Value)))
+ }
+
+ // Add more samples.
+ app = head.AppenderV2(context.Background())
+ for _, smpl := range c.addSamples {
+ _, err := app.Append(0, lblsDefault, 0, smpl.t, smpl.f, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+
+ // Compare the samples for both heads - before and after the reloadBlocks.
+ reloadedW, err := wlog.New(nil, nil, w.Dir(), compress) // Use a new wal to ensure deleted samples are gone even after a reloadBlocks.
+ require.NoError(t, err)
+ opts := DefaultHeadOptions()
+ opts.ChunkRange = 1000
+ opts.ChunkDirRoot = reloadedW.Dir()
+ reloadedHead, err := NewHead(nil, nil, reloadedW, nil, opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, reloadedHead.Init(0))
+
+ // Compare the query results for both heads - before and after the reloadBlocks.
+ Outer:
+ for _, h := range []*Head{head, reloadedHead} {
+ q, err := NewBlockQuerier(h, h.MinTime(), h.MaxTime())
+ require.NoError(t, err)
+ actSeriesSet := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, lblDefault.Name, lblDefault.Value))
+ require.NoError(t, q.Close())
+ expSeriesSet := newMockSeriesSet([]storage.Series{
+ storage.NewListSeries(lblsDefault, func() []chunks.Sample {
+ ss := make([]chunks.Sample, 0, len(c.smplsExp))
+ for _, s := range c.smplsExp {
+ ss = append(ss, s)
+ }
+ return ss
+ }(),
+ ),
+ })
+
+ for {
+ eok, rok := expSeriesSet.Next(), actSeriesSet.Next()
+ require.Equal(t, eok, rok)
+
+ if !eok {
+ require.NoError(t, h.Close())
+ require.NoError(t, actSeriesSet.Err())
+ require.Empty(t, actSeriesSet.Warnings())
+ continue Outer
+ }
+ expSeries := expSeriesSet.At()
+ actSeries := actSeriesSet.At()
+
+ require.Equal(t, expSeries.Labels(), actSeries.Labels())
+
+ smplExp, errExp := storage.ExpandSamples(expSeries.Iterator(nil), nil)
+ smplRes, errRes := storage.ExpandSamples(actSeries.Iterator(nil), nil)
+
+ require.Equal(t, errExp, errRes)
+ require.Equal(t, smplExp, smplRes)
+ }
+ }
+ }
+ })
+ }
+}
+
+func TestHeadAppenderV2_DeleteUntilCurrMax(t *testing.T) {
+ hb, _ := newTestHead(t, 1000000, compression.None, false)
+ defer func() {
+ require.NoError(t, hb.Close())
+ }()
+
+ numSamples := int64(10)
+ app := hb.AppenderV2(context.Background())
+ smpls := make([]float64, numSamples)
+ for i := range numSamples {
+ smpls[i] = rand.Float64()
+ _, err := app.Append(0, labels.FromStrings("a", "b"), 0, i, smpls[i], nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ require.NoError(t, hb.Delete(context.Background(), 0, 10000, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
+
+ // Test that the series returns no samples. The series is cleared only after compaction.
+ q, err := NewBlockQuerier(hb, 0, 100000)
+ require.NoError(t, err)
+ res := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+ require.True(t, res.Next(), "series is not present")
+ s := res.At()
+ it := s.Iterator(nil)
+ require.Equal(t, chunkenc.ValNone, it.Next(), "expected no samples")
+ for res.Next() {
+ }
+ require.NoError(t, res.Err())
+ require.Empty(t, res.Warnings())
+
+ // Add again and test for presence.
+ app = hb.AppenderV2(context.Background())
+ _, err = app.Append(0, labels.FromStrings("a", "b"), 0, 11, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ q, err = NewBlockQuerier(hb, 0, 100000)
+ require.NoError(t, err)
+ res = q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+ require.True(t, res.Next(), "series don't exist")
+ exps := res.At()
+ it = exps.Iterator(nil)
+ resSamples, err := storage.ExpandSamples(it, newSample)
+ require.NoError(t, err)
+ require.Equal(t, []chunks.Sample{sample{11, 1, nil, nil}}, resSamples)
+ for res.Next() {
+ }
+ require.NoError(t, res.Err())
+ require.Empty(t, res.Warnings())
+}
+
+func TestHeadAppenderV2_DeleteSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) {
+ numSamples := 10000
+
+ // Enough samples to cause a checkpoint.
+ hb, w := newTestHead(t, int64(numSamples)*10, compression.None, false)
+
+ for i := range numSamples {
+ app := hb.AppenderV2(context.Background())
+ _, err := app.Append(0, labels.FromStrings("a", "b"), 0, int64(i), 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ }
+ require.NoError(t, hb.Delete(context.Background(), 0, int64(numSamples), labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
+ require.NoError(t, hb.Truncate(1))
+ require.NoError(t, hb.Close())
+
+ // Confirm there's been a checkpoint.
+ cdir, _, err := wlog.LastCheckpoint(w.Dir())
+ require.NoError(t, err)
+ // Read in checkpoint and WAL.
+ recs := readTestWAL(t, cdir)
+ recs = append(recs, readTestWAL(t, w.Dir())...)
+
+ var series, samples, stones, metadata int
+ for _, rec := range recs {
+ switch rec.(type) {
+ case []record.RefSeries:
+ series++
+ case []record.RefSample:
+ samples++
+ case []tombstones.Stone:
+ stones++
+ case []record.RefMetadata:
+ metadata++
+ default:
+ require.Fail(t, "unknown record type")
+ }
+ }
+ require.Equal(t, 1, series)
+ require.Equal(t, 9999, samples)
+ require.Equal(t, 1, stones)
+ require.Equal(t, 0, metadata)
+}
+
+func TestHeadAppenderV2_Delete_e2e(t *testing.T) {
+ numDatapoints := 1000
+ numRanges := 1000
+ timeInterval := int64(2)
+ // Create 8 series with 1000 data-points of different ranges, delete and run queries.
+ lbls := [][]labels.Label{
+ {
+ {Name: "a", Value: "b"},
+ {Name: "instance", Value: "localhost:9090"},
+ {Name: "job", Value: "prometheus"},
+ },
+ {
+ {Name: "a", Value: "b"},
+ {Name: "instance", Value: "127.0.0.1:9090"},
+ {Name: "job", Value: "prometheus"},
+ },
+ {
+ {Name: "a", Value: "b"},
+ {Name: "instance", Value: "127.0.0.1:9090"},
+ {Name: "job", Value: "prom-k8s"},
+ },
+ {
+ {Name: "a", Value: "b"},
+ {Name: "instance", Value: "localhost:9090"},
+ {Name: "job", Value: "prom-k8s"},
+ },
+ {
+ {Name: "a", Value: "c"},
+ {Name: "instance", Value: "localhost:9090"},
+ {Name: "job", Value: "prometheus"},
+ },
+ {
+ {Name: "a", Value: "c"},
+ {Name: "instance", Value: "127.0.0.1:9090"},
+ {Name: "job", Value: "prometheus"},
+ },
+ {
+ {Name: "a", Value: "c"},
+ {Name: "instance", Value: "127.0.0.1:9090"},
+ {Name: "job", Value: "prom-k8s"},
+ },
+ {
+ {Name: "a", Value: "c"},
+ {Name: "instance", Value: "localhost:9090"},
+ {Name: "job", Value: "prom-k8s"},
+ },
+ }
+ seriesMap := map[string][]chunks.Sample{}
+ for _, l := range lbls {
+ seriesMap[labels.New(l...).String()] = []chunks.Sample{}
+ }
+
+ hb, _ := newTestHead(t, 100000, compression.None, false)
+ defer func() {
+ require.NoError(t, hb.Close())
+ }()
+
+ app := hb.AppenderV2(context.Background())
+ for _, l := range lbls {
+ ls := labels.New(l...)
+ series := []chunks.Sample{}
+ ts := rand.Int63n(300)
+ for range numDatapoints {
+ v := rand.Float64()
+ _, err := app.Append(0, ls, 0, ts, v, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ series = append(series, sample{ts, v, nil, nil})
+ ts += rand.Int63n(timeInterval) + 1
+ }
+ seriesMap[labels.New(l...).String()] = series
+ }
+ require.NoError(t, app.Commit())
+ // Delete a time range for each selector.
+ dels := []struct {
+ ms []*labels.Matcher
+ drange tombstones.Intervals
+ }{
+ {
+ ms: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "b")},
+ drange: tombstones.Intervals{{Mint: 300, Maxt: 500}, {Mint: 600, Maxt: 670}},
+ },
+ {
+ ms: []*labels.Matcher{
+ labels.MustNewMatcher(labels.MatchEqual, "a", "b"),
+ labels.MustNewMatcher(labels.MatchEqual, "job", "prom-k8s"),
+ },
+ drange: tombstones.Intervals{{Mint: 300, Maxt: 500}, {Mint: 100, Maxt: 670}},
+ },
+ {
+ ms: []*labels.Matcher{
+ labels.MustNewMatcher(labels.MatchEqual, "a", "c"),
+ labels.MustNewMatcher(labels.MatchEqual, "instance", "localhost:9090"),
+ labels.MustNewMatcher(labels.MatchEqual, "job", "prometheus"),
+ },
+ drange: tombstones.Intervals{{Mint: 300, Maxt: 400}, {Mint: 100, Maxt: 6700}},
+ },
+ // TODO: Add Regexp Matchers.
+ }
+ for _, del := range dels {
+ for _, r := range del.drange {
+ require.NoError(t, hb.Delete(context.Background(), r.Mint, r.Maxt, del.ms...))
+ }
+ matched := labels.Slice{}
+ for _, l := range lbls {
+ s := labels.Selector(del.ms)
+ ls := labels.New(l...)
+ if s.Matches(ls) {
+ matched = append(matched, ls)
+ }
+ }
+ sort.Sort(matched)
+ for range numRanges {
+ q, err := NewBlockQuerier(hb, 0, 100000)
+ require.NoError(t, err)
+ ss := q.Select(context.Background(), true, nil, del.ms...)
+ // Build the mockSeriesSet.
+ matchedSeries := make([]storage.Series, 0, len(matched))
+ for _, m := range matched {
+ smpls := seriesMap[m.String()]
+ smpls = deletedSamples(smpls, del.drange)
+ // Only append those series for which samples exist as mockSeriesSet
+ // doesn't skip series with no samples.
+ // TODO: But sometimes SeriesSet returns an empty chunkenc.Iterator
+ if len(smpls) > 0 {
+ matchedSeries = append(matchedSeries, storage.NewListSeries(m, smpls))
+ }
+ }
+ expSs := newMockSeriesSet(matchedSeries)
+ // Compare both SeriesSets.
+ for {
+ eok, rok := expSs.Next(), ss.Next()
+ // Skip a series if iterator is empty.
+ if rok {
+ for ss.At().Iterator(nil).Next() == chunkenc.ValNone {
+ rok = ss.Next()
+ if !rok {
+ break
+ }
+ }
+ }
+ require.Equal(t, eok, rok)
+ if !eok {
+ break
+ }
+ sexp := expSs.At()
+ sres := ss.At()
+ require.Equal(t, sexp.Labels(), sres.Labels())
+ smplExp, errExp := storage.ExpandSamples(sexp.Iterator(nil), nil)
+ smplRes, errRes := storage.ExpandSamples(sres.Iterator(nil), nil)
+ require.Equal(t, errExp, errRes)
+ require.Equal(t, smplExp, smplRes)
+ }
+ require.NoError(t, ss.Err())
+ require.Empty(t, ss.Warnings())
+ require.NoError(t, q.Close())
+ }
+ }
+}
+
+func TestHeadAppenderV2_UncommittedSamplesNotLostOnTruncate(t *testing.T) {
+ h, _ := newTestHead(t, 1000, compression.None, false)
+ defer func() {
+ require.NoError(t, h.Close())
+ }()
+
+ h.initTime(0)
+
+ app := h.appenderV2()
+ lset := labels.FromStrings("a", "1")
+ _, err := app.Append(0, lset, 0, 2100, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ require.NoError(t, h.Truncate(2000))
+ require.NotNil(t, h.series.getByHash(lset.Hash(), lset), "series should not have been garbage collected")
+
+ require.NoError(t, app.Commit())
+
+ q, err := NewBlockQuerier(h, 1500, 2500)
+ require.NoError(t, err)
+ defer q.Close()
+
+ ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
+ require.True(t, ss.Next())
+ for ss.Next() {
+ }
+ require.NoError(t, ss.Err())
+ require.Empty(t, ss.Warnings())
+}
+
+func TestHeadAppenderV2_TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) {
+ h, _ := newTestHead(t, 1000, compression.None, false)
+ defer func() {
+ require.NoError(t, h.Close())
+ }()
+
+ h.initTime(0)
+
+ app := h.appenderV2()
+ lset := labels.FromStrings("a", "1")
+ _, err := app.Append(0, lset, 0, 2100, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ require.NoError(t, h.Truncate(2000))
+ require.NotNil(t, h.series.getByHash(lset.Hash(), lset), "series should not have been garbage collected")
+
+ require.NoError(t, app.Rollback())
+
+ q, err := NewBlockQuerier(h, 1500, 2500)
+ require.NoError(t, err)
+
+ ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
+ require.False(t, ss.Next())
+ require.Empty(t, ss.Warnings())
+ require.NoError(t, q.Close())
+
+ // Truncate again; this time the series should be deleted.
+ require.NoError(t, h.Truncate(2050))
+ require.Equal(t, (*memSeries)(nil), h.series.getByHash(lset.Hash(), lset))
+}
+
+func TestHeadAppenderV2_LogRollback(t *testing.T) {
+ for _, compress := range []compression.Type{compression.None, compression.Snappy, compression.Zstd} {
+ t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
+ h, w := newTestHead(t, 1000, compress, false)
+ defer func() {
+ require.NoError(t, h.Close())
+ }()
+
+ app := h.AppenderV2(context.Background())
+ _, err := app.Append(0, labels.FromStrings("a", "b"), 0, 1, 2, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ require.NoError(t, app.Rollback())
+ recs := readTestWAL(t, w.Dir())
+
+ require.Len(t, recs, 1)
+
+ series, ok := recs[0].([]record.RefSeries)
+ require.True(t, ok, "expected series record but got %+v", recs[0])
+ require.Equal(t, []record.RefSeries{{Ref: 1, Labels: labels.FromStrings("a", "b")}}, series)
+ })
+ }
+}
+
+func TestHeadAppenderV2_ReturnsSortedLabelValues(t *testing.T) {
+ h, _ := newTestHead(t, 1000, compression.None, false)
+ defer func() {
+ require.NoError(t, h.Close())
+ }()
+
+ h.initTime(0)
+
+ app := h.appenderV2()
+ for i := 100; i > 0; i-- {
+ for j := range 10 {
+ lset := labels.FromStrings(
+ "__name__", fmt.Sprintf("metric_%d", i),
+ "label", fmt.Sprintf("value_%d", j),
+ )
+ _, err := app.Append(0, lset, 0, 2100, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ }
+
+ q, err := NewBlockQuerier(h, 1500, 2500)
+ require.NoError(t, err)
+
+ res, _, err := q.LabelValues(context.Background(), "__name__", nil)
+ require.NoError(t, err)
+
+ require.True(t, slices.IsSorted(res))
+ require.NoError(t, q.Close())
+}
+
+func TestHeadAppenderV2_NewWalSegmentOnTruncate(t *testing.T) {
+ h, wal := newTestHead(t, 1000, compression.None, false)
+ defer func() {
+ require.NoError(t, h.Close())
+ }()
+ add := func(ts int64) {
+ app := h.AppenderV2(context.Background())
+ _, err := app.Append(0, labels.FromStrings("a", "b"), 0, ts, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ }
+
+ add(0)
+ _, last, err := wlog.Segments(wal.Dir())
+ require.NoError(t, err)
+ require.Equal(t, 0, last)
+
+ add(1)
+ require.NoError(t, h.Truncate(1))
+ _, last, err = wlog.Segments(wal.Dir())
+ require.NoError(t, err)
+ require.Equal(t, 1, last)
+
+ add(2)
+ require.NoError(t, h.Truncate(2))
+ _, last, err = wlog.Segments(wal.Dir())
+ require.NoError(t, err)
+ require.Equal(t, 2, last)
+}
+
+func TestHeadAppenderV2_Append_DuplicateLabelName(t *testing.T) {
+ h, _ := newTestHead(t, 1000, compression.None, false)
+ defer func() {
+ require.NoError(t, h.Close())
+ }()
+
+ add := func(labels labels.Labels, labelName string) {
+ app := h.AppenderV2(context.Background())
+ _, err := app.Append(0, labels, 0, 0, 0, nil, nil, storage.AOptions{})
+ require.EqualError(t, err, fmt.Sprintf(`label name "%s" is not unique: invalid sample`, labelName))
+ }
+
+ add(labels.FromStrings("a", "c", "a", "b"), "a")
+ add(labels.FromStrings("a", "c", "a", "c"), "a")
+ add(labels.FromStrings("__name__", "up", "job", "prometheus", "le", "500", "le", "400", "unit", "s"), "le")
+}
+
+func TestHeadAppenderV2_MemSeriesIsolation(t *testing.T) {
+ if defaultIsolationDisabled {
+ t.Skip("skipping test since tsdb isolation is disabled")
+ }
+
+ // Put a series, select it. GC it and then access it.
+ lastValue := func(h *Head, maxAppendID uint64) int {
+ idx, err := h.Index()
+
+ require.NoError(t, err)
+
+ iso := h.iso.State(math.MinInt64, math.MaxInt64)
+ iso.maxAppendID = maxAppendID
+
+ chunks, err := h.chunksRange(math.MinInt64, math.MaxInt64, iso)
+ require.NoError(t, err)
+ // Hmm, a direct block chunk querier might be required here?
+ querier := blockQuerier{
+ blockBaseQuerier: &blockBaseQuerier{
+ index: idx,
+ chunks: chunks,
+ tombstones: tombstones.NewMemTombstones(),
+
+ mint: 0,
+ maxt: 10000,
+ },
+ }
+
+ require.NoError(t, err)
+ defer querier.Close()
+
+ ss := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+ _, seriesSet, ws, err := expandSeriesSet(ss)
+ require.NoError(t, err)
+ require.Empty(t, ws)
+
+ for _, series := range seriesSet {
+ return int(series[len(series)-1].f)
+ }
+ return -1
+ }
+
+ addSamples := func(h *Head) int {
+ i := 1
+ for ; i <= 1000; i++ {
+ var app storage.AppenderV2
+ // To initialize bounds.
+ if h.MinTime() == math.MaxInt64 {
+ app = &initAppenderV2{head: h}
+ } else {
+ a := h.appenderV2()
+ a.cleanupAppendIDsBelow = 0
+ app = a
+ }
+
+ _, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, int64(i), float64(i), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ h.mmapHeadChunks()
+ }
+ return i
+ }
+
+ testIsolation := func(*Head, int) {
+ }
+
+ // Test isolation without restart of Head.
+ hb, _ := newTestHead(t, 1000, compression.None, false)
+ i := addSamples(hb)
+ testIsolation(hb, i)
+
+ // Test simple cases in different chunks when no appendID cleanup has been performed.
+ require.Equal(t, 10, lastValue(hb, 10))
+ require.Equal(t, 130, lastValue(hb, 130))
+ require.Equal(t, 160, lastValue(hb, 160))
+ require.Equal(t, 240, lastValue(hb, 240))
+ require.Equal(t, 500, lastValue(hb, 500))
+ require.Equal(t, 750, lastValue(hb, 750))
+ require.Equal(t, 995, lastValue(hb, 995))
+ require.Equal(t, 999, lastValue(hb, 999))
+
+ // Cleanup appendIDs below 500.
+ app := hb.appenderV2()
+ app.cleanupAppendIDsBelow = 500
+ _, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, int64(i), float64(i), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ i++
+
+ // We should not get queries with a maxAppendID below 500 after the cleanup,
+ // but they only take the remaining appendIDs into account.
+ require.Equal(t, 499, lastValue(hb, 10))
+ require.Equal(t, 499, lastValue(hb, 130))
+ require.Equal(t, 499, lastValue(hb, 160))
+ require.Equal(t, 499, lastValue(hb, 240))
+ require.Equal(t, 500, lastValue(hb, 500))
+ require.Equal(t, 995, lastValue(hb, 995))
+ require.Equal(t, 999, lastValue(hb, 999))
+
+ // Cleanup appendIDs below 1000, which means the sample buffer is
+ // the only thing with appendIDs.
+ app = hb.appenderV2()
+ app.cleanupAppendIDsBelow = 1000
+ _, err = app.Append(0, labels.FromStrings("foo", "bar"), 0, int64(i), float64(i), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Equal(t, 999, lastValue(hb, 998))
+ require.Equal(t, 999, lastValue(hb, 999))
+ require.Equal(t, 1000, lastValue(hb, 1000))
+ require.Equal(t, 1001, lastValue(hb, 1001))
+ require.Equal(t, 1002, lastValue(hb, 1002))
+ require.Equal(t, 1002, lastValue(hb, 1003))
+
+ i++
+ // Cleanup appendIDs below 1001, but with a rollback.
+ app = hb.appenderV2()
+ app.cleanupAppendIDsBelow = 1001
+ _, err = app.Append(0, labels.FromStrings("foo", "bar"), 0, int64(i), float64(i), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Rollback())
+ require.Equal(t, 1000, lastValue(hb, 999))
+ require.Equal(t, 1000, lastValue(hb, 1000))
+ require.Equal(t, 1001, lastValue(hb, 1001))
+ require.Equal(t, 1002, lastValue(hb, 1002))
+ require.Equal(t, 1002, lastValue(hb, 1003))
+
+ require.NoError(t, hb.Close())
+
+ // Test isolation with restart of Head. This is to verify the num samples of chunks after m-map chunk replay.
+ hb, w := newTestHead(t, 1000, compression.None, false)
+ i = addSamples(hb)
+ require.NoError(t, hb.Close())
+
+ wal, err := wlog.NewSize(nil, nil, w.Dir(), 32768, compression.None)
+ require.NoError(t, err)
+ opts := DefaultHeadOptions()
+ opts.ChunkRange = 1000
+ opts.ChunkDirRoot = wal.Dir()
+ hb, err = NewHead(nil, nil, wal, nil, opts, nil)
+ defer func() { require.NoError(t, hb.Close()) }()
+ require.NoError(t, err)
+ require.NoError(t, hb.Init(0))
+
+ // No appends after restarting. Hence all should return the last value.
+ require.Equal(t, 1000, lastValue(hb, 10))
+ require.Equal(t, 1000, lastValue(hb, 130))
+ require.Equal(t, 1000, lastValue(hb, 160))
+ require.Equal(t, 1000, lastValue(hb, 240))
+ require.Equal(t, 1000, lastValue(hb, 500))
+
+ // Cleanup appendIDs below 1000, which means the sample buffer is
+ // the only thing with appendIDs.
+ app = hb.appenderV2()
+ _, err = app.Append(0, labels.FromStrings("foo", "bar"), 0, int64(i), float64(i), nil, nil, storage.AOptions{})
+ i++
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Equal(t, 1001, lastValue(hb, 998))
+ require.Equal(t, 1001, lastValue(hb, 999))
+ require.Equal(t, 1001, lastValue(hb, 1000))
+ require.Equal(t, 1001, lastValue(hb, 1001))
+ require.Equal(t, 1001, lastValue(hb, 1002))
+ require.Equal(t, 1001, lastValue(hb, 1003))
+
+ // Cleanup appendIDs below 1002, but with a rollback.
+ app = hb.appenderV2()
+ _, err = app.Append(0, labels.FromStrings("foo", "bar"), 0, int64(i), float64(i), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Rollback())
+ require.Equal(t, 1001, lastValue(hb, 999))
+ require.Equal(t, 1001, lastValue(hb, 1000))
+ require.Equal(t, 1001, lastValue(hb, 1001))
+ require.Equal(t, 1001, lastValue(hb, 1002))
+ require.Equal(t, 1001, lastValue(hb, 1003))
+}
+
+func TestHeadAppenderV2_IsolationRollback(t *testing.T) {
+ if defaultIsolationDisabled {
+ t.Skip("skipping test since tsdb isolation is disabled")
+ }
+
+ // Rollback after a failed append and test if the low watermark has progressed anyway.
+ hb, _ := newTestHead(t, 1000, compression.None, false)
+ defer func() {
+ require.NoError(t, hb.Close())
+ }()
+
+ app := hb.AppenderV2(context.Background())
+ _, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 0, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Equal(t, uint64(1), hb.iso.lowWatermark())
+
+ app = hb.AppenderV2(context.Background())
+ _, err = app.Append(0, labels.FromStrings("foo", "bar"), 0, 1, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, labels.FromStrings("foo", "bar", "foo", "baz"), 0, 2, 2, nil, nil, storage.AOptions{})
+ require.Error(t, err)
+ require.NoError(t, app.Rollback())
+ require.Equal(t, uint64(2), hb.iso.lowWatermark())
+
+ app = hb.AppenderV2(context.Background())
+ _, err = app.Append(0, labels.FromStrings("foo", "bar"), 0, 3, 3, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Equal(t, uint64(3), hb.iso.lowWatermark(), "Low watermark should proceed to 3 even if append #2 was rolled back.")
+}
+
+func TestHeadAppenderV2_IsolationLowWatermarkMonotonous(t *testing.T) {
+ if defaultIsolationDisabled {
+ t.Skip("skipping test since tsdb isolation is disabled")
+ }
+
+ hb, _ := newTestHead(t, 1000, compression.None, false)
+ defer func() {
+ require.NoError(t, hb.Close())
+ }()
+
+ app1 := hb.AppenderV2(context.Background())
+ _, err := app1.Append(0, labels.FromStrings("foo", "bar"), 0, 0, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app1.Commit())
+ require.Equal(t, uint64(1), hb.iso.lowWatermark(), "Low watermark should be 1 after the 1st append.")
+
+ app1 = hb.AppenderV2(context.Background())
+ _, err = app1.Append(0, labels.FromStrings("foo", "bar"), 0, 1, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.Equal(t, uint64(2), hb.iso.lowWatermark(), "Low watermark should be two, even if append is not committed yet.")
+
+ app2 := hb.AppenderV2(context.Background())
+ _, err = app2.Append(0, labels.FromStrings("foo", "baz"), 0, 1, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app2.Commit())
+ require.Equal(t, uint64(2), hb.iso.lowWatermark(), "Low watermark should stay two because app1 is not committed yet.")
+
+ is := hb.iso.State(math.MinInt64, math.MaxInt64)
+ require.Equal(t, uint64(2), hb.iso.lowWatermark(), "After simulated read (iso state retrieved), low watermark should stay at 2.")
+
+ require.NoError(t, app1.Commit())
+ require.Equal(t, uint64(2), hb.iso.lowWatermark(), "Even after app1 is committed, low watermark should stay at 2 because read is still ongoing.")
+
+ is.Close()
+ require.Equal(t, uint64(3), hb.iso.lowWatermark(), "After read has finished (iso state closed), low watermark should jump to three.")
+}
+
+func TestHeadAppenderV2_IsolationWithoutAdd(t *testing.T) {
+ if defaultIsolationDisabled {
+ t.Skip("skipping test since tsdb isolation is disabled")
+ }
+
+ hb, _ := newTestHead(t, 1000, compression.None, false)
+ defer func() {
+ require.NoError(t, hb.Close())
+ }()
+
+ app := hb.AppenderV2(context.Background())
+ require.NoError(t, app.Commit())
+
+ app = hb.AppenderV2(context.Background())
+ _, err := app.Append(0, labels.FromStrings("foo", "baz"), 0, 1, 1, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ require.Equal(t, hb.iso.lastAppendID(), hb.iso.lowWatermark(), "High watermark should be equal to the low watermark")
+}
+
+func TestHeadAppenderV2_Append_OutOfOrderSamplesMetric(t *testing.T) {
+ t.Parallel()
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ options := DefaultOptions()
+ testHeadAppenderV2OutOfOrderSamplesMetric(t, scenario, options, storage.ErrOutOfOrderSample)
+ })
+ }
+}
+
+func TestHeadAppenderV2_Append_OutOfOrderSamplesMetricNativeHistogramOOODisabled(t *testing.T) {
+ for name, scenario := range sampleTypeScenarios {
+ if scenario.sampleType != "histogram" {
+ continue
+ }
+ t.Run(name, func(t *testing.T) {
+ options := DefaultOptions()
+ options.OutOfOrderTimeWindow = 0
+ testHeadAppenderV2OutOfOrderSamplesMetric(t, scenario, options, storage.ErrOutOfOrderSample)
+ })
+ }
+}
+
+func testHeadAppenderV2OutOfOrderSamplesMetric(t *testing.T, scenario sampleTypeScenario, options *Options, expectOutOfOrderError error) {
+ dir := t.TempDir()
+ db, err := Open(dir, nil, nil, options, nil)
+ require.NoError(t, err)
+ defer func() {
+ require.NoError(t, db.Close())
+ }()
+ db.DisableCompactions()
+
+ appendSample := func(app storage.AppenderV2, ts int64) (storage.SeriesRef, error) {
+ // TODO(bwplotka): Migrate to V2 natively.
+ ref, _, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), labels.FromStrings("a", "b"), ts, 99)
+ return ref, err
+ }
+
+ ctx := context.Background()
+ app := db.AppenderV2(ctx)
+ for i := 1; i <= 5; i++ {
+ _, err = appendSample(app, int64(i))
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+
+ // Test out of order metric.
+ require.Equal(t, 0.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
+ app = db.AppenderV2(ctx)
+ _, err = appendSample(app, 2)
+ require.Equal(t, expectOutOfOrderError, err)
+ require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
+
+ _, err = appendSample(app, 3)
+ require.Equal(t, expectOutOfOrderError, err)
+ require.Equal(t, 2.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
+
+ _, err = appendSample(app, 4)
+ require.Equal(t, expectOutOfOrderError, err)
+ require.Equal(t, 3.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
+ require.NoError(t, app.Commit())
+
+ // Compact Head to test out of bound metric.
+ app = db.AppenderV2(ctx)
+ _, err = appendSample(app, DefaultBlockDuration*2)
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ require.Equal(t, int64(math.MinInt64), db.head.minValidTime.Load())
+ require.NoError(t, db.Compact(ctx))
+ require.Positive(t, db.head.minValidTime.Load())
+ require.Equal(t, 0.0, prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples.WithLabelValues(scenario.sampleType)))
+
+ app = db.AppenderV2(ctx)
+ _, err = appendSample(app, db.head.minValidTime.Load()-2)
+ require.Equal(t, storage.ErrOutOfBounds, err)
+ require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples.WithLabelValues(scenario.sampleType)))
+
+ _, err = appendSample(app, db.head.minValidTime.Load()-1)
+ require.Equal(t, storage.ErrOutOfBounds, err)
+ require.Equal(t, 2.0, prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples.WithLabelValues(scenario.sampleType)))
+ require.NoError(t, app.Commit())
+
+ // Some more valid samples for out of order.
+ app = db.AppenderV2(ctx)
+ for i := 1; i <= 5; i++ {
+ _, err = appendSample(app, db.head.minValidTime.Load()+DefaultBlockDuration+int64(i))
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+
+ // Test out of order metric.
+ app = db.AppenderV2(ctx)
+ _, err = appendSample(app, db.head.minValidTime.Load()+DefaultBlockDuration+2)
+ require.Equal(t, expectOutOfOrderError, err)
+ require.Equal(t, 4.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
+
+ _, err = appendSample(app, db.head.minValidTime.Load()+DefaultBlockDuration+3)
+ require.Equal(t, expectOutOfOrderError, err)
+ require.Equal(t, 5.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
+
+ _, err = appendSample(app, db.head.minValidTime.Load()+DefaultBlockDuration+4)
+ require.Equal(t, expectOutOfOrderError, err)
+ require.Equal(t, 6.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
+ require.NoError(t, app.Commit())
+}
+
+func TestHeadLabelNamesValuesWithMinMaxRange_AppenderV2(t *testing.T) {
+ head, _ := newTestHead(t, 1000, compression.None, false)
+ defer func() {
+ require.NoError(t, head.Close())
+ }()
+
+ const (
+ firstSeriesTimestamp int64 = 100
+ secondSeriesTimestamp int64 = 200
+ lastSeriesTimestamp int64 = 300
+ )
+ var (
+ seriesTimestamps = []int64{
+ firstSeriesTimestamp,
+ secondSeriesTimestamp,
+ lastSeriesTimestamp,
+ }
+ expectedLabelNames = []string{"a", "b", "c"}
+ expectedLabelValues = []string{"d", "e", "f"}
+ ctx = context.Background()
+ )
+
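+ // Append one sample per series; each series has a unique label name/value pair and its
+ // own timestamp, so the index results below depend on the queried time range.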
+ app := head.AppenderV2(ctx)
+ for i, name := range expectedLabelNames {
+ _, err := app.Append(0, labels.FromStrings(name, expectedLabelValues[i]), 0, seriesTimestamps[i], 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+ require.Equal(t, firstSeriesTimestamp, head.MinTime())
+ require.Equal(t, lastSeriesTimestamp, head.MaxTime())
+
+ testCases := []struct {
+ name string
+ mint int64
+ maxt int64
+ expectedNames []string
+ expectedValues []string
+ }{
+ {"maxt less than head min", head.MaxTime() - 10, head.MinTime() - 10, []string{}, []string{}},
+ {"mint less than head max", head.MaxTime() + 10, head.MinTime() + 10, []string{}, []string{}},
+ {"mint and maxt outside head", head.MaxTime() + 10, head.MinTime() - 10, []string{}, []string{}},
+ {"mint and maxt within head", head.MaxTime() - 10, head.MinTime() + 10, expectedLabelNames, expectedLabelValues},
+ }
+
+ for _, tt := range testCases {
+ t.Run(tt.name, func(t *testing.T) {
+ headIdxReader := head.indexRange(tt.mint, tt.maxt)
+ actualLabelNames, err := headIdxReader.LabelNames(ctx)
+ require.NoError(t, err)
+ require.Equal(t, tt.expectedNames, actualLabelNames)
+ if len(tt.expectedValues) > 0 {
+ for i, name := range expectedLabelNames {
+ actualLabelValue, err := headIdxReader.SortedLabelValues(ctx, name, nil)
+ require.NoError(t, err)
+ require.Equal(t, []string{tt.expectedValues[i]}, actualLabelValue)
+ }
+ }
+ })
+ }
+}
+
+func TestHeadAppenderV2_ErrReuse(t *testing.T) {
+ head, _ := newTestHead(t, 1000, compression.None, false)
+ defer func() {
+ require.NoError(t, head.Close())
+ }()
+
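+ // A committed or rolled-back appender must not be reused; any further Commit or
+ // Rollback call should return an error.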
+ app := head.AppenderV2(context.Background())
+ _, err := app.Append(0, labels.FromStrings("test", "test"), 0, 0, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Error(t, app.Commit())
+ require.Error(t, app.Rollback())
+
+ app = head.AppenderV2(context.Background())
+ _, err = app.Append(0, labels.FromStrings("test", "test"), 0, 1, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Rollback())
+ require.Error(t, app.Rollback())
+ require.Error(t, app.Commit())
+
+ app = head.AppenderV2(context.Background())
+ _, err = app.Append(0, labels.FromStrings("test", "test"), 0, 2, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.Error(t, app.Rollback())
+ require.Error(t, app.Commit())
+
+ app = head.AppenderV2(context.Background())
+ _, err = app.Append(0, labels.FromStrings("test", "test"), 0, 3, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Rollback())
+ require.Error(t, app.Commit())
+ require.Error(t, app.Rollback())
+}
+
+func TestHeadAppenderV2_MinTimeAfterTruncation(t *testing.T) {
+ chunkRange := int64(2000)
+ head, _ := newTestHead(t, chunkRange, compression.None, false)
+
+ app := head.AppenderV2(context.Background())
+ _, err := app.Append(0, labels.FromStrings("a", "b"), 0, 100, 100, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, labels.FromStrings("a", "b"), 0, 4000, 200, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ _, err = app.Append(0, labels.FromStrings("a", "b"), 0, 8000, 300, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ // Truncating outside the appendable window, with the actual mint also outside the
+ // appendable window, should leave mint at the actual mint.
+ require.NoError(t, head.Truncate(3500))
+ require.Equal(t, int64(4000), head.MinTime())
+ require.Equal(t, int64(4000), head.minValidTime.Load())
+
+ // After truncation outside the appendable window if the actual min time
+ // is in the appendable window then we should leave mint at the start of appendable window.
+ require.NoError(t, head.Truncate(5000))
+ require.Equal(t, head.appendableMinValidTime(), head.MinTime())
+ require.Equal(t, head.appendableMinValidTime(), head.minValidTime.Load())
+
+ // If the truncation time is inside the appendable window, then the min time
+ // should be the truncation time.
+ require.NoError(t, head.Truncate(7500))
+ require.Equal(t, int64(7500), head.MinTime())
+ require.Equal(t, int64(7500), head.minValidTime.Load())
+
+ require.NoError(t, head.Close())
+}
+
+func TestHeadAppenderV2_AppendExemplars(t *testing.T) {
+ chunkRange := int64(2000)
+ head, _ := newTestHead(t, chunkRange, compression.None, false)
+ app := head.AppenderV2(context.Background())
+
+ l := labels.FromStrings("trace_id", "123")
+
+ // It is perfectly valid to add Exemplars before the current start time -
+ // histogram buckets that haven't been updated in a while could still
+ // export exemplars from an hour ago.
+ _, err := app.Append(0, labels.FromStrings("a", "b"), 0, 100, 100, nil, nil, storage.AOptions{
+ Exemplars: []exemplar.Exemplar{{Labels: l, HasTs: true, Ts: -1000, Value: 1}},
+ })
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ require.NoError(t, head.Close())
+}
+
+// Tests https://github.com/prometheus/prometheus/issues/9079.
+func TestDataMissingOnQueryDuringCompaction_AppenderV2(t *testing.T) {
+ t.Parallel()
+ db := newTestDB(t)
+ db.DisableCompactions()
+ ctx := context.Background()
+
+ var (
+ app = db.AppenderV2(context.Background())
+ ref = storage.SeriesRef(0)
+ mint, maxt = int64(0), int64(0)
+ err error
+ )
+
+ // Appends samples to span over 1.5 block ranges.
+ expSamples := make([]chunks.Sample, 0)
+ // 7 chunks with 15s scrape interval.
+ for i := int64(0); i <= 120*7; i++ {
+ ts := i * DefaultBlockDuration / (4 * 120)
+ ref, err = app.Append(ref, labels.FromStrings("a", "b"), 0, ts, float64(i), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ maxt = ts
+ expSamples = append(expSamples, sample{ts, float64(i), nil, nil})
+ }
+ require.NoError(t, app.Commit())
+
+ // Get a querier before compaction (or when compaction is about to begin).
+ q, err := db.Querier(mint, maxt)
+ require.NoError(t, err)
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ // Compacting head while the querier spans the compaction time.
+ require.NoError(t, db.Compact(ctx))
+ require.NotEmpty(t, db.Blocks())
+ }()
+
+ // Give enough time for compaction to finish.
+ // We expect it to be blocked until querier is closed.
+ <-time.After(3 * time.Second)
+
+ // Query using the querier that was obtained before compaction.
+ series := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+ require.Equal(t, map[string][]chunks.Sample{`{a="b"}`: expSamples}, series)
+
+ wg.Wait()
+}
+
+func TestIsQuerierCollidingWithTruncation_AppenderV2(t *testing.T) {
+ db := newTestDB(t)
+ db.DisableCompactions()
+
+ var (
+ app = db.AppenderV2(context.Background())
+ ref = storage.SeriesRef(0)
+ err error
+ )
+
+ for i := int64(0); i <= 3000; i++ {
+ ref, err = app.Append(ref, labels.FromStrings("a", "b"), 0, i, float64(i), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+
+ // This mocks truncation.
+ db.head.memTruncationInProcess.Store(true)
+ db.head.lastMemoryTruncationTime.Store(2000)
+
+ // Test that IsQuerierCollidingWithTruncation suggests the correct querier ranges.
+ cases := []struct {
+ mint, maxt int64 // For the querier.
+ expShouldClose, expGetNew bool
+ expNewMint int64
+ }{
+ {-200, -100, true, false, 0},
+ {-200, 300, true, false, 0},
+ {100, 1900, true, false, 0},
+ {1900, 2200, true, true, 2000},
+ {2000, 2500, false, false, 0},
+ }
+
+ for _, c := range cases {
+ t.Run(fmt.Sprintf("mint=%d,maxt=%d", c.mint, c.maxt), func(t *testing.T) {
+ shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(c.mint, c.maxt)
+ require.Equal(t, c.expShouldClose, shouldClose)
+ require.Equal(t, c.expGetNew, getNew)
+ if getNew {
+ require.Equal(t, c.expNewMint, newMint)
+ }
+ })
+ }
+}
+
+func TestWaitForPendingReadersInTimeRange_AppenderV2(t *testing.T) {
+ t.Parallel()
+ db := newTestDB(t)
+ db.DisableCompactions()
+
+ sampleTs := func(i int64) int64 { return i * DefaultBlockDuration / (4 * 120) }
+
+ var (
+ app = db.AppenderV2(context.Background())
+ ref = storage.SeriesRef(0)
+ err error
+ )
+
+ for i := int64(0); i <= 3000; i++ {
+ ts := sampleTs(i)
+ ref, err = app.Append(ref, labels.FromStrings("a", "b"), 0, ts, float64(i), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+
+ truncMint, truncMaxt := int64(1000), int64(2000)
+ cases := []struct {
+ mint, maxt int64
+ shouldWait bool
+ }{
+ {0, 500, false}, // Before truncation range.
+ {500, 1500, true}, // Overlaps with truncation at the start.
+ {1200, 1700, true}, // Within truncation range.
+ {1800, 2500, true}, // Overlaps with truncation at the end.
+ {2000, 2500, false}, // After truncation range.
+ {2100, 2500, false}, // After truncation range.
+ }
+ for _, c := range cases {
+ t.Run(fmt.Sprintf("mint=%d,maxt=%d,shouldWait=%t", c.mint, c.maxt, c.shouldWait), func(t *testing.T) {
+ checkWaiting := func(cl io.Closer) {
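+ // WaitForPendingReadersInTimeRange should block only while a querier overlapping the
+ // truncation range is open; closing the querier must unblock it.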
+ var waitOver atomic.Bool
+ go func() {
+ db.head.WaitForPendingReadersInTimeRange(truncMint, truncMaxt)
+ waitOver.Store(true)
+ }()
+ <-time.After(550 * time.Millisecond)
+ require.Equal(t, !c.shouldWait, waitOver.Load())
+ require.NoError(t, cl.Close())
+ <-time.After(550 * time.Millisecond)
+ require.True(t, waitOver.Load())
+ }
+
+ q, err := db.Querier(c.mint, c.maxt)
+ require.NoError(t, err)
+ checkWaiting(q)
+
+ cq, err := db.ChunkQuerier(c.mint, c.maxt)
+ require.NoError(t, err)
+ checkWaiting(cq)
+ })
+ }
+}
+
+func TestChunkQueryOOOHeadDuringTruncate_AppenderV2(t *testing.T) {
+ testQueryOOOHeadDuringTruncateAppenderV2(t,
+ func(db *DB, minT, maxT int64) (storage.LabelQuerier, error) {
+ return db.ChunkQuerier(minT, maxT)
+ },
+ func(t *testing.T, lq storage.LabelQuerier, minT, _ int64) {
+ // Chunks
+ q, ok := lq.(storage.ChunkQuerier)
+ require.True(t, ok)
+ ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+ require.True(t, ss.Next())
+ s := ss.At()
+ require.False(t, ss.Next()) // One series.
+ metaIt := s.Iterator(nil)
+ require.True(t, metaIt.Next())
+ meta := metaIt.At()
+ // Samples
+ it := meta.Chunk.Iterator(nil)
+ require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data.
+ require.Equal(t, minT, it.AtT()) // It is an in-order sample.
+ require.NotEqual(t, chunkenc.ValNone, it.Next()) // Has some data.
+ require.Equal(t, minT+50, it.AtT()) // It is an out-of-order sample.
+ require.NoError(t, it.Err())
+ },
+ )
+}
+
+func testQueryOOOHeadDuringTruncateAppenderV2(t *testing.T, makeQuerier func(db *DB, minT, maxT int64) (storage.LabelQuerier, error), verify func(t *testing.T, q storage.LabelQuerier, minT, maxT int64)) {
+ const maxT int64 = 6000
+
+ dir := t.TempDir()
+ opts := DefaultOptions()
+ opts.OutOfOrderTimeWindow = maxT
+ opts.MinBlockDuration = maxT / 2 // So that head will compact up to 3000.
+
+ db, err := Open(dir, nil, nil, opts, nil)
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, db.Close())
+ })
+ db.DisableCompactions()
+
+ var (
+ ref = storage.SeriesRef(0)
+ app = db.AppenderV2(context.Background())
+ )
+ // Add in-order samples at every 100ms starting at 0ms.
+ for i := int64(0); i < maxT; i += 100 {
+ _, err := app.Append(ref, labels.FromStrings("a", "b"), 0, i, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ // Add out-of-order samples at every 100ms starting at 50ms.
+ for i := int64(50); i < maxT; i += 100 {
+ _, err := app.Append(ref, labels.FromStrings("a", "b"), 0, i, 0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+
+ requireEqualOOOSamples(t, int(maxT/100-1), db)
+
+ // Synchronization points.
+ allowQueryToStart := make(chan struct{})
+ queryStarted := make(chan struct{})
+ compactionFinished := make(chan struct{})
+
+ db.head.memTruncationCallBack = func() {
+ // Compaction has started, let the query start and wait for it to actually start to simulate race condition.
+ allowQueryToStart <- struct{}{}
+ <-queryStarted
+ }
+
+ go func() {
+ db.Compact(context.Background()) // Compact and write blocks up to 3000 (maxT/2).
+ compactionFinished <- struct{}{}
+ }()
+
+ // Wait for the compaction to start.
+ <-allowQueryToStart
+
+ q, err := makeQuerier(db, 1500, 2500)
+ require.NoError(t, err)
+ queryStarted <- struct{}{} // Unblock the compaction.
+ ctx := context.Background()
+
+ // Label names.
+ res, annots, err := q.LabelNames(ctx, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+ require.NoError(t, err)
+ require.Empty(t, annots)
+ require.Equal(t, []string{"a"}, res)
+
+ // Label values.
+ res, annots, err = q.LabelValues(ctx, "a", nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+ require.NoError(t, err)
+ require.Empty(t, annots)
+ require.Equal(t, []string{"b"}, res)
+
+ verify(t, q, 1500, 2500)
+
+ require.NoError(t, q.Close()) // Cannot be deferred as the compaction waits for queries to close before finishing.
+
+ <-compactionFinished // Wait for compaction otherwise Go test finds stray goroutines.
+}
+
+func TestHeadAppenderV2_Append_Histogram(t *testing.T) {
+ l := labels.FromStrings("a", "b")
+ for _, numHistograms := range []int{1, 10, 150, 200, 250, 300} {
+ t.Run(strconv.Itoa(numHistograms), func(t *testing.T) {
+ head, _ := newTestHead(t, 1000, compression.None, false)
+ t.Cleanup(func() {
+ require.NoError(t, head.Close())
+ })
+
+ require.NoError(t, head.Init(0))
+ ingestTs := int64(0)
+ app := head.AppenderV2(context.Background())
+
+ expHistograms := make([]chunks.Sample, 0, 2*numHistograms)
+
+ // Counter integer histograms.
+ for _, h := range tsdbutil.GenerateTestHistograms(numHistograms) {
+ _, err := app.Append(0, l, 0, ingestTs, 0, h, nil, storage.AOptions{})
+ require.NoError(t, err)
+ expHistograms = append(expHistograms, sample{t: ingestTs, h: h})
+ ingestTs++
+ if ingestTs%50 == 0 {
+ require.NoError(t, app.Commit())
+ app = head.AppenderV2(context.Background())
+ }
+ }
+
+ // Gauge integer histograms.
+ for _, h := range tsdbutil.GenerateTestGaugeHistograms(numHistograms) {
+ _, err := app.Append(0, l, 0, ingestTs, 0, h, nil, storage.AOptions{})
+ require.NoError(t, err)
+ expHistograms = append(expHistograms, sample{t: ingestTs, h: h})
+ ingestTs++
+ if ingestTs%50 == 0 {
+ require.NoError(t, app.Commit())
+ app = head.AppenderV2(context.Background())
+ }
+ }
+
+ expFloatHistograms := make([]chunks.Sample, 0, 2*numHistograms)
+
+ // Counter float histograms.
+ for _, fh := range tsdbutil.GenerateTestFloatHistograms(numHistograms) {
+ _, err := app.Append(0, l, 0, ingestTs, 0, nil, fh, storage.AOptions{})
+ require.NoError(t, err)
+ expFloatHistograms = append(expFloatHistograms, sample{t: ingestTs, fh: fh})
+ ingestTs++
+ if ingestTs%50 == 0 {
+ require.NoError(t, app.Commit())
+ app = head.AppenderV2(context.Background())
+ }
+ }
+
+ // Gauge float histograms.
+ for _, fh := range tsdbutil.GenerateTestGaugeFloatHistograms(numHistograms) {
+ _, err := app.Append(0, l, 0, ingestTs, 0, nil, fh, storage.AOptions{})
+ require.NoError(t, err)
+ expFloatHistograms = append(expFloatHistograms, sample{t: ingestTs, fh: fh})
+ ingestTs++
+ if ingestTs%50 == 0 {
+ require.NoError(t, app.Commit())
+ app = head.AppenderV2(context.Background())
+ }
+ }
+
+ require.NoError(t, app.Commit())
+
+ q, err := NewBlockQuerier(head, head.MinTime(), head.MaxTime())
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, q.Close())
+ })
+
+ ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+
+ require.True(t, ss.Next())
+ s := ss.At()
+ require.False(t, ss.Next())
+
+ it := s.Iterator(nil)
+ actHistograms := make([]chunks.Sample, 0, len(expHistograms))
+ actFloatHistograms := make([]chunks.Sample, 0, len(expFloatHistograms))
+ for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
+ switch typ {
+ case chunkenc.ValHistogram:
+ ts, h := it.AtHistogram(nil)
+ actHistograms = append(actHistograms, sample{t: ts, h: h})
+ case chunkenc.ValFloatHistogram:
+ ts, fh := it.AtFloatHistogram(nil)
+ actFloatHistograms = append(actFloatHistograms, sample{t: ts, fh: fh})
+ }
+ }
+
+ compareSeries(
+ t,
+ map[string][]chunks.Sample{"dummy": expHistograms},
+ map[string][]chunks.Sample{"dummy": actHistograms},
+ )
+ compareSeries(
+ t,
+ map[string][]chunks.Sample{"dummy": expFloatHistograms},
+ map[string][]chunks.Sample{"dummy": actFloatHistograms},
+ )
+ })
+ }
+}
+
+func TestHistogramInWALAndMmapChunk_AppenderV2(t *testing.T) {
+ head, _ := newTestHead(t, 3000, compression.None, false)
+ t.Cleanup(func() {
+ require.NoError(t, head.Close())
+ })
+ require.NoError(t, head.Init(0))
+
+ // Series with only histograms.
+ s1 := labels.FromStrings("a", "b1")
+ k1 := s1.String()
+ numHistograms := 300
+ exp := map[string][]chunks.Sample{}
+ ts := int64(0)
+ var app storage.AppenderV2
+ for _, gauge := range []bool{true, false} {
+ app = head.AppenderV2(context.Background())
+ var hists []*histogram.Histogram
+ if gauge {
+ hists = tsdbutil.GenerateTestGaugeHistograms(numHistograms)
+ } else {
+ hists = tsdbutil.GenerateTestHistograms(numHistograms)
+ }
+ for _, h := range hists {
+ h.NegativeSpans = h.PositiveSpans
+ h.NegativeBuckets = h.PositiveBuckets
+ _, err := app.Append(0, s1, 0, ts, 0, h, nil, storage.AOptions{})
+ require.NoError(t, err)
+ exp[k1] = append(exp[k1], sample{t: ts, h: h.Copy()})
+ ts++
+ if ts%5 == 0 {
+ require.NoError(t, app.Commit())
+ app = head.AppenderV2(context.Background())
+ }
+ }
+ require.NoError(t, app.Commit())
+ }
+ for _, gauge := range []bool{true, false} {
+ app = head.AppenderV2(context.Background())
+ var hists []*histogram.FloatHistogram
+ if gauge {
+ hists = tsdbutil.GenerateTestGaugeFloatHistograms(numHistograms)
+ } else {
+ hists = tsdbutil.GenerateTestFloatHistograms(numHistograms)
+ }
+ for _, h := range hists {
+ h.NegativeSpans = h.PositiveSpans
+ h.NegativeBuckets = h.PositiveBuckets
+ _, err := app.Append(0, s1, 0, ts, 0, nil, h, storage.AOptions{})
+ require.NoError(t, err)
+ exp[k1] = append(exp[k1], sample{t: ts, fh: h.Copy()})
+ ts++
+ if ts%5 == 0 {
+ require.NoError(t, app.Commit())
+ app = head.AppenderV2(context.Background())
+ }
+ }
+ require.NoError(t, app.Commit())
+ head.mmapHeadChunks()
+ }
+
+ // There should be 25 mmap chunks in s1.
+ ms := head.series.getByHash(s1.Hash(), s1)
+ require.Len(t, ms.mmappedChunks, 25)
+ expMmapChunks := make([]*mmappedChunk, 0, 20)
+ for _, mmap := range ms.mmappedChunks {
+ require.Positive(t, mmap.numSamples)
+ cpy := *mmap
+ expMmapChunks = append(expMmapChunks, &cpy)
+ }
+ expHeadChunkSamples := ms.headChunks.chunk.NumSamples()
+ require.Positive(t, expHeadChunkSamples)
+
+ // Series with mix of histograms and float.
+ s2 := labels.FromStrings("a", "b2")
+ k2 := s2.String()
+ ts = 0
+ for _, gauge := range []bool{true, false} {
+ app = head.AppenderV2(context.Background())
+ var hists []*histogram.Histogram
+ if gauge {
+ hists = tsdbutil.GenerateTestGaugeHistograms(100)
+ } else {
+ hists = tsdbutil.GenerateTestHistograms(100)
+ }
+ for _, h := range hists {
+ ts++
+ h.NegativeSpans = h.PositiveSpans
+ h.NegativeBuckets = h.PositiveBuckets
+ _, err := app.Append(0, s2, 0, ts, 0, h, nil, storage.AOptions{})
+ require.NoError(t, err)
+ eh := h.Copy()
+ if !gauge && ts > 30 && (ts-10)%20 == 1 {
+ // Need "unknown" hint after float sample.
+ eh.CounterResetHint = histogram.UnknownCounterReset
+ }
+ exp[k2] = append(exp[k2], sample{t: ts, h: eh})
+ if ts%20 == 0 {
+ require.NoError(t, app.Commit())
+ app = head.AppenderV2(context.Background())
+ // Add some float.
+ for range 10 {
+ ts++
+ _, err := app.Append(0, s2, 0, ts, float64(ts), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ exp[k2] = append(exp[k2], sample{t: ts, f: float64(ts)})
+ }
+ require.NoError(t, app.Commit())
+ app = head.AppenderV2(context.Background())
+ }
+ }
+ require.NoError(t, app.Commit())
+ }
+ for _, gauge := range []bool{true, false} {
+ app = head.AppenderV2(context.Background())
+ var hists []*histogram.FloatHistogram
+ if gauge {
+ hists = tsdbutil.GenerateTestGaugeFloatHistograms(100)
+ } else {
+ hists = tsdbutil.GenerateTestFloatHistograms(100)
+ }
+ for _, h := range hists {
+ ts++
+ h.NegativeSpans = h.PositiveSpans
+ h.NegativeBuckets = h.PositiveBuckets
+ _, err := app.Append(0, s2, 0, ts, 0, nil, h, storage.AOptions{})
+ require.NoError(t, err)
+ eh := h.Copy()
+ if !gauge && ts > 30 && (ts-10)%20 == 1 {
+ // Need "unknown" hint after float sample.
+ eh.CounterResetHint = histogram.UnknownCounterReset
+ }
+ exp[k2] = append(exp[k2], sample{t: ts, fh: eh})
+ if ts%20 == 0 {
+ require.NoError(t, app.Commit())
+ app = head.AppenderV2(context.Background())
+ // Add some float.
+ for range 10 {
+ ts++
+ _, err := app.Append(0, s2, 0, ts, float64(ts), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ exp[k2] = append(exp[k2], sample{t: ts, f: float64(ts)})
+ }
+ require.NoError(t, app.Commit())
+ app = head.AppenderV2(context.Background())
+ }
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ // Restart head.
+ require.NoError(t, head.Close())
+ startHead := func() {
+ w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, compression.None)
+ require.NoError(t, err)
+ head, err = NewHead(nil, nil, w, nil, head.opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, head.Init(0))
+ }
+ startHead()
+
+ // Checking contents of s1.
+ ms = head.series.getByHash(s1.Hash(), s1)
+ require.Equal(t, expMmapChunks, ms.mmappedChunks)
+ require.Equal(t, expHeadChunkSamples, ms.headChunks.chunk.NumSamples())
+
+ testQuery := func() {
+ q, err := NewBlockQuerier(head, head.MinTime(), head.MaxTime())
+ require.NoError(t, err)
+ act := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "a", "b.*"))
+ compareSeries(t, exp, act)
+ }
+ testQuery()
+
+ // Restart with no mmap chunks to test WAL replay.
+ require.NoError(t, head.Close())
+ require.NoError(t, os.RemoveAll(mmappedChunksDir(head.opts.ChunkDirRoot)))
+ startHead()
+ testQuery()
+}
+
+func TestChunkSnapshot_AppenderV2(t *testing.T) {
+ head, _ := newTestHead(t, 120*4, compression.None, false)
+ defer func() {
+ head.opts.EnableMemorySnapshotOnShutdown = false
+ require.NoError(t, head.Close())
+ }()
+
+ type ex struct {
+ seriesLabels labels.Labels
+ e exemplar.Exemplar
+ }
+
+ numSeries := 10
+ expSeries := make(map[string][]chunks.Sample)
+ expHist := make(map[string][]chunks.Sample)
+ expFloatHist := make(map[string][]chunks.Sample)
+ expTombstones := make(map[storage.SeriesRef]tombstones.Intervals)
+ expExemplars := make([]ex, 0)
+ histograms := tsdbutil.GenerateTestGaugeHistograms(481)
+ floatHistogram := tsdbutil.GenerateTestGaugeFloatHistograms(481)
+
+ newExemplar := func(lbls labels.Labels, ts int64) exemplar.Exemplar {
+ e := ex{
+ seriesLabels: lbls,
+ e: exemplar.Exemplar{
+ Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
+ Value: rand.Float64(),
+ Ts: ts,
+ },
+ }
+ expExemplars = append(expExemplars, e)
+ return e.e
+ }
+
+ checkSamples := func() {
+ q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+ series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
+ require.Equal(t, expSeries, series)
+ }
+ checkHistograms := func() {
+ q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+ series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "hist", "baz.*"))
+ require.Equal(t, expHist, series)
+ }
+ checkFloatHistograms := func() {
+ q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+ series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "floathist", "bat.*"))
+ require.Equal(t, expFloatHist, series)
+ }
+ checkTombstones := func() {
+ tr, err := head.Tombstones()
+ require.NoError(t, err)
+ actTombstones := make(map[storage.SeriesRef]tombstones.Intervals)
+ require.NoError(t, tr.Iter(func(ref storage.SeriesRef, itvs tombstones.Intervals) error {
+ for _, itv := range itvs {
+ actTombstones[ref] = actTombstones[ref].Add(itv)
+ }
+ return nil
+ }))
+ require.Equal(t, expTombstones, actTombstones)
+ }
+ checkExemplars := func() {
+ actExemplars := make([]ex, 0, len(expExemplars))
+ err := head.exemplars.IterateExemplars(func(seriesLabels labels.Labels, e exemplar.Exemplar) error {
+ actExemplars = append(actExemplars, ex{
+ seriesLabels: seriesLabels,
+ e: e,
+ })
+ return nil
+ })
+ require.NoError(t, err)
+ // Verifies both existence of right exemplars and order of exemplars in the buffer.
+ testutil.RequireEqualWithOptions(t, expExemplars, actExemplars, []cmp.Option{cmp.AllowUnexported(ex{})})
+ }
+
+ var (
+ wlast, woffset int
+ err error
+ )
+
+ closeHeadAndCheckSnapshot := func() {
+ require.NoError(t, head.Close())
+
+ _, sidx, soffset, err := LastChunkSnapshot(head.opts.ChunkDirRoot)
+ require.NoError(t, err)
+ require.Equal(t, wlast, sidx)
+ require.Equal(t, woffset, soffset)
+ }
+
+ openHeadAndCheckReplay := func() {
+ w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, compression.None)
+ require.NoError(t, err)
+ head, err = NewHead(nil, nil, w, nil, head.opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, head.Init(math.MinInt64))
+
+ checkSamples()
+ checkHistograms()
+ checkFloatHistograms()
+ checkTombstones()
+ checkExemplars()
+ }
+
+ { // Initial data that goes into snapshot.
+ // Add some initial samples with >=1 m-map chunk.
+ app := head.AppenderV2(context.Background())
+ for i := 1; i <= numSeries; i++ {
+ lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", i))
+ lblStr := lbls.String()
+ lblsHist := labels.FromStrings("hist", fmt.Sprintf("baz%d", i))
+ lblsHistStr := lblsHist.String()
+ lblsFloatHist := labels.FromStrings("floathist", fmt.Sprintf("bat%d", i))
+ lblsFloatHistStr := lblsFloatHist.String()
+
+ // 240 samples should m-map at least 1 chunk.
+ for ts := int64(1); ts <= 240; ts++ {
+ // Add an exemplar, but only to float sample.
+ aOpts := storage.AOptions{}
+ if ts%10 == 0 {
+ aOpts.Exemplars = []exemplar.Exemplar{newExemplar(lbls, ts)}
+ }
+ val := rand.Float64()
+ expSeries[lblStr] = append(expSeries[lblStr], sample{ts, val, nil, nil})
+ _, err := app.Append(0, lbls, 0, ts, val, nil, nil, aOpts)
+ require.NoError(t, err)
+
+ hist := histograms[int(ts)]
+ expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{ts, 0, hist, nil})
+ _, err = app.Append(0, lblsHist, 0, ts, 0, hist, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ floatHist := floatHistogram[int(ts)]
+ expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{ts, 0, nil, floatHist})
+ _, err = app.Append(0, lblsFloatHist, 0, ts, 0, nil, floatHist, storage.AOptions{})
+ require.NoError(t, err)
+
+ // Create multiple WAL records (commit).
+ if ts%10 == 0 {
+ require.NoError(t, app.Commit())
+ app = head.AppenderV2(context.Background())
+ }
+ }
+ }
+ require.NoError(t, app.Commit())
+
+ // Add some tombstones.
+ var enc record.Encoder
+ for i := 1; i <= numSeries; i++ {
+ ref := storage.SeriesRef(i)
+ itvs := tombstones.Intervals{
+ {Mint: 1234, Maxt: 2345},
+ {Mint: 3456, Maxt: 4567},
+ }
+ for _, itv := range itvs {
+ expTombstones[ref] = expTombstones[ref].Add(itv)
+ }
+ head.tombstones.AddInterval(ref, itvs...)
+ err := head.wal.Log(enc.Tombstones([]tombstones.Stone{
+ {Ref: ref, Intervals: itvs},
+ }, nil))
+ require.NoError(t, err)
+ }
+ }
+
+ // These references should be the ones used for the snapshot.
+ wlast, woffset, err = head.wal.LastSegmentAndOffset()
+ require.NoError(t, err)
+ if woffset != 0 && woffset < 32*1024 {
+ // The page is always filled before taking the snapshot.
+ woffset = 32 * 1024
+ }
+
+ {
+ // Creating snapshot and verifying it.
+ head.opts.EnableMemorySnapshotOnShutdown = true
+ closeHeadAndCheckSnapshot() // This will create a snapshot.
+
+ // Test the replay of snapshot.
+ openHeadAndCheckReplay()
+ }
+
+ { // Additional data to only include in WAL and m-mapped chunks and not snapshot. This mimics having an old snapshot on disk.
+ // Add more samples.
+ app := head.AppenderV2(context.Background())
+ for i := 1; i <= numSeries; i++ {
+ lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", i))
+ lblStr := lbls.String()
+ lblsHist := labels.FromStrings("hist", fmt.Sprintf("baz%d", i))
+ lblsHistStr := lblsHist.String()
+ lblsFloatHist := labels.FromStrings("floathist", fmt.Sprintf("bat%d", i))
+ lblsFloatHistStr := lblsFloatHist.String()
+
+ // 240 samples should m-map at least 1 chunk.
+ for ts := int64(241); ts <= 480; ts++ {
+ // Add an exemplar, but only to float sample.
+ aOpts := storage.AOptions{}
+ if ts%10 == 0 {
+ aOpts.Exemplars = []exemplar.Exemplar{newExemplar(lbls, ts)}
+ }
+ val := rand.Float64()
+ expSeries[lblStr] = append(expSeries[lblStr], sample{ts, val, nil, nil})
+ _, err := app.Append(0, lbls, 0, ts, val, nil, nil, aOpts)
+ require.NoError(t, err)
+
+ hist := histograms[int(ts)]
+ expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{ts, 0, hist, nil})
+ _, err = app.Append(0, lblsHist, 0, ts, 0, hist, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ floatHist := floatHistogram[int(ts)]
+ expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{ts, 0, nil, floatHist})
+ _, err = app.Append(0, lblsFloatHist, 0, ts, 0, nil, floatHist, storage.AOptions{})
+ require.NoError(t, err)
+
+ // Create multiple WAL records (commit).
+ if ts%10 == 0 {
+ require.NoError(t, app.Commit())
+ app = head.AppenderV2(context.Background())
+ }
+ }
+ }
+ require.NoError(t, app.Commit())
+
+ // Add more tombstones.
+ var enc record.Encoder
+ for i := 1; i <= numSeries; i++ {
+ ref := storage.SeriesRef(i)
+ itvs := tombstones.Intervals{
+ {Mint: 12345, Maxt: 23456},
+ {Mint: 34567, Maxt: 45678},
+ }
+ for _, itv := range itvs {
+ expTombstones[ref] = expTombstones[ref].Add(itv)
+ }
+ head.tombstones.AddInterval(ref, itvs...)
+ err := head.wal.Log(enc.Tombstones([]tombstones.Stone{
+ {Ref: ref, Intervals: itvs},
+ }, nil))
+ require.NoError(t, err)
+ }
+ }
+ {
+ // Close Head and verify that new snapshot was not created.
+ head.opts.EnableMemorySnapshotOnShutdown = false
+ closeHeadAndCheckSnapshot() // This should not create a snapshot.
+
+ // Test the replay of snapshot, m-map chunks, and WAL.
+ head.opts.EnableMemorySnapshotOnShutdown = true // Enabled to read from snapshot.
+ openHeadAndCheckReplay()
+ }
+
+ // Creating another snapshot should delete the older snapshot, and replay should still work fine.
+ wlast, woffset, err = head.wal.LastSegmentAndOffset()
+ require.NoError(t, err)
+ if woffset != 0 && woffset < 32*1024 {
+ // The page is always filled before taking the snapshot.
+ woffset = 32 * 1024
+ }
+
+ {
+ // Close Head and verify that new snapshot was created.
+ closeHeadAndCheckSnapshot()
+
+ // Verify that there is only 1 snapshot.
+ files, err := os.ReadDir(head.opts.ChunkDirRoot)
+ require.NoError(t, err)
+ snapshots := 0
+ for i := len(files) - 1; i >= 0; i-- {
+ fi := files[i]
+ if strings.HasPrefix(fi.Name(), chunkSnapshotPrefix) {
+ snapshots++
+ require.Equal(t, chunkSnapshotDir(wlast, woffset), fi.Name())
+ }
+ }
+ require.Equal(t, 1, snapshots)
+
+ // Test the replay of snapshot.
+ head.opts.EnableMemorySnapshotOnShutdown = true // Enabled to read from snapshot.
+
+ // Disabling exemplars to check that it does not hard fail replay
+ // https://github.com/prometheus/prometheus/issues/9437#issuecomment-933285870.
+ head.opts.EnableExemplarStorage = false
+ head.opts.MaxExemplars.Store(0)
+ expExemplars = expExemplars[:0]
+
+ openHeadAndCheckReplay()
+
+ require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal))
+ }
+}
+
+func TestSnapshotError_AppenderV2(t *testing.T) {
+ head, _ := newTestHead(t, 120*4, compression.None, false)
+ defer func() {
+ head.opts.EnableMemorySnapshotOnShutdown = false
+ require.NoError(t, head.Close())
+ }()
+
+ // Add a sample.
+ app := head.AppenderV2(context.Background())
+ lbls := labels.FromStrings("foo", "bar")
+ _, err := app.Append(0, lbls, 0, 99, 99, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ // Add histograms
+ hist := tsdbutil.GenerateTestGaugeHistograms(1)[0]
+ floatHist := tsdbutil.GenerateTestGaugeFloatHistograms(1)[0]
+ lblsHist := labels.FromStrings("hist", "bar")
+ lblsFloatHist := labels.FromStrings("floathist", "bar")
+
+ _, err = app.Append(0, lblsHist, 0, 99, 0, hist, nil, storage.AOptions{})
+ require.NoError(t, err)
+
+ _, err = app.Append(0, lblsFloatHist, 0, 99, 0, nil, floatHist, storage.AOptions{})
+ require.NoError(t, err)
+
+ require.NoError(t, app.Commit())
+
+ // Add some tombstones.
+ itvs := tombstones.Intervals{
+ {Mint: 1234, Maxt: 2345},
+ {Mint: 3456, Maxt: 4567},
+ }
+ head.tombstones.AddInterval(1, itvs...)
+
+ // Check existence of data.
+ require.NotNil(t, head.series.getByHash(lbls.Hash(), lbls))
+ tm, err := head.tombstones.Get(1)
+ require.NoError(t, err)
+ require.NotEmpty(t, tm)
+
+ head.opts.EnableMemorySnapshotOnShutdown = true
+ require.NoError(t, head.Close()) // This will create a snapshot.
+
+ // Remove the WAL so that we don't load from it.
+ require.NoError(t, os.RemoveAll(head.wal.Dir()))
+
+ // Corrupt the snapshot.
+ snapDir, _, _, err := LastChunkSnapshot(head.opts.ChunkDirRoot)
+ require.NoError(t, err)
+ files, err := os.ReadDir(snapDir)
+ require.NoError(t, err)
+ f, err := os.OpenFile(path.Join(snapDir, files[0].Name()), os.O_RDWR, 0)
+ require.NoError(t, err)
+ // Create a snapshot backup to be restored in later test cases.
+ snapshotBackup, err := io.ReadAll(f)
+ require.NoError(t, err)
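+ // Overwrite a byte near the start of the file to corrupt the snapshot.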
+ _, err = f.WriteAt([]byte{0b11111111}, 18)
+ require.NoError(t, err)
+ require.NoError(t, f.Close())
+
+ // Create new Head which should replay this snapshot.
+ w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, compression.None)
+ require.NoError(t, err)
+ // Testing https://github.com/prometheus/prometheus/issues/9437 with the registry.
+ head, err = NewHead(prometheus.NewRegistry(), nil, w, nil, head.opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, head.Init(math.MinInt64))
+
+ // There should be no series in memory after the snapshot error since the WAL was removed.
+ require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal))
+ require.Equal(t, uint64(0), head.NumSeries())
+ require.Nil(t, head.series.getByHash(lbls.Hash(), lbls))
+ tm, err = head.tombstones.Get(1)
+ require.NoError(t, err)
+ require.Empty(t, tm)
+ require.NoError(t, head.Close())
+
+ // Test corruption in the middle of the snapshot.
+ f, err = os.OpenFile(path.Join(snapDir, files[0].Name()), os.O_RDWR, 0)
+ require.NoError(t, err)
+ _, err = f.WriteAt(snapshotBackup, 0)
+ require.NoError(t, err)
+ _, err = f.WriteAt([]byte{0b11111111}, 300)
+ require.NoError(t, err)
+ require.NoError(t, f.Close())
+
+ c := &countSeriesLifecycleCallback{}
+ opts := head.opts
+ opts.SeriesCallback = c
+
+ w, err = wlog.NewSize(nil, nil, head.wal.Dir(), 32768, compression.None)
+ require.NoError(t, err)
+ head, err = NewHead(prometheus.NewRegistry(), nil, w, nil, head.opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, head.Init(math.MinInt64))
+
+ // There should be no series in memory after the snapshot error since the WAL was removed.
+ require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal))
+ require.Nil(t, head.series.getByHash(lbls.Hash(), lbls))
+ require.Equal(t, uint64(0), head.NumSeries())
+
+ // Since the snapshot could replay certain series, we continue invoking the create hooks.
+ // In such instances, we need to ensure that we also trigger the delete hooks when resetting the memory.
+ require.Equal(t, int64(2), c.created.Load())
+ require.Equal(t, int64(2), c.deleted.Load())
+
+ require.Equal(t, 2.0, prom_testutil.ToFloat64(head.metrics.seriesRemoved))
+ require.Equal(t, 2.0, prom_testutil.ToFloat64(head.metrics.seriesCreated))
+}
+
+func TestHeadAppenderV2_Append_HistogramSamplesAppendedMetric(t *testing.T) {
+ numHistograms := 10
+ head, _ := newTestHead(t, 1000, compression.None, false)
+ t.Cleanup(func() {
+ require.NoError(t, head.Close())
+ })
+ require.NoError(t, head.Init(0))
+
+ expHSeries, expHSamples := 0, 0
+
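+ // Append integer and float histograms across several series, one commit per sample,
+ // and expect the histogram samplesAppended metric to match the total count.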
+ for x := range 5 {
+ expHSeries++
+ l := labels.FromStrings("a", fmt.Sprintf("b%d", x))
+ for i, h := range tsdbutil.GenerateTestHistograms(numHistograms) {
+ app := head.AppenderV2(context.Background())
+ _, err := app.Append(0, l, 0, int64(i), 0, h, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ expHSamples++
+ }
+ for i, fh := range tsdbutil.GenerateTestFloatHistograms(numHistograms) {
+ app := head.AppenderV2(context.Background())
+ _, err := app.Append(0, l, 0, int64(numHistograms+i), 0, nil, fh, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ expHSamples++
+ }
+ }
+
+ require.Equal(t, float64(expHSamples), prom_testutil.ToFloat64(head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram)))
+
+ require.NoError(t, head.Close())
+ w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, compression.None)
+ require.NoError(t, err)
+ head, err = NewHead(nil, nil, w, nil, head.opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, head.Init(0))
+
+ require.Equal(t, float64(0), prom_testutil.ToFloat64(head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram))) // Counter reset.
+}
+
+func TestHeadAppenderV2_Append_StaleHistogram(t *testing.T) {
+ t.Run("integer histogram", func(t *testing.T) {
+ testHeadAppenderV2AppendStaleHistogram(t, false)
+ })
+ t.Run("float histogram", func(t *testing.T) {
+ testHeadAppenderV2AppendStaleHistogram(t, true)
+ })
+}
+
+func testHeadAppenderV2AppendStaleHistogram(t *testing.T, floatHistogram bool) {
+ t.Helper()
+ l := labels.FromStrings("a", "b")
+ numHistograms := 20
+ head, _ := newTestHead(t, 100000, compression.None, false)
+ t.Cleanup(func() {
+ require.NoError(t, head.Close())
+ })
+ require.NoError(t, head.Init(0))
+
+ type timedHistogram struct {
+ t int64
+ h *histogram.Histogram
+ fh *histogram.FloatHistogram
+ }
+ expHistograms := make([]timedHistogram, 0, numHistograms)
+
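+ // testQuery re-reads the series and checks every histogram, counting stale markers
+ // separately because StaleNaN values cannot be compared directly with require.Equal.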
+ testQuery := func(numStale int) {
+ q, err := NewBlockQuerier(head, head.MinTime(), head.MaxTime())
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, q.Close())
+ })
+
+ ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+
+ require.True(t, ss.Next())
+ s := ss.At()
+ require.False(t, ss.Next())
+
+ it := s.Iterator(nil)
+ actHistograms := make([]timedHistogram, 0, len(expHistograms))
+ for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
+ switch typ {
+ case chunkenc.ValHistogram:
+ t, h := it.AtHistogram(nil)
+ actHistograms = append(actHistograms, timedHistogram{t: t, h: h})
+ case chunkenc.ValFloatHistogram:
+ t, h := it.AtFloatHistogram(nil)
+ actHistograms = append(actHistograms, timedHistogram{t: t, fh: h})
+ }
+ }
+
+ // We cannot compare StaleNaN with require.Equal, hence we check each histogram manually.
+ require.Len(t, actHistograms, len(expHistograms))
+ actNumStale := 0
+ for i, eh := range expHistograms {
+ ah := actHistograms[i]
+ if floatHistogram {
+ switch {
+ case value.IsStaleNaN(eh.fh.Sum):
+ actNumStale++
+ require.True(t, value.IsStaleNaN(ah.fh.Sum))
+ // To make require.Equal work.
+ ah.fh.Sum = 0
+ eh.fh = eh.fh.Copy()
+ eh.fh.Sum = 0
+ case i > 0:
+ prev := expHistograms[i-1]
+ if prev.fh == nil || value.IsStaleNaN(prev.fh.Sum) {
+ eh.fh.CounterResetHint = histogram.UnknownCounterReset
+ }
+ }
+ require.Equal(t, eh, ah)
+ } else {
+ switch {
+ case value.IsStaleNaN(eh.h.Sum):
+ actNumStale++
+ require.True(t, value.IsStaleNaN(ah.h.Sum))
+ // To make require.Equal work.
+ ah.h.Sum = 0
+ eh.h = eh.h.Copy()
+ eh.h.Sum = 0
+ case i > 0:
+ prev := expHistograms[i-1]
+ if prev.h == nil || value.IsStaleNaN(prev.h.Sum) {
+ eh.h.CounterResetHint = histogram.UnknownCounterReset
+ }
+ }
+ require.Equal(t, eh, ah)
+ }
+ }
+ require.Equal(t, numStale, actNumStale)
+ }
+
+ // Adding stale in the same appender.
+ app := head.AppenderV2(context.Background())
+ for _, h := range tsdbutil.GenerateTestHistograms(numHistograms) {
+ var err error
+ if floatHistogram {
+ _, err = app.Append(0, l, 0, 100*int64(len(expHistograms)), 0, nil, h.ToFloat(nil), storage.AOptions{})
+ expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), fh: h.ToFloat(nil)})
+ } else {
+ _, err = app.Append(0, l, 0, 100*int64(len(expHistograms)), 0, h, nil, storage.AOptions{})
+ expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), h: h})
+ }
+ require.NoError(t, err)
+ }
+ // +1 so that delta-of-delta is not 0.
+ _, err := app.Append(0, l, 0, 100*int64(len(expHistograms))+1, math.Float64frombits(value.StaleNaN), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ if floatHistogram {
+ expHistograms = append(expHistograms, timedHistogram{t: 100*int64(len(expHistograms)) + 1, fh: &histogram.FloatHistogram{Sum: math.Float64frombits(value.StaleNaN)}})
+ } else {
+ expHistograms = append(expHistograms, timedHistogram{t: 100*int64(len(expHistograms)) + 1, h: &histogram.Histogram{Sum: math.Float64frombits(value.StaleNaN)}})
+ }
+ require.NoError(t, app.Commit())
+
+ // Only 1 chunk in memory, no m-mapped chunk.
+ s := head.series.getByHash(l.Hash(), l)
+ require.NotNil(t, s)
+ require.NotNil(t, s.headChunks)
+ require.Equal(t, 1, s.headChunks.len())
+ require.Empty(t, s.mmappedChunks)
+ testQuery(1)
+
+ // Adding stale in different appender and continuing series after a stale sample.
+ app = head.AppenderV2(context.Background())
+ for _, h := range tsdbutil.GenerateTestHistograms(2 * numHistograms)[numHistograms:] {
+ var err error
+ if floatHistogram {
+ _, err = app.Append(0, l, 0, 100*int64(len(expHistograms)), 0, nil, h.ToFloat(nil), storage.AOptions{})
+ expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), fh: h.ToFloat(nil)})
+ } else {
+ _, err = app.Append(0, l, 0, 100*int64(len(expHistograms)), 0, h, nil, storage.AOptions{})
+ expHistograms = append(expHistograms, timedHistogram{t: 100 * int64(len(expHistograms)), h: h})
+ }
+ require.NoError(t, err)
+ }
+ require.NoError(t, app.Commit())
+
+ app = head.AppenderV2(context.Background())
+ // +1 so that delta-of-delta is not 0.
+ _, err = app.Append(0, l, 0, 100*int64(len(expHistograms))+1, math.Float64frombits(value.StaleNaN), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ if floatHistogram {
+ expHistograms = append(expHistograms, timedHistogram{t: 100*int64(len(expHistograms)) + 1, fh: &histogram.FloatHistogram{Sum: math.Float64frombits(value.StaleNaN)}})
+ } else {
+ expHistograms = append(expHistograms, timedHistogram{t: 100*int64(len(expHistograms)) + 1, h: &histogram.Histogram{Sum: math.Float64frombits(value.StaleNaN)}})
+ }
+ require.NoError(t, app.Commit())
+ head.mmapHeadChunks()
+
+ // Total 2 chunks, 1 m-mapped.
+ s = head.series.getByHash(l.Hash(), l)
+ require.NotNil(t, s)
+ require.NotNil(t, s.headChunks)
+ require.Equal(t, 1, s.headChunks.len())
+ require.Len(t, s.mmappedChunks, 1)
+ testQuery(2)
+}
+
+func TestHeadAppenderV2_Append_CounterResetHeader(t *testing.T) {
+ for _, floatHisto := range []bool{true} { // FIXME
+ t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) {
+ l := labels.FromStrings("a", "b")
+ head, _ := newTestHead(t, 1000, compression.None, false)
+ t.Cleanup(func() {
+ require.NoError(t, head.Close())
+ })
+ require.NoError(t, head.Init(0))
+
+ ts := int64(0)
+ appendHistogram := func(h *histogram.Histogram) {
+ ts++
+ app := head.AppenderV2(context.Background())
+ var err error
+ if floatHisto {
+ _, err = app.Append(0, l, 0, ts, 0, nil, h.ToFloat(nil), storage.AOptions{})
+ } else {
+ _, err = app.Append(0, l, 0, ts, 0, h.Copy(), nil, storage.AOptions{})
+ }
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ }
+
+ var expHeaders []chunkenc.CounterResetHeader
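+ // Each newly cut chunk adds one expected header; verify the headers of all
+ // m-mapped chunks and the current head chunk.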
+ checkExpCounterResetHeader := func(newHeaders ...chunkenc.CounterResetHeader) {
+ expHeaders = append(expHeaders, newHeaders...)
+
+ ms, _, err := head.getOrCreate(l.Hash(), l, false)
+ require.NoError(t, err)
+ ms.mmapChunks(head.chunkDiskMapper)
+ require.Len(t, ms.mmappedChunks, len(expHeaders)-1) // One is the head chunk.
+
+ for i, mmapChunk := range ms.mmappedChunks {
+ chk, err := head.chunkDiskMapper.Chunk(mmapChunk.ref)
+ require.NoError(t, err)
+ if floatHisto {
+ require.Equal(t, expHeaders[i], chk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader())
+ } else {
+ require.Equal(t, expHeaders[i], chk.(*chunkenc.HistogramChunk).GetCounterResetHeader())
+ }
+ }
+ if floatHisto {
+ require.Equal(t, expHeaders[len(expHeaders)-1], ms.headChunks.chunk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader())
+ } else {
+ require.Equal(t, expHeaders[len(expHeaders)-1], ms.headChunks.chunk.(*chunkenc.HistogramChunk).GetCounterResetHeader())
+ }
+ }
+
+ h := tsdbutil.GenerateTestHistograms(1)[0]
+ h.PositiveBuckets = []int64{100, 1, 1, 1}
+ h.NegativeBuckets = []int64{100, 1, 1, 1}
+ h.Count = 1000
+
+ // First histogram is UnknownCounterReset.
+ appendHistogram(h)
+ checkExpCounterResetHeader(chunkenc.UnknownCounterReset)
+
+ // Another normal histogram.
+ h.Count++
+ appendHistogram(h)
+ checkExpCounterResetHeader()
+
+ // Counter reset via Count.
+ h.Count--
+ appendHistogram(h)
+ checkExpCounterResetHeader(chunkenc.CounterReset)
+
+ // Add 2 non-counter reset histogram chunks (each chunk targets 1024 bytes which contains ~500 int histogram
+ // samples or ~1000 float histogram samples).
+ numAppend := 2000
+ if floatHisto {
+ numAppend = 1000
+ }
+ for i := 0; i < numAppend; i++ {
+ appendHistogram(h)
+ }
+
+ checkExpCounterResetHeader(chunkenc.NotCounterReset, chunkenc.NotCounterReset)
+
+ // Changing schema will cut a new chunk with unknown counter reset.
+ h.Schema++
+ appendHistogram(h)
+ checkExpCounterResetHeader(chunkenc.UnknownCounterReset)
+
+ // Changing the zero threshold will cut a new chunk with unknown counter reset.
+ h.ZeroThreshold += 0.01
+ appendHistogram(h)
+ checkExpCounterResetHeader(chunkenc.UnknownCounterReset)
+
+ // Counter reset by removing a positive bucket.
+ h.PositiveSpans[1].Length--
+ h.PositiveBuckets = h.PositiveBuckets[1:]
+ appendHistogram(h)
+ checkExpCounterResetHeader(chunkenc.CounterReset)
+
+ // Counter reset by removing a negative bucket.
+ h.NegativeSpans[1].Length--
+ h.NegativeBuckets = h.NegativeBuckets[1:]
+ appendHistogram(h)
+ checkExpCounterResetHeader(chunkenc.CounterReset)
+
+ // Add 2 non-counter reset histogram chunks. Just to have some non-counter reset chunks in between.
+ for range 2000 {
+ appendHistogram(h)
+ }
+ checkExpCounterResetHeader(chunkenc.NotCounterReset, chunkenc.NotCounterReset)
+
+ // Counter reset with counter reset in a positive bucket.
+ h.PositiveBuckets[len(h.PositiveBuckets)-1]--
+ appendHistogram(h)
+ checkExpCounterResetHeader(chunkenc.CounterReset)
+
+ // Counter reset with counter reset in a negative bucket.
+ h.NegativeBuckets[len(h.NegativeBuckets)-1]--
+ appendHistogram(h)
+ checkExpCounterResetHeader(chunkenc.CounterReset)
+ })
+ }
+}
+
+func TestHeadAppenderV2_Append_OOOHistogramCounterResetHeaders(t *testing.T) {
+ for _, floatHisto := range []bool{true, false} {
+ t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) {
+ l := labels.FromStrings("a", "b")
+ head, _ := newTestHead(t, 1000, compression.None, true)
+ head.opts.OutOfOrderCapMax.Store(5)
+
+ t.Cleanup(func() {
+ require.NoError(t, head.Close())
+ })
+ require.NoError(t, head.Init(0))
+
+ appendHistogram := func(ts int64, h *histogram.Histogram) {
+ app := head.AppenderV2(context.Background())
+ var err error
+ if floatHisto {
+ _, err = app.Append(0, l, 0, ts, 0, nil, h.ToFloat(nil), storage.AOptions{})
+ } else {
+ _, err = app.Append(0, l, 0, ts, 0, h.Copy(), nil, storage.AOptions{})
+ }
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ }
+
+ type expOOOMmappedChunks struct {
+ header chunkenc.CounterResetHeader
+ mint, maxt int64
+ numSamples uint16
+ }
+
+ var expChunks []expOOOMmappedChunks
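+ // Verify the counter reset header, time range, and sample count of every OOO
+ // m-mapped chunk cut so far.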
+ checkOOOExpCounterResetHeader := func(newChunks ...expOOOMmappedChunks) {
+ expChunks = append(expChunks, newChunks...)
+
+ ms, _, err := head.getOrCreate(l.Hash(), l, false)
+ require.NoError(t, err)
+
+ require.Len(t, ms.ooo.oooMmappedChunks, len(expChunks))
+
+ for i, mmapChunk := range ms.ooo.oooMmappedChunks {
+ chk, err := head.chunkDiskMapper.Chunk(mmapChunk.ref)
+ require.NoError(t, err)
+ if floatHisto {
+ require.Equal(t, expChunks[i].header, chk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader())
+ } else {
+ require.Equal(t, expChunks[i].header, chk.(*chunkenc.HistogramChunk).GetCounterResetHeader())
+ }
+ require.Equal(t, expChunks[i].mint, mmapChunk.minTime)
+ require.Equal(t, expChunks[i].maxt, mmapChunk.maxTime)
+ require.Equal(t, expChunks[i].numSamples, mmapChunk.numSamples)
+ }
+ }
+
+ // Append an in-order histogram, so the rest of the samples can be detected as OOO.
+ appendHistogram(1000, tsdbutil.GenerateTestHistogram(1000))
+
+ // OOO histogram
+ for i := 1; i <= 5; i++ {
+ appendHistogram(100+int64(i), tsdbutil.GenerateTestHistogram(1000+int64(i)))
+ }
+ // Nothing mmapped yet.
+ checkOOOExpCounterResetHeader()
+
+ // 6th observation (which triggers a head chunk mmapping).
+ appendHistogram(int64(112), tsdbutil.GenerateTestHistogram(1002))
+
+ // One mmapped chunk with (ts, val) [(101, 1001), (102, 1002), (103, 1003), (104, 1004), (105, 1005)].
+ checkOOOExpCounterResetHeader(expOOOMmappedChunks{
+ header: chunkenc.UnknownCounterReset,
+ mint: 101,
+ maxt: 105,
+ numSamples: 5,
+ })
+
+ // Add more samples, there's a counter reset at ts 122.
+ appendHistogram(int64(110), tsdbutil.GenerateTestHistogram(1001))
+ appendHistogram(int64(124), tsdbutil.GenerateTestHistogram(904))
+ appendHistogram(int64(123), tsdbutil.GenerateTestHistogram(903))
+ appendHistogram(int64(122), tsdbutil.GenerateTestHistogram(902))
+
+ // New samples not mmapped yet.
+ checkOOOExpCounterResetHeader()
+
+ // 11th observation (which triggers another head chunk mmapping).
+ appendHistogram(int64(200), tsdbutil.GenerateTestHistogram(2000))
+
+ // Two new mmapped chunks [(110, 1001), (112, 1002)], [(122, 902), (123, 903), (124, 904)].
+ checkOOOExpCounterResetHeader(
+ expOOOMmappedChunks{
+ header: chunkenc.UnknownCounterReset,
+ mint: 110,
+ maxt: 112,
+ numSamples: 2,
+ },
+ expOOOMmappedChunks{
+ header: chunkenc.CounterReset,
+ mint: 122,
+ maxt: 124,
+ numSamples: 3,
+ },
+ )
+
+ // Count is lower than previous sample at ts 200, and NotCounterReset is always ignored on append.
+ appendHistogram(int64(205), tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(1000)))
+
+ appendHistogram(int64(210), tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(2010)))
+
+ appendHistogram(int64(220), tsdbutil.GenerateTestHistogram(2020))
+
+ appendHistogram(int64(215), tsdbutil.GenerateTestHistogram(2005))
+
+ // 16th observation (which triggers another head chunk mmapping).
+ appendHistogram(int64(350), tsdbutil.GenerateTestHistogram(4000))
+
+			// Four new mmapped chunks: [(200, 2000)], [(205, 1000)], [(210, 2010)], [(215, 2005), (220, 2020)].
+ checkOOOExpCounterResetHeader(
+ expOOOMmappedChunks{
+ header: chunkenc.UnknownCounterReset,
+ mint: 200,
+ maxt: 200,
+ numSamples: 1,
+ },
+ expOOOMmappedChunks{
+ header: chunkenc.CounterReset,
+ mint: 205,
+ maxt: 205,
+ numSamples: 1,
+ },
+ expOOOMmappedChunks{
+ header: chunkenc.CounterReset,
+ mint: 210,
+ maxt: 210,
+ numSamples: 1,
+ },
+ expOOOMmappedChunks{
+ header: chunkenc.CounterReset,
+ mint: 215,
+ maxt: 220,
+ numSamples: 2,
+ },
+ )
+
+ // Adding five more samples (21 in total), so another mmapped chunk is created.
+ appendHistogram(300, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(3000)))
+
+ for i := 1; i <= 4; i++ {
+ appendHistogram(300+int64(i), tsdbutil.GenerateTestHistogram(3000+int64(i)))
+ }
+
+ // One mmapped chunk with (ts, val) [(300, 3000), (301, 3001), (302, 3002), (303, 3003), (350, 4000)].
+ checkOOOExpCounterResetHeader(expOOOMmappedChunks{
+ header: chunkenc.CounterReset,
+ mint: 300,
+ maxt: 350,
+ numSamples: 5,
+ })
+ })
+ }
+}
+
+func TestHeadAppenderV2_Append_DifferentEncodingSameSeries(t *testing.T) {
+ dir := t.TempDir()
+ opts := DefaultOptions()
+ db, err := Open(dir, nil, nil, opts, nil)
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, db.Close())
+ })
+ db.DisableCompactions()
+
+ hists := tsdbutil.GenerateTestHistograms(10)
+ floatHists := tsdbutil.GenerateTestFloatHistograms(10)
+ lbls := labels.FromStrings("a", "b")
+
+ var expResult []chunks.Sample
+ checkExpChunks := func(count int) {
+ ms, created, err := db.Head().getOrCreate(lbls.Hash(), lbls, false)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.NotNil(t, ms)
+ require.Equal(t, count, ms.headChunks.len())
+ }
+
+ appends := []struct {
+ samples []chunks.Sample
+ expChunks int
+ err error
+ }{
+ // Histograms that end up in the expected samples are copied here so that we
+ // can independently set the CounterResetHint later.
+ {
+ samples: []chunks.Sample{sample{t: 100, h: hists[0].Copy()}},
+ expChunks: 1,
+ },
+ {
+ samples: []chunks.Sample{sample{t: 200, f: 2}},
+ expChunks: 2,
+ },
+ {
+ samples: []chunks.Sample{sample{t: 210, fh: floatHists[0].Copy()}},
+ expChunks: 3,
+ },
+ {
+ samples: []chunks.Sample{sample{t: 220, h: hists[1].Copy()}},
+ expChunks: 4,
+ },
+ {
+ samples: []chunks.Sample{sample{t: 230, fh: floatHists[3].Copy()}},
+ expChunks: 5,
+ },
+ {
+ samples: []chunks.Sample{sample{t: 100, h: hists[2].Copy()}},
+ err: storage.ErrOutOfOrderSample,
+ },
+ {
+ samples: []chunks.Sample{sample{t: 300, h: hists[3].Copy()}},
+ expChunks: 6,
+ },
+ {
+ samples: []chunks.Sample{sample{t: 100, f: 2}},
+ err: storage.ErrOutOfOrderSample,
+ },
+ {
+ samples: []chunks.Sample{sample{t: 100, fh: floatHists[4].Copy()}},
+ err: storage.ErrOutOfOrderSample,
+ },
+		// The next three cases all failed before #15177 was fixed.
+ {
+ samples: []chunks.Sample{
+ sample{t: 400, f: 4},
+ sample{t: 500, h: hists[5]},
+ sample{t: 600, f: 6},
+ },
+ expChunks: 9, // Each of the three samples above creates a new chunk because the type changes.
+ },
+ {
+ samples: []chunks.Sample{
+ sample{t: 700, h: hists[7]},
+ sample{t: 800, f: 8},
+ sample{t: 900, h: hists[9]},
+ },
+ expChunks: 12, // Again each sample creates a new chunk.
+ },
+ {
+ samples: []chunks.Sample{
+ sample{t: 1000, fh: floatHists[7]},
+ sample{t: 1100, h: hists[9]},
+ },
+ expChunks: 14, // Even changes between float and integer histogram create new chunks.
+ },
+ }
+
+ for _, a := range appends {
+ app := db.AppenderV2(context.Background())
+ for _, s := range a.samples {
+ var err error
+ if s.H() != nil || s.FH() != nil {
+ _, err = app.Append(0, lbls, 0, s.T(), 0, s.H(), s.FH(), storage.AOptions{})
+ } else {
+ _, err = app.Append(0, lbls, 0, s.T(), s.F(), nil, nil, storage.AOptions{})
+ }
+ require.Equal(t, a.err, err)
+ }
+
+ if a.err == nil {
+ require.NoError(t, app.Commit())
+ expResult = append(expResult, a.samples...)
+ checkExpChunks(a.expChunks)
+ } else {
+ require.NoError(t, app.Rollback())
+ }
+ }
+ for i, s := range expResult[1:] {
+ switch {
+ case s.H() != nil && expResult[i].H() == nil:
+ s.(sample).h.CounterResetHint = histogram.UnknownCounterReset
+ case s.FH() != nil && expResult[i].FH() == nil:
+ s.(sample).fh.CounterResetHint = histogram.UnknownCounterReset
+ }
+ }
+
+ // Query back and expect same order of samples.
+ q, err := db.Querier(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+
+ series := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+ require.Equal(t, map[string][]chunks.Sample{lbls.String(): expResult}, series)
+}
+
+func TestChunkSnapshotTakenAfterIncompleteSnapshot_AppenderV2(t *testing.T) {
+ dir := t.TempDir()
+ wlTemp, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compression.Snappy)
+ require.NoError(t, err)
+
+	// Write a snapshot with a .tmp suffix. This used to prevent taking any further snapshots or replaying snapshots.
+ snapshotName := chunkSnapshotDir(0, 100) + ".tmp"
+ cpdir := filepath.Join(dir, snapshotName)
+ require.NoError(t, os.MkdirAll(cpdir, 0o777))
+
+ opts := DefaultHeadOptions()
+ opts.ChunkDirRoot = dir
+ opts.EnableMemorySnapshotOnShutdown = true
+ head, err := NewHead(nil, nil, wlTemp, nil, opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, head.Init(math.MinInt64))
+
+ require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal))
+
+ // Add some samples for the snapshot.
+ app := head.AppenderV2(context.Background())
+ _, err = app.Append(0, labels.FromStrings("foo", "bar"), 0, 10, 10, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ // Should not return any error for a successful snapshot.
+ require.NoError(t, head.Close())
+
+ // Verify the snapshot.
+ name, idx, offset, err := LastChunkSnapshot(dir)
+ require.NoError(t, err)
+ require.NotEmpty(t, name)
+ require.Equal(t, 0, idx)
+ require.Positive(t, offset)
+}
+
+// TestWBLReplay_AppenderV2 checks the WBL replay at a low level.
+func TestWBLReplay_AppenderV2(t *testing.T) {
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testWBLReplayAppenderV2(t, scenario)
+ })
+ }
+}
+
+func testWBLReplayAppenderV2(t *testing.T, scenario sampleTypeScenario) {
+ dir := t.TempDir()
+ wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compression.Snappy)
+ require.NoError(t, err)
+ oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, compression.Snappy)
+ require.NoError(t, err)
+
+ opts := DefaultHeadOptions()
+ opts.ChunkRange = 1000
+ opts.ChunkDirRoot = dir
+ opts.OutOfOrderTimeWindow.Store(30 * time.Minute.Milliseconds())
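+	// With the in-order sample at 60m appended below, the 30m OOO window accepts
+	// the out-of-order samples at 31m-59m that follow.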
+
+ h, err := NewHead(nil, nil, wal, oooWlog, opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, h.Init(0))
+
+ var expOOOSamples []chunks.Sample
+ l := labels.FromStrings("foo", "bar")
+ appendSample := func(mins int64, _ float64, isOOO bool) {
+ app := h.AppenderV2(context.Background())
+ _, s, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), l, mins*time.Minute.Milliseconds(), mins)
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+ if isOOO {
+ expOOOSamples = append(expOOOSamples, s)
+ }
+ }
+
+ // In-order sample.
+ appendSample(60, 60, false)
+
+ // Out of order samples.
+ appendSample(40, 40, true)
+ appendSample(35, 35, true)
+ appendSample(50, 50, true)
+ appendSample(55, 55, true)
+ appendSample(59, 59, true)
+ appendSample(31, 31, true)
+
+ // Check that Head's time ranges are set properly.
+ require.Equal(t, 60*time.Minute.Milliseconds(), h.MinTime())
+ require.Equal(t, 60*time.Minute.Milliseconds(), h.MaxTime())
+ require.Equal(t, 31*time.Minute.Milliseconds(), h.MinOOOTime())
+ require.Equal(t, 59*time.Minute.Milliseconds(), h.MaxOOOTime())
+
+ // Restart head.
+ require.NoError(t, h.Close())
+ wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compression.Snappy)
+ require.NoError(t, err)
+ oooWlog, err = wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, compression.Snappy)
+ require.NoError(t, err)
+ h, err = NewHead(nil, nil, wal, oooWlog, opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, h.Init(0)) // Replay happens here.
+
+ // Get the ooo samples from the Head.
+ ms, ok, err := h.getOrCreate(l.Hash(), l, false)
+ require.NoError(t, err)
+ require.False(t, ok)
+ require.NotNil(t, ms)
+
+ chks, err := ms.ooo.oooHeadChunk.chunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+ require.Len(t, chks, 1)
+
+ it := chks[0].chunk.Iterator(nil)
+ actOOOSamples, err := storage.ExpandSamples(it, nil)
+ require.NoError(t, err)
+
+ // OOO chunk will be sorted. Hence sort the expected samples.
+ sort.Slice(expOOOSamples, func(i, j int) bool {
+ return expOOOSamples[i].T() < expOOOSamples[j].T()
+ })
+
+	// The requireEqualSamplesIgnoreCounterResets option prevents differences in counter reset
+	// headers from being factored into the sample comparison.
+	// TODO(fionaliao): understand counter reset behaviour, might want to modify this later.
+ requireEqualSamples(t, l.String(), expOOOSamples, actOOOSamples, requireEqualSamplesIgnoreCounterResets)
+
+ require.NoError(t, h.Close())
+}
+
+// TestOOOMmapReplay_AppenderV2 checks the OOO m-map chunk replay at a low level.
+func TestOOOMmapReplay_AppenderV2(t *testing.T) {
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testOOOMmapReplayAppenderV2(t, scenario)
+ })
+ }
+}
+
+func testOOOMmapReplayAppenderV2(t *testing.T, scenario sampleTypeScenario) {
+ dir := t.TempDir()
+ wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compression.Snappy)
+ require.NoError(t, err)
+ oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, compression.Snappy)
+ require.NoError(t, err)
+
+ opts := DefaultHeadOptions()
+ opts.ChunkRange = 1000
+ opts.ChunkDirRoot = dir
+ opts.OutOfOrderCapMax.Store(30)
+ opts.OutOfOrderTimeWindow.Store(1000 * time.Minute.Milliseconds())
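+	// A cap of 30 OOO samples per head chunk means the 92 OOO samples appended
+	// below produce exactly 3 m-mapped chunks, with the remainder left in the head chunk.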
+
+ h, err := NewHead(nil, nil, wal, oooWlog, opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, h.Init(0))
+
+ l := labels.FromStrings("foo", "bar")
+ appendSample := func(mins int64) {
+ app := h.AppenderV2(context.Background())
+ _, _, err := scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), l, mins*time.Minute.Milliseconds(), mins)
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ }
+
+ // In-order sample.
+ appendSample(200)
+
+ // Out of order samples. 92 samples to create 3 m-map chunks.
+ for mins := int64(100); mins <= 191; mins++ {
+ appendSample(mins)
+ }
+
+ ms, ok, err := h.getOrCreate(l.Hash(), l, false)
+ require.NoError(t, err)
+ require.False(t, ok)
+ require.NotNil(t, ms)
+
+ require.Len(t, ms.ooo.oooMmappedChunks, 3)
+ // Verify that we can access the chunks without error.
+ for _, m := range ms.ooo.oooMmappedChunks {
+ chk, err := h.chunkDiskMapper.Chunk(m.ref)
+ require.NoError(t, err)
+ require.Equal(t, int(m.numSamples), chk.NumSamples())
+ }
+
+ expMmapChunks := make([]*mmappedChunk, 3)
+ copy(expMmapChunks, ms.ooo.oooMmappedChunks)
+
+ // Restart head.
+ require.NoError(t, h.Close())
+
+ wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compression.Snappy)
+ require.NoError(t, err)
+ oooWlog, err = wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, compression.Snappy)
+ require.NoError(t, err)
+ h, err = NewHead(nil, nil, wal, oooWlog, opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, h.Init(0)) // Replay happens here.
+
+ // Get the mmap chunks from the Head.
+ ms, ok, err = h.getOrCreate(l.Hash(), l, false)
+ require.NoError(t, err)
+ require.False(t, ok)
+ require.NotNil(t, ms)
+
+ require.Len(t, ms.ooo.oooMmappedChunks, len(expMmapChunks))
+ // Verify that we can access the chunks without error.
+ for _, m := range ms.ooo.oooMmappedChunks {
+ chk, err := h.chunkDiskMapper.Chunk(m.ref)
+ require.NoError(t, err)
+ require.Equal(t, int(m.numSamples), chk.NumSamples())
+ }
+
+ actMmapChunks := make([]*mmappedChunk, len(expMmapChunks))
+ copy(actMmapChunks, ms.ooo.oooMmappedChunks)
+
+ require.Equal(t, expMmapChunks, actMmapChunks)
+
+ require.NoError(t, h.Close())
+}
+
+func TestHead_Init_DiscardChunksWithUnsupportedEncoding(t *testing.T) {
+ h, _ := newTestHead(t, 1000, compression.None, false)
+ defer func() {
+ require.NoError(t, h.Close())
+ }()
+
+ require.NoError(t, h.Init(0))
+
+ ctx := context.Background()
+ app := h.AppenderV2(ctx)
+ seriesLabels := labels.FromStrings("a", "1")
+ var seriesRef storage.SeriesRef
+ var err error
+ for i := range 400 {
+ seriesRef, err = app.Append(0, seriesLabels, 0, int64(i), float64(i), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, app.Commit())
+ require.Greater(t, prom_testutil.ToFloat64(h.metrics.chunksCreated), 1.0)
+
+ uc := newUnsupportedChunk()
+	// Make this chunk not overlap with the previous and the next chunks.
+ h.chunkDiskMapper.WriteChunk(chunks.HeadSeriesRef(seriesRef), 500, 600, uc, false, func(err error) { require.NoError(t, err) })
+
+ app = h.AppenderV2(ctx)
+ for i := 700; i < 1200; i++ {
+ _, err := app.Append(0, seriesLabels, 0, int64(i), float64(i), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, app.Commit())
+ require.Greater(t, prom_testutil.ToFloat64(h.metrics.chunksCreated), 4.0)
+
+ series, created, err := h.getOrCreate(seriesLabels.Hash(), seriesLabels, false)
+ require.NoError(t, err)
+ require.False(t, created, "should already exist")
+ require.NotNil(t, series, "should return the series we created above")
+
+ series.mmapChunks(h.chunkDiskMapper)
+ expChunks := make([]*mmappedChunk, len(series.mmappedChunks))
+ copy(expChunks, series.mmappedChunks)
+
+ require.NoError(t, h.Close())
+
+ wal, err := wlog.NewSize(nil, nil, filepath.Join(h.opts.ChunkDirRoot, "wal"), 32768, compression.None)
+ require.NoError(t, err)
+ h, err = NewHead(nil, nil, wal, nil, h.opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, h.Init(0))
+
+ series, created, err = h.getOrCreate(seriesLabels.Hash(), seriesLabels, false)
+ require.NoError(t, err)
+ require.False(t, created, "should already exist")
+ require.NotNil(t, series, "should return the series we created above")
+
+ require.Equal(t, expChunks, series.mmappedChunks)
+}
+
+// Tests https://github.com/prometheus/prometheus/issues/10277.
+func TestMmapPanicAfterMmapReplayCorruption_AppenderV2(t *testing.T) {
+ dir := t.TempDir()
+ wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compression.None)
+ require.NoError(t, err)
+
+ opts := DefaultHeadOptions()
+ opts.ChunkRange = DefaultBlockDuration
+ opts.ChunkDirRoot = dir
+ opts.EnableExemplarStorage = true
+ opts.MaxExemplars.Store(config.DefaultExemplarsConfig.MaxExemplars)
+
+ h, err := NewHead(nil, nil, wal, nil, opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, h.Init(0))
+
+ lastTs := int64(0)
+ var ref storage.SeriesRef
+ lbls := labels.FromStrings("__name__", "testing", "foo", "bar")
+ addChunks := func() {
+ interval := DefaultBlockDuration / (4 * 120)
+ app := h.AppenderV2(context.Background())
+ for i := range 250 {
+ ref, err = app.Append(ref, lbls, 0, lastTs, float64(lastTs), nil, nil, storage.AOptions{})
+ lastTs += interval
+ if i%10 == 0 {
+ require.NoError(t, app.Commit())
+ app = h.AppenderV2(context.Background())
+ }
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ addChunks()
+
+ require.NoError(t, h.Close())
+ wal, err = wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compression.None)
+ require.NoError(t, err)
+
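+	// Corrupt bytes inside the first m-mapped chunk file so the next head replay
+	// runs into the corruption; the regression being tested is a panic on this path.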
+ mmapFilePath := filepath.Join(dir, "chunks_head", "000001")
+ f, err := os.OpenFile(mmapFilePath, os.O_WRONLY, 0o666)
+ require.NoError(t, err)
+ _, err = f.WriteAt([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 17)
+ require.NoError(t, err)
+ require.NoError(t, f.Close())
+
+ h, err = NewHead(nil, nil, wal, nil, opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, h.Init(0))
+
+ addChunks()
+
+ require.NoError(t, h.Close())
+}
+
+// Tests https://github.com/prometheus/prometheus/issues/10277.
+func TestReplayAfterMmapReplayError_AppenderV2(t *testing.T) {
+ dir := t.TempDir()
+ var h *Head
+ var err error
+
+ openHead := func() {
+ wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compression.None)
+ require.NoError(t, err)
+
+ opts := DefaultHeadOptions()
+ opts.ChunkRange = DefaultBlockDuration
+ opts.ChunkDirRoot = dir
+ opts.EnableMemorySnapshotOnShutdown = true
+ opts.MaxExemplars.Store(config.DefaultExemplarsConfig.MaxExemplars)
+
+ h, err = NewHead(nil, nil, wal, nil, opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, h.Init(0))
+ }
+
+ openHead()
+
+ itvl := int64(15 * time.Second / time.Millisecond)
+ lastTs := int64(0)
+ lbls := labels.FromStrings("__name__", "testing", "foo", "bar")
+ var expSamples []chunks.Sample
+ addSamples := func(numSamples int) {
+ app := h.AppenderV2(context.Background())
+ var ref storage.SeriesRef
+ for i := range numSamples {
+ ref, err = app.Append(ref, lbls, 0, lastTs, float64(lastTs), nil, nil, storage.AOptions{})
+ expSamples = append(expSamples, sample{t: lastTs, f: float64(lastTs)})
+ require.NoError(t, err)
+ lastTs += itvl
+ if i%10 == 0 {
+ require.NoError(t, app.Commit())
+ app = h.AppenderV2(context.Background())
+ }
+ }
+ require.NoError(t, app.Commit())
+ }
+
+ // Creating multiple m-map files.
+ for i := range 5 {
+ addSamples(250)
+ require.NoError(t, h.Close())
+ if i != 4 {
+ // Don't open head for the last iteration.
+ openHead()
+ }
+ }
+
+	files, err := os.ReadDir(filepath.Join(dir, "chunks_head"))
+	require.NoError(t, err)
+	require.Len(t, files, 5)
+
+	// Corrupt an m-map file.
+ mmapFilePath := filepath.Join(dir, "chunks_head", "000002")
+ f, err := os.OpenFile(mmapFilePath, os.O_WRONLY, 0o666)
+ require.NoError(t, err)
+ _, err = f.WriteAt([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 17)
+ require.NoError(t, err)
+ require.NoError(t, f.Close())
+
+ openHead()
+ h.mmapHeadChunks()
+
+	// There should be fewer m-map files due to corruption.
+	files, err = os.ReadDir(filepath.Join(dir, "chunks_head"))
+	require.NoError(t, err)
+	require.Len(t, files, 2)
+
+ // Querying should not panic.
+ q, err := NewBlockQuerier(h, 0, lastTs)
+ require.NoError(t, err)
+ res := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "__name__", "testing"))
+ require.Equal(t, map[string][]chunks.Sample{lbls.String(): expSamples}, res)
+
+ require.NoError(t, h.Close())
+}
+
+func TestHeadAppenderV2_Append_OOOWithNoSeries(t *testing.T) {
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ testHeadAppenderV2AppendOOOWithNoSeries(t, scenario.appendFunc)
+ })
+ }
+}
+
+func testHeadAppenderV2AppendOOOWithNoSeries(t *testing.T, appendFunc func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error)) {
+ dir := t.TempDir()
+ wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compression.Snappy)
+ require.NoError(t, err)
+ oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, compression.Snappy)
+ require.NoError(t, err)
+
+ opts := DefaultHeadOptions()
+ opts.ChunkDirRoot = dir
+ opts.OutOfOrderCapMax.Store(30)
+ opts.OutOfOrderTimeWindow.Store(120 * time.Minute.Milliseconds())
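+	// With the head's max time at 300m (set by the first in-order sample below),
+	// the 120m OOO window still accepts the sample at 180m, while 179m is rejected
+	// as too old, as exercised below.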
+
+ h, err := NewHead(nil, nil, wal, oooWlog, opts, nil)
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, h.Close())
+ })
+ require.NoError(t, h.Init(0))
+
+ appendSample := func(lbls labels.Labels, ts int64) {
+ app := h.AppenderV2(context.Background())
+ _, _, err := appendFunc(storage.AppenderV2AsLimitedV1(app), lbls, ts*time.Minute.Milliseconds(), ts)
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ }
+
+ verifyOOOSamples := func(lbls labels.Labels, expSamples int) {
+ ms, created, err := h.getOrCreate(lbls.Hash(), lbls, false)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.NotNil(t, ms)
+
+ require.Nil(t, ms.headChunks)
+ require.NotNil(t, ms.ooo.oooHeadChunk)
+ require.Equal(t, expSamples, ms.ooo.oooHeadChunk.chunk.NumSamples())
+ }
+
+ verifyInOrderSamples := func(lbls labels.Labels, expSamples int) {
+ ms, created, err := h.getOrCreate(lbls.Hash(), lbls, false)
+ require.NoError(t, err)
+ require.False(t, created)
+ require.NotNil(t, ms)
+
+ require.Nil(t, ms.ooo)
+ require.NotNil(t, ms.headChunks)
+ require.Equal(t, expSamples, ms.headChunks.chunk.NumSamples())
+ }
+
+ newLabels := func(idx int) labels.Labels { return labels.FromStrings("foo", strconv.Itoa(idx)) }
+
+ s1 := newLabels(1)
+ appendSample(s1, 300) // At 300m.
+ verifyInOrderSamples(s1, 1)
+
+	// At 239m, the sample cannot be appended to the in-order chunk since it is
+	// before the appendable minValidTime, so it should go into the OOO chunk.
+	// The series does not exist for s2 yet.
+ s2 := newLabels(2)
+ appendSample(s2, 239) // OOO sample.
+ verifyOOOSamples(s2, 1)
+
+ // Similar for 180m.
+ s3 := newLabels(3)
+ appendSample(s3, 180) // OOO sample.
+ verifyOOOSamples(s3, 1)
+
+ // Now 179m is too old.
+ s4 := newLabels(4)
+ app := h.AppenderV2(context.Background())
+ _, _, err = appendFunc(storage.AppenderV2AsLimitedV1(app), s4, 179*time.Minute.Milliseconds(), 179)
+ require.Equal(t, storage.ErrTooOldSample, err)
+ require.NoError(t, app.Rollback())
+ verifyOOOSamples(s3, 1)
+
+	// Samples within the appendable minValidTime still go into the in-order chunk.
+ s5 := newLabels(5)
+ appendSample(s5, 240)
+ verifyInOrderSamples(s5, 1)
+}
+
+func TestHead_MinOOOTime_Update_AppenderV2(t *testing.T) {
+ for name, scenario := range sampleTypeScenarios {
+ t.Run(name, func(t *testing.T) {
+ if scenario.sampleType == sampleMetricTypeFloat {
+ testHeadMinOOOTimeUpdateAppenderV2(t, scenario)
+ }
+ })
+ }
+}
+
+func testHeadMinOOOTimeUpdateAppenderV2(t *testing.T, scenario sampleTypeScenario) {
+ dir := t.TempDir()
+ wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compression.Snappy)
+ require.NoError(t, err)
+ oooWlog, err := wlog.NewSize(nil, nil, filepath.Join(dir, wlog.WblDirName), 32768, compression.Snappy)
+ require.NoError(t, err)
+
+ opts := DefaultHeadOptions()
+ opts.ChunkDirRoot = dir
+ opts.OutOfOrderTimeWindow.Store(10 * time.Minute.Milliseconds())
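+	// The 10m OOO window means the allowed OOO range starts 10m before the head's
+	// max time, which drives the minOOOTime values asserted below.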
+
+ h, err := NewHead(nil, nil, wal, oooWlog, opts, nil)
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, h.Close())
+ })
+ require.NoError(t, h.Init(0))
+
+ appendSample := func(ts int64) {
+ app := h.AppenderV2(context.Background())
+ _, _, err = scenario.appendFunc(storage.AppenderV2AsLimitedV1(app), labels.FromStrings("a", "b"), ts*time.Minute.Milliseconds(), ts)
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ }
+
+ appendSample(300) // In-order sample.
+ require.Equal(t, int64(math.MaxInt64), h.MinOOOTime())
+
+ appendSample(295) // OOO sample.
+ require.Equal(t, 295*time.Minute.Milliseconds(), h.MinOOOTime())
+
+	// The allowed OOO window is now >=290m, which is earlier than the earliest OOO sample at 295m, so minOOOTime is set to the lower value, 290m.
+ require.NoError(t, h.truncateOOO(0, 1))
+ require.Equal(t, 290*time.Minute.Milliseconds(), h.MinOOOTime())
+
+ appendSample(310) // In-order sample.
+ appendSample(305) // OOO sample.
+ require.Equal(t, 290*time.Minute.Milliseconds(), h.MinOOOTime())
+
+	// The OOO sample at 295m has not been gc'ed yet, and the allowed OOO window is now >=300m.
+	// The lower of the two, 295m, is set as minOOOTime.
+ require.NoError(t, h.truncateOOO(0, 2))
+ require.Equal(t, 295*time.Minute.Milliseconds(), h.MinOOOTime())
+}
+
+func TestGaugeHistogramWALAndChunkHeader_AppenderV2(t *testing.T) {
+ l := labels.FromStrings("a", "b")
+ head, _ := newTestHead(t, 1000, compression.None, false)
+ t.Cleanup(func() {
+ require.NoError(t, head.Close())
+ })
+ require.NoError(t, head.Init(0))
+
+ ts := int64(0)
+ appendHistogram := func(h *histogram.Histogram) {
+ ts++
+ app := head.AppenderV2(context.Background())
+ _, err := app.Append(0, l, 0, ts, 0, h.Copy(), nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ }
+
+ hists := tsdbutil.GenerateTestGaugeHistograms(5)
+ hists[0].CounterResetHint = histogram.UnknownCounterReset
+ appendHistogram(hists[0])
+ appendHistogram(hists[1])
+ appendHistogram(hists[2])
+ hists[3].CounterResetHint = histogram.UnknownCounterReset
+ appendHistogram(hists[3])
+ appendHistogram(hists[3])
+ appendHistogram(hists[4])
+
+ checkHeaders := func() {
+ head.mmapHeadChunks()
+ ms, _, err := head.getOrCreate(l.Hash(), l, false)
+ require.NoError(t, err)
+ require.Len(t, ms.mmappedChunks, 3)
+ expHeaders := []chunkenc.CounterResetHeader{
+ chunkenc.UnknownCounterReset,
+ chunkenc.GaugeType,
+ chunkenc.NotCounterReset,
+ chunkenc.GaugeType,
+ }
+ for i, mmapChunk := range ms.mmappedChunks {
+ chk, err := head.chunkDiskMapper.Chunk(mmapChunk.ref)
+ require.NoError(t, err)
+ require.Equal(t, expHeaders[i], chk.(*chunkenc.HistogramChunk).GetCounterResetHeader())
+ }
+ require.Equal(t, expHeaders[len(expHeaders)-1], ms.headChunks.chunk.(*chunkenc.HistogramChunk).GetCounterResetHeader())
+ }
+ checkHeaders()
+
+ recs := readTestWAL(t, head.wal.Dir())
+ require.Equal(t, []any{
+ []record.RefSeries{
+ {
+ Ref: 1,
+ Labels: labels.FromStrings("a", "b"),
+ },
+ },
+ []record.RefHistogramSample{{Ref: 1, T: 1, H: hists[0]}},
+ []record.RefHistogramSample{{Ref: 1, T: 2, H: hists[1]}},
+ []record.RefHistogramSample{{Ref: 1, T: 3, H: hists[2]}},
+ []record.RefHistogramSample{{Ref: 1, T: 4, H: hists[3]}},
+ []record.RefHistogramSample{{Ref: 1, T: 5, H: hists[3]}},
+ []record.RefHistogramSample{{Ref: 1, T: 6, H: hists[4]}},
+ }, recs)
+
+	// Restart the Head without m-map chunks so that the WAL replay has to recognize the gauge histograms.
+ require.NoError(t, head.Close())
+ require.NoError(t, os.RemoveAll(mmappedChunksDir(head.opts.ChunkDirRoot)))
+
+ w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, compression.None)
+ require.NoError(t, err)
+ head, err = NewHead(nil, nil, w, nil, head.opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, head.Init(0))
+
+ checkHeaders()
+}
+
+func TestGaugeFloatHistogramWALAndChunkHeader_AppenderV2(t *testing.T) {
+ l := labels.FromStrings("a", "b")
+ head, _ := newTestHead(t, 1000, compression.None, false)
+ t.Cleanup(func() {
+ require.NoError(t, head.Close())
+ })
+ require.NoError(t, head.Init(0))
+
+ ts := int64(0)
+ appendHistogram := func(h *histogram.FloatHistogram) {
+ ts++
+ app := head.AppenderV2(context.Background())
+ _, err := app.Append(0, l, 0, ts, 0, nil, h.Copy(), storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ }
+
+ hists := tsdbutil.GenerateTestGaugeFloatHistograms(5)
+ hists[0].CounterResetHint = histogram.UnknownCounterReset
+ appendHistogram(hists[0])
+ appendHistogram(hists[1])
+ appendHistogram(hists[2])
+ hists[3].CounterResetHint = histogram.UnknownCounterReset
+ appendHistogram(hists[3])
+ appendHistogram(hists[3])
+ appendHistogram(hists[4])
+
+ checkHeaders := func() {
+ ms, _, err := head.getOrCreate(l.Hash(), l, false)
+ require.NoError(t, err)
+ head.mmapHeadChunks()
+ require.Len(t, ms.mmappedChunks, 3)
+ expHeaders := []chunkenc.CounterResetHeader{
+ chunkenc.UnknownCounterReset,
+ chunkenc.GaugeType,
+ chunkenc.UnknownCounterReset,
+ chunkenc.GaugeType,
+ }
+ for i, mmapChunk := range ms.mmappedChunks {
+ chk, err := head.chunkDiskMapper.Chunk(mmapChunk.ref)
+ require.NoError(t, err)
+ require.Equal(t, expHeaders[i], chk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader())
+ }
+ require.Equal(t, expHeaders[len(expHeaders)-1], ms.headChunks.chunk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader())
+ }
+ checkHeaders()
+
+ recs := readTestWAL(t, head.wal.Dir())
+ require.Equal(t, []any{
+ []record.RefSeries{
+ {
+ Ref: 1,
+ Labels: labels.FromStrings("a", "b"),
+ },
+ },
+ []record.RefFloatHistogramSample{{Ref: 1, T: 1, FH: hists[0]}},
+ []record.RefFloatHistogramSample{{Ref: 1, T: 2, FH: hists[1]}},
+ []record.RefFloatHistogramSample{{Ref: 1, T: 3, FH: hists[2]}},
+ []record.RefFloatHistogramSample{{Ref: 1, T: 4, FH: hists[3]}},
+ []record.RefFloatHistogramSample{{Ref: 1, T: 5, FH: hists[3]}},
+ []record.RefFloatHistogramSample{{Ref: 1, T: 6, FH: hists[4]}},
+ }, recs)
+
+	// Restart the Head without m-map chunks so that the WAL replay has to recognize the gauge histograms.
+ require.NoError(t, head.Close())
+ require.NoError(t, os.RemoveAll(mmappedChunksDir(head.opts.ChunkDirRoot)))
+
+ w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, compression.None)
+ require.NoError(t, err)
+ head, err = NewHead(nil, nil, w, nil, head.opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, head.Init(0))
+
+ checkHeaders()
+}
+
+func TestSnapshotAheadOfWALError_AppenderV2(t *testing.T) {
+ head, _ := newTestHead(t, 120*4, compression.None, false)
+ head.opts.EnableMemorySnapshotOnShutdown = true
+ // Add a sample to fill WAL.
+ app := head.AppenderV2(context.Background())
+ _, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 10, 10, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+	// Increment the snapshot index (it follows the WAL segment index) to create a sufficiently large difference.
+ for range 2 {
+ _, err = head.wal.NextSegment()
+ require.NoError(t, err)
+ }
+ require.NoError(t, head.Close()) // This will create a snapshot.
+
+ _, idx, _, err := LastChunkSnapshot(head.opts.ChunkDirRoot)
+ require.NoError(t, err)
+ require.Equal(t, 2, idx)
+
+ // Restart the WAL while keeping the old snapshot. The new head is created manually in this case in order
+ // to keep using the same snapshot directory instead of a random one.
+ require.NoError(t, os.RemoveAll(head.wal.Dir()))
+ head.opts.EnableMemorySnapshotOnShutdown = false
+ w, _ := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, compression.None)
+ head, err = NewHead(nil, nil, w, nil, head.opts, nil)
+ require.NoError(t, err)
+ // Add a sample to fill WAL.
+ app = head.AppenderV2(context.Background())
+ _, err = app.Append(0, labels.FromStrings("foo", "bar"), 0, 10, 10, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ lastSegment, _, _ := w.LastSegmentAndOffset()
+ require.Equal(t, 0, lastSegment)
+ require.NoError(t, head.Close())
+
+ // New WAL is saved, but old snapshot still exists.
+ _, idx, _, err = LastChunkSnapshot(head.opts.ChunkDirRoot)
+ require.NoError(t, err)
+ require.Equal(t, 2, idx)
+
+ // Create new Head which should detect the incorrect index and delete the snapshot.
+ head.opts.EnableMemorySnapshotOnShutdown = true
+ w, _ = wlog.NewSize(nil, nil, head.wal.Dir(), 32768, compression.None)
+ head, err = NewHead(nil, nil, w, nil, head.opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, head.Init(math.MinInt64))
+
+ // Verify that snapshot directory does not exist anymore.
+ _, _, _, err = LastChunkSnapshot(head.opts.ChunkDirRoot)
+ require.Equal(t, record.ErrNotFound, err)
+
+ require.NoError(t, head.Close())
+}
+
+func TestCuttingNewHeadChunks_AppenderV2(t *testing.T) {
+ ctx := context.Background()
+ testCases := map[string]struct {
+ numTotalSamples int
+ timestampJitter bool
+ floatValFunc func(i int) float64
+ histValFunc func(i int) *histogram.Histogram
+ expectedChks []struct {
+ numSamples int
+ numBytes int
+ }
+ }{
+ "float samples": {
+ numTotalSamples: 180,
+ floatValFunc: func(int) float64 {
+ return 1.
+ },
+ expectedChks: []struct {
+ numSamples int
+ numBytes int
+ }{
+ {numSamples: 120, numBytes: 46},
+ {numSamples: 60, numBytes: 32},
+ },
+ },
+ "large float samples": {
+			// Normally 120 samples would fit into a single chunk, but these large samples make the chunk exceed the 1005 byte soft cap.
+ numTotalSamples: 120,
+ timestampJitter: true,
+ floatValFunc: func(i int) float64 {
+				// Flipping between these two makes each sample value take at least 64 bits.
+ vals := []float64{math.MaxFloat64, 0x00}
+ return vals[i%len(vals)]
+ },
+ expectedChks: []struct {
+ numSamples int
+ numBytes int
+ }{
+ {99, 1008},
+ {21, 219},
+ },
+ },
+ "small histograms": {
+ numTotalSamples: 240,
+ histValFunc: func() func(i int) *histogram.Histogram {
+ hists := histogram.GenerateBigTestHistograms(240, 10)
+ return func(i int) *histogram.Histogram {
+ return hists[i]
+ }
+ }(),
+ expectedChks: []struct {
+ numSamples int
+ numBytes int
+ }{
+ {120, 1087},
+ {120, 1039},
+ },
+ },
+ "large histograms": {
+ numTotalSamples: 240,
+ histValFunc: func() func(i int) *histogram.Histogram {
+ hists := histogram.GenerateBigTestHistograms(240, 100)
+ return func(i int) *histogram.Histogram {
+ return hists[i]
+ }
+ }(),
+ expectedChks: []struct {
+ numSamples int
+ numBytes int
+ }{
+ {40, 896},
+ {40, 899},
+ {40, 896},
+ {30, 690},
+ {30, 691},
+ {30, 694},
+ {30, 693},
+ },
+ },
+ "really large histograms": {
+ // Really large histograms; each chunk can only contain a single histogram but we have a 10 sample minimum
+ // per chunk.
+ numTotalSamples: 11,
+ histValFunc: func() func(i int) *histogram.Histogram {
+ hists := histogram.GenerateBigTestHistograms(11, 100000)
+ return func(i int) *histogram.Histogram {
+ return hists[i]
+ }
+ }(),
+ expectedChks: []struct {
+ numSamples int
+ numBytes int
+ }{
+ {10, 200103},
+ {1, 87540},
+ },
+ },
+ }
+ for testName, tc := range testCases {
+ t.Run(testName, func(t *testing.T) {
+ h, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
+ defer func() {
+ require.NoError(t, h.Close())
+ }()
+
+ a := h.AppenderV2(context.Background())
+
+ ts := int64(10000)
+ lbls := labels.FromStrings("foo", "bar")
+ jitter := []int64{0, 1} // A bit of jitter to prevent dod=0.
+
+ for i := 0; i < tc.numTotalSamples; i++ {
+ if tc.floatValFunc != nil {
+ _, err := a.Append(0, lbls, 0, ts, tc.floatValFunc(i), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ } else if tc.histValFunc != nil {
+ _, err := a.Append(0, lbls, 0, ts, 0, tc.histValFunc(i), nil, storage.AOptions{})
+ require.NoError(t, err)
+ }
+
+ ts += int64(60 * time.Second / time.Millisecond)
+ if tc.timestampJitter {
+ ts += jitter[i%len(jitter)]
+ }
+ }
+
+ require.NoError(t, a.Commit())
+
+ idxReader, err := h.Index()
+ require.NoError(t, err)
+
+ chkReader, err := h.Chunks()
+ require.NoError(t, err)
+
+ p, err := idxReader.Postings(ctx, "foo", "bar")
+ require.NoError(t, err)
+
+ var lblBuilder labels.ScratchBuilder
+
+ for p.Next() {
+ sRef := p.At()
+
+ chkMetas := make([]chunks.Meta, len(tc.expectedChks))
+ require.NoError(t, idxReader.Series(sRef, &lblBuilder, &chkMetas))
+
+ require.Len(t, chkMetas, len(tc.expectedChks))
+
+ for i, expected := range tc.expectedChks {
+ chk, iterable, err := chkReader.ChunkOrIterable(chkMetas[i])
+ require.NoError(t, err)
+ require.Nil(t, iterable)
+
+ require.Equal(t, expected.numSamples, chk.NumSamples())
+ require.Len(t, chk.Bytes(), expected.numBytes)
+ }
+ }
+ })
+ }
+}
+
+// TestHeadDetectsDuplicateSampleAtSizeLimit_AppenderV2 tests a regression where a duplicate sample
+// is appended to the head, right when the head chunk is at the size limit.
+// The test appends every sample twice, thus expecting that the result has
+// exactly half of the samples.
+func TestHeadDetectsDuplicateSampleAtSizeLimit_AppenderV2(t *testing.T) {
+ numSamples := 1000
+ baseTS := int64(1695209650)
+
+ h, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
+ defer func() {
+ require.NoError(t, h.Close())
+ }()
+
+ a := h.AppenderV2(context.Background())
+ var err error
+ vals := []float64{math.MaxFloat64, 0x00} // Use the worst case scenario for the XOR encoding. Otherwise we hit the sample limit before the size limit.
+ for i := range numSamples {
+ ts := baseTS + int64(i/2)*10000
+ a.Append(0, labels.FromStrings("foo", "bar"), 0, ts, vals[(i/2)%len(vals)], nil, nil, storage.AOptions{})
+ err = a.Commit()
+ require.NoError(t, err)
+ a = h.AppenderV2(context.Background())
+ }
+
+ indexReader, err := h.Index()
+ require.NoError(t, err)
+
+ var (
+ chunks []chunks.Meta
+ builder labels.ScratchBuilder
+ )
+ require.NoError(t, indexReader.Series(1, &builder, &chunks))
+
+ chunkReader, err := h.Chunks()
+ require.NoError(t, err)
+
+ storedSampleCount := 0
+ for _, chunkMeta := range chunks {
+ chunk, iterable, err := chunkReader.ChunkOrIterable(chunkMeta)
+ require.NoError(t, err)
+ require.Nil(t, iterable)
+ storedSampleCount += chunk.NumSamples()
+ }
+
+ require.Equal(t, numSamples/2, storedSampleCount)
+}
+
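+// TestWALSampleAndExemplarOrder_AppenderV2 verifies that, for a single Append carrying an
+// exemplar, the WAL records are written in the order: series record, sample record,
+// exemplar record.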
+func TestWALSampleAndExemplarOrder_AppenderV2(t *testing.T) {
+ lbls := labels.FromStrings("foo", "bar")
+ testcases := map[string]struct {
+ appendF func(app storage.AppenderV2, ts int64) (storage.SeriesRef, error)
+ expectedType reflect.Type
+ }{
+ "float sample": {
+ appendF: func(app storage.AppenderV2, ts int64) (storage.SeriesRef, error) {
+ return app.Append(0, lbls, 0, ts, 1.0, nil, nil, storage.AOptions{Exemplars: []exemplar.Exemplar{{Value: 1.0, Ts: 5}}})
+ },
+ expectedType: reflect.TypeFor[[]record.RefSample](),
+ },
+ "histogram sample": {
+ appendF: func(app storage.AppenderV2, ts int64) (storage.SeriesRef, error) {
+ return app.Append(0, lbls, 0, ts, 0, tsdbutil.GenerateTestHistogram(1), nil, storage.AOptions{Exemplars: []exemplar.Exemplar{{Value: 1.0, Ts: 5}}})
+ },
+ expectedType: reflect.TypeFor[[]record.RefHistogramSample](),
+ },
+ "float histogram sample": {
+ appendF: func(app storage.AppenderV2, ts int64) (storage.SeriesRef, error) {
+ return app.Append(0, lbls, 0, ts, 0, nil, tsdbutil.GenerateTestFloatHistogram(1), storage.AOptions{Exemplars: []exemplar.Exemplar{{Value: 1.0, Ts: 5}}})
+ },
+ expectedType: reflect.TypeFor[[]record.RefFloatHistogramSample](),
+ },
+ }
+
+ for testName, tc := range testcases {
+ t.Run(testName, func(t *testing.T) {
+ h, w := newTestHead(t, 1000, compression.None, false)
+ defer func() {
+ require.NoError(t, h.Close())
+ }()
+
+ app := h.AppenderV2(context.Background())
+ _, err := tc.appendF(app, 10)
+ require.NoError(t, err)
+
+ require.NoError(t, app.Commit())
+
+ recs := readTestWAL(t, w.Dir())
+ require.Len(t, recs, 3)
+ _, ok := recs[0].([]record.RefSeries)
+ require.True(t, ok, "expected first record to be a RefSeries")
+ actualType := reflect.TypeOf(recs[1])
+ require.Equal(t, tc.expectedType, actualType, "expected second record to be a %s", tc.expectedType)
+ _, ok = recs[2].([]record.RefExemplar)
+ require.True(t, ok, "expected third record to be a RefExemplar")
+ })
+ }
+}
+
+func TestHeadAppenderV2_Append_FloatWithSameTimestampAsPreviousHistogram(t *testing.T) {
+ head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
+ t.Cleanup(func() { head.Close() })
+
+ ls := labels.FromStrings(labels.MetricName, "test")
+
+ {
+ // Append a float 10.0 @ 1_000
+ app := head.AppenderV2(context.Background())
+ _, err := app.Append(0, ls, 0, 1_000, 10.0, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ }
+
+ {
+		// Append a histogram @ 2_000
+ app := head.AppenderV2(context.Background())
+ h := tsdbutil.GenerateTestHistogram(1)
+ _, err := app.Append(0, ls, 0, 2_000, 0, h, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ }
+
+ app := head.AppenderV2(context.Background())
+ _, err := app.Append(0, ls, 0, 2_000, 10.0, nil, nil, storage.AOptions{})
+ require.Error(t, err)
+ require.ErrorIs(t, err, storage.NewDuplicateHistogramToFloatErr(2_000, 10.0))
+}
+
+func TestHeadAppenderV2_Append_EnableSTAsZeroSample(t *testing.T) {
+	// Make sure counter reset hints are non-zero, so we can detect ST histogram samples.
+ testHistogram := tsdbutil.GenerateTestHistogram(1)
+ testHistogram.CounterResetHint = histogram.NotCounterReset
+ testFloatHistogram := tsdbutil.GenerateTestFloatHistogram(1)
+ testFloatHistogram.CounterResetHint = histogram.NotCounterReset
+ // TODO(beorn7): Once issue #15346 is fixed, the CounterResetHint of the
+ // following two zero histograms should be histogram.CounterReset.
+ testZeroHistogram := &histogram.Histogram{
+ Schema: testHistogram.Schema,
+ ZeroThreshold: testHistogram.ZeroThreshold,
+ PositiveSpans: testHistogram.PositiveSpans,
+ NegativeSpans: testHistogram.NegativeSpans,
+ PositiveBuckets: []int64{0, 0, 0, 0},
+ NegativeBuckets: []int64{0, 0, 0, 0},
+ }
+ testZeroFloatHistogram := &histogram.FloatHistogram{
+ Schema: testFloatHistogram.Schema,
+ ZeroThreshold: testFloatHistogram.ZeroThreshold,
+ PositiveSpans: testFloatHistogram.PositiveSpans,
+ NegativeSpans: testFloatHistogram.NegativeSpans,
+ PositiveBuckets: []float64{0, 0, 0, 0},
+ NegativeBuckets: []float64{0, 0, 0, 0},
+ }
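+	// appendableSamples describes one Append call. st is the start timestamp (ST) which,
+	// with EnableSTAsZeroSample, is injected as a synthetic zero-valued sample at t=st
+	// when it is newer than the previously appended sample.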
+ type appendableSamples struct {
+ ts int64
+ fSample float64
+ h *histogram.Histogram
+ fh *histogram.FloatHistogram
+ st int64
+ }
+ for _, tc := range []struct {
+ name string
+ appendableSamples []appendableSamples
+ expectedSamples []chunks.Sample
+ }{
+ {
+ name: "In order ct+normal sample/floatSample",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fSample: 10, st: 1},
+ {ts: 101, fSample: 10, st: 1},
+ },
+ expectedSamples: []chunks.Sample{
+ sample{t: 1, f: 0},
+ sample{t: 100, f: 10},
+ sample{t: 101, f: 10},
+ },
+ },
+ {
+ name: "In order ct+normal sample/histogram",
+ appendableSamples: []appendableSamples{
+ {ts: 100, h: testHistogram, st: 1},
+ {ts: 101, h: testHistogram, st: 1},
+ },
+ expectedSamples: func() []chunks.Sample {
+ return []chunks.Sample{
+ sample{t: 1, h: testZeroHistogram},
+ sample{t: 100, h: testHistogram},
+ sample{t: 101, h: testHistogram},
+ }
+ }(),
+ },
+ {
+ name: "In order ct+normal sample/floathistogram",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fh: testFloatHistogram, st: 1},
+ {ts: 101, fh: testFloatHistogram, st: 1},
+ },
+ expectedSamples: func() []chunks.Sample {
+ return []chunks.Sample{
+ sample{t: 1, fh: testZeroFloatHistogram},
+ sample{t: 100, fh: testFloatHistogram},
+ sample{t: 101, fh: testFloatHistogram},
+ }
+ }(),
+ },
+ {
+ name: "Consecutive appends with same st ignore st/floatSample",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fSample: 10, st: 1},
+ {ts: 101, fSample: 10, st: 1},
+ },
+ expectedSamples: []chunks.Sample{
+ sample{t: 1, f: 0},
+ sample{t: 100, f: 10},
+ sample{t: 101, f: 10},
+ },
+ },
+ {
+ name: "Consecutive appends with same st ignore st/histogram",
+ appendableSamples: []appendableSamples{
+ {ts: 100, h: testHistogram, st: 1},
+ {ts: 101, h: testHistogram, st: 1},
+ },
+ expectedSamples: func() []chunks.Sample {
+ return []chunks.Sample{
+ sample{t: 1, h: testZeroHistogram},
+ sample{t: 100, h: testHistogram},
+ sample{t: 101, h: testHistogram},
+ }
+ }(),
+ },
+ {
+ name: "Consecutive appends with same st ignore st/floathistogram",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fh: testFloatHistogram, st: 1},
+ {ts: 101, fh: testFloatHistogram, st: 1},
+ },
+ expectedSamples: func() []chunks.Sample {
+ return []chunks.Sample{
+ sample{t: 1, fh: testZeroFloatHistogram},
+ sample{t: 100, fh: testFloatHistogram},
+ sample{t: 101, fh: testFloatHistogram},
+ }
+ }(),
+ },
+ {
+ name: "Consecutive appends with newer st do not ignore st/floatSample",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fSample: 10, st: 1},
+ {ts: 102, fSample: 10, st: 101},
+ },
+ expectedSamples: []chunks.Sample{
+ sample{t: 1, f: 0},
+ sample{t: 100, f: 10},
+ sample{t: 101, f: 0},
+ sample{t: 102, f: 10},
+ },
+ },
+ {
+ name: "Consecutive appends with newer st do not ignore st/histogram",
+ appendableSamples: []appendableSamples{
+ {ts: 100, h: testHistogram, st: 1},
+ {ts: 102, h: testHistogram, st: 101},
+ },
+ expectedSamples: []chunks.Sample{
+ sample{t: 1, h: testZeroHistogram},
+ sample{t: 100, h: testHistogram},
+ sample{t: 101, h: testZeroHistogram},
+ sample{t: 102, h: testHistogram},
+ },
+ },
+ {
+ name: "Consecutive appends with newer st do not ignore st/floathistogram",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fh: testFloatHistogram, st: 1},
+ {ts: 102, fh: testFloatHistogram, st: 101},
+ },
+ expectedSamples: []chunks.Sample{
+ sample{t: 1, fh: testZeroFloatHistogram},
+ sample{t: 100, fh: testFloatHistogram},
+ sample{t: 101, fh: testZeroFloatHistogram},
+ sample{t: 102, fh: testFloatHistogram},
+ },
+ },
+ {
+ name: "ST equals to previous sample timestamp is ignored/floatSample",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fSample: 10, st: 1},
+ {ts: 101, fSample: 10, st: 100},
+ },
+ expectedSamples: []chunks.Sample{
+ sample{t: 1, f: 0},
+ sample{t: 100, f: 10},
+ sample{t: 101, f: 10},
+ },
+ },
+ {
+ name: "ST equals to previous sample timestamp is ignored/histogram",
+ appendableSamples: []appendableSamples{
+ {ts: 100, h: testHistogram, st: 1},
+ {ts: 101, h: testHistogram, st: 100},
+ },
+ expectedSamples: func() []chunks.Sample {
+ return []chunks.Sample{
+ sample{t: 1, h: testZeroHistogram},
+ sample{t: 100, h: testHistogram},
+ sample{t: 101, h: testHistogram},
+ }
+ }(),
+ },
+ {
+ name: "ST equals to previous sample timestamp is ignored/floathistogram",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fh: testFloatHistogram, st: 1},
+ {ts: 101, fh: testFloatHistogram, st: 100},
+ },
+ expectedSamples: func() []chunks.Sample {
+ return []chunks.Sample{
+ sample{t: 1, fh: testZeroFloatHistogram},
+ sample{t: 100, fh: testFloatHistogram},
+ sample{t: 101, fh: testFloatHistogram},
+ }
+ }(),
+ },
+ {
+ name: "ST lower than minValidTime/float",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fSample: 10, st: -1},
+ },
+			// The ST results in ErrOutOfBounds, but the ST append is best effort, so
+			// the ST is ignored and the sample is appended.
+ expectedSamples: func() []chunks.Sample {
+ return []chunks.Sample{
+ sample{t: 100, f: 10},
+ }
+ }(),
+ },
+ {
+ name: "ST lower than minValidTime/histogram",
+ appendableSamples: []appendableSamples{
+ {ts: 100, h: testHistogram, st: -1},
+ },
+			// The ST results in ErrOutOfBounds, but the ST append is best effort, so
+			// the ST is ignored and the sample is appended.
+ expectedSamples: func() []chunks.Sample {
+ // NOTE: Without ST, on query, first histogram sample will get
+ // CounterReset adjusted to 0.
+ firstSample := testHistogram.Copy()
+ firstSample.CounterResetHint = histogram.UnknownCounterReset
+ return []chunks.Sample{
+ sample{t: 100, h: firstSample},
+ }
+ }(),
+ },
+ {
+ name: "ST lower than minValidTime/floathistogram",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fh: testFloatHistogram, st: -1},
+ },
+			// The ST results in ErrOutOfBounds, but the ST append is best effort, so
+			// the ST is ignored and the sample is appended.
+ expectedSamples: func() []chunks.Sample {
+ // NOTE: Without ST, on query, first histogram sample will get
+ // CounterReset adjusted to 0.
+ firstSample := testFloatHistogram.Copy()
+ firstSample.CounterResetHint = histogram.UnknownCounterReset
+ return []chunks.Sample{
+ sample{t: 100, fh: firstSample},
+ }
+ }(),
+ },
+ {
+ name: "ST duplicates an existing sample/float",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fSample: 10},
+ {ts: 200, fSample: 10, st: 100},
+ },
+			// The ST results in ErrDuplicateSampleForTimestamp, but the ST append is best effort, so
+			// the ST is ignored and the sample is appended.
+ expectedSamples: func() []chunks.Sample {
+ return []chunks.Sample{
+ sample{t: 100, f: 10},
+ sample{t: 200, f: 10},
+ }
+ }(),
+ },
+ {
+ name: "ST duplicates an existing sample/histogram",
+ appendableSamples: []appendableSamples{
+ {ts: 100, h: testHistogram},
+ {ts: 200, h: testHistogram, st: 100},
+ },
+			// The ST results in ErrDuplicateSampleForTimestamp, but the ST append is best effort, so
+			// the ST is ignored and the sample is appended.
+ expectedSamples: func() []chunks.Sample {
+ // NOTE: Without ST, on query, first histogram sample will get
+ // CounterReset adjusted to 0.
+ firstSample := testHistogram.Copy()
+ firstSample.CounterResetHint = histogram.UnknownCounterReset
+ return []chunks.Sample{
+ sample{t: 100, h: firstSample},
+ sample{t: 200, h: testHistogram},
+ }
+ }(),
+ },
+ {
+ name: "ST duplicates an existing sample/floathistogram",
+ appendableSamples: []appendableSamples{
+ {ts: 100, fh: testFloatHistogram},
+ {ts: 200, fh: testFloatHistogram, st: 100},
+ },
+			// The ST results in ErrDuplicateSampleForTimestamp, but the ST append is best effort, so
+			// the ST is ignored and the sample is appended.
+ expectedSamples: func() []chunks.Sample {
+ // NOTE: Without ST, on query, first histogram sample will get
+ // CounterReset adjusted to 0.
+ firstSample := testFloatHistogram.Copy()
+ firstSample.CounterResetHint = histogram.UnknownCounterReset
+ return []chunks.Sample{
+ sample{t: 100, fh: firstSample},
+ sample{t: 200, fh: testFloatHistogram},
+ }
+ }(),
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ opts := newTestHeadDefaultOptions(DefaultBlockDuration, false)
+ opts.EnableSTAsZeroSample = true
+ h, _ := newTestHeadWithOptions(t, compression.None, opts)
+ defer func() {
+ require.NoError(t, h.Close())
+ }()
+
+ a := h.AppenderV2(context.Background())
+ lbls := labels.FromStrings("foo", "bar")
+
+ for _, s := range tc.appendableSamples {
+ _, err := a.Append(0, lbls, s.st, s.ts, s.fSample, s.h, s.fh, storage.AOptions{})
+ require.NoError(t, err)
+ }
+ require.NoError(t, a.Commit())
+
+ q, err := NewBlockQuerier(h, math.MinInt64, math.MaxInt64)
+ require.NoError(t, err)
+ result := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+ require.Equal(t, tc.expectedSamples, result[`{foo="bar"}`])
+ })
+ }
+}
+
+// Regression test for data race https://github.com/prometheus/prometheus/issues/15139.
+func TestHeadAppenderV2_Append_HistogramAndCommitConcurrency(t *testing.T) {
+ h := tsdbutil.GenerateTestHistogram(1)
+ fh := tsdbutil.GenerateTestFloatHistogram(1)
+
+ testCases := map[string]func(storage.AppenderV2, int) error{
+ "integer histogram": func(app storage.AppenderV2, i int) error {
+ _, err := app.Append(0, labels.FromStrings("foo", "bar", "serial", strconv.Itoa(i)), 0, 1, 0, h, nil, storage.AOptions{})
+ return err
+ },
+ "float histogram": func(app storage.AppenderV2, i int) error {
+ _, err := app.Append(0, labels.FromStrings("foo", "bar", "serial", strconv.Itoa(i)), 0, 1, 0, nil, fh, storage.AOptions{})
+ return err
+ },
+ }
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ testHeadAppenderV2AppendHistogramAndCommitConcurrency(t, tc)
+ })
+ }
+}
+
+func testHeadAppenderV2AppendHistogramAndCommitConcurrency(t *testing.T, appendFn func(storage.AppenderV2, int) error) {
+ head, _ := newTestHead(t, 1000, compression.None, false)
+ defer func() {
+ require.NoError(t, head.Close())
+ }()
+
+ wg := sync.WaitGroup{}
+ wg.Add(2)
+
+	// How this works: Commit() should be atomic, so one of the commits will be
+	// first and the other second. The first commit creates a new series and
+	// writes a sample. The second commit sees an exact duplicate sample, which
+	// it should ignore, unless a race corrupts memSeries.lastHistogram and makes
+	// the duplicate check fail.
+ go func() {
+ defer wg.Done()
+ for i := range 10000 {
+ app := head.AppenderV2(context.Background())
+ require.NoError(t, appendFn(app, i))
+ require.NoError(t, app.Commit())
+ }
+ }()
+
+ go func() {
+ defer wg.Done()
+ for i := range 10000 {
+ app := head.AppenderV2(context.Background())
+ require.NoError(t, appendFn(app, i))
+ require.NoError(t, app.Commit())
+ }
+ }()
+
+ wg.Wait()
+}
+
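+// TestHeadAppenderV2_NumStaleSeries verifies that Head.NumStaleSeries tracks series whose
+// latest sample is a staleness marker, across floats, histograms and float histograms,
+// and across restarts and garbage collection.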
+func TestHeadAppenderV2_NumStaleSeries(t *testing.T) {
+ head, _ := newTestHead(t, 1000, compression.None, false)
+ t.Cleanup(func() {
+ require.NoError(t, head.Close())
+ })
+ require.NoError(t, head.Init(0))
+
+ // Initially, no series should be stale.
+ require.Equal(t, uint64(0), head.NumStaleSeries())
+
+ appendSample := func(lbls labels.Labels, ts int64, val float64) {
+ app := head.AppenderV2(context.Background())
+ _, err := app.Append(0, lbls, 0, ts, val, nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ }
+ appendHistogram := func(lbls labels.Labels, ts int64, val *histogram.Histogram) {
+ app := head.AppenderV2(context.Background())
+ _, err := app.Append(0, lbls, 0, ts, 0, val, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ }
+ appendFloatHistogram := func(lbls labels.Labels, ts int64, val *histogram.FloatHistogram) {
+ app := head.AppenderV2(context.Background())
+ _, err := app.Append(0, lbls, 0, ts, 0, nil, val, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+ }
+
+ verifySeriesCounts := func(numStaleSeries, numSeries int) {
+ require.Equal(t, uint64(numStaleSeries), head.NumStaleSeries())
+ require.Equal(t, uint64(numSeries), head.NumSeries())
+ }
+
+ restartHeadAndVerifySeriesCounts := func(numStaleSeries, numSeries int) {
+ verifySeriesCounts(numStaleSeries, numSeries)
+
+ require.NoError(t, head.Close())
+
+ wal, err := wlog.NewSize(nil, nil, filepath.Join(head.opts.ChunkDirRoot, "wal"), 32768, compression.None)
+ require.NoError(t, err)
+ head, err = NewHead(nil, nil, wal, nil, head.opts, nil)
+ require.NoError(t, err)
+ require.NoError(t, head.Init(0))
+
+ verifySeriesCounts(numStaleSeries, numSeries)
+ }
+
+ // Create some series with normal samples.
+ series1 := labels.FromStrings("name", "series1", "label", "value1")
+ series2 := labels.FromStrings("name", "series2", "label", "value2")
+ series3 := labels.FromStrings("name", "series3", "label", "value3")
+
+ // Add normal samples to all series.
+ appendSample(series1, 100, 1)
+ appendSample(series2, 100, 2)
+ appendSample(series3, 100, 3)
+ // Still no stale series.
+ verifySeriesCounts(0, 3)
+
+ // Make series1 stale by appending a stale sample. Now we should have 1 stale series.
+ appendSample(series1, 200, math.Float64frombits(value.StaleNaN))
+ verifySeriesCounts(1, 3)
+
+ // Make series2 stale as well.
+ appendSample(series2, 200, math.Float64frombits(value.StaleNaN))
+ verifySeriesCounts(2, 3)
+ restartHeadAndVerifySeriesCounts(2, 3)
+
+ // Add a non-stale sample to series1. It should not be counted as stale now.
+ appendSample(series1, 300, 10)
+ verifySeriesCounts(1, 3)
+ restartHeadAndVerifySeriesCounts(1, 3)
+
+ // Test that series3 doesn't become stale when we add another normal sample.
+ appendSample(series3, 200, 10)
+ verifySeriesCounts(1, 3)
+
+ // Test histogram stale samples as well.
+ series4 := labels.FromStrings("name", "series4", "type", "histogram")
+ h := tsdbutil.GenerateTestHistograms(1)[0]
+ appendHistogram(series4, 100, h)
+ verifySeriesCounts(1, 4)
+
+ // Make histogram series stale.
+ staleHist := h.Copy()
+ staleHist.Sum = math.Float64frombits(value.StaleNaN)
+ appendHistogram(series4, 200, staleHist)
+ verifySeriesCounts(2, 4)
+
+ // Test float histogram stale samples.
+ series5 := labels.FromStrings("name", "series5", "type", "float_histogram")
+ fh := tsdbutil.GenerateTestFloatHistograms(1)[0]
+ appendFloatHistogram(series5, 100, fh)
+ verifySeriesCounts(2, 5)
+ restartHeadAndVerifySeriesCounts(2, 5)
+
+ // Make float histogram series stale.
+ staleFH := fh.Copy()
+ staleFH.Sum = math.Float64frombits(value.StaleNaN)
+ appendFloatHistogram(series5, 200, staleFH)
+ verifySeriesCounts(3, 5)
+
+ // Make histogram sample non-stale and stale back again.
+ appendHistogram(series4, 210, h)
+ verifySeriesCounts(2, 5)
+ appendHistogram(series4, 220, staleHist)
+ verifySeriesCounts(3, 5)
+
+ // Make float histogram sample non-stale and stale back again.
+ appendFloatHistogram(series5, 210, fh)
+ verifySeriesCounts(2, 5)
+ appendFloatHistogram(series5, 220, staleFH)
+ verifySeriesCounts(3, 5)
+
+ // Series 1 and 3 are not stale at this point. Add a new sample to series 1 and series 5,
+ // so after the GC and removing series 2, 3, 4, we should be left with 1 stale and 1 non-stale series.
+ appendSample(series1, 400, 10)
+ appendFloatHistogram(series5, 400, staleFH)
+ restartHeadAndVerifySeriesCounts(3, 5)
+
+ // This will test restarting with snapshot.
+ head.opts.EnableMemorySnapshotOnShutdown = true
+ restartHeadAndVerifySeriesCounts(3, 5)
+
+ // Test garbage collection behavior - stale series should be decremented when GC'd.
+ // Force a garbage collection by truncating old data.
+ require.NoError(t, head.Truncate(300))
+
+ // After truncation, run GC to collect old chunks/series.
+ head.gc()
+
+ // series 1 and series 5 are left.
+ verifySeriesCounts(1, 2)
+
+	// Test creating a new series for each of float, histogram, and float histogram that starts as stale.
+	// Each should be counted as stale.
+ series6 := labels.FromStrings("name", "series6", "direct", "stale")
+ series7 := labels.FromStrings("name", "series7", "direct", "stale")
+ series8 := labels.FromStrings("name", "series8", "direct", "stale")
+ appendSample(series6, 400, math.Float64frombits(value.StaleNaN))
+ verifySeriesCounts(2, 3)
+ appendHistogram(series7, 400, staleHist)
+ verifySeriesCounts(3, 4)
+ appendFloatHistogram(series8, 400, staleFH)
+ verifySeriesCounts(4, 5)
+}
+
+// TestHeadAppenderV2_Append_HistogramStalenessConversionMetrics verifies that staleness marker
+// conversion correctly increments the right appender metrics for both histogram and float
+// histogram scenarios.
+func TestHeadAppenderV2_Append_HistogramStalenessConversionMetrics(t *testing.T) {
+ testCases := []struct {
+ name string
+ setupHistogram func(app storage.AppenderV2, lbls labels.Labels) error
+ }{
+ {
+ name: "float_staleness_to_histogram",
+ setupHistogram: func(app storage.AppenderV2, lbls labels.Labels) error {
+ _, err := app.Append(0, lbls, 0, 1000, 0, tsdbutil.GenerateTestHistograms(1)[0], nil, storage.AOptions{})
+ return err
+ },
+ },
+ {
+ name: "float_staleness_to_float_histogram",
+ setupHistogram: func(app storage.AppenderV2, lbls labels.Labels) error {
+ _, err := app.Append(0, lbls, 0, 1000, 0, nil, tsdbutil.GenerateTestFloatHistograms(1)[0], storage.AOptions{})
+ return err
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ head, _ := newTestHead(t, 1000, compression.None, false)
+ defer func() {
+ require.NoError(t, head.Close())
+ }()
+
+ lbls := labels.FromStrings("name", tc.name)
+
+			// Helper to read the samplesAppended counter for a given sample type.
+ getSampleCounter := func(sampleType string) float64 {
+ metric := &dto.Metric{}
+ err := head.metrics.samplesAppended.WithLabelValues(sampleType).Write(metric)
+ require.NoError(t, err)
+ return metric.GetCounter().GetValue()
+ }
+
+			// Step 1: Establish a series with histogram data.
+ app := head.AppenderV2(context.Background())
+ err := tc.setupHistogram(app, lbls)
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+			// Step 2: Add a float staleness marker.
+ app = head.AppenderV2(context.Background())
+ _, err = app.Append(0, lbls, 0, 2000, math.Float64frombits(value.StaleNaN), nil, nil, storage.AOptions{})
+ require.NoError(t, err)
+ require.NoError(t, app.Commit())
+
+			// Count what was actually stored by querying the series.
+ q, err := NewBlockQuerier(head, 0, 3000)
+ require.NoError(t, err)
+ defer q.Close()
+
+ ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "name", tc.name))
+ require.True(t, ss.Next())
+ series := ss.At()
+
+ it := series.Iterator(nil)
+
+ actualFloatSamples := 0
+ actualHistogramSamples := 0
+
+ for valType := it.Next(); valType != chunkenc.ValNone; valType = it.Next() {
+ switch valType {
+ case chunkenc.ValFloat:
+ actualFloatSamples++
+ case chunkenc.ValHistogram, chunkenc.ValFloatHistogram:
+ actualHistogramSamples++
+ }
+ }
+ require.NoError(t, it.Err())
+
+			// Verify what was actually stored: 0 float samples and 2 histogram samples (the original plus the converted staleness marker).
+ require.Equal(t, 0, actualFloatSamples, "Should have 0 float samples stored")
+ require.Equal(t, 2, actualHistogramSamples, "Should have 2 histogram samples: original + converted staleness marker")
+
+ // The metrics should match what was actually stored
+ require.Equal(t, float64(actualFloatSamples), getSampleCounter(sampleMetricTypeFloat),
+ "Float counter should match actual float samples stored")
+ require.Equal(t, float64(actualHistogramSamples), getSampleCounter(sampleMetricTypeHistogram),
+ "Histogram counter should match actual histogram samples stored")
+ })
+ }
+}
diff --git a/tsdb/head_bench_test.go b/tsdb/head_bench_test.go
index c98fb6613d..dc0be0823a 100644
--- a/tsdb/head_bench_test.go
+++ b/tsdb/head_bench_test.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -14,7 +14,6 @@
package tsdb
import (
- "context"
"errors"
"fmt"
"math/rand"
@@ -32,6 +31,228 @@ import (
"github.com/prometheus/prometheus/util/compression"
)
+type benchAppendFunc func(b *testing.B, h *Head, ts int64, series []storage.Series, samplesPerAppend int64) storage.AppenderTransaction
+
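+// appendV1Float appends samplesPerAppend float samples per series, starting at ts, using the
+// v1 Appender, and returns the open transaction for the caller to commit.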
+func appendV1Float(b *testing.B, h *Head, ts int64, series []storage.Series, samplesPerAppend int64) storage.AppenderTransaction {
+ var err error
+ app := h.Appender(b.Context())
+ for _, s := range series {
+ var ref storage.SeriesRef
+ for sampleIndex := range samplesPerAppend {
+ ref, err = app.Append(ref, s.Labels(), ts+sampleIndex, float64(ts+sampleIndex))
+ require.NoError(b, err)
+ }
+ }
+ return app
+}
+
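+// appendV2Float is the AppenderV2 counterpart of appendV1Float: the same float samples,
+// appended through the v2 Append signature.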
+func appendV2Float(b *testing.B, h *Head, ts int64, series []storage.Series, samplesPerAppend int64) storage.AppenderTransaction {
+ var err error
+ app := h.AppenderV2(b.Context())
+ for _, s := range series {
+ var ref storage.SeriesRef
+ for sampleIndex := range samplesPerAppend {
+ ref, err = app.Append(ref, s.Labels(), 0, ts+sampleIndex, float64(ts+sampleIndex), nil, nil, storage.AOptions{})
+ require.NoError(b, err)
+ }
+ }
+ return app
+}
+
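+// appendV1FloatOrHistogramWithExemplars alternates between float samples (even series, one
+// exemplar each) and native histogram samples (odd series, three exemplars each) using the
+// v1 Appender.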
+func appendV1FloatOrHistogramWithExemplars(b *testing.B, h *Head, ts int64, series []storage.Series, samplesPerAppend int64) storage.AppenderTransaction {
+ var err error
+ app := h.Appender(b.Context())
+ for i, s := range series {
+ var ref storage.SeriesRef
+ for sampleIndex := range samplesPerAppend {
+			// If i is even, append a float sample; otherwise append a histogram.
+ if i%2 == 0 {
+ ref, err = app.Append(ref, s.Labels(), ts+sampleIndex, float64(ts+sampleIndex))
+ require.NoError(b, err)
+ // Every sample also has an exemplar attached.
+ _, err = app.AppendExemplar(ref, s.Labels(), exemplar.Exemplar{
+ Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
+ Value: rand.Float64(),
+ Ts: ts + sampleIndex,
+ })
+ require.NoError(b, err)
+ continue
+ }
+
+ h := &histogram.Histogram{
+ Count: 7 + uint64(ts*5),
+ ZeroCount: 2 + uint64(ts),
+ ZeroThreshold: 0.001,
+ Sum: 18.4 * rand.Float64(),
+ Schema: 1,
+ PositiveSpans: []histogram.Span{
+ {Offset: 0, Length: 2},
+ {Offset: 1, Length: 2},
+ },
+ PositiveBuckets: []int64{ts + 1, 1, -1, 0},
+ }
+ ref, err = app.AppendHistogram(ref, s.Labels(), ts, h, nil)
+ require.NoError(b, err)
+ // Every histogram sample also has 3 exemplars attached.
+ _, err = app.AppendExemplar(ref, s.Labels(), exemplar.Exemplar{
+ Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
+ Value: rand.Float64(),
+ Ts: ts + sampleIndex,
+ })
+ require.NoError(b, err)
+ _, err = app.AppendExemplar(ref, s.Labels(), exemplar.Exemplar{
+ Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
+ Value: rand.Float64(),
+ Ts: ts + sampleIndex,
+ })
+ require.NoError(b, err)
+ _, err = app.AppendExemplar(ref, s.Labels(), exemplar.Exemplar{
+ Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
+ Value: rand.Float64(),
+ Ts: ts + sampleIndex,
+ })
+ require.NoError(b, err)
+ }
+ }
+ return app
+}
+
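+// appendV2FloatOrHistogramWithExemplars mirrors appendV1FloatOrHistogramWithExemplars, but goes
+// through AppenderV2, passing exemplars via AOptions instead of separate AppendExemplar calls.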
+func appendV2FloatOrHistogramWithExemplars(b *testing.B, h *Head, ts int64, series []storage.Series, samplesPerAppend int64) storage.AppenderTransaction {
+ var (
+ err error
+ ex = make([]exemplar.Exemplar, 3)
+ )
+
+ app := h.AppenderV2(b.Context())
+ for i, s := range series {
+ var ref storage.SeriesRef
+ for sampleIndex := range samplesPerAppend {
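+			// Reuse the same exemplar backing array on every append to avoid per-sample allocations.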
+ aOpts := storage.AOptions{Exemplars: ex[:0]}
+
+			// If i is even, append a float sample; otherwise append a histogram.
+ if i%2 == 0 {
+ // Every sample also has an exemplar attached.
+ aOpts.Exemplars = append(aOpts.Exemplars, exemplar.Exemplar{
+ Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
+ Value: rand.Float64(),
+ Ts: ts + sampleIndex,
+ })
+				ref, err = app.Append(ref, s.Labels(), 0, ts+sampleIndex, float64(ts+sampleIndex), nil, nil, aOpts)
+ require.NoError(b, err)
+ continue
+ }
+ h := &histogram.Histogram{
+ Count: 7 + uint64(ts*5),
+ ZeroCount: 2 + uint64(ts),
+ ZeroThreshold: 0.001,
+ Sum: 18.4 * rand.Float64(),
+ Schema: 1,
+ PositiveSpans: []histogram.Span{
+ {Offset: 0, Length: 2},
+ {Offset: 1, Length: 2},
+ },
+ PositiveBuckets: []int64{ts + 1, 1, -1, 0},
+ }
+
+ // Every histogram sample also has 3 exemplars attached.
+ aOpts.Exemplars = append(aOpts.Exemplars,
+ exemplar.Exemplar{
+ Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
+ Value: rand.Float64(),
+ Ts: ts + sampleIndex,
+ },
+ exemplar.Exemplar{
+ Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
+ Value: rand.Float64(),
+ Ts: ts + sampleIndex,
+ },
+ exemplar.Exemplar{
+ Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
+ Value: rand.Float64(),
+ Ts: ts + sampleIndex,
+ },
+ )
+ ref, err = app.Append(ref, s.Labels(), 0, ts, 0, h, nil, aOpts)
+ require.NoError(b, err)
+ }
+ }
+ return app
+}
+
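+// appendCase names a benchmark sub-case and the append function that implements it.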
+type appendCase struct {
+ name string
+ appendFunc benchAppendFunc
+}
+
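+// appendCases lists the appender version / sample type combinations exercised by
+// BenchmarkHeadAppender_AppendCommit.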
+func appendCases() []appendCase {
+ return []appendCase{
+ {
+ name: "appender=v1/case=floats",
+ appendFunc: appendV1Float,
+ },
+ {
+ name: "appender=v2/case=floats",
+ appendFunc: appendV2Float,
+ },
+ {
+ name: "appender=v1/case=floatsHistogramsExemplars",
+ appendFunc: appendV1FloatOrHistogramWithExemplars,
+ },
+ {
+ name: "appender=v2/case=floatsHistogramsExemplars",
+ appendFunc: appendV2FloatOrHistogramWithExemplars,
+ },
+ }
+}
+
+/*
+ export bench=append && go test \
+ -run '^$' -bench '^BenchmarkHeadAppender_AppendCommit$' \
+ -benchtime 5s -count 6 -cpu 2 -timeout 999m \
+ | tee ${bench}.txt
+*/
+func BenchmarkHeadAppender_AppendCommit(b *testing.B) {
+	// NOTE(bwplotka): Previously we also had 1k and 10k series cases. Nothing special
+	// happens between 100, 1k and 10k series, so we save a considerable amount of benchmark
+	// time for quicker feedback. In return, we add more sample type cases.
+	// Similarly, we removed the 2-samples-per-append case.
+	//
+	// TODO(bwplotka): This still takes ~6500s (~2h) for -benchtime 5s -count 6 to complete.
+	// We might want to reduce the time a bit more. The 5s benchtime matters because the slowest
+	// case (appender=v1/case=floatsHistogramsExemplars/series=100/samples_per_append=100-2)
+	// yields only 255 iterations at 23184892 ns/op in 5s. Perhaps -benchtime=300x would be better?
+ seriesCounts := []int{10, 100}
+ series := genSeries(100, 10, 0, 0) // Only using the generated labels.
+ for _, appendCase := range appendCases() {
+ for _, seriesCount := range seriesCounts {
+ for _, samplesPerAppend := range []int64{1, 5, 100} {
+ b.Run(fmt.Sprintf("%s/series=%d/samples_per_append=%d", appendCase.name, seriesCount, samplesPerAppend), func(b *testing.B) {
+ opts := newTestHeadDefaultOptions(10000, false)
+				opts.EnableExemplarStorage = true // Some cases append exemplars, so enable their storage.
+ h, _ := newTestHeadWithOptions(b, compression.None, opts)
+ b.Cleanup(func() { require.NoError(b, h.Close()) })
+
+ ts := int64(1000)
+
+				// Initialize the series up front; series creation is not what we're benchmarking here.
+ app := appendCase.appendFunc(b, h, ts, series[:seriesCount], samplesPerAppend)
+ require.NoError(b, app.Commit())
+				ts += 1000 // Must increase by more than the highest samplesPerAppend.
+
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for b.Loop() {
+ app := appendCase.appendFunc(b, h, ts, series[:seriesCount], samplesPerAppend)
+ require.NoError(b, app.Commit())
+					ts += 1000 // Must increase by more than the highest samplesPerAppend.
+ }
+ })
+ }
+ }
+ }
+}
+
func BenchmarkHeadStripeSeriesCreate(b *testing.B) {
chunkDir := b.TempDir()
// Put a series, select it. GC it and then access it.
@@ -86,86 +307,6 @@ func BenchmarkHeadStripeSeriesCreate_PreCreationFailure(b *testing.B) {
}
}
-func BenchmarkHead_WalCommit(b *testing.B) {
- seriesCounts := []int{100, 1000, 10000}
- series := genSeries(10000, 10, 0, 0) // Only using the generated labels.
-
- appendSamples := func(b *testing.B, app storage.Appender, seriesCount int, ts int64) {
- var err error
- for i, s := range series[:seriesCount] {
- var ref storage.SeriesRef
- // if i is even, append a sample, else append a histogram.
- if i%2 == 0 {
- ref, err = app.Append(ref, s.Labels(), ts, float64(ts))
- } else {
- h := &histogram.Histogram{
- Count: 7 + uint64(ts*5),
- ZeroCount: 2 + uint64(ts),
- ZeroThreshold: 0.001,
- Sum: 18.4 * rand.Float64(),
- Schema: 1,
- PositiveSpans: []histogram.Span{
- {Offset: 0, Length: 2},
- {Offset: 1, Length: 2},
- },
- PositiveBuckets: []int64{ts + 1, 1, -1, 0},
- }
- ref, err = app.AppendHistogram(ref, s.Labels(), ts, h, nil)
- }
- require.NoError(b, err)
-
- _, err = app.AppendExemplar(ref, s.Labels(), exemplar.Exemplar{
- Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
- Value: rand.Float64(),
- Ts: ts,
- })
- require.NoError(b, err)
- }
- }
-
- for _, seriesCount := range seriesCounts {
- b.Run(fmt.Sprintf("%d series", seriesCount), func(b *testing.B) {
- for _, commits := range []int64{1, 2} { // To test commits that create new series and when the series already exists.
- b.Run(fmt.Sprintf("%d commits", commits), func(b *testing.B) {
- b.ReportAllocs()
- b.ResetTimer()
-
- for b.Loop() {
- b.StopTimer()
- h, w := newTestHead(b, 10000, compression.None, false)
- b.Cleanup(func() {
- if h != nil {
- h.Close()
- }
- if w != nil {
- w.Close()
- }
- })
- app := h.Appender(context.Background())
-
- appendSamples(b, app, seriesCount, 0)
-
- b.StartTimer()
- require.NoError(b, app.Commit())
- if commits == 2 {
- b.StopTimer()
- app = h.Appender(context.Background())
- appendSamples(b, app, seriesCount, 1)
- b.StartTimer()
- require.NoError(b, app.Commit())
- }
- b.StopTimer()
- h.Close()
- h = nil
- w.Close()
- w = nil
- }
- })
- }
- })
- }
-}
-
type failingSeriesLifecycleCallback struct{}
func (failingSeriesLifecycleCallback) PreCreation(labels.Labels) error { return errors.New("failed") }
diff --git a/tsdb/head_dedupelabels.go b/tsdb/head_dedupelabels.go
index a75f337224..f8bcec2e78 100644
--- a/tsdb/head_dedupelabels.go
+++ b/tsdb/head_dedupelabels.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/head_other.go b/tsdb/head_other.go
index 7e1eea8b05..d6d5795e20 100644
--- a/tsdb/head_other.go
+++ b/tsdb/head_other.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/head_read.go b/tsdb/head_read.go
index c66a6d1738..48f842430a 100644
--- a/tsdb/head_read.go
+++ b/tsdb/head_read.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -365,21 +365,6 @@ func unpackHeadChunkRef(ref chunks.ChunkRef) (seriesID chunks.HeadSeriesRef, chu
return sid, (cid & (oooChunkIDMask - 1)), (cid & oooChunkIDMask) != 0
}
-// LabelValueFor returns label value for the given label name in the series referred to by ID.
-func (h *headIndexReader) LabelValueFor(_ context.Context, id storage.SeriesRef, label string) (string, error) {
- memSeries := h.head.series.getByID(chunks.HeadSeriesRef(id))
- if memSeries == nil {
- return "", storage.ErrNotFound
- }
-
- value := memSeries.labels().Get(label)
- if value == "" {
- return "", storage.ErrNotFound
- }
-
- return value, nil
-}
-
// LabelNamesFor returns all the label names for the series referred to by the postings.
// The names returned are sorted.
func (h *headIndexReader) LabelNamesFor(ctx context.Context, series index.Postings) ([]string, error) {
diff --git a/tsdb/head_read_test.go b/tsdb/head_read_test.go
index b9f1700706..cf55973a01 100644
--- a/tsdb/head_read_test.go
+++ b/tsdb/head_read_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/head_test.go b/tsdb/head_test.go
index f73be51235..ce4bb6d8e7 100644
--- a/tsdb/head_test.go
+++ b/tsdb/head_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -107,49 +107,6 @@ func BenchmarkCreateSeries(b *testing.B) {
}
}
-func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) {
- seriesCounts := []int{100, 1000, 10000}
- series := genSeries(10000, 10, 0, 0)
-
- for _, seriesCount := range seriesCounts {
- b.Run(fmt.Sprintf("%d series", seriesCount), func(b *testing.B) {
- for _, samplesPerAppend := range []int64{1, 2, 5, 100} {
- b.Run(fmt.Sprintf("%d samples per append", samplesPerAppend), func(b *testing.B) {
- h, _ := newTestHead(b, 10000, compression.None, false)
- b.Cleanup(func() { require.NoError(b, h.Close()) })
-
- ts := int64(1000)
- appendSamples := func() error {
- var err error
- app := h.Appender(context.Background())
- for _, s := range series[:seriesCount] {
- var ref storage.SeriesRef
- for sampleIndex := range samplesPerAppend {
- ref, err = app.Append(ref, s.Labels(), ts+sampleIndex, float64(ts+sampleIndex))
- if err != nil {
- return err
- }
- }
- }
- ts += 1000 // should increment more than highest samplesPerAppend
- return app.Commit()
- }
-
- // Init series, that's not what we're benchmarking here.
- require.NoError(b, appendSamples())
-
- b.ReportAllocs()
- b.ResetTimer()
-
- for b.Loop() {
- require.NoError(b, appendSamples())
- }
- })
- }
- })
- }
-}
-
func populateTestWL(t testing.TB, w *wlog.WL, recs []any, buf []byte) []byte {
var enc record.Encoder
for _, r := range recs {
@@ -5941,7 +5898,7 @@ func TestOOOAppendWithNoSeries(t *testing.T) {
}
}
-func testOOOAppendWithNoSeries(t *testing.T, appendFunc func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error)) {
+func testOOOAppendWithNoSeries(t *testing.T, appendFunc func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error)) {
dir := t.TempDir()
wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, compression.Snappy)
require.NoError(t, err)
@@ -6284,6 +6241,7 @@ func TestSnapshotAheadOfWALError(t *testing.T) {
require.NoError(t, head.Close())
}
+// TODO(bwplotka): Bad benchmark (it doesn't use b.Loop/b.N); fix or remove.
func BenchmarkCuttingHeadHistogramChunks(b *testing.B) {
const (
numSamples = 50000
@@ -6525,19 +6483,19 @@ func TestWALSampleAndExemplarOrder(t *testing.T) {
appendF: func(app storage.Appender, ts int64) (storage.SeriesRef, error) {
return app.Append(0, lbls, ts, 1.0)
},
- expectedType: reflect.TypeOf([]record.RefSample{}),
+ expectedType: reflect.TypeFor[[]record.RefSample](),
},
"histogram sample": {
appendF: func(app storage.Appender, ts int64) (storage.SeriesRef, error) {
return app.AppendHistogram(0, lbls, ts, tsdbutil.GenerateTestHistogram(1), nil)
},
- expectedType: reflect.TypeOf([]record.RefHistogramSample{}),
+ expectedType: reflect.TypeFor[[]record.RefHistogramSample](),
},
"float histogram sample": {
appendF: func(app storage.Appender, ts int64) (storage.SeriesRef, error) {
return app.AppendHistogram(0, lbls, ts, nil, tsdbutil.GenerateTestFloatHistogram(1))
},
- expectedType: reflect.TypeOf([]record.RefFloatHistogramSample{}),
+ expectedType: reflect.TypeFor[[]record.RefFloatHistogramSample](),
},
}
@@ -6579,6 +6537,8 @@ func TestWALSampleAndExemplarOrder(t *testing.T) {
// would trigger the
// `signal SIGSEGV: segmentation violation code=0x1 addr=0x20 pc=0xbb03d1`
// panic, that we have seen in the wild once.
+//
+// TODO(bwplotka): This can no longer happen in AppenderV2; remove once AppenderV1 is removed, see #17632.
func TestHeadCompactionWhileAppendAndCommitExemplar(t *testing.T) {
h, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
app := h.Appender(context.Background())
diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go
index 2ccd8a79eb..5802a570e7 100644
--- a/tsdb/head_wal.go
+++ b/tsdb/head_wal.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -336,7 +336,7 @@ Outer:
}
}
- for i := 0; i < concurrency; i++ {
+ for i := range concurrency {
if len(deleteSeriesShards[i]) > 0 {
processors[i].input <- walSubsetProcessorInputItem{deletedSeriesRefs: deleteSeriesShards[i]}
deleteSeriesShards[i] = nil
diff --git a/tsdb/index/index.go b/tsdb/index/index.go
index 28eacd7c00..8a76770821 100644
--- a/tsdb/index/index.go
+++ b/tsdb/index/index.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -17,6 +17,7 @@ import (
"bufio"
"context"
"encoding/binary"
+ "errors"
"fmt"
"hash"
"hash/crc32"
@@ -94,6 +95,13 @@ func (s indexWriterStage) String() string {
return ""
}
+// ErrPostingsOffsetTableTooLarge is returned when the postings offset table length does not fit
+// in 4 bytes (i.e. the table would exceed the 4GiB limit).
+var ErrPostingsOffsetTableTooLarge = errors.New("length size exceeds 4 bytes")
+
+// ErrIndexExceeds64GiB is returned when the index file would exceed the 64GiB limit.
+var ErrIndexExceeds64GiB = errors.New("exceeding max size of 64GiB")
+
// The table gets initialized with sync.Once but may still cause a race
// with any other use of the crc32 package anywhere. Thus we initialize it
// before.
@@ -303,7 +311,7 @@ func (fw *FileWriter) Write(bufs ...[]byte) error {
// Once we move to compressed/varint representations in those areas, this limitation
// can be lifted.
if fw.pos > 16*math.MaxUint32 {
- return fmt.Errorf("%q exceeding max size of 64GiB", fw.name)
+ return fmt.Errorf("%q %w", fw.name, ErrIndexExceeds64GiB)
}
}
return nil
@@ -660,7 +668,7 @@ func (w *Writer) writeLengthAndHash(startPos uint64) error {
w.buf1.Reset()
l := w.f.pos - startPos - 4
if l > math.MaxUint32 {
- return fmt.Errorf("length size exceeds 4 bytes: %d", l)
+ return fmt.Errorf("%w: %d", ErrPostingsOffsetTableTooLarge, l)
}
w.buf1.PutBE32int(int(l))
if err := w.writeAt(w.buf1.Get(), startPos); err != nil {
@@ -1447,32 +1455,6 @@ func (r *Reader) LabelNamesFor(ctx context.Context, postings Postings) ([]string
return names, nil
}
-// LabelValueFor returns label value for the given label name in the series referred to by ID.
-func (r *Reader) LabelValueFor(ctx context.Context, id storage.SeriesRef, label string) (string, error) {
- offset := id
- // In version 2 series IDs are no longer exact references but series are 16-byte padded
- // and the ID is the multiple of 16 of the actual position.
- if r.version != FormatV1 {
- offset = id * seriesByteAlign
- }
- d := encoding.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable)
- buf := d.Get()
- if d.Err() != nil {
- return "", fmt.Errorf("label values for: %w", d.Err())
- }
-
- value, err := r.dec.LabelValueFor(ctx, buf, label)
- if err != nil {
- return "", storage.ErrNotFound
- }
-
- if value == "" {
- return "", storage.ErrNotFound
- }
-
- return value, nil
-}
-
// Series reads the series with the given ID and writes its labels and chunks into builder and chks.
func (r *Reader) Series(id storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
offset := id
@@ -1809,37 +1791,6 @@ func (*Decoder) LabelNamesOffsetsFor(b []byte) ([]uint32, error) {
return offsets, d.Err()
}
-// LabelValueFor decodes a label for a given series.
-func (dec *Decoder) LabelValueFor(ctx context.Context, b []byte, label string) (string, error) {
- d := encoding.Decbuf{B: b}
- k := d.Uvarint()
-
- for range k {
- lno := uint32(d.Uvarint())
- lvo := uint32(d.Uvarint())
-
- if d.Err() != nil {
- return "", fmt.Errorf("read series label offsets: %w", d.Err())
- }
-
- ln, err := dec.LookupSymbol(ctx, lno)
- if err != nil {
- return "", fmt.Errorf("lookup label name: %w", err)
- }
-
- if ln == label {
- lv, err := dec.LookupSymbol(ctx, lvo)
- if err != nil {
- return "", fmt.Errorf("lookup label value: %w", err)
- }
-
- return lv, nil
- }
- }
-
- return "", d.Err()
-}
-
// Series decodes a series entry from the given byte slice into builder and chks.
// Previous contents of builder can be overwritten - make sure you copy before retaining.
// Skips reading chunks metadata if chks is nil.
diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go
index 9013a1d5cd..20399dcdcf 100644
--- a/tsdb/index/index_test.go
+++ b/tsdb/index/index_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go
index 665a241c34..31b93f850d 100644
--- a/tsdb/index/postings.go
+++ b/tsdb/index/postings.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -391,7 +391,7 @@ func (p *MemPostings) Iter(f func(labels.Label, Postings) error) error {
for n, e := range p.m {
for v, p := range e {
- if err := f(labels.Label{Name: n, Value: v}, newListPostings(p...)); err != nil {
+ if err := f(labels.Label{Name: n, Value: v}, NewListPostings(p)); err != nil {
return err
}
}
@@ -478,8 +478,8 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string,
}
// Now `vals` only contains the values that matched, get their postings.
- its := make([]*ListPostings, 0, len(vals))
- lps := make([]ListPostings, len(vals))
+ its := make([]*listPostings, 0, len(vals))
+ lps := make([]listPostings, len(vals))
p.mtx.RLock()
e := p.m[name]
for i, v := range vals {
@@ -488,7 +488,7 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string,
// If we didn't let the mutex go, we'd have these postings here, but they would be pointing nowhere
// because there would be a `MemPostings.Delete()` call waiting for the lock to delete these labels,
// because the series were deleted already.
- lps[i] = ListPostings{list: refs}
+ lps[i] = listPostings{list: refs}
its = append(its, &lps[i])
}
}
@@ -500,13 +500,13 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string,
// Postings returns a postings iterator for the given label values.
func (p *MemPostings) Postings(ctx context.Context, name string, values ...string) Postings {
- res := make([]*ListPostings, 0, len(values))
- lps := make([]ListPostings, len(values))
+ res := make([]*listPostings, 0, len(values))
+ lps := make([]listPostings, len(values))
p.mtx.RLock()
postingsMapForName := p.m[name]
for i, value := range values {
if lp := postingsMapForName[value]; lp != nil {
- lps[i] = ListPostings{list: lp}
+ lps[i] = listPostings{list: lp}
res = append(res, &lps[i])
}
}
@@ -518,12 +518,12 @@ func (p *MemPostings) PostingsForAllLabelValues(ctx context.Context, name string
p.mtx.RLock()
e := p.m[name]
- its := make([]*ListPostings, 0, len(e))
- lps := make([]ListPostings, len(e))
+ its := make([]*listPostings, 0, len(e))
+ lps := make([]listPostings, len(e))
i := 0
for _, refs := range e {
if len(refs) > 0 {
- lps[i] = ListPostings{list: refs}
+ lps[i] = listPostings{list: refs}
its = append(its, &lps[i])
}
i++
@@ -542,7 +542,7 @@ func ExpandPostings(p Postings) (res []storage.SeriesRef, err error) {
return res, p.Err()
}
-// Postings provides iterative access over a postings list.
+// Postings provides iterative access over an ordered list of SeriesRef.
type Postings interface {
// Next advances the iterator and returns true if another value was found.
Next() bool
@@ -827,25 +827,23 @@ func (rp *removedPostings) Err() error {
return rp.remove.Err()
}
-// ListPostings implements the Postings interface over a plain list.
-type ListPostings struct {
+// listPostings implements the Postings interface over a plain list.
+type listPostings struct {
list []storage.SeriesRef
cur storage.SeriesRef
}
+// NewListPostings creates a Postings from the supplied SeriesRefs, which must be in order.
+// The list slice passed in is retained.
func NewListPostings(list []storage.SeriesRef) Postings {
- return newListPostings(list...)
+ return &listPostings{list: list}
}
-func newListPostings(list ...storage.SeriesRef) *ListPostings {
- return &ListPostings{list: list}
-}
-
-func (it *ListPostings) At() storage.SeriesRef {
+func (it *listPostings) At() storage.SeriesRef {
return it.cur
}
-func (it *ListPostings) Next() bool {
+func (it *listPostings) Next() bool {
if len(it.list) > 0 {
it.cur = it.list[0]
it.list = it.list[1:]
@@ -855,7 +853,7 @@ func (it *ListPostings) Next() bool {
return false
}
-func (it *ListPostings) Seek(x storage.SeriesRef) bool {
+func (it *listPostings) Seek(x storage.SeriesRef) bool {
// If the current value satisfies, then return.
if it.cur >= x {
return true
@@ -877,12 +875,12 @@ func (it *ListPostings) Seek(x storage.SeriesRef) bool {
return true
}
-func (*ListPostings) Err() error {
+func (*listPostings) Err() error {
return nil
}
// Len returns the remaining number of postings in the list.
-func (it *ListPostings) Len() int {
+func (it *listPostings) Len() int {
return len(it.list)
}
diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go
index 56c0f02455..77b43f76ab 100644
--- a/tsdb/index/postings_test.go
+++ b/tsdb/index/postings_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -20,6 +20,7 @@ import (
"errors"
"fmt"
"math/rand"
+ "slices"
"sort"
"strconv"
"strings"
@@ -62,9 +63,7 @@ func TestMemPostings_ensureOrder(t *testing.T) {
for _, e := range p.m {
for _, l := range e {
- ok := sort.SliceIsSorted(l, func(i, j int) bool {
- return l[i] < l[j]
- })
+ ok := slices.IsSorted(l)
require.True(t, ok, "postings list %v is not sorted", l)
}
}
@@ -285,9 +284,16 @@ func consumePostings(p Postings) error {
return p.Err()
}
+func newListPostings(list ...storage.SeriesRef) *listPostings {
+ if !slices.IsSorted(list) {
+ panic("newListPostings: list is not sorted")
+ }
+ return &listPostings{list: list}
+}
+
// Create ListPostings for a benchmark, collecting the original sets of references
// so they can be reset without additional memory allocations.
-func createPostings(lps *[]*ListPostings, refs *[][]storage.SeriesRef, params ...storage.SeriesRef) {
+func createPostings(lps *[]*listPostings, refs *[][]storage.SeriesRef, params ...storage.SeriesRef) {
var temp []storage.SeriesRef
for i := 0; i < len(params); i += 3 {
for j := params[i]; j < params[i+1]; j += params[i+2] {
@@ -299,7 +305,7 @@ func createPostings(lps *[]*ListPostings, refs *[][]storage.SeriesRef, params ..
}
// Reset the ListPostings to their original values each time round the benchmark loop.
-func resetPostings(its []Postings, lps []*ListPostings, refs [][]storage.SeriesRef) {
+func resetPostings(its []Postings, lps []*listPostings, refs [][]storage.SeriesRef) {
for j := range refs {
lps[j].list = refs[j]
its[j] = lps[j]
@@ -308,7 +314,7 @@ func resetPostings(its []Postings, lps []*ListPostings, refs [][]storage.SeriesR
func BenchmarkIntersect(t *testing.B) {
t.Run("LongPostings1", func(bench *testing.B) {
- var lps []*ListPostings
+ var lps []*listPostings
var refs [][]storage.SeriesRef
createPostings(&lps, &refs, 0, 10000000, 2)
createPostings(&lps, &refs, 5000000, 5000100, 4, 5090000, 5090600, 4)
@@ -327,7 +333,7 @@ func BenchmarkIntersect(t *testing.B) {
})
t.Run("LongPostings2", func(bench *testing.B) {
- var lps []*ListPostings
+ var lps []*listPostings
var refs [][]storage.SeriesRef
createPostings(&lps, &refs, 0, 12500000, 1)
createPostings(&lps, &refs, 7500000, 12500000, 1)
@@ -346,7 +352,7 @@ func BenchmarkIntersect(t *testing.B) {
})
t.Run("ManyPostings", func(bench *testing.B) {
- var lps []*ListPostings
+ var lps []*listPostings
var refs [][]storage.SeriesRef
for range 100 {
createPostings(&lps, &refs, 1, 100, 1)
@@ -365,7 +371,7 @@ func BenchmarkIntersect(t *testing.B) {
}
func BenchmarkMerge(t *testing.B) {
- var lps []*ListPostings
+ var lps []*listPostings
var refs [][]storage.SeriesRef
// Create 100000 matchers(k=100000), making sure all memory allocation is done before starting the loop.
@@ -378,7 +384,7 @@ func BenchmarkMerge(t *testing.B) {
refs = append(refs, temp)
}
- its := make([]*ListPostings, len(refs))
+ its := make([]*listPostings, len(refs))
for _, nSeries := range []int{1, 10, 10000, 100000} {
t.Run(strconv.Itoa(nSeries), func(bench *testing.B) {
ctx := context.Background()
@@ -1229,78 +1235,78 @@ func TestPostingsWithIndexHeap(t *testing.T) {
func TestListPostings(t *testing.T) {
t.Run("empty list", func(t *testing.T) {
p := NewListPostings(nil)
- require.Equal(t, 0, p.(*ListPostings).Len())
+ require.Equal(t, 0, p.(*listPostings).Len())
require.False(t, p.Next())
require.False(t, p.Seek(10))
require.False(t, p.Next())
require.NoError(t, p.Err())
- require.Equal(t, 0, p.(*ListPostings).Len())
+ require.Equal(t, 0, p.(*listPostings).Len())
})
t.Run("one posting", func(t *testing.T) {
t.Run("next", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
- require.Equal(t, 1, p.(*ListPostings).Len())
+ require.Equal(t, 1, p.(*listPostings).Len())
require.True(t, p.Next())
require.Equal(t, storage.SeriesRef(10), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
- require.Equal(t, 0, p.(*ListPostings).Len())
+ require.Equal(t, 0, p.(*listPostings).Len())
})
t.Run("seek less", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
- require.Equal(t, 1, p.(*ListPostings).Len())
+ require.Equal(t, 1, p.(*listPostings).Len())
require.True(t, p.Seek(5))
require.Equal(t, storage.SeriesRef(10), p.At())
require.True(t, p.Seek(5))
require.Equal(t, storage.SeriesRef(10), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
- require.Equal(t, 0, p.(*ListPostings).Len())
+ require.Equal(t, 0, p.(*listPostings).Len())
})
t.Run("seek equal", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
- require.Equal(t, 1, p.(*ListPostings).Len())
+ require.Equal(t, 1, p.(*listPostings).Len())
require.True(t, p.Seek(10))
require.Equal(t, storage.SeriesRef(10), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
- require.Equal(t, 0, p.(*ListPostings).Len())
+ require.Equal(t, 0, p.(*listPostings).Len())
})
t.Run("seek more", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
- require.Equal(t, 1, p.(*ListPostings).Len())
+ require.Equal(t, 1, p.(*listPostings).Len())
require.False(t, p.Seek(15))
require.False(t, p.Next())
require.NoError(t, p.Err())
- require.Equal(t, 0, p.(*ListPostings).Len())
+ require.Equal(t, 0, p.(*listPostings).Len())
})
t.Run("seek after next", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10})
- require.Equal(t, 1, p.(*ListPostings).Len())
+ require.Equal(t, 1, p.(*listPostings).Len())
require.True(t, p.Next())
require.False(t, p.Seek(15))
require.False(t, p.Next())
require.NoError(t, p.Err())
- require.Equal(t, 0, p.(*ListPostings).Len())
+ require.Equal(t, 0, p.(*listPostings).Len())
})
})
t.Run("multiple postings", func(t *testing.T) {
t.Run("next", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 20})
- require.Equal(t, 2, p.(*ListPostings).Len())
+ require.Equal(t, 2, p.(*listPostings).Len())
require.True(t, p.Next())
require.Equal(t, storage.SeriesRef(10), p.At())
require.True(t, p.Next())
require.Equal(t, storage.SeriesRef(20), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
- require.Equal(t, 0, p.(*ListPostings).Len())
+ require.Equal(t, 0, p.(*listPostings).Len())
})
t.Run("seek", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 20})
- require.Equal(t, 2, p.(*ListPostings).Len())
+ require.Equal(t, 2, p.(*listPostings).Len())
require.True(t, p.Seek(5))
require.Equal(t, storage.SeriesRef(10), p.At())
require.True(t, p.Seek(5))
@@ -1315,30 +1321,30 @@ func TestListPostings(t *testing.T) {
require.Equal(t, storage.SeriesRef(20), p.At())
require.False(t, p.Next())
require.NoError(t, p.Err())
- require.Equal(t, 0, p.(*ListPostings).Len())
+ require.Equal(t, 0, p.(*listPostings).Len())
})
t.Run("seek lest than last", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50})
- require.Equal(t, 5, p.(*ListPostings).Len())
+ require.Equal(t, 5, p.(*listPostings).Len())
require.True(t, p.Seek(45))
require.Equal(t, storage.SeriesRef(50), p.At())
require.False(t, p.Next())
- require.Equal(t, 0, p.(*ListPostings).Len())
+ require.Equal(t, 0, p.(*listPostings).Len())
})
t.Run("seek exactly last", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50})
- require.Equal(t, 5, p.(*ListPostings).Len())
+ require.Equal(t, 5, p.(*listPostings).Len())
require.True(t, p.Seek(50))
require.Equal(t, storage.SeriesRef(50), p.At())
require.False(t, p.Next())
- require.Equal(t, 0, p.(*ListPostings).Len())
+ require.Equal(t, 0, p.(*listPostings).Len())
})
t.Run("seek more than last", func(t *testing.T) {
p := NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50})
- require.Equal(t, 5, p.(*ListPostings).Len())
+ require.Equal(t, 5, p.(*listPostings).Len())
require.False(t, p.Seek(60))
require.False(t, p.Next())
- require.Equal(t, 0, p.(*ListPostings).Len())
+ require.Equal(t, 0, p.(*listPostings).Len())
})
})
diff --git a/tsdb/index/postingsstats.go b/tsdb/index/postingsstats.go
index f9ee640ff5..ebbe835207 100644
--- a/tsdb/index/postingsstats.go
+++ b/tsdb/index/postingsstats.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/index/postingsstats_test.go b/tsdb/index/postingsstats_test.go
index b218dd9fc7..766c5055c1 100644
--- a/tsdb/index/postingsstats_test.go
+++ b/tsdb/index/postingsstats_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/isolation.go b/tsdb/isolation.go
index 95d3cfa5eb..029efaf181 100644
--- a/tsdb/isolation.go
+++ b/tsdb/isolation.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/isolation_test.go b/tsdb/isolation_test.go
index 1e41b9c753..f2671024e8 100644
--- a/tsdb/isolation_test.go
+++ b/tsdb/isolation_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/mocks_test.go b/tsdb/mocks_test.go
index 986048d3d2..b3d2208bc1 100644
--- a/tsdb/mocks_test.go
+++ b/tsdb/mocks_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/ooo_head.go b/tsdb/ooo_head.go
index b3f5e2b675..c6ae924372 100644
--- a/tsdb/ooo_head.go
+++ b/tsdb/ooo_head.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go
index af8f9b1f83..5d2347c2d7 100644
--- a/tsdb/ooo_head_read.go
+++ b/tsdb/ooo_head_read.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -500,10 +500,6 @@ func (*OOOCompactionHeadIndexReader) LabelNames(context.Context, ...*labels.Matc
return nil, errors.New("not implemented")
}
-func (*OOOCompactionHeadIndexReader) LabelValueFor(context.Context, storage.SeriesRef, string) (string, error) {
- return "", errors.New("not implemented")
-}
-
func (*OOOCompactionHeadIndexReader) LabelNamesFor(context.Context, index.Postings) ([]string, error) {
return nil, errors.New("not implemented")
}
diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go
index d197eacb56..4ecaa51fec 100644
--- a/tsdb/ooo_head_read_test.go
+++ b/tsdb/ooo_head_read_test.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -498,7 +498,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
t.Run("Getting a non existing chunk fails with not found error", func(t *testing.T) {
- db := newTestDBWithOpts(t, opts)
+ db := newTestDB(t, withOpts(opts))
cr := NewHeadAndOOOChunkReader(db.head, 0, 1000, nil, nil, 0)
defer cr.Close()
@@ -837,7 +837,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
for _, tc := range tests {
t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) {
- db := newTestDBWithOpts(t, opts)
+ db := newTestDB(t, withOpts(opts))
app := db.Appender(context.Background())
s1Ref, _, err := scenario.appendFunc(app, s1, tc.firstInOrderSampleAt, tc.firstInOrderSampleAt/1*time.Minute.Milliseconds())
@@ -1006,7 +1006,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
for _, tc := range tests {
t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) {
- db := newTestDBWithOpts(t, opts)
+ db := newTestDB(t, withOpts(opts))
app := db.Appender(context.Background())
s1Ref, _, err := scenario.appendFunc(app, s1, tc.firstInOrderSampleAt, tc.firstInOrderSampleAt/1*time.Minute.Milliseconds())
@@ -1118,16 +1118,3 @@ func TestSortMetaByMinTimeAndMinRef(t *testing.T) {
})
}
}
-
-func newTestDBWithOpts(t *testing.T, opts *Options) *DB {
- dir := t.TempDir()
-
- db, err := Open(dir, nil, nil, opts, nil)
- require.NoError(t, err)
-
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
-
- return db
-}
diff --git a/tsdb/ooo_head_test.go b/tsdb/ooo_head_test.go
index 8f773b6ef9..99cd357a30 100644
--- a/tsdb/ooo_head_test.go
+++ b/tsdb/ooo_head_test.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/ooo_isolation.go b/tsdb/ooo_isolation.go
index 3e3e165a0a..3aeee693a9 100644
--- a/tsdb/ooo_isolation.go
+++ b/tsdb/ooo_isolation.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/ooo_isolation_test.go b/tsdb/ooo_isolation_test.go
index 4ff0488ab1..054823b30c 100644
--- a/tsdb/ooo_isolation_test.go
+++ b/tsdb/ooo_isolation_test.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/querier.go b/tsdb/querier.go
index 788991235f..4a487aa568 100644
--- a/tsdb/querier.go
+++ b/tsdb/querier.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go
index 514fa05a17..ca9ee119f7 100644
--- a/tsdb/querier_bench_test.go
+++ b/tsdb/querier_bench_test.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go
index a5efa35ceb..6933aa617a 100644
--- a/tsdb/querier_test.go
+++ b/tsdb/querier_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -23,6 +23,7 @@ import (
"slices"
"sort"
"strconv"
+ "strings"
"sync"
"testing"
"time"
@@ -2293,10 +2294,6 @@ func (m mockIndex) LabelValues(_ context.Context, name string, hints *storage.La
return values, nil
}
-func (m mockIndex) LabelValueFor(_ context.Context, id storage.SeriesRef, label string) (string, error) {
- return m.series[id].l.Get(label), nil
-}
-
func (m mockIndex) LabelNamesFor(_ context.Context, postings index.Postings) ([]string, error) {
namesMap := make(map[string]bool)
for postings.Next() {
@@ -3037,14 +3034,14 @@ func TestPostingsForMatchers(t *testing.T) {
require.NoError(t, err)
for _, c := range cases {
- name := ""
+ var name strings.Builder
for i, matcher := range c.matchers {
if i > 0 {
- name += ","
+ name.WriteString(",")
}
- name += matcher.String()
+ name.WriteString(matcher.String())
}
- t.Run(name, func(t *testing.T) {
+ t.Run(name.String(), func(t *testing.T) {
exp := map[string]struct{}{}
for _, l := range c.exp {
exp[l.String()] = struct{}{}
@@ -3094,11 +3091,8 @@ func TestQuerierIndexQueriesRace(t *testing.T) {
for _, c := range testCases {
t.Run(fmt.Sprintf("%v", c.matchers), func(t *testing.T) {
t.Parallel()
- db := openTestDB(t, DefaultOptions(), nil)
+ db := newTestDB(t)
h := db.Head()
- t.Cleanup(func() {
- require.NoError(t, db.Close())
- })
ctx, cancel := context.WithCancel(context.Background())
wg := &sync.WaitGroup{}
wg.Add(1)
@@ -3317,10 +3311,6 @@ func (mockMatcherIndex) LabelValues(context.Context, string, *storage.LabelHints
return []string{}, errors.New("label values called")
}
-func (mockMatcherIndex) LabelValueFor(context.Context, storage.SeriesRef, string) (string, error) {
- return "", errors.New("label value for called")
-}
-
func (mockMatcherIndex) LabelNamesFor(context.Context, index.Postings) ([]string, error) {
return nil, errors.New("label names for called")
}
@@ -3496,10 +3486,7 @@ func TestBlockBaseSeriesSet(t *testing.T) {
}
func BenchmarkHeadChunkQuerier(b *testing.B) {
- db := openTestDB(b, nil, nil)
- defer func() {
- require.NoError(b, db.Close())
- }()
+ db := newTestDB(b)
// 3h of data.
numTimeseries := 100
@@ -3541,10 +3528,7 @@ func BenchmarkHeadChunkQuerier(b *testing.B) {
}
func BenchmarkHeadQuerier(b *testing.B) {
- db := openTestDB(b, nil, nil)
- defer func() {
- require.NoError(b, db.Close())
- }()
+ db := newTestDB(b)
// 3h of data.
numTimeseries := 100
@@ -3606,12 +3590,8 @@ func TestQueryWithDeletedHistograms(t *testing.T) {
for name, tc := range testcases {
t.Run(name, func(t *testing.T) {
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
-
- appender := db.Appender(context.Background())
+ db := newTestDB(t)
+ app := db.Appender(context.Background())
var (
err error
@@ -3621,12 +3601,11 @@ func TestQueryWithDeletedHistograms(t *testing.T) {
for i := range 100 {
h, fh := tc(i)
- seriesRef, err = appender.AppendHistogram(seriesRef, lbs, int64(i), h, fh)
+ seriesRef, err = app.AppendHistogram(seriesRef, lbs, int64(i), h, fh)
require.NoError(t, err)
}
- err = appender.Commit()
- require.NoError(t, err)
+ require.NoError(t, app.Commit())
matcher, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test")
require.NoError(t, err)
@@ -3664,12 +3643,8 @@ func TestQueryWithDeletedHistograms(t *testing.T) {
func TestQueryWithOneChunkCompletelyDeleted(t *testing.T) {
ctx := context.Background()
- db := openTestDB(t, nil, nil)
- defer func() {
- require.NoError(t, db.Close())
- }()
-
- appender := db.Appender(context.Background())
+ db := newTestDB(t)
+ app := db.Appender(context.Background())
var (
err error
@@ -3680,12 +3655,12 @@ func TestQueryWithOneChunkCompletelyDeleted(t *testing.T) {
// Create an int histogram chunk with samples between 0 - 20 and 30 - 40.
for i := range 20 {
h := tsdbutil.GenerateTestHistogram(1)
- seriesRef, err = appender.AppendHistogram(seriesRef, lbs, int64(i), h, nil)
+ seriesRef, err = app.AppendHistogram(seriesRef, lbs, int64(i), h, nil)
require.NoError(t, err)
}
for i := 30; i < 40; i++ {
h := tsdbutil.GenerateTestHistogram(1)
- seriesRef, err = appender.AppendHistogram(seriesRef, lbs, int64(i), h, nil)
+ seriesRef, err = app.AppendHistogram(seriesRef, lbs, int64(i), h, nil)
require.NoError(t, err)
}
@@ -3693,12 +3668,11 @@ func TestQueryWithOneChunkCompletelyDeleted(t *testing.T) {
// type from int histograms so a new chunk is created.
for i := 60; i < 100; i++ {
fh := tsdbutil.GenerateTestFloatHistogram(1)
- seriesRef, err = appender.AppendHistogram(seriesRef, lbs, int64(i), nil, fh)
+ seriesRef, err = app.AppendHistogram(seriesRef, lbs, int64(i), nil, fh)
require.NoError(t, err)
}
- err = appender.Commit()
- require.NoError(t, err)
+ require.NoError(t, app.Commit())
matcher, err := labels.NewMatcher(labels.MatchEqual, "__name__", "test")
require.NoError(t, err)
@@ -3757,10 +3731,6 @@ func (mockReaderOfLabels) LabelValues(context.Context, string, *storage.LabelHin
return make([]string, mockReaderOfLabelsSeriesCount), nil
}
-func (mockReaderOfLabels) LabelValueFor(context.Context, storage.SeriesRef, string) (string, error) {
- panic("LabelValueFor called")
-}
-
func (mockReaderOfLabels) SortedLabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, error) {
panic("SortedLabelValues called")
}
diff --git a/tsdb/record/record.go b/tsdb/record/record.go
index 5791f60df4..106b8e51bc 100644
--- a/tsdb/record/record.go
+++ b/tsdb/record/record.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go
index bbbea04940..8ebd805d4d 100644
--- a/tsdb/record/record_test.go
+++ b/tsdb/record/record_test.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/repair.go b/tsdb/repair.go
index 8bdc645b5e..0d9d449a40 100644
--- a/tsdb/repair.go
+++ b/tsdb/repair.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/repair_test.go b/tsdb/repair_test.go
index 8a192c4f78..34fe85f422 100644
--- a/tsdb/repair_test.go
+++ b/tsdb/repair_test.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/testutil.go b/tsdb/testutil.go
index 4d413322c8..feb921447d 100644
--- a/tsdb/testutil.go
+++ b/tsdb/testutil.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -44,14 +44,14 @@ type testValue struct {
type sampleTypeScenario struct {
sampleType string
- appendFunc func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error)
+ appendFunc func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error)
sampleFunc func(ts, value int64) sample
}
var sampleTypeScenarios = map[string]sampleTypeScenario{
float: {
sampleType: sampleMetricTypeFloat,
- appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
+ appendFunc: func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, f: float64(value)}
ref, err := appender.Append(0, lbls, ts, s.f)
return ref, s, err
@@ -62,7 +62,7 @@ var sampleTypeScenarios = map[string]sampleTypeScenario{
},
intHistogram: {
sampleType: sampleMetricTypeHistogram,
- appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
+ appendFunc: func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, h: tsdbutil.GenerateTestHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
return ref, s, err
@@ -73,7 +73,7 @@ var sampleTypeScenarios = map[string]sampleTypeScenario{
},
floatHistogram: {
sampleType: sampleMetricTypeHistogram,
- appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
+ appendFunc: func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
return ref, s, err
@@ -84,7 +84,7 @@ var sampleTypeScenarios = map[string]sampleTypeScenario{
},
customBucketsIntHistogram: {
sampleType: sampleMetricTypeHistogram,
- appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
+ appendFunc: func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, h: tsdbutil.GenerateTestCustomBucketsHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
return ref, s, err
@@ -95,7 +95,7 @@ var sampleTypeScenarios = map[string]sampleTypeScenario{
},
customBucketsFloatHistogram: {
sampleType: sampleMetricTypeHistogram,
- appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
+ appendFunc: func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, fh: tsdbutil.GenerateTestCustomBucketsFloatHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
return ref, s, err
@@ -106,7 +106,7 @@ var sampleTypeScenarios = map[string]sampleTypeScenario{
},
gaugeIntHistogram: {
sampleType: sampleMetricTypeHistogram,
- appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
+ appendFunc: func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
return ref, s, err
@@ -117,7 +117,7 @@ var sampleTypeScenarios = map[string]sampleTypeScenario{
},
gaugeFloatHistogram: {
sampleType: sampleMetricTypeHistogram,
- appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
+ appendFunc: func(appender storage.LimitedAppenderV1, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
return ref, s, err
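For orientation (not part of the patch): a minimal sketch of how a table-driven test could exercise the scenarios above. It assumes `app` is an appender satisfying the new storage.LimitedAppenderV1 interface, `t` is the enclosing *testing.T, and the label values are placeholders.

    for name, sc := range sampleTypeScenarios {
        t.Run(name, func(t *testing.T) {
            lbls := labels.FromStrings("foo", "bar")
            // appendFunc writes one sample at t=100 with value 7 and returns what it wrote.
            ref, got, err := sc.appendFunc(app, lbls, 100, 7)
            require.NoError(t, err)
            require.NotZero(t, ref)
            // sampleFunc is expected to mirror what appendFunc wrote.
            require.Equal(t, sc.sampleFunc(100, 7), got)
        })
    }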
diff --git a/tsdb/tombstones/tombstones.go b/tsdb/tombstones/tombstones.go
index bda565eae4..25218782cd 100644
--- a/tsdb/tombstones/tombstones.go
+++ b/tsdb/tombstones/tombstones.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/tombstones/tombstones_test.go b/tsdb/tombstones/tombstones_test.go
index de036e22d0..17802672c6 100644
--- a/tsdb/tombstones/tombstones_test.go
+++ b/tsdb/tombstones/tombstones_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/tsdbblockutil.go b/tsdb/tsdbblockutil.go
index af2348019a..1c6882b085 100644
--- a/tsdb/tsdbblockutil.go
+++ b/tsdb/tsdbblockutil.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/tsdbutil/dir_locker.go b/tsdb/tsdbutil/dir_locker.go
index 4b69e1f9d6..45cabdd3d7 100644
--- a/tsdb/tsdbutil/dir_locker.go
+++ b/tsdb/tsdbutil/dir_locker.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/tsdbutil/dir_locker_test.go b/tsdb/tsdbutil/dir_locker_test.go
index 8c027415d3..e3f323932a 100644
--- a/tsdb/tsdbutil/dir_locker_test.go
+++ b/tsdb/tsdbutil/dir_locker_test.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/tsdbutil/dir_locker_testutil.go b/tsdb/tsdbutil/dir_locker_testutil.go
index 5a335989c7..ffbf039339 100644
--- a/tsdb/tsdbutil/dir_locker_testutil.go
+++ b/tsdb/tsdbutil/dir_locker_testutil.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/tsdbutil/histogram.go b/tsdb/tsdbutil/histogram.go
index 64311a8c3b..e6a67c8212 100644
--- a/tsdb/tsdbutil/histogram.go
+++ b/tsdb/tsdbutil/histogram.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go
index c26f3f1052..57c2faf23e 100644
--- a/tsdb/wlog/checkpoint.go
+++ b/tsdb/wlog/checkpoint.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/wlog/checkpoint_test.go b/tsdb/wlog/checkpoint_test.go
index b83724ea2e..97ca2e768d 100644
--- a/tsdb/wlog/checkpoint_test.go
+++ b/tsdb/wlog/checkpoint_test.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/wlog/live_reader.go b/tsdb/wlog/live_reader.go
index 004c397270..359f29274b 100644
--- a/tsdb/wlog/live_reader.go
+++ b/tsdb/wlog/live_reader.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/wlog/reader.go b/tsdb/wlog/reader.go
index c559d85b89..54b1baf4c4 100644
--- a/tsdb/wlog/reader.go
+++ b/tsdb/wlog/reader.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/wlog/reader_test.go b/tsdb/wlog/reader_test.go
index 1ddc33e2c8..788a2edfb9 100644
--- a/tsdb/wlog/reader_test.go
+++ b/tsdb/wlog/reader_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go
index abb5ef9731..a841a44fc8 100644
--- a/tsdb/wlog/watcher.go
+++ b/tsdb/wlog/watcher.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go
index 9e6ea65a7f..b9a6504298 100644
--- a/tsdb/wlog/watcher_test.go
+++ b/tsdb/wlog/watcher_test.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/tsdb/wlog/wlog.go b/tsdb/wlog/wlog.go
index 176531c478..5a80d58abf 100644
--- a/tsdb/wlog/wlog.go
+++ b/tsdb/wlog/wlog.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/tsdb/wlog/wlog_test.go b/tsdb/wlog/wlog_test.go
index 1ade42d3ff..79955d499c 100644
--- a/tsdb/wlog/wlog_test.go
+++ b/tsdb/wlog/wlog_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/util/almost/almost.go b/util/almost/almost.go
index 5f866b89b3..b89f968db6 100644
--- a/util/almost/almost.go
+++ b/util/almost/almost.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/almost/almost_test.go b/util/almost/almost_test.go
index fba37f13f6..4e225bf862 100644
--- a/util/almost/almost_test.go
+++ b/util/almost/almost_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go
index 817f670b5e..a68b2ba4fc 100644
--- a/util/annotations/annotations.go
+++ b/util/annotations/annotations.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/compression/buffers.go b/util/compression/buffers.go
index f510efc042..30f002970b 100644
--- a/util/compression/buffers.go
+++ b/util/compression/buffers.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/compression/compression.go b/util/compression/compression.go
index a1e9b7e530..26cff6a22e 100644
--- a/util/compression/compression.go
+++ b/util/compression/compression.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/compression/compression_test.go b/util/compression/compression_test.go
index 736bb934e3..4c52b8f42e 100644
--- a/util/compression/compression_test.go
+++ b/util/compression/compression_test.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/convertnhcb/convertnhcb.go b/util/convertnhcb/convertnhcb.go
index 21ae62b3cb..64ec9054a3 100644
--- a/util/convertnhcb/convertnhcb.go
+++ b/util/convertnhcb/convertnhcb.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/convertnhcb/convertnhcb_test.go b/util/convertnhcb/convertnhcb_test.go
index 7486ac18bb..710d47385a 100644
--- a/util/convertnhcb/convertnhcb_test.go
+++ b/util/convertnhcb/convertnhcb_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/documentcli/documentcli.go b/util/documentcli/documentcli.go
index 14382663ee..ebd7d91a5d 100644
--- a/util/documentcli/documentcli.go
+++ b/util/documentcli/documentcli.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/features/features.go b/util/features/features.go
new file mode 100644
index 0000000000..d52384dbd8
--- /dev/null
+++ b/util/features/features.go
@@ -0,0 +1,127 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package features
+
+import (
+ "maps"
+ "sync"
+)
+
+// Category constants define the standard feature flag categories used in Prometheus.
+const (
+ API = "api"
+ OTLPReceiver = "otlp_receiver"
+ Prometheus = "prometheus"
+ PromQL = "promql"
+ PromQLFunctions = "promql_functions"
+ PromQLOperators = "promql_operators"
+ Rules = "rules"
+ Scrape = "scrape"
+ ServiceDiscoveryProviders = "service_discovery_providers"
+ TemplatingFunctions = "templating_functions"
+ TSDB = "tsdb"
+ UI = "ui"
+)
+
+// Collector defines the interface for collecting and managing feature flags.
+// It provides methods to enable, disable, and retrieve feature states.
+type Collector interface {
+ // Enable marks a feature as enabled in the registry.
+ // The category and name should use snake_case naming convention.
+ Enable(category, name string)
+
+ // Disable marks a feature as disabled in the registry.
+ // The category and name should use snake_case naming convention.
+ Disable(category, name string)
+
+ // Set sets a feature to the specified enabled state.
+ // The category and name should use snake_case naming convention.
+ Set(category, name string, enabled bool)
+
+ // Get returns a copy of all registered features organized by category.
+ // Returns a map where the keys are category names and values are maps
+ // of feature names to their enabled status.
+ Get() map[string]map[string]bool
+}
+
+// registry is the private implementation of the Collector interface.
+// It stores feature information organized by category.
+type registry struct {
+ mu sync.RWMutex
+ features map[string]map[string]bool
+}
+
+// DefaultRegistry is the package-level registry used by Prometheus.
+var DefaultRegistry = NewRegistry()
+
+// NewRegistry creates a new feature registry.
+func NewRegistry() Collector {
+ return &registry{
+ features: make(map[string]map[string]bool),
+ }
+}
+
+// Enable marks a feature as enabled in the registry.
+func (r *registry) Enable(category, name string) {
+ r.Set(category, name, true)
+}
+
+// Disable marks a feature as disabled in the registry.
+func (r *registry) Disable(category, name string) {
+ r.Set(category, name, false)
+}
+
+// Set sets a feature to the specified enabled state.
+func (r *registry) Set(category, name string, enabled bool) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if r.features[category] == nil {
+ r.features[category] = make(map[string]bool)
+ }
+ r.features[category][name] = enabled
+}
+
+// Get returns a copy of all registered features organized by category.
+func (r *registry) Get() map[string]map[string]bool {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ result := make(map[string]map[string]bool, len(r.features))
+ for category, features := range r.features {
+ result[category] = make(map[string]bool, len(features))
+ maps.Copy(result[category], features)
+ }
+ return result
+}
+
+// Enable marks a feature as enabled in the default registry.
+func Enable(category, name string) {
+ DefaultRegistry.Enable(category, name)
+}
+
+// Disable marks a feature as disabled in the default registry.
+func Disable(category, name string) {
+ DefaultRegistry.Disable(category, name)
+}
+
+// Set sets a feature to the specified enabled state in the default registry.
+func Set(category, name string, enabled bool) {
+ DefaultRegistry.Set(category, name, enabled)
+}
+
+// Get returns all features from the default registry.
+func Get() map[string]map[string]bool {
+ return DefaultRegistry.Get()
+}
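A minimal usage sketch of the registry above (illustrative only; the flag names are hypothetical, everything else is the API defined in this file):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/util/features"
    )

    func main() {
        // Components record which features are active during startup.
        features.Enable(features.PromQL, "experimental_functions")    // hypothetical flag name
        features.Set(features.TSDB, "out_of_order_ingestion", false)  // hypothetical flag name

        // Get returns a copy, so later registry mutations do not affect the snapshot.
        snapshot := features.Get()
        fmt.Println(snapshot[features.PromQL]["experimental_functions"]) // true
    }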
diff --git a/util/fmtutil/format.go b/util/fmtutil/format.go
index 377f4ece05..a4ac7d43ca 100644
--- a/util/fmtutil/format.go
+++ b/util/fmtutil/format.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/fmtutil/format_test.go b/util/fmtutil/format_test.go
index f1d025806e..73dbe39f45 100644
--- a/util/fmtutil/format_test.go
+++ b/util/fmtutil/format_test.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/gate/gate.go b/util/gate/gate.go
index 6cb9d583c6..a1066fd74f 100644
--- a/util/gate/gate.go
+++ b/util/gate/gate.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/httputil/compression.go b/util/httputil/compression.go
index d5bedb7fa9..ca9f3c17da 100644
--- a/util/httputil/compression.go
+++ b/util/httputil/compression.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -56,6 +56,7 @@ func (c *compressedResponseWriter) Close() {
// Constructs a new compressedResponseWriter based on client request headers.
func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) *compressedResponseWriter {
+ writer.Header().Add("Vary", acceptEncodingHeader)
raw := req.Header.Get(acceptEncodingHeader)
var (
encoding string
@@ -65,13 +66,17 @@ func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request)
encoding, raw, commaFound = strings.Cut(raw, ",")
switch strings.TrimSpace(encoding) {
case gzipEncoding:
- writer.Header().Set(contentEncodingHeader, gzipEncoding)
+ h := writer.Header()
+ h.Del("Content-Length") // avoid stale length after compression
+ h.Set(contentEncodingHeader, gzipEncoding)
return &compressedResponseWriter{
ResponseWriter: writer,
writer: gzip.NewWriter(writer),
}
case deflateEncoding:
- writer.Header().Set(contentEncodingHeader, deflateEncoding)
+ h := writer.Header()
+ h.Del("Content-Length")
+ h.Set(contentEncodingHeader, deflateEncoding)
return &compressedResponseWriter{
ResponseWriter: writer,
writer: zlib.NewWriter(writer),
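To make the effect of this hunk concrete, a hedged, package-internal test sketch (names taken from the hunk; standard net/http/httptest and testify imports assumed): a gzip-capable request should produce Vary: Accept-Encoding, Content-Encoding: gzip, and no stale Content-Length.

    func TestCompressedResponseWriterHeaders(t *testing.T) {
        rec := httptest.NewRecorder()
        rec.Header().Set("Content-Length", "123") // stale value that compression must drop

        req := httptest.NewRequest(http.MethodGet, "/metrics", nil)
        req.Header.Set("Accept-Encoding", "gzip")

        w := newCompressedResponseWriter(rec, req)
        _, err := w.Write([]byte("hello"))
        require.NoError(t, err)
        w.Close()

        require.Equal(t, "Accept-Encoding", rec.Header().Get("Vary"))
        require.Equal(t, "gzip", rec.Header().Get("Content-Encoding"))
        require.Empty(t, rec.Header().Get("Content-Length"))
    }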
diff --git a/util/httputil/compression_test.go b/util/httputil/compression_test.go
index 11df0a7c4c..6bdde914ce 100644
--- a/util/httputil/compression_test.go
+++ b/util/httputil/compression_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/httputil/context.go b/util/httputil/context.go
index 9b16428892..7aaeebdb3e 100644
--- a/util/httputil/context.go
+++ b/util/httputil/context.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/httputil/cors.go b/util/httputil/cors.go
index 2d4cc91ccb..e319762b5f 100644
--- a/util/httputil/cors.go
+++ b/util/httputil/cors.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/httputil/cors_test.go b/util/httputil/cors_test.go
index 30567947a9..d637932267 100644
--- a/util/httputil/cors_test.go
+++ b/util/httputil/cors_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/jsonutil/marshal.go b/util/jsonutil/marshal.go
index d715eabe68..61ce4234eb 100644
--- a/util/jsonutil/marshal.go
+++ b/util/jsonutil/marshal.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/junitxml/junitxml.go b/util/junitxml/junitxml.go
index 14e4b6dbae..8249290830 100644
--- a/util/junitxml/junitxml.go
+++ b/util/junitxml/junitxml.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/junitxml/junitxml_test.go b/util/junitxml/junitxml_test.go
index ad4d0293d0..92a32f2ddf 100644
--- a/util/junitxml/junitxml_test.go
+++ b/util/junitxml/junitxml_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/logging/dedupe.go b/util/logging/dedupe.go
index 8137f4f22b..244cd6495c 100644
--- a/util/logging/dedupe.go
+++ b/util/logging/dedupe.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/logging/dedupe_test.go b/util/logging/dedupe_test.go
index 918c5d60bd..b584f12572 100644
--- a/util/logging/dedupe_test.go
+++ b/util/logging/dedupe_test.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/logging/file.go b/util/logging/file.go
index 5e379442a2..bce9be9ae6 100644
--- a/util/logging/file.go
+++ b/util/logging/file.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/logging/file_test.go b/util/logging/file_test.go
index bd34bc2a3a..58a55697d9 100644
--- a/util/logging/file_test.go
+++ b/util/logging/file_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/namevalidationutil/namevalidationutil.go b/util/namevalidationutil/namevalidationutil.go
index 2e656b6a19..14796b48f4 100644
--- a/util/namevalidationutil/namevalidationutil.go
+++ b/util/namevalidationutil/namevalidationutil.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/namevalidationutil/namevalidationutil_test.go b/util/namevalidationutil/namevalidationutil_test.go
index 660b6100b0..692bc2692b 100644
--- a/util/namevalidationutil/namevalidationutil_test.go
+++ b/util/namevalidationutil/namevalidationutil_test.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/netconnlimit/netconnlimit.go b/util/netconnlimit/netconnlimit.go
index 3bdd805b83..5f54d0616a 100644
--- a/util/netconnlimit/netconnlimit.go
+++ b/util/netconnlimit/netconnlimit.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Based on golang.org/x/net/netutil:
// Copyright 2013 The Go Authors
// Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/util/netconnlimit/netconnlimit_test.go b/util/netconnlimit/netconnlimit_test.go
index e4d4904209..c33c7b342f 100644
--- a/util/netconnlimit/netconnlimit_test.go
+++ b/util/netconnlimit/netconnlimit_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/notifications/notifications.go b/util/notifications/notifications.go
index 4888a0b664..0e3882ce36 100644
--- a/util/notifications/notifications.go
+++ b/util/notifications/notifications.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/notifications/notifications_test.go b/util/notifications/notifications_test.go
index 3d9ba6bb12..84db90c6e3 100644
--- a/util/notifications/notifications_test.go
+++ b/util/notifications/notifications_test.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/osutil/hostname.go b/util/osutil/hostname.go
index c44cb391b6..f0444114f7 100644
--- a/util/osutil/hostname.go
+++ b/util/osutil/hostname.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/pool/pool.go b/util/pool/pool.go
index 7d5a8e3abf..a7f1bbb54e 100644
--- a/util/pool/pool.go
+++ b/util/pool/pool.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/pool/pool_test.go b/util/pool/pool_test.go
index e1ac13fb90..a14da6be8b 100644
--- a/util/pool/pool_test.go
+++ b/util/pool/pool_test.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/runtime/limits_default.go b/util/runtime/limits_default.go
index 156747d450..51a78423d3 100644
--- a/util/runtime/limits_default.go
+++ b/util/runtime/limits_default.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/runtime/limits_windows.go b/util/runtime/limits_windows.go
index ce82d31e6d..1cb7ea33a7 100644
--- a/util/runtime/limits_windows.go
+++ b/util/runtime/limits_windows.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/runtime/statfs.go b/util/runtime/statfs.go
index 66bedb5ea1..98dd822e4a 100644
--- a/util/runtime/statfs.go
+++ b/util/runtime/statfs.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/runtime/statfs_default.go b/util/runtime/statfs_default.go
index 78cfb1fe41..0cf5c2e616 100644
--- a/util/runtime/statfs_default.go
+++ b/util/runtime/statfs_default.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/runtime/statfs_linux_386.go b/util/runtime/statfs_linux_386.go
index a003b2effe..33dbc4c3e9 100644
--- a/util/runtime/statfs_linux_386.go
+++ b/util/runtime/statfs_linux_386.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/runtime/statfs_uint32.go b/util/runtime/statfs_uint32.go
index fbf994ea63..2fb4d70849 100644
--- a/util/runtime/statfs_uint32.go
+++ b/util/runtime/statfs_uint32.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/runtime/uname_default.go b/util/runtime/uname_default.go
index 0052dbab47..1bdc2e6696 100644
--- a/util/runtime/uname_default.go
+++ b/util/runtime/uname_default.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/runtime/uname_linux.go b/util/runtime/uname_linux.go
index ce3bc42a25..f2798cda4b 100644
--- a/util/runtime/uname_linux.go
+++ b/util/runtime/uname_linux.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/runtime/vmlimits_default.go b/util/runtime/vmlimits_default.go
index aef4341061..0e3bc0ead5 100644
--- a/util/runtime/vmlimits_default.go
+++ b/util/runtime/vmlimits_default.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/runtime/vmlimits_openbsd.go b/util/runtime/vmlimits_openbsd.go
index b40f065883..ce9aa181e6 100644
--- a/util/runtime/vmlimits_openbsd.go
+++ b/util/runtime/vmlimits_openbsd.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/runutil/runutil.go b/util/runutil/runutil.go
index 5a77c332ba..14752ed796 100644
--- a/util/runutil/runutil.go
+++ b/util/runutil/runutil.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/stats/query_stats.go b/util/stats/query_stats.go
index d8ec186f4c..9801d658a7 100644
--- a/util/stats/query_stats.go
+++ b/util/stats/query_stats.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/stats/stats_test.go b/util/stats/stats_test.go
index 28753b95fc..245f7cbc16 100644
--- a/util/stats/stats_test.go
+++ b/util/stats/stats_test.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/stats/timer.go b/util/stats/timer.go
index eca0fcccb0..1b9e430a09 100644
--- a/util/stats/timer.go
+++ b/util/stats/timer.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/strutil/quote.go b/util/strutil/quote.go
index 0a78421fd4..d7e65395f4 100644
--- a/util/strutil/quote.go
+++ b/util/strutil/quote.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/strutil/quote_test.go b/util/strutil/quote_test.go
index de33230551..c077a5ed49 100644
--- a/util/strutil/quote_test.go
+++ b/util/strutil/quote_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/strutil/strconv.go b/util/strutil/strconv.go
index 88d2a3b610..77f1acc94d 100644
--- a/util/strutil/strconv.go
+++ b/util/strutil/strconv.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/strutil/strconv_test.go b/util/strutil/strconv_test.go
index f09e7ffb3f..b4b87ee816 100644
--- a/util/strutil/strconv_test.go
+++ b/util/strutil/strconv_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/teststorage/appender.go b/util/teststorage/appender.go
new file mode 100644
index 0000000000..058a09561c
--- /dev/null
+++ b/util/teststorage/appender.go
@@ -0,0 +1,399 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package teststorage
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "slices"
+ "strings"
+ "sync"
+
+ "github.com/prometheus/common/model"
+ "go.uber.org/atomic"
+
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/storage"
+)
+
+// Sample represents a combined test sample for mocking storage.AppenderV2.
+type Sample struct {
+ MF string
+ L labels.Labels
+ M metadata.Metadata
+ ST, T int64
+ V float64
+ H *histogram.Histogram
+ FH *histogram.FloatHistogram
+ ES []exemplar.Exemplar
+}
+
+func (s Sample) String() string {
+ // Attempting to format similar to ~ OpenMetrics 2.0 for readability.
+ b := strings.Builder{}
+ if s.M.Help != "" {
+ b.WriteString("HELP ")
+ b.WriteString(s.M.Help)
+ b.WriteString("\n")
+ }
+ if s.M.Type != model.MetricTypeUnknown && s.M.Type != "" {
+ b.WriteString("type@")
+ b.WriteString(string(s.M.Type))
+ b.WriteString(" ")
+ }
+ if s.M.Unit != "" {
+ b.WriteString("unit@")
+ b.WriteString(s.M.Unit)
+ b.WriteString(" ")
+ }
+ // Print all value types on purpose, to catch bugs for appending multiple sample types at once.
+ h := ""
+ if s.H != nil {
+ h = s.H.String()
+ }
+ fh := ""
+ if s.FH != nil {
+ fh = s.FH.String()
+ }
+ b.WriteString(fmt.Sprintf("%s %v%v%v st@%v t@%v\n", s.L.String(), s.V, h, fh, s.ST, s.T))
+ return b.String()
+}
+
+func (s Sample) Equals(other Sample) bool {
+ return strings.Compare(s.MF, other.MF) == 0 &&
+ labels.Equal(s.L, other.L) &&
+ s.M.Equals(other.M) &&
+ s.ST == other.ST &&
+ s.T == other.T &&
+ math.Float64bits(s.V) == math.Float64bits(other.V) && // Compare Float64bits so NaN values which are exactly the same will compare equal.
+ s.H.Equals(other.H) &&
+ s.FH.Equals(other.FH) &&
+ slices.EqualFunc(s.ES, other.ES, exemplar.Exemplar.Equals)
+}
+
+// Appendable is a storage.Appendable mock.
+// It allows recording all samples that were added through the appender and injecting errors.
+// Appendable supports only one open Appender at a time; opening another returns an Appender whose calls fail.
+type Appendable struct {
+ appendErrFn func(ls labels.Labels) error // If non-nil, inject an appender error on every Append, AppendHistogram and ST zero-sample call.
+ appendExemplarsError error // If non-nil, inject exemplar error.
+ commitErr error // If non-nil, inject commit error.
+
+ mtx sync.Mutex
+ openAppenders atomic.Int32 // Guard against multi-appender use.
+
+ // Recorded results.
+ pendingSamples []Sample
+ resultSamples []Sample
+ rolledbackSamples []Sample
+
+ // Optional chain (Appender will collect samples, then run next).
+ next storage.Appendable
+}
+
+// NewAppendable returns mock Appendable.
+func NewAppendable() *Appendable {
+ return &Appendable{}
+}
+
+// Then chains the provided appendable; samples are recorded here and then forwarded to an appender obtained from it.
+func (a *Appendable) Then(appendable storage.Appendable) *Appendable {
+ a.next = appendable
+ return a
+}
+
+// WithErrs allows injecting errors to the appender.
+func (a *Appendable) WithErrs(appendErrFn func(ls labels.Labels) error, appendExemplarsError, commitErr error) *Appendable {
+ a.appendErrFn = appendErrFn
+ a.appendExemplarsError = appendExemplarsError
+ a.commitErr = commitErr
+ return a
+}
+
+// PendingSamples returns pending samples (samples appended without commit).
+func (a *Appendable) PendingSamples() []Sample {
+ a.mtx.Lock()
+ defer a.mtx.Unlock()
+
+ ret := make([]Sample, len(a.pendingSamples))
+ copy(ret, a.pendingSamples)
+ return ret
+}
+
+// ResultSamples returns committed samples.
+func (a *Appendable) ResultSamples() []Sample {
+ a.mtx.Lock()
+ defer a.mtx.Unlock()
+
+ ret := make([]Sample, len(a.resultSamples))
+ copy(ret, a.resultSamples)
+ return ret
+}
+
+// RolledbackSamples returns rolled back samples.
+func (a *Appendable) RolledbackSamples() []Sample {
+ a.mtx.Lock()
+ defer a.mtx.Unlock()
+
+ ret := make([]Sample, len(a.rolledbackSamples))
+ copy(ret, a.rolledbackSamples)
+ return ret
+}
+
+func (a *Appendable) ResultReset() {
+ a.mtx.Lock()
+ defer a.mtx.Unlock()
+
+ a.pendingSamples = a.pendingSamples[:0]
+ a.resultSamples = a.resultSamples[:0]
+ a.rolledbackSamples = a.rolledbackSamples[:0]
+}
+
+// ResultMetadata returns resultSamples with samples only containing L and M.
+// This is for compatibility with tests that only focus on metadata.
+//
+// TODO: Rewrite tests to test metadata on resultSamples instead.
+func (a *Appendable) ResultMetadata() []Sample {
+ a.mtx.Lock()
+ defer a.mtx.Unlock()
+
+ var ret []Sample
+ for _, s := range a.resultSamples {
+ if s.M.IsEmpty() {
+ continue
+ }
+ ret = append(ret, Sample{L: s.L, M: s.M})
+ }
+ return ret
+}
+
+func (a *Appendable) String() string {
+ var sb strings.Builder
+ sb.WriteString("committed:\n")
+ for _, s := range a.resultSamples {
+ sb.WriteString("\n")
+ sb.WriteString(s.String())
+ }
+ sb.WriteString("pending:\n")
+ for _, s := range a.pendingSamples {
+ sb.WriteString("\n")
+ sb.WriteString(s.String())
+ }
+ sb.WriteString("rolledback:\n")
+ for _, s := range a.rolledbackSamples {
+ sb.WriteString("\n")
+ sb.WriteString(s.String())
+ }
+ return sb.String()
+}
+
+var errClosedAppender = errors.New("appender was already committed/rolledback")
+
+type appender struct {
+ err error
+ next storage.Appender
+
+ a *Appendable
+}
+
+func (a *appender) checkErr() error {
+ a.a.mtx.Lock()
+ defer a.a.mtx.Unlock()
+
+ return a.err
+}
+
+func (a *Appendable) Appender(ctx context.Context) storage.Appender {
+ ret := &appender{a: a}
+ if a.openAppenders.Inc() > 1 {
+ ret.err = errors.New("teststorage.Appendable.Appender() concurrent use is not supported; attempted opening new Appender() without Commit/Rollback of the previous one. Extend the implementation if concurrent mock is needed")
+ }
+
+ if a.next != nil {
+ ret.next = a.next.Appender(ctx)
+ }
+ return ret
+}
+
+func (*appender) SetOptions(*storage.AppendOptions) {}
+
+func (a *appender) Append(ref storage.SeriesRef, ls labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
+ if err := a.checkErr(); err != nil {
+ return 0, err
+ }
+
+ if a.a.appendErrFn != nil {
+ if err := a.a.appendErrFn(ls); err != nil {
+ return 0, err
+ }
+ }
+
+ a.a.mtx.Lock()
+ a.a.pendingSamples = append(a.a.pendingSamples, Sample{L: ls, T: t, V: v})
+ a.a.mtx.Unlock()
+
+ if a.next != nil {
+ return a.next.Append(ref, ls, t, v)
+ }
+
+ return computeOrCheckRef(ref, ls)
+}
+
+func computeOrCheckRef(ref storage.SeriesRef, ls labels.Labels) (storage.SeriesRef, error) {
+ h := ls.Hash()
+ if ref == 0 {
+ // Use labels hash as a stand-in for unique series reference, to avoid having to track all series.
+ return storage.SeriesRef(h), nil
+ }
+
+ if storage.SeriesRef(h) != ref {
+ // Check for a buggy ref while we're at it.
+ return 0, errors.New("teststorage.appender: found input ref not matching labels; potential bug in Appendable user")
+ }
+ return ref, nil
+}
+
+func (a *appender) AppendHistogram(ref storage.SeriesRef, ls labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
+ if err := a.checkErr(); err != nil {
+ return 0, err
+ }
+ if a.a.appendErrFn != nil {
+ if err := a.a.appendErrFn(ls); err != nil {
+ return 0, err
+ }
+ }
+
+ a.a.mtx.Lock()
+ a.a.pendingSamples = append(a.a.pendingSamples, Sample{L: ls, T: t, H: h, FH: fh})
+ a.a.mtx.Unlock()
+
+ if a.next != nil {
+ return a.next.AppendHistogram(ref, ls, t, h, fh)
+ }
+
+ return computeOrCheckRef(ref, ls)
+}
+
+func (a *appender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
+ if err := a.checkErr(); err != nil {
+ return 0, err
+ }
+ if a.a.appendExemplarsError != nil {
+ return 0, a.a.appendExemplarsError
+ }
+
+ a.a.mtx.Lock()
+ // NOTE(bwplotka): Eventually an exemplar has to be attached to a series, and soon
+ // AppenderV2 will guarantee that for TSDB. The mock assumes this and attaches the
+ // exemplar naively. See: https://github.com/prometheus/prometheus/issues/17632
+ i := len(a.a.pendingSamples) - 1
+ for ; i >= 0; i-- { // Attach exemplars to the last matching sample.
+ if ref == storage.SeriesRef(a.a.pendingSamples[i].L.Hash()) {
+ a.a.pendingSamples[i].ES = append(a.a.pendingSamples[i].ES, e)
+ break
+ }
+ }
+ a.a.mtx.Unlock()
+ if i < 0 {
+ return 0, fmt.Errorf("teststorage.appender: exemplar appender without series; ref %v; l %v; exemplar: %v", ref, l, e)
+ }
+
+ if a.next != nil {
+ return a.next.AppendExemplar(ref, l, e)
+ }
+ return computeOrCheckRef(ref, l)
+}
+
+func (a *appender) AppendSTZeroSample(ref storage.SeriesRef, l labels.Labels, _, st int64) (storage.SeriesRef, error) {
+ return a.Append(ref, l, st, 0.0) // This will change soon with AppenderV2, but we already report ST as 0 samples.
+}
+
+func (a *appender) AppendHistogramSTZeroSample(ref storage.SeriesRef, l labels.Labels, _, st int64, h *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
+ if h != nil {
+ return a.AppendHistogram(ref, l, st, &histogram.Histogram{}, nil)
+ }
+ return a.AppendHistogram(ref, l, st, nil, &histogram.FloatHistogram{}) // This will change soon with AppenderV2, but we already report ST as 0 histograms.
+}
+
+func (a *appender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
+ if err := a.checkErr(); err != nil {
+ return 0, err
+ }
+
+ a.a.mtx.Lock()
+ // NOTE(bwplotka): Eventually metadata has to be attached to a series, and soon
+ // AppenderV2 will guarantee that for TSDB. The mock assumes this and attaches the
+ // metadata naively. See: https://github.com/prometheus/prometheus/issues/17632
+ i := len(a.a.pendingSamples) - 1
+ for ; i >= 0; i-- { // Attach metadata to the last matching sample.
+ if ref == storage.SeriesRef(a.a.pendingSamples[i].L.Hash()) {
+ a.a.pendingSamples[i].M = m
+ break
+ }
+ }
+ a.a.mtx.Unlock()
+ if i < 0 {
+ return 0, fmt.Errorf("teststorage.appender: metadata update without series; ref %v; l %v; m: %v", ref, l, m)
+ }
+
+ if a.next != nil {
+ return a.next.UpdateMetadata(ref, l, m)
+ }
+ return computeOrCheckRef(ref, l)
+}
+
+func (a *appender) Commit() error {
+ if err := a.checkErr(); err != nil {
+ return err
+ }
+ defer a.a.openAppenders.Dec()
+
+ if a.a.commitErr != nil {
+ return a.a.commitErr
+ }
+
+ a.a.mtx.Lock()
+ a.a.resultSamples = append(a.a.resultSamples, a.a.pendingSamples...)
+ a.a.pendingSamples = a.a.pendingSamples[:0]
+ a.err = errClosedAppender
+ a.a.mtx.Unlock()
+
+ if a.next != nil {
+ return a.next.Commit()
+ }
+ return nil
+}
+
+func (a *appender) Rollback() error {
+ if err := a.checkErr(); err != nil {
+ return err
+ }
+ defer a.a.openAppenders.Dec()
+
+ a.a.mtx.Lock()
+ a.a.rolledbackSamples = append(a.a.rolledbackSamples, a.a.pendingSamples...)
+ a.a.pendingSamples = a.a.pendingSamples[:0]
+ a.err = errClosedAppender
+ a.a.mtx.Unlock()
+
+ if a.next != nil {
+ return a.next.Rollback()
+ }
+ return nil
+}
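For context, a minimal sketch of how the mock above is intended to be used from a test (the metric name and timestamp are placeholders; imports from context, labels, require and testutil assumed):

    func TestScrapeWithMockAppendable(t *testing.T) {
        appendable := teststorage.NewAppendable()

        app := appendable.Appender(context.Background())
        _, err := app.Append(0, labels.FromStrings("__name__", "up"), 1000, 1)
        require.NoError(t, err)
        require.NoError(t, app.Commit())

        // Committed samples are available for assertions; pending and rolled-back ones are tracked separately.
        testutil.RequireEqual(t,
            []teststorage.Sample{{L: labels.FromStrings("__name__", "up"), T: 1000, V: 1}},
            appendable.ResultSamples())
    }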
diff --git a/util/teststorage/appender_test.go b/util/teststorage/appender_test.go
new file mode 100644
index 0000000000..8c2a825c3a
--- /dev/null
+++ b/util/teststorage/appender_test.go
@@ -0,0 +1,131 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package teststorage
+
+import (
+ "errors"
+ "fmt"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/util/testutil"
+)
+
+// TestSample_RequireEqual ensures standard testutil.RequireEqual is enough for comparisons.
+// This works because metadata now has an Equals method.
+func TestSample_RequireEqual(t *testing.T) {
+ a := []Sample{
+ {},
+ {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
+ {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123},
+ {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo")}}},
+ }
+ testutil.RequireEqual(t, a, a)
+
+ b1 := []Sample{
+ {},
+ {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
+ {L: labels.FromStrings("__name__", "test_metric2_diff", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123}, // test_metric2_diff is different.
+ {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo")}}},
+ }
+ requireNotEqual(t, a, b1)
+
+ b2 := []Sample{
+ {},
+ {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
+ {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123},
+ {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo2")}}}, // exemplar is different.
+ }
+ requireNotEqual(t, a, b2)
+
+ b3 := []Sample{
+ {},
+ {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
+ {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123, T: 123}, // Timestamp is different.
+ {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo")}}},
+ }
+ requireNotEqual(t, a, b3)
+
+ b4 := []Sample{
+ {},
+ {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
+ {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 456.456}, // Value is different.
+ {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo")}}},
+ }
+ requireNotEqual(t, a, b4)
+
+ b5 := []Sample{
+ {},
+ {L: labels.FromStrings("__name__", "test_metric_total"), M: metadata.Metadata{Type: "counter2", Unit: "metric", Help: "some help text"}}, // Different type.
+ {L: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), M: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}, V: 123.123},
+ {ES: []exemplar.Exemplar{{Labels: labels.FromStrings("__name__", "yolo")}}},
+ }
+ requireNotEqual(t, a, b5)
+}
+
+// TODO(bwplotka): While this mimics testutil.RequireEqual, just negated, it does not literally test
+// testutil.RequireEqual. Either build a test suite that mocks `testing.TB` or get rid of testutil.RequireEqual somehow.
+func requireNotEqual(t testing.TB, a, b any) {
+ t.Helper()
+ if !cmp.Equal(a, b, cmp.Comparer(labels.Equal)) {
+ return
+ }
+ require.Fail(t, fmt.Sprintf("Equal, but expected not: \n"+
+ "a: %s\n"+
+ "b: %s", a, b))
+}
+
+func TestConcurrentAppender_ReturnsErrAppender(t *testing.T) {
+ a := NewAppendable()
+
+ // Non-concurrent multiple use is fine.
+ app := a.Appender(t.Context())
+ require.Equal(t, int32(1), a.openAppenders.Load())
+ require.NoError(t, app.Commit())
+ // Repeated commit fails.
+ require.Error(t, app.Commit())
+
+ app = a.Appender(t.Context())
+ require.NoError(t, app.Rollback())
+ // Commit after rollback fails.
+ require.Error(t, app.Commit())
+
+ a.WithErrs(
+ nil,
+ nil,
+ errors.New("commit err"),
+ )
+ app = a.Appender(t.Context())
+ require.Error(t, app.Commit())
+
+ a.WithErrs(nil, nil, nil)
+ app = a.Appender(t.Context())
+ require.NoError(t, app.Commit())
+ require.Equal(t, int32(0), a.openAppenders.Load())
+
+ // Concurrent use should return an appender that errors.
+ _ = a.Appender(t.Context())
+ app = a.Appender(t.Context())
+ _, err := app.Append(0, labels.EmptyLabels(), 0, 0)
+ require.Error(t, err)
+ _, err = app.AppendHistogram(0, labels.EmptyLabels(), 0, nil, nil)
+ require.Error(t, err)
+ require.Error(t, app.Commit())
+ require.Error(t, app.Rollback())
+}
diff --git a/util/teststorage/storage.go b/util/teststorage/storage.go
index e0a6f39be2..17efdda77d 100644
--- a/util/teststorage/storage.go
+++ b/util/teststorage/storage.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -65,7 +65,7 @@ func NewWithError(outOfOrderTimeWindow ...int64) (*TestStorage, error) {
reg := prometheus.NewRegistry()
eMetrics := tsdb.NewExemplarMetrics(reg)
- es, err := tsdb.NewCircularExemplarStorage(10, eMetrics)
+ es, err := tsdb.NewCircularExemplarStorage(10, eMetrics, opts.OutOfOrderTimeWindow)
if err != nil {
return nil, fmt.Errorf("opening test exemplar storage: %w", err)
}
diff --git a/util/testutil/cmp.go b/util/testutil/cmp.go
index 3ea1f40168..9be01a5b4b 100644
--- a/util/testutil/cmp.go
+++ b/util/testutil/cmp.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/testutil/context.go b/util/testutil/context.go
index 3d2a09d637..15f50fbff5 100644
--- a/util/testutil/context.go
+++ b/util/testutil/context.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/testutil/directory.go b/util/testutil/directory.go
index 176acb5dc1..706007d322 100644
--- a/util/testutil/directory.go
+++ b/util/testutil/directory.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/testutil/port.go b/util/testutil/port.go
index 91c1291749..3a9be3f1a3 100644
--- a/util/testutil/port.go
+++ b/util/testutil/port.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/testutil/roundtrip.go b/util/testutil/roundtrip.go
index 364e0c2642..0bd003ca68 100644
--- a/util/testutil/roundtrip.go
+++ b/util/testutil/roundtrip.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/testutil/synctest/disabled.go b/util/testutil/synctest/disabled.go
index e87454afcf..595b93c650 100644
--- a/util/testutil/synctest/disabled.go
+++ b/util/testutil/synctest/disabled.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/testutil/synctest/enabled.go b/util/testutil/synctest/enabled.go
index 61aa85dcf7..d219903809 100644
--- a/util/testutil/synctest/enabled.go
+++ b/util/testutil/synctest/enabled.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/testutil/synctest/synctest.go b/util/testutil/synctest/synctest.go
index 6780798a9b..41750f9892 100644
--- a/util/testutil/synctest/synctest.go
+++ b/util/testutil/synctest/synctest.go
@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/treecache/treecache.go b/util/treecache/treecache.go
index 86fd207074..32912c5a94 100644
--- a/util/treecache/treecache.go
+++ b/util/treecache/treecache.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/zeropool/pool.go b/util/zeropool/pool.go
index 946ce02091..6eab9f3365 100644
--- a/util/zeropool/pool.go
+++ b/util/zeropool/pool.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/util/zeropool/pool_test.go b/util/zeropool/pool_test.go
index 24598cbfa3..f93e75d539 100644
--- a/util/zeropool/pool_test.go
+++ b/util/zeropool/pool_test.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/web/api/v1/api.go b/web/api/v1/api.go
index 86c0461087..f32fee19f8 100644
--- a/web/api/v1/api.go
+++ b/web/api/v1/api.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -56,6 +56,7 @@ import (
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/util/annotations"
+ "github.com/prometheus/prometheus/util/features"
"github.com/prometheus/prometheus/util/httputil"
"github.com/prometheus/prometheus/util/notifications"
"github.com/prometheus/prometheus/util/stats"
@@ -255,6 +256,8 @@ type API struct {
otlpWriteHandler http.Handler
codecs []Codec
+
+ featureRegistry features.Collector
}
// NewAPI returns an initialized API type.
@@ -295,6 +298,7 @@ func NewAPI(
enableTypeAndUnitLabels bool,
appendMetadata bool,
overrideErrorCode OverrideErrorCode,
+ featureRegistry features.Collector,
) *API {
a := &API{
QueryEngine: qe,
@@ -324,6 +328,7 @@ func NewAPI(
notificationsGetter: notificationsGetter,
notificationsSub: notificationsSub,
overrideErrorCode: overrideErrorCode,
+ featureRegistry: featureRegistry,
remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame),
}
@@ -445,6 +450,7 @@ func (api *API) Register(r *route.Router) {
r.Get("/status/flags", wrap(api.serveFlags))
r.Get("/status/tsdb", wrapAgent(api.serveTSDBStatus))
r.Get("/status/tsdb/blocks", wrapAgent(api.serveTSDBBlocks))
+ r.Get("/features", wrap(api.features))
r.Get("/status/walreplay", api.serveWALReplayStatus)
r.Get("/notifications", api.notifications)
r.Get("/notifications/live", api.notificationsSSE)
@@ -1789,6 +1795,29 @@ func (api *API) serveFlags(*http.Request) apiFuncResult {
return apiFuncResult{api.flagsMap, nil, nil, nil}
}
+// featuresData wraps feature flags data to provide custom JSON marshaling without HTML escaping.
+// featuresData does not contain user-provided input, and it is more convenient to have an unescaped
+// representation of PromQL operators like >=.
+type featuresData struct {
+ data map[string]map[string]bool
+}
+
+func (f featuresData) MarshalJSON() ([]byte, error) {
+ json := jsoniter.Config{
+ EscapeHTML: false,
+ SortMapKeys: true,
+ ValidateJsonRawMessage: true,
+ }.Froze()
+ return json.Marshal(f.data)
+}
+
+func (api *API) features(*http.Request) apiFuncResult {
+ if api.featureRegistry == nil {
+ return apiFuncResult{nil, &apiError{errorInternal, errors.New("feature registry not configured")}, nil, nil}
+ }
+ return apiFuncResult{featuresData{data: api.featureRegistry.Get()}, nil, nil, nil}
+}
+
// TSDBStat holds the information about individual cardinality.
type TSDBStat struct {
Name string `json:"name"`
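
Note: the custom MarshalJSON exists because encoding/json escapes HTML-relevant characters such as '>' by default, so a feature key naming a PromQL operator would render as \u003e=. A self-contained sketch with illustrative keys (not the real registry contents):

package main

import (
	"encoding/json"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	data := map[string]map[string]bool{
		"promql": {"operator >=": true},
	}

	std, _ := json.Marshal(data) // encoding/json escapes '>' as \u003e
	fmt.Println(string(std))

	cfg := jsoniter.Config{EscapeHTML: false, SortMapKeys: true, ValidateJsonRawMessage: true}.Froze()
	unescaped, _ := cfg.Marshal(data) // keeps '>' literal, as featuresData.MarshalJSON does
	fmt.Println(string(unescaped))
}
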
@@ -1837,12 +1866,16 @@ func (api *API) serveTSDBBlocks(*http.Request) apiFuncResult {
}
func (api *API) serveTSDBStatus(r *http.Request) apiFuncResult {
+ const maxTSDBLimit = 10000
limit := 10
if s := r.FormValue("limit"); s != "" {
var err error
if limit, err = strconv.Atoi(s); err != nil || limit < 1 {
return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit must be a positive number")}, nil, nil}
}
+ if limit > maxTSDBLimit {
+ return apiFuncResult{nil, &apiError{errorBadData, fmt.Errorf("limit must not exceed %d", maxTSDBLimit)}, nil, nil}
+ }
}
s, err := api.db.Stats(labels.MetricName, limit)
if err != nil {
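
Note: the limit handling above, restated as a standalone helper for clarity; this is an illustrative extraction, the handler itself keeps the checks inline:

package example

import (
	"fmt"
	"strconv"
)

const maxTSDBLimit = 10000

// parseLimit mirrors the validation in serveTSDBStatus: default to 10,
// reject non-positive or non-numeric values, and cap the maximum at 10000.
func parseLimit(raw string) (int, error) {
	if raw == "" {
		return 10, nil
	}
	limit, err := strconv.Atoi(raw)
	if err != nil || limit < 1 {
		return 0, fmt.Errorf("limit must be a positive number")
	}
	if limit > maxTSDBLimit {
		return 0, fmt.Errorf("limit must not exceed %d", maxTSDBLimit)
	}
	return limit, nil
}
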
diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go
index 8e0adc0802..39c1fa6080 100644
--- a/web/api/v1/api_test.go
+++ b/web/api/v1/api_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -4465,6 +4465,18 @@ func TestTSDBStatus(t *testing.T) {
values: map[string][]string{"limit": {"0"}},
errType: errorBadData,
},
+ {
+ db: tsdb,
+ endpoint: tsdbStatusAPI,
+ values: map[string][]string{"limit": {"10000"}},
+ errType: errorNone,
+ },
+ {
+ db: tsdb,
+ endpoint: tsdbStatusAPI,
+ values: map[string][]string{"limit": {"10001"}},
+ errType: errorBadData,
+ },
} {
t.Run(strconv.Itoa(i), func(t *testing.T) {
api := &API{db: tc.db, gatherer: prometheus.DefaultGatherer}
diff --git a/web/api/v1/codec.go b/web/api/v1/codec.go
index 492e00a74a..e7e53b466c 100644
--- a/web/api/v1/codec.go
+++ b/web/api/v1/codec.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/web/api/v1/codec_test.go b/web/api/v1/codec_test.go
index 911bf206e3..10038b605a 100644
--- a/web/api/v1/codec_test.go
+++ b/web/api/v1/codec_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go
index c44444404b..6e55089e16 100644
--- a/web/api/v1/errors_test.go
+++ b/web/api/v1/errors_test.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -168,6 +168,7 @@ func createPrometheusAPI(t *testing.T, q storage.SampleAndChunkQueryable, overri
false,
false,
overrideErrorCode,
+ nil,
)
promRouter := route.New().WithPrefix("/api/v1")
diff --git a/web/api/v1/json_codec.go b/web/api/v1/json_codec.go
index 4f3a23e976..adcf0e34bc 100644
--- a/web/api/v1/json_codec.go
+++ b/web/api/v1/json_codec.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/web/api/v1/json_codec_test.go b/web/api/v1/json_codec_test.go
index f0a671d6d1..8d17a1759f 100644
--- a/web/api/v1/json_codec_test.go
+++ b/web/api/v1/json_codec_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/web/api/v1/translate_ast.go b/web/api/v1/translate_ast.go
index dc2e7e2901..3cce0583f9 100644
--- a/web/api/v1/translate_ast.go
+++ b/web/api/v1/translate_ast.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/web/federate.go b/web/federate.go
index 443fd73568..584b8d7c4a 100644
--- a/web/federate.go
+++ b/web/federate.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/web/federate_test.go b/web/federate_test.go
index 55e20c6b2f..932639e2e6 100644
--- a/web/federate_test.go
+++ b/web/federate_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/web/ui/assets_embed.go b/web/ui/assets_embed.go
index a5f8f5ddfa..48e4a2c6f1 100644
--- a/web/ui/assets_embed.go
+++ b/web/ui/assets_embed.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/web/ui/mantine-ui/package.json b/web/ui/mantine-ui/package.json
index 219d357f0d..f38a2d965f 100644
--- a/web/ui/mantine-ui/package.json
+++ b/web/ui/mantine-ui/package.json
@@ -1,7 +1,7 @@
{
"name": "@prometheus-io/mantine-ui",
"private": true,
- "version": "0.307.3",
+ "version": "0.309.1",
"type": "module",
"scripts": {
"start": "vite",
@@ -28,7 +28,7 @@
"@microsoft/fetch-event-source": "^2.0.1",
"@nexucis/fuzzy": "^0.5.1",
"@nexucis/kvsearch": "^0.9.1",
- "@prometheus-io/codemirror-promql": "0.307.3",
+ "@prometheus-io/codemirror-promql": "0.309.1",
"@reduxjs/toolkit": "^2.10.1",
"@tabler/icons-react": "^3.35.0",
"@tanstack/react-query": "^5.90.7",
diff --git a/web/ui/mantine-ui/src/pages/query/ExplainViews/Selector.tsx b/web/ui/mantine-ui/src/pages/query/ExplainViews/Selector.tsx
index 2c564d3a4a..a83a0141d5 100644
--- a/web/ui/mantine-ui/src/pages/query/ExplainViews/Selector.tsx
+++ b/web/ui/mantine-ui/src/pages/query/ExplainViews/Selector.tsx
@@ -126,7 +126,7 @@ const matchingCriteriaList = (
};
const SelectorExplainView: FC = ({ node }) => {
- const baseMetricName = node.name.replace(/(_count|_sum|_bucket)$/, "");
+ const baseMetricName = node.name.replace(/(_count|_sum|_bucket|_total)$/, "");
const { lookbackDelta } = useSettings();
// Try to get metadata for the full unchanged metric name first.
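
Note: the UI now also strips a trailing _total when deriving the base metric name for the metadata fallback lookup. The same derivation sketched in Go for illustration (the UI keeps it as an inline TypeScript regexp):

package example

import "regexp"

// suffixRE mirrors the regexp used above, now including _total.
var suffixRE = regexp.MustCompile(`(_count|_sum|_bucket|_total)$`)

// baseMetricName strips a classic histogram/summary/counter suffix so that
// metadata for the base metric can be looked up as a fallback.
func baseMetricName(name string) string {
	return suffixRE.ReplaceAllString(name, "")
}

// baseMetricName("http_requests_total")            == "http_requests"
// baseMetricName("request_duration_seconds_bucket") == "request_duration_seconds"
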
diff --git a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx
index a4b26cd910..4c3209e53a 100644
--- a/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx
+++ b/web/ui/mantine-ui/src/pages/query/ExpressionInput.tsx
@@ -58,6 +58,7 @@ import { lintKeymap } from "@codemirror/lint";
import {
IconAlignJustified,
IconBinaryTree,
+ IconCopy,
IconDotsVertical,
IconSearch,
IconTerminal,
@@ -121,6 +122,7 @@ interface ExpressionInputProps {
executeQuery: (expr: string) => void;
treeShown: boolean;
setShowTree: (showTree: boolean) => void;
+ duplicatePanel: (expr: string) => void;
removePanel: () => void;
}
@@ -128,6 +130,7 @@ const ExpressionInput: FC = ({
initialExpr,
metricNames,
executeQuery,
+ duplicatePanel,
removePanel,
treeShown,
setShowTree,
@@ -250,6 +253,12 @@ const ExpressionInput: FC = ({
>
{treeShown ? "Hide" : "Show"} tree view
+ }
+ onClick={() => duplicatePanel(expr)}
+ >
+ Duplicate query
+
}
diff --git a/web/ui/mantine-ui/src/pages/query/MetricsExplorer/MetricsExplorer.tsx b/web/ui/mantine-ui/src/pages/query/MetricsExplorer/MetricsExplorer.tsx
index 9c33a3df75..c351984698 100644
--- a/web/ui/mantine-ui/src/pages/query/MetricsExplorer/MetricsExplorer.tsx
+++ b/web/ui/mantine-ui/src/pages/query/MetricsExplorer/MetricsExplorer.tsx
@@ -73,7 +73,7 @@ const MetricsExplorer: FC = ({
// histogram/summary suffixes, it may be a metric that is not following naming
// conventions, see https://github.com/prometheus/prometheus/issues/16907).
data.data[m] ??
- data.data[m.replace(/(_count|_sum|_bucket)$/, "")] ?? [
+ data.data[m.replace(/(_count|_sum|_bucket|_total)$/, "")] ?? [
{ help: "unknown", type: "unknown", unit: "unknown" },
]
);
diff --git a/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx b/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx
index 5e41be7bb3..fcc7648a77 100644
--- a/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx
+++ b/web/ui/mantine-ui/src/pages/query/QueryPanel.tsx
@@ -23,6 +23,7 @@ import { FC, Suspense, useCallback, useMemo, useState } from "react";
import { useAppDispatch, useAppSelector } from "../../state/hooks";
import {
addQueryToHistory,
+ duplicatePanel,
GraphDisplayMode,
GraphResolution,
removePanel,
@@ -111,6 +112,9 @@ const QueryPanel: FC = ({ idx, metricNames }) => {
setSelectedNode(null);
}
}}
+ duplicatePanel={(expr: string) => {
+ dispatch(duplicatePanel({ idx, expr }));
+ }}
removePanel={() => {
dispatch(removePanel(idx));
}}
diff --git a/web/ui/mantine-ui/src/promql/format.tsx b/web/ui/mantine-ui/src/promql/format.tsx
index f4b883f678..75b1965b35 100644
--- a/web/ui/mantine-ui/src/promql/format.tsx
+++ b/web/ui/mantine-ui/src/promql/format.tsx
@@ -266,22 +266,19 @@ const formatNodeInternal = (
let matching = <></>;
let grouping = <></>;
const vm = node.matching;
- if (vm !== null && (vm.labels.length > 0 || vm.on)) {
- if (vm.on) {
+ if (vm !== null) {
+ if (
+ vm.labels.length > 0 ||
+ vm.on ||
+ vm.card === vectorMatchCardinality.manyToOne ||
+ vm.card === vectorMatchCardinality.oneToMany
+ ) {
matching = (
<>
{" "}
- on
- (
- {labelNameList(vm.labels)}
- )
- >
- );
- } else {
- matching = (
- <>
- {" "}
- ignoring
+
+ {vm.on ? "on" : "ignoring"}
+
(
{labelNameList(vm.labels)}
)
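
Note: the formatter change above renders the on/ignoring clause whenever labels are given, on is set, or the match cardinality is many-to-one / one-to-many. A sketch of the parsed structure it consumes, using the promql parser; field names are taken from promql/parser and believed current, but treat this as illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	expr, err := parser.ParseExpr(
		`method_code:http_errors:rate5m / ignoring(code) group_left method:http_requests:rate5m`,
	)
	if err != nil {
		panic(err)
	}
	vm := expr.(*parser.BinaryExpr).VectorMatching
	fmt.Println(vm.On)                           // false -> formatter prints "ignoring"
	fmt.Println(vm.MatchingLabels)               // [code]
	fmt.Println(vm.Card == parser.CardManyToOne) // true  -> matching clause is now always shown
	fmt.Println(vm.Include)                      // labels inside group_left(...), empty here
}
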
diff --git a/web/ui/mantine-ui/src/promql/functionDocs.tsx b/web/ui/mantine-ui/src/promql/functionDocs.tsx
index 91221666d7..a9d9ca53a9 100644
--- a/web/ui/mantine-ui/src/promql/functionDocs.tsx
+++ b/web/ui/mantine-ui/src/promql/functionDocs.tsx
@@ -1,28 +1,31 @@
-import React from 'react';
+import React from "react";
const funcDocs: Record<string, React.ReactNode> = {
abs: (
<>
- abs(v instant-vector) returns the input vector with all sample values converted to their absolute value.
+ abs(v instant-vector) returns a vector containing all float samples in the input vector converted
+ to their absolute value. Histogram samples in the input vector are ignored silently.
>
),
absent: (
<>
- absent(v instant-vector) returns an empty vector if the vector passed to it has any elements (floats or
- native histograms) and a 1-element vector with the value 1 if the vector passed to it has no elements.
+ absent(v instant-vector) returns an empty vector if the vector passed to it has any elements (float
+ samples or histogram samples) and a 1-element vector with the value 1 if the vector passed to it has no
+ elements.
This is useful for alerting on when no time series exist for a given metric name and label combination.
- absent(nonexistent{'{'}job="myjob"{'}'}) # => {'{'}job="myjob"{'}'}
- absent(nonexistent{'{'}job="myjob",instance=~".*"{'}'}) # => {'{'}job="myjob"{'}'}
- absent(sum(nonexistent{'{'}job="myjob"{'}'})) # => {'{'}
- {'}'}
+ absent(nonexistent{"{"}job="myjob"{"}"}) # => {"{"}job="myjob"{"}"}
+ absent(nonexistent{"{"}job="myjob",instance=~".*"{"}"}) # => {"{"}job="myjob"
+ {"}"}
+ absent(sum(nonexistent{"{"}job="myjob"{"}"})) # => {"{"}
+ {"}"}
@@ -36,83 +39,83 @@ const funcDocs: Record = {
<>
absent_over_time(v range-vector) returns an empty vector if the range vector passed to it has any
- elements (floats or native histograms) and a 1-element vector with the value 1 if the range vector passed to it has
- no elements.
+ elements (float samples or histogram samples) and a 1-element vector with the value 1 if the range vector passed
+ to it has no elements.
- This is useful for alerting on when no time series exist for a given metric name and label combination for a certain
- amount of time.
+ This is useful for alerting on when no time series exist for a given metric name and label combination for a
+ certain amount of time.
- absent_over_time(nonexistent{'{'}job="myjob"{'}'}[1h]) # => {'{'}job="myjob"{'}'}
- absent_over_time(nonexistent{'{'}job="myjob",instance=~".*"{'}'}[1h]) # => {'{'}
- job="myjob"{'}'}
- absent_over_time(sum(nonexistent{'{'}job="myjob"{'}'})[1h:]) # => {'{'}
- {'}'}
+ absent_over_time(nonexistent{"{"}job="myjob"{"}"}[1h]) # => {"{"}job="myjob"{"}"}
+ absent_over_time(nonexistent{"{"}job="myjob",instance=~".*"{"}"}[1h]) # => {"{"}
+ job="myjob"{"}"}
+ absent_over_time(sum(nonexistent{"{"}job="myjob"{"}"})[1h:]) # => {"{"}
+ {"}"}
- In the first two examples, absent_over_time() tries to be smart about deriving labels of the 1-element
- output vector from the input vector.
+ In the first two examples, absent_over_time() tries to be smart about deriving labels of the
+ 1-element output vector from the input vector.
>
),
acos: (
<>
- The trigonometric functions work in radians:
+ The trigonometric functions work in radians. They ignore histogram samples in the input vector.
-
-
acos(v instant-vector): calculates the arccosine of all elements in v (
+ acos(v instant-vector): calculates the arccosine of all float samples in v (
special cases).
-
-
acosh(v instant-vector): calculates the inverse hyperbolic cosine of all elements in v (
- special cases).
+ acosh(v instant-vector): calculates the inverse hyperbolic cosine of all float samples in{" "}
+ v (special cases).
-
-
asin(v instant-vector): calculates the arcsine of all elements in v (
+ asin(v instant-vector): calculates the arcsine of all float samples in v (
special cases).
-
-
asinh(v instant-vector): calculates the inverse hyperbolic sine of all elements in v (
- special cases).
+ asinh(v instant-vector): calculates the inverse hyperbolic sine of all float samples in{" "}
+ v (special cases).
-
-
atan(v instant-vector): calculates the arctangent of all elements in v (
+ atan(v instant-vector): calculates the arctangent of all float samples in v (
special cases).
-
-
atanh(v instant-vector): calculates the inverse hyperbolic tangent of all elements in v (
- special cases).
+ atanh(v instant-vector): calculates the inverse hyperbolic tangent of all float samples in{" "}
+ v (special cases).
-
-
cos(v instant-vector): calculates the cosine of all elements in v (
+ cos(v instant-vector): calculates the cosine of all float samples in v (
special cases).
-
-
cosh(v instant-vector): calculates the hyperbolic cosine of all elements in v (
+ cosh(v instant-vector): calculates the hyperbolic cosine of all float samples in v (
special cases).
-
-
sin(v instant-vector): calculates the sine of all elements in v (
+ sin(v instant-vector): calculates the sine of all float samples in v (
special cases).
-
-
sinh(v instant-vector): calculates the hyperbolic sine of all elements in v (
+ sinh(v instant-vector): calculates the hyperbolic sine of all float samples in v (
special cases).
-
-
tan(v instant-vector): calculates the tangent of all elements in v (
+ tan(v instant-vector): calculates the tangent of all float samples in v (
special cases).
-
-
tanh(v instant-vector): calculates the hyperbolic tangent of all elements in v (
- special cases).
+ tanh(v instant-vector): calculates the hyperbolic tangent of all float samples in v{" "}
+ (special cases).
@@ -120,69 +123,69 @@ const funcDocs: Record = {
-
-
deg(v instant-vector): converts radians to degrees for all elements in v.
+ deg(v instant-vector): converts radians to degrees for all float samples in v.
-
pi(): returns pi.
-
-
rad(v instant-vector): converts degrees to radians for all elements in v.
+ rad(v instant-vector): converts degrees to radians for all float samples in v.
>
),
acosh: (
<>
- The trigonometric functions work in radians:
+ The trigonometric functions work in radians. They ignore histogram samples in the input vector.
-
-
acos(v instant-vector): calculates the arccosine of all elements in v (
+ acos(v instant-vector): calculates the arccosine of all float samples in v (
special cases).
-
-
acosh(v instant-vector): calculates the inverse hyperbolic cosine of all elements in v (
- special cases).
+ acosh(v instant-vector): calculates the inverse hyperbolic cosine of all float samples in{" "}
+ v (special cases).
-
-
asin(v instant-vector): calculates the arcsine of all elements in v (
+ asin(v instant-vector): calculates the arcsine of all float samples in v (
special cases).
-
-
asinh(v instant-vector): calculates the inverse hyperbolic sine of all elements in v (
- special cases).
+ asinh(v instant-vector): calculates the inverse hyperbolic sine of all float samples in{" "}
+ v (special cases).
-
-
atan(v instant-vector): calculates the arctangent of all elements in v (
+ atan(v instant-vector): calculates the arctangent of all float samples in v (
special cases).
-
-
atanh(v instant-vector): calculates the inverse hyperbolic tangent of all elements in v (
- special cases).
+ atanh(v instant-vector): calculates the inverse hyperbolic tangent of all float samples in{" "}
+ v (special cases).
-
-
cos(v instant-vector): calculates the cosine of all elements in v (
+ cos(v instant-vector): calculates the cosine of all float samples in v (
special cases).
-
-
cosh(v instant-vector): calculates the hyperbolic cosine of all elements in v (
+ cosh(v instant-vector): calculates the hyperbolic cosine of all float samples in v (
special cases).
-
-
sin(v instant-vector): calculates the sine of all elements in v (
+ sin(v instant-vector): calculates the sine of all float samples in v (
special cases).
-
-
sinh(v instant-vector): calculates the hyperbolic sine of all elements in v (
+ sinh(v instant-vector): calculates the hyperbolic sine of all float samples in v (
special cases).
-
-
tan(v instant-vector): calculates the tangent of all elements in v (
+ tan(v instant-vector): calculates the tangent of all float samples in v (
special cases).
-
-
tanh(v instant-vector): calculates the hyperbolic tangent of all elements in v (
- special cases).
+ tanh(v instant-vector): calculates the hyperbolic tangent of all float samples in v{" "}
+ (special cases).
@@ -190,69 +193,69 @@ const funcDocs: Record = {
-
-
deg(v instant-vector): converts radians to degrees for all elements in v.
+ deg(v instant-vector): converts radians to degrees for all float samples in v.
-
pi(): returns pi.
-
-
rad(v instant-vector): converts degrees to radians for all elements in v.
+ rad(v instant-vector): converts degrees to radians for all float samples in v.
>
),
asin: (
<>
- The trigonometric functions work in radians:
+ The trigonometric functions work in radians. They ignore histogram samples in the input vector.
-
-
acos(v instant-vector): calculates the arccosine of all elements in v (
+ acos(v instant-vector): calculates the arccosine of all float samples in v (
special cases).
-
-
acosh(v instant-vector): calculates the inverse hyperbolic cosine of all elements in v (
- special cases).
+ acosh(v instant-vector): calculates the inverse hyperbolic cosine of all float samples in{" "}
+ v (special cases).
-
-
asin(v instant-vector): calculates the arcsine of all elements in v (
+ asin(v instant-vector): calculates the arcsine of all float samples in v (
special cases).
-
-
asinh(v instant-vector): calculates the inverse hyperbolic sine of all elements in v (
- special cases).
+ asinh(v instant-vector): calculates the inverse hyperbolic sine of all float samples in{" "}
+ v (special cases).
-
-
atan(v instant-vector): calculates the arctangent of all elements in v (
+ atan(v instant-vector): calculates the arctangent of all float samples in v (
special cases).
-
-
atanh(v instant-vector): calculates the inverse hyperbolic tangent of all elements in v (
- special cases).
+ atanh(v instant-vector): calculates the inverse hyperbolic tangent of all float samples in{" "}
+ v (special cases).
-
-
cos(v instant-vector): calculates the cosine of all elements in v (
+ cos(v instant-vector): calculates the cosine of all float samples in v (
special cases).
-
-
cosh(v instant-vector): calculates the hyperbolic cosine of all elements in v (
+ cosh(v instant-vector): calculates the hyperbolic cosine of all float samples in v (
special cases).
-
-
sin(v instant-vector): calculates the sine of all elements in v (
+ sin(v instant-vector): calculates the sine of all float samples in v (
special cases).
-
-
sinh(v instant-vector): calculates the hyperbolic sine of all elements in v (
+ sinh(v instant-vector): calculates the hyperbolic sine of all float samples in v (
special cases).
-
-
tan(v instant-vector): calculates the tangent of all elements in v (
+ tan(v instant-vector): calculates the tangent of all float samples in v (
special cases).
-
-
tanh(v instant-vector): calculates the hyperbolic tangent of all elements in v (
- special cases).
+ tanh(v instant-vector): calculates the hyperbolic tangent of all float samples in v{" "}
+ (special cases).
@@ -260,69 +263,69 @@ const funcDocs: Record = {
-
-
deg(v instant-vector): converts radians to degrees for all elements in v.
+ deg(v instant-vector): converts radians to degrees for all float samples in v.
-
pi(): returns pi.
-
-
rad(v instant-vector): converts degrees to radians for all elements in v.
+ rad(v instant-vector): converts degrees to radians for all float samples in v.
>
),
asinh: (
<>
- The trigonometric functions work in radians:
+ The trigonometric functions work in radians. They ignore histogram samples in the input vector.
-
-
acos(v instant-vector): calculates the arccosine of all elements in v (
+ acos(v instant-vector): calculates the arccosine of all float samples in v (
special cases).
-
-
acosh(v instant-vector): calculates the inverse hyperbolic cosine of all elements in v (
- special cases).
+ acosh(v instant-vector): calculates the inverse hyperbolic cosine of all float samples in{" "}
+ v (special cases).
-
-
asin(v instant-vector): calculates the arcsine of all elements in v (
+ asin(v instant-vector): calculates the arcsine of all float samples in v (
special cases).
-
-
asinh(v instant-vector): calculates the inverse hyperbolic sine of all elements in v (
- special cases).
+ asinh(v instant-vector): calculates the inverse hyperbolic sine of all float samples in{" "}
+ v (special cases).
-
-
atan(v instant-vector): calculates the arctangent of all elements in v (
+ atan(v instant-vector): calculates the arctangent of all float samples in v (
special cases).
-
-
atanh(v instant-vector): calculates the inverse hyperbolic tangent of all elements in v (
- special cases).
+ atanh(v instant-vector): calculates the inverse hyperbolic tangent of all float samples in{" "}
+ v (special cases).
-
-
cos(v instant-vector): calculates the cosine of all elements in v (
+ cos(v instant-vector): calculates the cosine of all float samples in v (
special cases).
-
-
cosh(v instant-vector): calculates the hyperbolic cosine of all elements in v (
+ cosh(v instant-vector): calculates the hyperbolic cosine of all float samples in v (
special cases).
-
-
sin(v instant-vector): calculates the sine of all elements in v (
+ sin(v instant-vector): calculates the sine of all float samples in v (
special cases).
-
-
sinh(v instant-vector): calculates the hyperbolic sine of all elements in v (
+ sinh(v instant-vector): calculates the hyperbolic sine of all float samples in v (
special cases).
-
-
tan(v instant-vector): calculates the tangent of all elements in v (
+ tan(v instant-vector): calculates the tangent of all float samples in v (
special cases).
-
-
tanh(v instant-vector): calculates the hyperbolic tangent of all elements in v (
- special cases).
+ tanh(v instant-vector): calculates the hyperbolic tangent of all float samples in v{" "}
+ (special cases).
@@ -330,69 +333,69 @@ const funcDocs: Record = {
-
-
deg(v instant-vector): converts radians to degrees for all elements in v.
+ deg(v instant-vector): converts radians to degrees for all float samples in v.
-
pi(): returns pi.
-
-
rad(v instant-vector): converts degrees to radians for all elements in v.
+ rad(v instant-vector): converts degrees to radians for all float samples in v.
>
),
atan: (
<>
- The trigonometric functions work in radians:
+ The trigonometric functions work in radians. They ignore histogram samples in the input vector.
-
-
acos(v instant-vector): calculates the arccosine of all elements in v (
+ acos(v instant-vector): calculates the arccosine of all float samples in v (
special cases).
-
-
acosh(v instant-vector): calculates the inverse hyperbolic cosine of all elements in v (
- special cases).
+ acosh(v instant-vector): calculates the inverse hyperbolic cosine of all float samples in{" "}
+ v (special cases).
-
-
asin(v instant-vector): calculates the arcsine of all elements in v (
+ asin(v instant-vector): calculates the arcsine of all float samples in v (
special cases).
-
-
asinh(v instant-vector): calculates the inverse hyperbolic sine of all elements in v (
- special cases).
+ asinh(v instant-vector): calculates the inverse hyperbolic sine of all float samples in{" "}
+ v (special cases).
-
-
atan(v instant-vector): calculates the arctangent of all elements in v (
+ atan(v instant-vector): calculates the arctangent of all float samples in v (
special cases).
-
-
atanh(v instant-vector): calculates the inverse hyperbolic tangent of all elements in v (
- special cases).
+ atanh(v instant-vector): calculates the inverse hyperbolic tangent of all float samples in{" "}
+ v (special cases).
-
-
cos(v instant-vector): calculates the cosine of all elements in v (
+ cos(v instant-vector): calculates the cosine of all float samples in v (
special cases).
-
-
cosh(v instant-vector): calculates the hyperbolic cosine of all elements in v (
+ cosh(v instant-vector): calculates the hyperbolic cosine of all float samples in v (
special cases).
-
-
sin(v instant-vector): calculates the sine of all elements in v (
+ sin(v instant-vector): calculates the sine of all float samples in v (
special cases).
-
-
sinh(v instant-vector): calculates the hyperbolic sine of all elements in v (
+ sinh(v instant-vector): calculates the hyperbolic sine of all float samples in v (
special cases).
-
-
tan(v instant-vector): calculates the tangent of all elements in v (
+ tan(v instant-vector): calculates the tangent of all float samples in v (
special cases).
-
-
tanh(v instant-vector): calculates the hyperbolic tangent of all elements in v (
- special cases).
+ tanh(v instant-vector): calculates the hyperbolic tangent of all float samples in v{" "}
+ (special cases).
@@ -400,69 +403,69 @@ const funcDocs: Record = {
-
-
deg(v instant-vector): converts radians to degrees for all elements in v.
+ deg(v instant-vector): converts radians to degrees for all float samples in v.
-
pi(): returns pi.
-
-
rad(v instant-vector): converts degrees to radians for all elements in v.
+ rad(v instant-vector): converts degrees to radians for all float samples in v.
>
),
atanh: (
<>
- The trigonometric functions work in radians:
+ The trigonometric functions work in radians. They ignore histogram samples in the input vector.
-
-
acos(v instant-vector): calculates the arccosine of all elements in v (
+ acos(v instant-vector): calculates the arccosine of all float samples in v (
special cases).
-
-
acosh(v instant-vector): calculates the inverse hyperbolic cosine of all elements in v (
- special cases).
+ acosh(v instant-vector): calculates the inverse hyperbolic cosine of all float samples in{" "}
+ v (special cases).
-
-
asin(v instant-vector): calculates the arcsine of all elements in v (
+ asin(v instant-vector): calculates the arcsine of all float samples in v (
special cases).
-
-
asinh(v instant-vector): calculates the inverse hyperbolic sine of all elements in v (
- special cases).
+ asinh(v instant-vector): calculates the inverse hyperbolic sine of all float samples in{" "}
+ v (special cases).
-
-
atan(v instant-vector): calculates the arctangent of all elements in v (
+ atan(v instant-vector): calculates the arctangent of all float samples in v (
special cases).
-
-
atanh(v instant-vector): calculates the inverse hyperbolic tangent of all elements in v (
- special cases).
+ atanh(v instant-vector): calculates the inverse hyperbolic tangent of all float samples in{" "}
+ v (special cases).
-
-
cos(v instant-vector): calculates the cosine of all elements in v (
+ cos(v instant-vector): calculates the cosine of all float samples in v (
special cases).
-
-
cosh(v instant-vector): calculates the hyperbolic cosine of all elements in v (
+ cosh(v instant-vector): calculates the hyperbolic cosine of all float samples in v (
special cases).
-
-
sin(v instant-vector): calculates the sine of all elements in v (
+ sin(v instant-vector): calculates the sine of all float samples in v (
special cases).
-
-
sinh(v instant-vector): calculates the hyperbolic sine of all elements in v (
+ sinh(v instant-vector): calculates the hyperbolic sine of all float samples in v (
special cases).
-
-
tan(v instant-vector): calculates the tangent of all elements in v (
+ tan(v instant-vector): calculates the tangent of all float samples in v (
special cases).
-
-
tanh(v instant-vector): calculates the hyperbolic tangent of all elements in v (
- special cases).
+ tanh(v instant-vector): calculates the hyperbolic tangent of all float samples in v{" "}
+ (special cases).
@@ -470,13 +473,13 @@ const funcDocs: Record = {
-
-
deg(v instant-vector): converts radians to degrees for all elements in v.
+ deg(v instant-vector): converts radians to degrees for all float samples in v.
-
pi(): returns pi.
-
-
rad(v instant-vector): converts degrees to radians for all elements in v.
+ rad(v instant-vector): converts degrees to radians for all float samples in v.
>
@@ -484,40 +487,42 @@ const funcDocs: Record = {
avg_over_time: (
<>
- The following functions allow aggregating each series of a given range vector over time and return an instant vector
- with per-series aggregation results:
+ The following functions allow aggregating each series of a given range vector over time and return an instant
+ vector with per-series aggregation results:
-
-
avg_over_time(range-vector): the average value of all points in the specified interval.
+ avg_over_time(range-vector): the average value of all float or histogram samples in the specified
+ interval (see details below).
-
-
min_over_time(range-vector): the minimum value of all points in the specified interval.
+ min_over_time(range-vector): the minimum value of all float samples in the specified interval.
-
-
max_over_time(range-vector): the maximum value of all points in the specified interval.
+ max_over_time(range-vector): the maximum value of all float samples in the specified interval.
-
-
sum_over_time(range-vector): the sum of all values in the specified interval.
+ sum_over_time(range-vector): the sum of all float or histogram samples in the specified interval
+ (see details below).
-
-
count_over_time(range-vector): the count of all values in the specified interval.
+ count_over_time(range-vector): the count of all samples in the specified interval.
-
-
quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified
- interval.
+ quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the
+ specified interval.
-
-
stddev_over_time(range-vector): the population standard deviation of the values in the specified
- interval.
+ stddev_over_time(range-vector): the population standard deviation of all float samples in the
+ specified interval.
-
-
stdvar_over_time(range-vector): the population standard variance of the values in the specified
- interval.
+ stdvar_over_time(range-vector): the population standard variance of all float samples in the
+ specified interval.
-
-
last_over_time(range-vector): the most recent point value in the specified interval.
+ last_over_time(range-vector): the most recent sample in the specified interval.
-
present_over_time(range-vector): the value 1 for any series in the specified interval.
@@ -526,32 +531,75 @@ const funcDocs: Record = {
If the feature flag
- --enable-feature=promql-experimental-functions is set, the following additional functions are available:
+ --enable-feature=promql-experimental-functions is set, the following additional functions are
+ available:
-
-
mad_over_time(range-vector): the median absolute deviation of all points in the specified interval.
+ mad_over_time(range-vector): the median absolute deviation of all float samples in the specified
+ interval.
+
+ -
+
ts_of_min_over_time(range-vector): the timestamp of the last float sample that has the minimum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_max_over_time(range-vector): the timestamp of the last float sample that has the maximum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_last_over_time(range-vector): the timestamp of last sample in the specified interval.
+
+ -
+
first_over_time(range-vector): the oldest sample in the specified interval.
+
+ -
+
ts_of_first_over_time(range-vector): the timestamp of earliest sample in the specified interval.
- Note that all values in the specified interval have the same weight in the aggregation even if the values are not
- equally spaced throughout the interval.
+ Note that all values in the specified interval have the same weight in the aggregation even if the values are
+ not equally spaced throughout the interval.
+ These functions act on histograms in the following way:
+
+
+ -
+
count_over_time, first_over_time, last_over_time, and
+ present_over_time() act on float and histogram samples in the same way.
+
+ -
+
avg_over_time() and sum_over_time() act on histogram samples in a way that
+ corresponds to the respective aggregation operators. If a series contains a mix of float samples and histogram
+ samples within the range, the corresponding result is removed entirely from the output vector. Such a removal
+ is flagged by a warn-level annotation.
+
+ -
+ All other functions ignore histogram samples in the following way: Input ranges containing only histogram
+ samples are silently removed from the output. For ranges with a mix of histogram and float samples, only the
+ float samples are processed and the omission of the histogram samples is flagged by an info-level annotation.
+
+
+
- avg_over_time, sum_over_time, count_over_time, last_over_time,
- and
- present_over_time handle native histograms as expected. All other functions ignore histogram samples.
+ first_over_time(m[1m]) differs from m offset 1m in that the former will select the
+ first sample of m within the 1m range, where m offset 1m will select the most
+ recent sample within the lookback interval outside and prior to the 1m offset. This is particularly
+ useful with first_over_time(m[step()])
+ in range queries (available when --enable-feature=promql-duration-expr is set) to ensure that the
+ sample selected is within the range step.
>
),
ceil: (
<>
- ceil(v instant-vector) rounds the sample values of all elements in v up to the nearest
- integer value greater than or equal to v.
+ ceil(v instant-vector) returns a vector containing all float samples in the input vector rounded up
+ to the nearest integer value greater than or equal to their original value. Histogram samples in the input
+ vector are ignored silently.
@@ -573,17 +621,19 @@ const funcDocs: Record = {
changes: (
<>
- For each input time series, changes(v range-vector) returns the number of times its value has changed
- within the provided time range as an instant vector.
+ For each input time series, changes(v range-vector) returns the number of times its value has
+ changed within the provided time range as an instant vector. A float sample followed by a histogram sample, or
+ vice versa, counts as a change. A counter histogram sample followed by a gauge histogram sample with otherwise
+ exactly the same values, or vice versa, does not count as a change.
>
),
clamp: (
<>
- clamp(v instant-vector, min scalar, max scalar)
- clamps the sample values of all elements in v to have a lower limit of min and an upper
- limit of max.
+ clamp(v instant-vector, min scalar, max scalar) clamps the values of all float samples in{" "}
+ v to have a lower limit of min and an upper limit of
+ max. Histogram samples in the input vector are ignored silently.
Special cases:
@@ -593,7 +643,7 @@ const funcDocs: Record = {
Return an empty vector if min > max
-
- Return
NaN if min or max is NaN
+ Float samples are clamped to NaN if min or max is NaN
>
@@ -601,71 +651,71 @@ const funcDocs: Record = {
clamp_max: (
<>
- clamp_max(v instant-vector, max scalar) clamps the sample values of all elements in v to
- have an upper limit of max.
+ clamp_max(v instant-vector, max scalar) clamps the values of all float samples in v to
+ have an upper limit of max. Histogram samples in the input vector are ignored silently.
>
),
clamp_min: (
<>
- clamp_min(v instant-vector, min scalar) clamps the sample values of all elements in v to
- have a lower limit of min.
+ clamp_min(v instant-vector, min scalar) clamps the values of all float samples in v to
+ have a lower limit of min. Histogram samples in the input vector are ignored silently.
>
),
cos: (
<>
- The trigonometric functions work in radians:
+ The trigonometric functions work in radians. They ignore histogram samples in the input vector.
-
-
acos(v instant-vector): calculates the arccosine of all elements in v (
+ acos(v instant-vector): calculates the arccosine of all float samples in v (
special cases).
-
-
acosh(v instant-vector): calculates the inverse hyperbolic cosine of all elements in v (
- special cases).
+ acosh(v instant-vector): calculates the inverse hyperbolic cosine of all float samples in{" "}
+ v (special cases).
-
-
asin(v instant-vector): calculates the arcsine of all elements in v (
+ asin(v instant-vector): calculates the arcsine of all float samples in v (
special cases).
-
-
asinh(v instant-vector): calculates the inverse hyperbolic sine of all elements in v (
- special cases).
+ asinh(v instant-vector): calculates the inverse hyperbolic sine of all float samples in{" "}
+ v (special cases).
-
-
atan(v instant-vector): calculates the arctangent of all elements in v (
+ atan(v instant-vector): calculates the arctangent of all float samples in v (
special cases).
-
-
atanh(v instant-vector): calculates the inverse hyperbolic tangent of all elements in v (
- special cases).
+ atanh(v instant-vector): calculates the inverse hyperbolic tangent of all float samples in{" "}
+ v (special cases).
-
-
cos(v instant-vector): calculates the cosine of all elements in v (
+ cos(v instant-vector): calculates the cosine of all float samples in v (
special cases).
-
-
cosh(v instant-vector): calculates the hyperbolic cosine of all elements in v (
+ cosh(v instant-vector): calculates the hyperbolic cosine of all float samples in v (
special cases).
-
-
sin(v instant-vector): calculates the sine of all elements in v (
+ sin(v instant-vector): calculates the sine of all float samples in v (
special cases).
-
-
sinh(v instant-vector): calculates the hyperbolic sine of all elements in v (
+ sinh(v instant-vector): calculates the hyperbolic sine of all float samples in v (
special cases).
-
-
tan(v instant-vector): calculates the tangent of all elements in v (
+ tan(v instant-vector): calculates the tangent of all float samples in v (
special cases).
-
-
tanh(v instant-vector): calculates the hyperbolic tangent of all elements in v (
- special cases).
+ tanh(v instant-vector): calculates the hyperbolic tangent of all float samples in v{" "}
+ (special cases).
@@ -673,69 +723,69 @@ const funcDocs: Record = {
-
-
deg(v instant-vector): converts radians to degrees for all elements in v.
+ deg(v instant-vector): converts radians to degrees for all float samples in v.
-
pi(): returns pi.
-
-
rad(v instant-vector): converts degrees to radians for all elements in v.
+ rad(v instant-vector): converts degrees to radians for all float samples in v.
>
),
cosh: (
<>
- The trigonometric functions work in radians:
+ The trigonometric functions work in radians. They ignore histogram samples in the input vector.
-
-
acos(v instant-vector): calculates the arccosine of all elements in v (
+ acos(v instant-vector): calculates the arccosine of all float samples in v (
special cases).
-
-
acosh(v instant-vector): calculates the inverse hyperbolic cosine of all elements in v (
- special cases).
+ acosh(v instant-vector): calculates the inverse hyperbolic cosine of all float samples in{" "}
+ v (special cases).
-
-
asin(v instant-vector): calculates the arcsine of all elements in v (
+ asin(v instant-vector): calculates the arcsine of all float samples in v (
special cases).
-
-
asinh(v instant-vector): calculates the inverse hyperbolic sine of all elements in v (
- special cases).
+ asinh(v instant-vector): calculates the inverse hyperbolic sine of all float samples in{" "}
+ v (special cases).
-
-
atan(v instant-vector): calculates the arctangent of all elements in v (
+ atan(v instant-vector): calculates the arctangent of all float samples in v (
special cases).
-
-
atanh(v instant-vector): calculates the inverse hyperbolic tangent of all elements in v (
- special cases).
+ atanh(v instant-vector): calculates the inverse hyperbolic tangent of all float samples in{" "}
+ v (special cases).
-
-
cos(v instant-vector): calculates the cosine of all elements in v (
+ cos(v instant-vector): calculates the cosine of all float samples in v (
special cases).
-
-
cosh(v instant-vector): calculates the hyperbolic cosine of all elements in v (
+ cosh(v instant-vector): calculates the hyperbolic cosine of all float samples in v (
special cases).
-
-
sin(v instant-vector): calculates the sine of all elements in v (
+ sin(v instant-vector): calculates the sine of all float samples in v (
special cases).
-
-
sinh(v instant-vector): calculates the hyperbolic sine of all elements in v (
+ sinh(v instant-vector): calculates the hyperbolic sine of all float samples in v (
special cases).
-
-
tan(v instant-vector): calculates the tangent of all elements in v (
+ tan(v instant-vector): calculates the tangent of all float samples in v (
special cases).
-
-
tanh(v instant-vector): calculates the hyperbolic tangent of all elements in v (
- special cases).
+ tanh(v instant-vector): calculates the hyperbolic tangent of all float samples in v{" "}
+ (special cases).
@@ -743,13 +793,13 @@ const funcDocs: Record = {
-
-
deg(v instant-vector): converts radians to degrees for all elements in v.
+ deg(v instant-vector): converts radians to degrees for all float samples in v.
-
pi(): returns pi.
-
-
rad(v instant-vector): converts degrees to radians for all elements in v.
+ rad(v instant-vector): converts degrees to radians for all float samples in v.
>
@@ -757,40 +807,42 @@ const funcDocs: Record = {
count_over_time: (
<>
- The following functions allow aggregating each series of a given range vector over time and return an instant vector
- with per-series aggregation results:
+ The following functions allow aggregating each series of a given range vector over time and return an instant
+ vector with per-series aggregation results:
-
-
avg_over_time(range-vector): the average value of all points in the specified interval.
+ avg_over_time(range-vector): the average value of all float or histogram samples in the specified
+ interval (see details below).
-
-
min_over_time(range-vector): the minimum value of all points in the specified interval.
+ min_over_time(range-vector): the minimum value of all float samples in the specified interval.
-
-
max_over_time(range-vector): the maximum value of all points in the specified interval.
+ max_over_time(range-vector): the maximum value of all float samples in the specified interval.
-
-
sum_over_time(range-vector): the sum of all values in the specified interval.
+ sum_over_time(range-vector): the sum of all float or histogram samples in the specified interval
+ (see details below).
-
-
count_over_time(range-vector): the count of all values in the specified interval.
+ count_over_time(range-vector): the count of all samples in the specified interval.
-
-
quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified
- interval.
+ quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the
+ specified interval.
-
-
stddev_over_time(range-vector): the population standard deviation of the values in the specified
- interval.
+ stddev_over_time(range-vector): the population standard deviation of all float samples in the
+ specified interval.
-
-
stdvar_over_time(range-vector): the population standard variance of the values in the specified
- interval.
+ stdvar_over_time(range-vector): the population standard variance of all float samples in the
+ specified interval.
-
-
last_over_time(range-vector): the most recent point value in the specified interval.
+ last_over_time(range-vector): the most recent sample in the specified interval.
-
present_over_time(range-vector): the value 1 for any series in the specified interval.
@@ -799,111 +851,161 @@ const funcDocs: Record = {
If the feature flag
- --enable-feature=promql-experimental-functions is set, the following additional functions are available:
+ --enable-feature=promql-experimental-functions is set, the following additional functions are
+ available:
-
-
mad_over_time(range-vector): the median absolute deviation of all points in the specified interval.
+ mad_over_time(range-vector): the median absolute deviation of all float samples in the specified
+ interval.
+
+ -
+
ts_of_min_over_time(range-vector): the timestamp of the last float sample that has the minimum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_max_over_time(range-vector): the timestamp of the last float sample that has the maximum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_last_over_time(range-vector): the timestamp of last sample in the specified interval.
+
+ -
+
first_over_time(range-vector): the oldest sample in the specified interval.
+
+ -
+
ts_of_first_over_time(range-vector): the timestamp of earliest sample in the specified interval.
- Note that all values in the specified interval have the same weight in the aggregation even if the values are not
- equally spaced throughout the interval.
+ Note that all values in the specified interval have the same weight in the aggregation even if the values are
+ not equally spaced throughout the interval.
+ These functions act on histograms in the following way:
+
+
+ -
+
count_over_time, first_over_time, last_over_time, and
+ present_over_time() act on float and histogram samples in the same way.
+
+ -
+
avg_over_time() and sum_over_time() act on histogram samples in a way that
+ corresponds to the respective aggregation operators. If a series contains a mix of float samples and histogram
+ samples within the range, the corresponding result is removed entirely from the output vector. Such a removal
+ is flagged by a warn-level annotation.
+
+ -
+ All other functions ignore histogram samples in the following way: Input ranges containing only histogram
+ samples are silently removed from the output. For ranges with a mix of histogram and float samples, only the
+ float samples are processed and the omission of the histogram samples is flagged by an info-level annotation.
+
+
+
- avg_over_time, sum_over_time, count_over_time, last_over_time,
- and
- present_over_time handle native histograms as expected. All other functions ignore histogram samples.
+ first_over_time(m[1m]) differs from m offset 1m in that the former will select the
+ first sample of m within the 1m range, where m offset 1m will select the most
+ recent sample within the lookback interval outside and prior to the 1m offset. This is particularly
+ useful with first_over_time(m[step()])
+ in range queries (available when --enable-feature=promql-duration-expr is set) to ensure that the
+ sample selected is within the range step.
>
),
day_of_month: (
<>
- day_of_month(v=vector(time()) instant-vector) returns the day of the month for each of the given times
- in UTC. Returned values are from 1 to 31.
+ day_of_month(v=vector(time()) instant-vector) interprets float samples in
+ v as timestamps (number of seconds since January 1, 1970 UTC) and returns the day of the month (in
+ UTC) for each of those timestamps. Returned values are from 1 to 31. Histogram samples in the input vector are
+ ignored silently.
>
),
day_of_week: (
<>
- day_of_week(v=vector(time()) instant-vector) returns the day of the week for each of the given times in
- UTC. Returned values are from 0 to 6, where 0 means Sunday etc.
+ day_of_week(v=vector(time()) instant-vector) interprets float samples in v
+ as timestamps (number of seconds since January 1, 1970 UTC) and returns the day of the week (in UTC) for each of
+ those timestamps. Returned values are from 0 to 6, where 0 means Sunday etc. Histogram samples in the input
+ vector are ignored silently.
>
),
day_of_year: (
<>
- day_of_year(v=vector(time()) instant-vector) returns the day of the year for each of the given times in
- UTC. Returned values are from 1 to 365 for non-leap years, and 1 to 366 in leap years.
+ day_of_year(v=vector(time()) instant-vector) interprets float samples in v
+ as timestamps (number of seconds since January 1, 1970 UTC) and returns the day of the year (in UTC) for each of
+ those timestamps. Returned values are from 1 to 365 for non-leap years, and 1 to 366 in leap years. Histogram
+ samples in the input vector are ignored silently.
>
),
days_in_month: (
<>
- days_in_month(v=vector(time()) instant-vector) returns number of days in the month for each of the given
- times in UTC. Returned values are from 28 to 31.
+ days_in_month(v=vector(time()) instant-vector) interprets float samples in
+ v as timestamps (number of seconds since January 1, 1970 UTC) and returns the number of days in the
+ month of each of those timestamps (in UTC). Returned values are from 28 to 31. Histogram samples in the input
+ vector are ignored silently.
>
),
deg: (
<>
- The trigonometric functions work in radians:
+ The trigonometric functions work in radians. They ignore histogram samples in the input vector.
-
-
acos(v instant-vector): calculates the arccosine of all elements in v (
+ acos(v instant-vector): calculates the arccosine of all float samples in v (
special cases).
-
-
acosh(v instant-vector): calculates the inverse hyperbolic cosine of all elements in v (
- special cases).
+ acosh(v instant-vector): calculates the inverse hyperbolic cosine of all float samples in{" "}
+ v (special cases).
-
-
asin(v instant-vector): calculates the arcsine of all elements in v (
+ asin(v instant-vector): calculates the arcsine of all float samples in v (
special cases).
-
-
asinh(v instant-vector): calculates the inverse hyperbolic sine of all elements in v (
- special cases).
+ asinh(v instant-vector): calculates the inverse hyperbolic sine of all float samples in{" "}
+ v (special cases).
-
-
atan(v instant-vector): calculates the arctangent of all elements in v (
+ atan(v instant-vector): calculates the arctangent of all float samples in v (
special cases).
-
-
atanh(v instant-vector): calculates the inverse hyperbolic tangent of all elements in v (
- special cases).
+ atanh(v instant-vector): calculates the inverse hyperbolic tangent of all float samples in{" "}
+ v (special cases).
-
-
cos(v instant-vector): calculates the cosine of all elements in v (
+ cos(v instant-vector): calculates the cosine of all float samples in v (
special cases).
-
-
cosh(v instant-vector): calculates the hyperbolic cosine of all elements in v (
+ cosh(v instant-vector): calculates the hyperbolic cosine of all float samples in v (
special cases).
-
-
sin(v instant-vector): calculates the sine of all elements in v (
+ sin(v instant-vector): calculates the sine of all float samples in v (
special cases).
-
-
sinh(v instant-vector): calculates the hyperbolic sine of all elements in v (
+ sinh(v instant-vector): calculates the hyperbolic sine of all float samples in v (
special cases).
-
-
tan(v instant-vector): calculates the tangent of all elements in v (
+ tan(v instant-vector): calculates the tangent of all float samples in v (
special cases).
-
-
tanh(v instant-vector): calculates the hyperbolic tangent of all elements in v (
- special cases).
+ tanh(v instant-vector): calculates the hyperbolic tangent of all float samples in v{" "}
+ (special cases).
@@ -911,13 +1013,13 @@ const funcDocs: Record = {
-
-
deg(v instant-vector): converts radians to degrees for all elements in v.
+ deg(v instant-vector): converts radians to degrees for all float samples in v.
-
pi(): returns pi.
-
-
rad(v instant-vector): converts degrees to radians for all elements in v.
+ rad(v instant-vector): converts degrees to radians for all float samples in v.
>
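A small usage sketch (the gauge name demo_wind_direction_degrees is an assumption for illustration): an angle stored in degrees is converted to radians before a trigonometric function is applied:

    sin(rad(demo_wind_direction_degrees))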
@@ -925,52 +1027,86 @@ const funcDocs: Record = {
delta: (
<>
- delta(v range-vector) calculates the difference between the first and last value of each time series
- element in a range vector v, returning an instant vector with the given deltas and equivalent labels.
- The delta is extrapolated to cover the full time range as specified in the range vector selector, so that it is
- possible to get a non-integer result even if the sample values are all integers.
+ delta(v range-vector) calculates the difference between the first and last value of each time
+ series element in a range vector v, returning an instant vector with the given deltas and
+ equivalent labels. The delta is extrapolated to cover the full time range as specified in the range vector
+ selector, so that it is possible to get a non-integer result even if the sample values are all integers.
The following example expression returns the difference in CPU temperature between now and 2 hours ago:
- delta(cpu_temp_celsius{'{'}host="zeus"{'}'}[2h])
+ delta(cpu_temp_celsius{"{"}host="zeus"{"}"}[2h])
- delta acts on native histograms by calculating a new histogram where each component (sum and count of
- observations, buckets) is the difference between the respective component in the first and last native histogram in
- v. However, each element in v that contains a mix of float and native histogram samples
- within the range, will be missing from the result vector.
+ delta acts on histogram samples by calculating a new histogram where each component (sum and count
+ of observations, buckets) is the difference between the respective component in the first and last native
+ histogram in v. However, each element in v that contains a mix of float samples and
+ histogram samples within the range will be omitted from the result vector, flagged by a warn-level annotation.
- delta should only be used with gauges and native histograms where the components behave like gauges
- (so-called gauge histograms).
+ delta should only be used with gauges (for both floats and histograms).
>
),
deriv: (
<>
- deriv(v range-vector) calculates the per-second derivative of the time series in a range vector{' '}
- v, using simple linear regression.
- The range vector must have at least two samples in order to perform the calculation. When +Inf or
+ deriv(v range-vector) calculates the per-second derivative of each float time series in the range
+ vector v, using{" "}
+ simple linear regression. The range vector
+ must have at least two float samples in order to perform the calculation. When +Inf or{" "}
-Inf are found in the range vector, the slope and offset value calculated will be NaN.
- deriv should only be used with gauges.
+ deriv should only be used with gauges and only works for float samples. Elements in the range
+ vector that contain only histogram samples are ignored entirely. For elements that contain a mix of float and
+ histogram samples, only the float samples are used as input, which is flagged by an info-level annotation.
+
+ >
+ ),
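An illustrative sketch, assuming a hypothetical gauge demo_disk_free_bytes: estimate how quickly free disk space is changing, in bytes per second, based on the samples of the last hour:

    deriv(demo_disk_free_bytes[1h])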
+ double_exponential_smoothing: (
+ <>
+
+
+ This function has to be enabled via the{" "}
+ feature flag
+ --enable-feature=promql-experimental-functions.
+
+
+
+
+ double_exponential_smoothing(v range-vector, sf scalar, tf scalar) produces a smoothed value for
+ each float time series in the range in v. The lower the smoothing factor sf, the more
+ importance is given to old data. The higher the trend factor tf, the more trends in the data are
+ considered. Both sf and
+ tf must be between 0 and 1. For additional details, refer to{" "}
+
+ NIST Engineering Statistics Handbook
+
+ . In Prometheus V2 this function was called holt_winters. This caused confusion since the
+ Holt-Winters method usually refers to triple exponential smoothing. Double exponential smoothing as implemented
+ here is also referred to as “Holt Linear”.
+
+
+
+ double_exponential_smoothing should only be used with gauges and only works for float samples.
+ Elements in the range vector that contain only histogram samples are ignored entirely. For elements that contain
+ a mix of float and histogram samples, only the float samples are used as input, which is flagged by an
+ info-level annotation.
>
),
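A hedged usage sketch, assuming the experimental-functions feature flag is enabled and a hypothetical gauge demo_queue_length: smooth the series over a 10-minute window with a low smoothing factor and a moderate trend factor:

    double_exponential_smoothing(demo_queue_length[10m], 0.3, 0.5)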
exp: (
<>
- exp(v instant-vector) calculates the exponential function for all elements in v. Special
- cases are:
+ exp(v instant-vector) calculates the exponential function for all float samples in v.
+ Histogram samples are ignored silently. Special cases are:
@@ -983,11 +1119,122 @@ const funcDocs: Record = {
>
),
+ first_over_time: (
+ <>
+
+ The following functions allow aggregating each series of a given range vector over time and return an instant
+ vector with per-series aggregation results:
+
+
+
+ -
+
avg_over_time(range-vector): the average value of all float or histogram samples in the specified
+ interval (see details below).
+
+ -
+
min_over_time(range-vector): the minimum value of all float samples in the specified interval.
+
+ -
+
max_over_time(range-vector): the maximum value of all float samples in the specified interval.
+
+ -
+
sum_over_time(range-vector): the sum of all float or histogram samples in the specified interval
+ (see details below).
+
+ -
+
count_over_time(range-vector): the count of all samples in the specified interval.
+
+ -
+
quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the
+ specified interval.
+
+ -
+
stddev_over_time(range-vector): the population standard deviation of all float samples in the
+ specified interval.
+
+ -
+
stdvar_over_time(range-vector): the population standard variance of all float samples in the
+ specified interval.
+
+ -
+
last_over_time(range-vector): the most recent sample in the specified interval.
+
+ -
+
present_over_time(range-vector): the value 1 for any series in the specified interval.
+
+
+
+
+ If the feature flag
+ --enable-feature=promql-experimental-functions is set, the following additional functions are
+ available:
+
+
+
+ -
+
mad_over_time(range-vector): the median absolute deviation of all float samples in the specified
+ interval.
+
+ -
+
ts_of_min_over_time(range-vector): the timestamp of the last float sample that has the minimum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_max_over_time(range-vector): the timestamp of the last float sample that has the maximum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_last_over_time(range-vector): the timestamp of the last sample in the specified interval.
+
+ -
+
first_over_time(range-vector): the oldest sample in the specified interval.
+
+ -
+
ts_of_first_over_time(range-vector): the timestamp of the earliest sample in the specified interval.
+
+
+
+
+ Note that all values in the specified interval have the same weight in the aggregation even if the values are
+ not equally spaced throughout the interval.
+
+
+ These functions act on histograms in the following way:
+
+
+ -
+
count_over_time, first_over_time, last_over_time, and
+ present_over_time() act on float and histogram samples in the same way.
+
+ -
+
avg_over_time() and sum_over_time() act on histogram samples in a way that
+ corresponds to the respective aggregation operators. If a series contains a mix of float samples and histogram
+ samples within the range, the corresponding result is removed entirely from the output vector. Such a removal
+ is flagged by a warn-level annotation.
+
+ -
+ All other functions ignore histogram samples in the following way: Input ranges containing only histogram
+ samples are silently removed from the output. For ranges with a mix of histogram and float samples, only the
+ float samples are processed and the omission of the histogram samples is flagged by an info-level annotation.
+
+
+
+
+ first_over_time(m[1m]) differs from m offset 1m in that the former will select the
+ first sample of m within the 1m range, where m offset 1m will select the most
+ recent sample within the lookback interval outside and prior to the 1m offset. This is particularly
+ useful with first_over_time(m[step()])
+ in range queries (available when --enable-feature=promql-duration-expr is set) to ensure that the
+ sample selected is within the range step.
+
+ >
+ ),
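To make the difference described above concrete (demo_gauge is an assumed metric name), the following two expressions can return different samples at the same evaluation time; the former picks the oldest sample inside the 1m window, the latter picks the newest sample before that window:

    first_over_time(demo_gauge[1m])
    demo_gauge offset 1m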
floor: (
<>
- floor(v instant-vector) rounds the sample values of all elements in v down to the nearest
- integer value smaller than or equal to v.
+ floor(v instant-vector) returns a vector containing all float samples in the input vector rounded
+ down to the nearest integer value smaller than or equal to their original value. Histogram samples in the input
+ vector are ignored silently.
@@ -1009,19 +1256,13 @@ const funcDocs: Record = {
histogram_avg: (
<>
-
- This function only acts on native histograms.
-
+ histogram_avg(v instant-vector) returns the arithmetic average of observed values stored in each
+ histogram sample in v. Float samples are ignored and do not show up in the returned vector.
- histogram_avg(v instant-vector) returns the arithmetic average of observed values stored in a native
- histogram. Samples that are not native histograms are ignored and do not show up in the returned vector.
-
-
-
- Use histogram_avg as demonstrated below to compute the average request duration over a 5-minute window
- from a native histogram:
+ Use histogram_avg as demonstrated below to compute the average request duration over a 5-minute
+ window from a native histogram:
@@ -1032,32 +1273,28 @@ const funcDocs: Record = {
- {' '}
- histogram_sum(rate(http_request_duration_seconds[5m])) / histogram_count(rate(http_request_duration_seconds[5m]))
+ {" "}
+ histogram_sum(rate(http_request_duration_seconds[5m])) /
+ histogram_count(rate(http_request_duration_seconds[5m]))
>
),
- 'histogram_count()` and `histogram_sum': (
+ histogram_count: (
<>
-
- Both functions only act on native histograms.
-
+ histogram_count(v instant-vector) returns the count of observations stored in each histogram sample
+ in v. Float samples are ignored and do not show up in the returned vector.
- histogram_count(v instant-vector) returns the count of observations stored in a native histogram.
- Samples that are not native histograms are ignored and do not show up in the returned vector.
+ Similarly, histogram_sum(v instant-vector) returns the sum of observations stored in each histogram
+ sample.
- Similarly, histogram_sum(v instant-vector) returns the sum of observations stored in a native histogram.
-
-
-
- Use histogram_count in the following way to calculate a rate of observations (in this case corresponding
- to “requests per second”) from a native histogram:
+ Use histogram_count in the following way to calculate a rate of observations (in this case
+ corresponding to “requests per second”) from a series of histogram samples:
@@ -1068,20 +1305,28 @@ const funcDocs: Record = {
histogram_fraction: (
<>
-
- This function only acts on native histograms.
-
+ histogram_fraction(lower scalar, upper scalar, b instant-vector) returns the estimated fraction of
+ observations between the provided lower and upper values for each classic or native histogram contained in{" "}
+ b. Float samples in b are considered the counts of observations in each bucket of one
+ or more classic histograms, while native histogram samples in b are treated each individually as a
+ separate histogram. This works in the same way as for histogram_quantile(). (See there for more
+ details.)
- For a native histogram, histogram_fraction(lower scalar, upper scalar, v instant-vector) returns the
- estimated fraction of observations between the provided lower and upper values. Samples that are not native
- histograms are ignored and do not show up in the returned vector.
+ If the provided lower and upper values do not coincide with bucket boundaries, the calculated fraction is an
+ estimate, using the same interpolation method as for
+ histogram_quantile(). (See there for more details.) Especially with classic histograms, it is easy
+ to accidentally pick lower or upper values that are very far away from any bucket boundary, leading to large
+ margins of error. Rather than using histogram_fraction() with classic histograms, it is often a
+ more robust approach to directly act on the bucket series when calculating fractions. See the
+ calculation of the Apdex score
+ as a typical example.
- For example, the following expression calculates the fraction of HTTP requests over the last hour that took 200ms or
- less:
+ For example, the following expression calculates the fraction of HTTP requests over the last hour that took
+ 200ms or less:
@@ -1089,48 +1334,56 @@ const funcDocs: Record = {
- The error of the estimation depends on the resolution of the underlying native histogram and how closely the provided
- boundaries are aligned with the bucket boundaries in the histogram.
+ The error of the estimation depends on the resolution of the underlying native histogram and how closely the
+ provided boundaries are aligned with the bucket boundaries in the histogram.
- +Inf and -Inf are valid boundary values. For example, if the histogram in the expression
- above included negative observations (which shouldn’t be the case for request durations), the appropriate lower
- boundary to include all observations less than or equal 0.2 would be -Inf rather than 0.
+ +Inf and -Inf are valid boundary values. For example, if the histogram in the
+ expression above included negative observations (which shouldn’t be the case for request durations), the
+ appropriate lower boundary to include all observations less than or equal 0.2 would be -Inf rather
+ than 0.
- Whether the provided boundaries are inclusive or exclusive is only relevant if the provided boundaries are precisely
- aligned with bucket boundaries in the underlying native histogram. In this case, the behavior depends on the schema
- definition of the histogram. The currently supported schemas all feature inclusive upper boundaries and exclusive
- lower boundaries for positive values (and vice versa for negative values). Without a precise alignment of boundaries,
- the function uses linear interpolation to estimate the fraction. With the resulting uncertainty, it becomes
- irrelevant if the boundaries are inclusive or exclusive.
+ Whether the provided boundaries are inclusive or exclusive is only relevant if the provided boundaries are
+ precisely aligned with bucket boundaries in the underlying native histogram. In this case, the behavior depends
+ on the schema definition of the histogram. (The usual standard exponential schemas all feature inclusive upper
+ boundaries and exclusive lower boundaries for positive values, and vice versa for negative values.) Without a
+ precise alignment of boundaries, the function uses interpolation to estimate the fraction. With the resulting
+ uncertainty, it becomes irrelevant if the boundaries are inclusive or exclusive.
+
+
+
+ Special case for native histograms with standard exponential buckets:
+ NaN observations are considered outside of any buckets in this case.
+ histogram_fraction(-Inf, +Inf, b) effectively returns the fraction of non-NaN{" "}
+ observations and may therefore be less than 1.
>
),
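A sketch of the Apdex-style use mentioned above (the 0.3s/1.2s thresholds and the assumption that http_request_duration_seconds is a native histogram are illustrative, not taken from this change):

    (
      histogram_fraction(0, 0.3, rate(http_request_duration_seconds[5m]))
      + histogram_fraction(0, 1.2, rate(http_request_duration_seconds[5m]))
    ) / 2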
histogram_quantile: (
<>
- histogram_quantile(φ scalar, b instant-vector) calculates the φ-quantile (0 ≤ φ ≤ 1) from a{' '}
+ histogram_quantile(φ scalar, b instant-vector) calculates the φ-quantile (0 ≤ φ ≤ 1) from a{" "}
classic histogram or from a native
- histogram. (See histograms and summaries for a detailed
- explanation of φ-quantiles and the usage of the (classic) histogram metric type in general.)
+ histogram. (See histograms and summaries for a
+ detailed explanation of φ-quantiles and the usage of the (classic) histogram metric type in general.)
- The float samples in b are considered the counts of observations in each bucket of one or more classic
- histograms. Each float sample must have a label
- le where the label value denotes the inclusive upper bound of the bucket. (Float samples without such a
- label are silently ignored.) The other labels and the metric name are used to identify the buckets belonging to each
- classic histogram. The{' '}
+ The float samples in b are considered the counts of observations in each bucket of one or more
+ classic histograms. Each float sample must have a label
+ le where the label value denotes the inclusive upper bound of the bucket. (Float samples without
+ such a label are silently ignored.) The other labels and the metric name are used to identify the buckets
+ belonging to each classic histogram. The{" "}
histogram metric type
automatically provides time series with the _bucket suffix and the appropriate labels.
- The native histogram samples in b are treated each individually as a separate histogram to calculate the
- quantile from.
+ The (native) histogram samples in b are treated each individually as a separate histogram to
+ calculate the quantile from.
@@ -1142,10 +1395,10 @@ const funcDocs: Record = {
- Example: A histogram metric is called http_request_duration_seconds (and therefore the metric name for
- the buckets of a classic histogram is
- http_request_duration_seconds_bucket). To calculate the 90th percentile of request durations over the
- last 10m, use the following expression in case
+ Example: A histogram metric is called http_request_duration_seconds (and therefore the metric name
+ for the buckets of a classic histogram is
+ http_request_duration_seconds_bucket). To calculate the 90th percentile of request durations over
+ the last 10m, use the following expression in case
http_request_duration_seconds is a classic histogram:
@@ -1161,9 +1414,9 @@ const funcDocs: Record = {
The quantile is calculated for each label combination in
- http_request_duration_seconds. To aggregate, use the sum() aggregator around the{' '}
+ http_request_duration_seconds. To aggregate, use the sum() aggregator around the{" "}
rate() function. Since the le label is required by
- histogram_quantile() to deal with classic histograms, it has to be included in the by{' '}
+ histogram_quantile() to deal with classic histograms, it has to be included in the by{" "}
clause. The following expression aggregates the 90th percentile by job for classic histograms:
@@ -1194,23 +1447,67 @@ const funcDocs: Record = {
- The histogram_quantile() function interpolates quantile values by assuming a linear distribution within
- a bucket.
+ In the (common) case that a quantile value does not coincide with a bucket boundary, the{" "}
+ histogram_quantile() function interpolates the quantile value within the bucket the quantile value
+ falls into. For classic histograms, for native histograms with custom bucket boundaries, and for the zero bucket
+ of other native histograms, it assumes a uniform distribution of observations within the bucket (also called{" "}
+ linear interpolation). For the non-zero-buckets of native histograms with a standard exponential
+ bucketing schema, the interpolation is done under the assumption that the samples within the bucket are
+ distributed in a way that they would uniformly populate the buckets in a hypothetical histogram with higher
+ resolution. (This is also called exponential interpolation. See the{" "}
+
+ native histogram specification
+
+ for more details.)
- If b has 0 observations, NaN is returned. For φ < 0, -Inf is returned. For
- φ > 1, +Inf is returned. For φ = NaN, NaN is returned.
+ If b has 0 observations, NaN is returned. For φ < 0, -Inf is returned.
+ For φ > 1, +Inf is returned. For φ = NaN, NaN is returned.
-
- The following is only relevant for classic histograms: If b contains fewer than two buckets,{' '}
- NaN is returned. The highest bucket must have an upper bound of +Inf. (Otherwise,{' '}
- NaN is returned.) If a quantile is located in the highest bucket, the upper bound of the second highest
- bucket is returned. A lower limit of the lowest bucket is assumed to be 0 if the upper bound of that bucket is
- greater than 0. In that case, the usual linear interpolation is applied within that bucket. Otherwise, the upper
- bound of the lowest bucket is returned for quantiles located in the lowest bucket.
-
+ Special cases for classic histograms:
+
+
+ -
+ If
b contains fewer than two buckets, NaN is returned.
+
+ -
+ The highest bucket must have an upper bound of
+Inf. (Otherwise, NaN is returned.)
+
+ -
+ If a quantile is located in the highest bucket, the upper bound of the second highest bucket is returned.
+
+ -
+ The lower limit of the lowest bucket is assumed to be 0 if the upper bound of that bucket is greater than 0.
+ In that case, the usual linear interpolation is applied within that bucket. Otherwise, the upper bound of the
+ lowest bucket is returned for quantiles located in the lowest bucket.
+
+
+
+ Special cases for native histograms:
+
+
+ -
+ If a native histogram with standard exponential buckets has
NaN
+ observations and the quantile falls into one of the existing exponential buckets, the result is skewed towards
+ higher values due to NaN
+ observations being treated as +Inf. This is flagged with an info-level annotation.
+
+ -
+ If a native histogram with standard exponential buckets has
NaN
+ observations and the quantile falls above all of the existing exponential buckets, NaN is
+ returned. This is flagged with an info-level annotation.
+
+ -
+ A zero bucket with finite width is assumed to contain no negative observations if the histogram has
+ observations in positive buckets, but none in negative buckets.
+
+ -
+ A zero bucket with finite width is assumed to contain no positive observations if the histogram has
+ observations in negative buckets, but none in positive buckets.
+
+
You can use histogram_quantile(0, v instant-vector) to get the estimated minimum value stored in a
@@ -1227,78 +1524,100 @@ const funcDocs: Record = {
- The counts in the buckets are monotonically increasing (strictly non-decreasing).
-
- A lack of observations between the upper limits of two consecutive buckets results in equal counts in those two
- buckets.
+ A lack of observations between the upper limits of two consecutive buckets results in equal counts in those
+ two buckets.
- However, floating point precision issues (e.g. small discrepancies introduced by computing of buckets with{' '}
- sum(rate(...))) or invalid data might violate these assumptions. In that case,
- histogram_quantile would be unable to return meaningful results. To mitigate the issue,
+ However, floating point precision issues (e.g. small discrepancies introduced by computing buckets with{" "}
+ sum(rate(...))) or invalid data might violate these assumptions. In that case,{" "}
+ histogram_quantile would be unable to return meaningful results. To mitigate the issue,{" "}
histogram_quantile assumes that tiny relative differences between consecutive buckets are happening
because of floating point precision errors and ignores them. (The threshold to ignore a difference between two
- buckets is a trillionth (1e-12) of the sum of both buckets.) Furthermore, if there are non-monotonic bucket counts
- even after this adjustment, they are increased to the value of the previous buckets to enforce monotonicity. The
- latter is evidence for an actual issue with the input data and is therefore flagged with an informational annotation
- reading input to histogram_quantile needed to be fixed for monotonicity. If you encounter this
- annotation, you should find and remove the source of the invalid data.
+ buckets is a trillionth (1e-12) of the sum of both buckets.) Furthermore, if there are non-monotonic bucket
+ counts even after this adjustment, they are increased to the value of the previous buckets to enforce
+ monotonicity. The latter is evidence for an actual issue with the input data and is therefore flagged by an
+ info-level annotation reading input to histogram_quantile needed to be fixed for monotonicity. If
+ you encounter this annotation, you should find and remove the source of the invalid data.
>
),
- 'histogram_stddev()` and `histogram_stdvar': (
+ histogram_stddev: (
<>
-
- Both functions only act on native histograms.
-
+ histogram_stddev(v instant-vector) returns the estimated standard deviation of observations for
+ each histogram sample in v. For this estimation, all observations in a bucket are assumed to have
+ the value of the mean of the bucket boundaries. For the zero bucket and for buckets with custom boundaries, the
+ arithmetic mean is used. For the usual exponential buckets, the geometric mean is used. Float samples are
+ ignored and do not show up in the returned vector.
-
- histogram_stddev(v instant-vector) returns the estimated standard deviation of observations in a native
- histogram. For this estimation, all observations in a bucket are assumed to have the value of the mean of the bucket boundaries.
- For the zero bucket and for buckets with custom boundaries, the arithmetic mean is used. For the usual exponential buckets,
- the geometric mean is used. Samples that are not native histograms are ignored and do not show up in the returned vector.
-
-
-
- Similarly, histogram_stdvar(v instant-vector) returns the estimated standard variance of observations in
- a native histogram.
+ Similarly, histogram_stdvar(v instant-vector) returns the estimated standard variance of
+ observations for each histogram sample in v.
>
),
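A brief sketch (again assuming http_request_duration_seconds is ingested as a native histogram): estimate the spread of request durations observed over the last 10 minutes:

    histogram_stddev(rate(http_request_duration_seconds[10m]))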
- double_exponential_smoothing: (
+ histogram_stdvar: (
<>
- double_exponential_smoothing(v range-vector, sf scalar, tf scalar) produces a smoothed value for time series based on
- the range in v. The lower the smoothing factor sf, the more importance is given to old
- data. The higher the trend factor tf, the more trends in the data is considered. Both sf{' '}
- and tf must be between 0 and 1.
+ histogram_stddev(v instant-vector) returns the estimated standard deviation of observations for
+ each histogram sample in v. For this estimation, all observations in a bucket are assumed to have
+ the value of the mean of the bucket boundaries. For the zero bucket and for buckets with custom boundaries, the
+ arithmetic mean is used. For the usual exponential buckets, the geometric mean is used. Float samples are
+ ignored and do not show up in the returned vector.
- double_exponential_smoothing should only be used with gauges.
+ Similarly, histogram_stdvar(v instant-vector) returns the estimated standard variance of
+ observations for each histogram sample in v.
>
),
+ histogram_sum: (
+ <>
+
+ histogram_count(v instant-vector) returns the count of observations stored in each histogram sample
+ in v. Float samples are ignored and do not show up in the returned vector.
+
+
+
+ Similarly, histogram_sum(v instant-vector) returns the sum of observations stored in each histogram
+ sample.
+
+
+
+ Use histogram_count in the following way to calculate a rate of observations (in this case
+ corresponding to “requests per second”) from a series of histogram samples:
+
+
+
+ histogram_count(rate(http_request_duration_seconds[10m]))
+
+ >
+ ),
hour: (
<>
- hour(v=vector(time()) instant-vector) returns the hour of the day for each of the given times in UTC.
- Returned values are from 0 to 23.
+ hour(v=vector(time()) instant-vector) interprets float samples in v as timestamps
+ (number of seconds since January 1, 1970 UTC) and returns the hour of the day (in UTC) for each of those
+ timestamps. Returned values are from 0 to 23. Histogram samples in the input vector are ignored silently.
>
),
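A sketch of the common time-gating pattern (demo_http_errors_rate stands in for any metric of interest and is purely illustrative): keep samples only between 09:00 and 17:00 UTC:

    demo_http_errors_rate and on() (hour() >= 9 and hour() < 17)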
idelta: (
<>
- idelta(v range-vector) calculates the difference between the last two samples in the range vector{' '}
- v, returning an instant vector with the given deltas and equivalent labels.
+ idelta(v range-vector) calculates the difference between the last two samples in the range vector{" "}
+ v, returning an instant vector with the given deltas and equivalent labels. Both samples must be
+ either float samples or histogram samples. Elements in v where one of the last two samples is a
+ float sample and the other is a histogram sample will be omitted from the result vector, flagged by a warn-level
+ annotation.
- idelta should only be used with gauges.
+ idelta should only be used with gauges (for both floats and histograms).
>
),
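A minimal sketch (demo_gauge is an assumed metric name): the difference between the two most recent samples seen within the last five minutes:

    idelta(demo_gauge[5m])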
@@ -1307,79 +1626,208 @@ const funcDocs: Record = {
increase(v range-vector) calculates the increase in the time series in the range vector. Breaks in
monotonicity (such as counter resets due to target restarts) are automatically adjusted for. The increase is
- extrapolated to cover the full time range as specified in the range vector selector, so that it is possible to get a
- non-integer result even if a counter increases only by integer increments.
+ extrapolated to cover the full time range as specified in the range vector selector, so that it is possible to
+ get a non-integer result even if a counter increases only by integer increments.
- The following example expression returns the number of HTTP requests as measured over the last 5 minutes, per time
- series in the range vector:
+ The following example expression returns the number of HTTP requests as measured over the last 5 minutes, per
+ time series in the range vector:
- increase(http_requests_total{'{'}job="api-server"{'}'}[5m])
+ increase(http_requests_total{"{"}job="api-server"{"}"}[5m])
- increase acts on native histograms by calculating a new histogram where each component (sum and count of
- observations, buckets) is the increase between the respective component in the first and last native histogram in
- v. However, each element in v that contains a mix of float and native histogram samples
- within the range, will be missing from the result vector.
+ increase acts on histogram samples by calculating a new histogram where each component (sum and
+ count of observations, buckets) is the increase between the respective component in the first and last native
+ histogram in v. However, each element in v that contains a mix of float samples and
+ histogram samples within the range will be omitted from the result vector, flagged by a warn-level annotation.
- increase should only be used with counters and native histograms where the components behave like
- counters. It is syntactic sugar for rate(v) multiplied by the number of seconds under the specified time
- range window, and should be used primarily for human readability. Use rate in recording rules so that
- increases are tracked consistently on a per-second basis.
+ increase should only be used with counters (for both floats and histograms). It is syntactic sugar
+ for rate(v) multiplied by the number of seconds under the specified time range window, and should
+ be used primarily for human readability. Use rate in recording rules so that increases are tracked
+ consistently on a per-second basis.
+
+ >
+ ),
+ info: (
+ <>
+
+ The info function is an experiment to improve UX around including labels from{" "}
+
+ info metrics
+
+ . The behavior of this function may change in future versions of Prometheus, including its removal from PromQL.{" "}
+ info has to be enabled via the
+ feature flag{" "}
+ --enable-feature=promql-experimental-functions.
+
+
+
+ info(v instant-vector, [data-label-selector instant-vector]) finds, for each time series in{" "}
+ v, all info series with matching identifying labels (more on this later), and adds the
+ union of their data (i.e., non-identifying) labels to the time series. The second argument{" "}
+ data-label-selector is optional. It is not a real instant vector, but uses a subset of its syntax.
+ It must start and end with curly braces (
+
+ {"{"} ... {"}"}
+
+ ) and may only contain label matchers. The label matchers are used to constrain which info series to consider
+ and which data labels to add to v.
+
+
+
+ Identifying labels of an info series are the subset of labels that uniquely identify the info series. The
+ remaining labels are considered
+ data labels (also called non-identifying). (Note that Prometheus’s concept of time series
+ identity always includes all the labels. For the sake of the info
+ function, we “logically” define info series identity in a different way than in the conventional Prometheus
+ view.) The identifying labels of an info series are used to join it to regular (non-info) series, i.e. those
+ series that have the same labels as the identifying labels of the info series. The data labels, which are the
+ ones added to the regular series by the info function, effectively encode metadata key value pairs.
+ (This implies that a change in the data labels in the conventional Prometheus view constitutes the end of one
+ info series and the beginning of a new info series, while the “logical” view of the info function
+ is that the same info series continues to exist, just with different “data”.)
+
+
+
+ The conventional approach of adding data labels is sometimes called a “join query”, as illustrated by the
+ following example:
+
+
+
+
+ {" "}
+ rate(http_server_request_duration_seconds_count[2m]) * on (job, instance) group_left (k8s_cluster_name)
+ target_info
+
+
+
+
+ The core of the query is the expression rate(http_server_request_duration_seconds_count[2m]). But
+ to add data labels from an info metric, the user has to use elaborate (and not very obvious) syntax to specify
+ which info metric to use (target_info), what the identifying labels are (
+ on (job, instance)), and which data labels to add (group_left (k8s_cluster_name)).
+
+
+
+ This query is not only verbose and hard to write, it might also run into an “identity crisis”: If any of the
+ data labels of target_info changes, Prometheus sees that as a change of series (as alluded to
+ above, Prometheus just has no native concept of non-identifying labels). If the old target_info{" "}
+ series is not properly marked as stale (which can happen with certain ingestion paths), the query above will
+ fail for up to 5m (the lookback delta) because it will find a conflicting match with both the old and the new
+ version of target_info.
+
+
+
+ The info function not only resolves this conflict in favor of the newer series, it also simplifies
+ the syntax because it knows about the available info series and what their identifying labels are. The example
+ query looks like this with the info function:
+
+
+
+
+ info( rate(http_server_request_duration_seconds_count[2m]),
+ {"{"}k8s_cluster_name=~".+"{"}"})
+
+
+
+
+ The common case of adding all data labels can be achieved by omitting the 2nd argument of the{" "}
+ info function entirely, simplifying the example even more:
+
+
+
+ info(rate(http_server_request_duration_seconds_count[2m]))
+
+
+
+ While info normally automatically finds all matching info series, it’s possible to restrict
+ them by providing a __name__ label matcher, e.g.
+
+ {"{"}__name__="target_info"{"}"}
+
+ .
+
+
+ Limitations
+
+
+ In its current iteration, info defaults to considering only info series with the name{" "}
+ target_info. It also assumes that the identifying info series labels are
+ instance and job. info does support other info series names however,
+ through
+ __name__ label matchers. E.g., one can explicitly say to consider both
+ target_info and build_info as follows:
+
+ {"{"}__name__=~"(target|build)_info"{"}"}
+
+ . However, the identifying labels always have to be instance and job.
+
+
+
+ These limitations partially defeat the purpose of the info function. At the current stage,
+ this is an experiment to find out how useful the approach turns out to be in practice. A final version of the{" "}
+ info function will indeed consider all matching info series, each with its appropriate identifying
+ labels.
>
),
irate: (
<>
- irate(v range-vector) calculates the per-second instant rate of increase of the time series in the range
- vector. This is based on the last two data points. Breaks in monotonicity (such as counter resets due to target
- restarts) are automatically adjusted for.
+ irate(v range-vector) calculates the per-second instant rate of increase of the time series in the
+ range vector. This is based on the last two data points. Breaks in monotonicity (such as counter resets due to
+ target restarts) are automatically adjusted for. Both samples must be either float samples or histogram samples.
+ Elements in v where one of the last two samples is a float sample and the other is a histogram
+ sample will be omitted from the result vector, flagged by a warn-level annotation.
- The following example expression returns the per-second rate of HTTP requests looking up to 5 minutes back for the
- two most recent data points, per time series in the range vector:
+ irate should only be used with counters (for both floats and histograms).
+
+
+
+ The following example expression returns the per-second rate of HTTP requests looking up to 5 minutes back for
+ the two most recent data points, per time series in the range vector:
- irate(http_requests_total{'{'}job="api-server"{'}'}[5m])
+ irate(http_requests_total{"{"}job="api-server"{"}"}[5m])
- irate should only be used when graphing volatile, fast-moving counters. Use rate for alerts
- and slow-moving counters, as brief changes in the rate can reset the FOR clause and graphs consisting
- entirely of rare spikes are hard to read.
+ irate should only be used when graphing volatile, fast-moving counters. Use rate for
+ alerts and slow-moving counters, as brief changes in the rate can reset the FOR clause and graphs
+ consisting entirely of rare spikes are hard to read.
Note that when combining irate() with an
aggregation operator (e.g. sum()) or a function
- aggregating over time (any function ending in _over_time), always take a irate() first,
- then aggregate. Otherwise irate() cannot detect counter resets when your target restarts.
+ aggregating over time (any function ending in _over_time), always take an irate(){" "}
+ first, then aggregate. Otherwise irate() cannot detect counter resets when your target restarts.
>
),
label_join: (
<>
- For each timeseries in v,{' '}
+ For each timeseries in v,{" "}
label_join(v instant-vector, dst_label string, separator string, src_label_1 string, src_label_2 string, ...)
- {' '}
+
{" "}
joins all the values of all the src_labels
- using separator and returns the timeseries with the label dst_label containing the joined
- value. There can be any number of src_labels in this function.
+ using separator and returns the timeseries with the label dst_label containing the
+ joined value. There can be any number of src_labels in this function.
@@ -1387,13 +1835,13 @@ const funcDocs: Record = {
- This example will return a vector with each time series having a foo label with the value{' '}
+ This example will return a vector with each time series having a foo label with the value{" "}
a,b,c added to it:
- label_join(up{'{'}job="api-server",src1="a",src2="b",src3="c"{'}'},
+ label_join(up{"{"}job="api-server",src1="a",src2="b",src3="c"{"}"},
"foo", ",", "src1", "src2", "src3")
@@ -1402,14 +1850,17 @@ const funcDocs: Record = {
label_replace: (
<>
- For each timeseries in v,{' '}
- label_replace(v instant-vector, dst_label string, replacement string, src_label string, regex string)
- matches the regular expression regex against the
+ For each timeseries in v,{" "}
+
+ label_replace(v instant-vector, dst_label string, replacement string, src_label string, regex string)
+
+ matches the regular expression regex against the
value of the label src_label. If it matches, the value of the label dst_label in the
returned timeseries will be the expansion of replacement, together with the original labels in the
- input. Capturing groups in the regular expression can be referenced with $1, $2, etc. Named
- capturing groups in the regular expression can be referenced with $name (where name is the
- capturing group name). If the regular expression doesn’t match then the timeseries is returned unchanged.
+ input. Capturing groups in the regular expression can be referenced with $1, $2, etc.
+ Named capturing groups in the regular expression can be referenced with $name (where{" "}
+ name is the capturing group name). If the regular expression doesn’t match then the
+ timeseries is returned unchanged.
@@ -1417,23 +1868,25 @@ const funcDocs: Record = {
- This example will return timeseries with the values a:c at label service and a{' '}
- at label foo:
+ This example will return timeseries with the values a:c at label service and{" "}
+ a at label foo:
- label_replace(up{'{'}job="api-server",service="a:c"{'}'}, "foo", "$1",
+ label_replace(up{"{"}job="api-server",service="a:c"{"}"}, "foo", "$1",
"service", "(.*):.*")
- This second example has the same effect than the first example, and illustrates use of named capturing groups:
+
+ This second example has the same effect as the first example, and illustrates the use of named capturing groups:
+
- label_replace(up{'{'}job="api-server",service="a:c"{'}'}, "foo", "$name",
- "service", "(?P<name>.*):(?P<version>.*)")
+ label_replace(up{"{"}job="api-server",service="a:c"{"}"}, "foo",
+ "$name", "service", "(?P<name>.*):(?P<version>.*)")
>
@@ -1441,40 +1894,42 @@ const funcDocs: Record = {
last_over_time: (
<>
- The following functions allow aggregating each series of a given range vector over time and return an instant vector
- with per-series aggregation results:
+ The following functions allow aggregating each series of a given range vector over time and return an instant
+ vector with per-series aggregation results:
-
-
avg_over_time(range-vector): the average value of all points in the specified interval.
+ avg_over_time(range-vector): the average value of all float or histogram samples in the specified
+ interval (see details below).
-
-
min_over_time(range-vector): the minimum value of all points in the specified interval.
+ min_over_time(range-vector): the minimum value of all float samples in the specified interval.
-
-
max_over_time(range-vector): the maximum value of all points in the specified interval.
+ max_over_time(range-vector): the maximum value of all float samples in the specified interval.
-
-
sum_over_time(range-vector): the sum of all values in the specified interval.
+ sum_over_time(range-vector): the sum of all float or histogram samples in the specified interval
+ (see details below).
-
-
count_over_time(range-vector): the count of all values in the specified interval.
+ count_over_time(range-vector): the count of all samples in the specified interval.
-
-
quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified
- interval.
+ quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the
+ specified interval.
-
-
stddev_over_time(range-vector): the population standard deviation of the values in the specified
- interval.
+ stddev_over_time(range-vector): the population standard deviation of all float samples in the
+ specified interval.
-
-
stdvar_over_time(range-vector): the population standard variance of the values in the specified
- interval.
+ stdvar_over_time(range-vector): the population standard variance of all float samples in the
+ specified interval.
-
-
last_over_time(range-vector): the most recent point value in the specified interval.
+ last_over_time(range-vector): the most recent sample in the specified interval.
-
present_over_time(range-vector): the value 1 for any series in the specified interval.
@@ -1483,32 +1938,74 @@ const funcDocs: Record = {
If the feature flag
- --enable-feature=promql-experimental-functions is set, the following additional functions are available:
+ --enable-feature=promql-experimental-functions is set, the following additional functions are
+ available:
-
-
mad_over_time(range-vector): the median absolute deviation of all points in the specified interval.
+ mad_over_time(range-vector): the median absolute deviation of all float samples in the specified
+ interval.
+
+ -
+
ts_of_min_over_time(range-vector): the timestamp of the last float sample that has the minimum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_max_over_time(range-vector): the timestamp of the last float sample that has the maximum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_last_over_time(range-vector): the timestamp of the last sample in the specified interval.
+
+ -
+
first_over_time(range-vector): the oldest sample in the specified interval.
+
+ -
+
ts_of_first_over_time(range-vector): the timestamp of the earliest sample in the specified interval.
- Note that all values in the specified interval have the same weight in the aggregation even if the values are not
- equally spaced throughout the interval.
+ Note that all values in the specified interval have the same weight in the aggregation even if the values are
+ not equally spaced throughout the interval.
+ These functions act on histograms in the following way:
+
+
+ -
+
count_over_time, first_over_time, last_over_time, and
+ present_over_time() act on float and histogram samples in the same way.
+
+ -
+
avg_over_time() and sum_over_time() act on histogram samples in a way that
+ corresponds to the respective aggregation operators. If a series contains a mix of float samples and histogram
+ samples within the range, the corresponding result is removed entirely from the output vector. Such a removal
+ is flagged by a warn-level annotation.
+
+ -
+ All other functions ignore histogram samples in the following way: Input ranges containing only histogram
+ samples are silently removed from the output. For ranges with a mix of histogram and float samples, only the
+ float samples are processed and the omission of the histogram samples is flagged by an info-level annotation.
+
+
+
- avg_over_time, sum_over_time, count_over_time, last_over_time,
- and
- present_over_time handle native histograms as expected. All other functions ignore histogram samples.
+ first_over_time(m[1m]) differs from m offset 1m in that the former will select the
+ first sample of m within the 1m range, where m offset 1m will select the most
+ recent sample within the lookback interval outside and prior to the 1m offset. This is particularly
+ useful with first_over_time(m[step()])
+ in range queries (available when --enable-feature=promql-duration-expr is set) to ensure that the
+ sample selected is within the range step.
>
),
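One hedged example for this family of functions (demo_request_latency_seconds is an assumed float-valued metric): the 0.95-quantile of all raw samples observed in the last 10 minutes:

    quantile_over_time(0.95, demo_request_latency_seconds[10m])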
ln: (
<>
- ln(v instant-vector) calculates the natural logarithm for all elements in v. Special cases
- are:
+ ln(v instant-vector) calculates the natural logarithm for all float samples in v.
+ Histogram samples in the input vector are ignored silently. Special cases are:
@@ -1530,56 +2027,60 @@ const funcDocs: Record = {
log10: (
<>
- log10(v instant-vector) calculates the decimal logarithm for all elements in v. The special
- cases are equivalent to those in ln.
+ log10(v instant-vector) calculates the decimal logarithm for all float samples in v.
+ Histogram samples in the input vector are ignored silently. The special cases are equivalent to those in{" "}
+ ln.
>
),
log2: (
<>
- log2(v instant-vector) calculates the binary logarithm for all elements in v. The special
- cases are equivalent to those in ln.
+ log2(v instant-vector) calculates the binary logarithm for all float samples in v.
+ Histogram samples in the input vector are ignored silently. The special cases are equivalent to those in{" "}
+ ln.
>
),
mad_over_time: (
<>
- The following functions allow aggregating each series of a given range vector over time and return an instant vector
- with per-series aggregation results:
+ The following functions allow aggregating each series of a given range vector over time and return an instant
+ vector with per-series aggregation results:
-
-
avg_over_time(range-vector): the average value of all points in the specified interval.
+ avg_over_time(range-vector): the average value of all float or histogram samples in the specified
+ interval (see details below).
-
-
min_over_time(range-vector): the minimum value of all points in the specified interval.
+ min_over_time(range-vector): the minimum value of all float samples in the specified interval.
-
-
max_over_time(range-vector): the maximum value of all points in the specified interval.
+ max_over_time(range-vector): the maximum value of all float samples in the specified interval.
-
-
sum_over_time(range-vector): the sum of all values in the specified interval.
+ sum_over_time(range-vector): the sum of all float or histogram samples in the specified interval
+ (see details below).
-
-
count_over_time(range-vector): the count of all values in the specified interval.
+ count_over_time(range-vector): the count of all samples in the specified interval.
-
-
quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified
- interval.
+ quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the
+ specified interval.
-
-
stddev_over_time(range-vector): the population standard deviation of the values in the specified
- interval.
+ stddev_over_time(range-vector): the population standard deviation of all float samples in the
+ specified interval.
-
-
stdvar_over_time(range-vector): the population standard variance of the values in the specified
- interval.
+ stdvar_over_time(range-vector): the population standard variance of all float samples in the
+ specified interval.
-
-
last_over_time(range-vector): the most recent point value in the specified interval.
+ last_over_time(range-vector): the most recent sample in the specified interval.
-
present_over_time(range-vector): the value 1 for any series in the specified interval.
@@ -1588,64 +2089,108 @@ const funcDocs: Record = {
If the feature flag
- --enable-feature=promql-experimental-functions is set, the following additional functions are available:
+ --enable-feature=promql-experimental-functions is set, the following additional functions are
+ available:
-
-
mad_over_time(range-vector): the median absolute deviation of all points in the specified interval.
+ mad_over_time(range-vector): the median absolute deviation of all float samples in the specified
+ interval.
+
+ -
+
ts_of_min_over_time(range-vector): the timestamp of the last float sample that has the minimum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_max_over_time(range-vector): the timestamp of the last float sample that has the maximum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_last_over_time(range-vector): the timestamp of the last sample in the specified interval.
+
+ -
+
first_over_time(range-vector): the oldest sample in the specified interval.
+
+ -
+
ts_of_first_over_time(range-vector): the timestamp of the earliest sample in the specified interval.
- Note that all values in the specified interval have the same weight in the aggregation even if the values are not
- equally spaced throughout the interval.
+ Note that all values in the specified interval have the same weight in the aggregation even if the values are
+ not equally spaced throughout the interval.
+ These functions act on histograms in the following way:
+
+
+ -
+
count_over_time, first_over_time, last_over_time, and
+ present_over_time() act on float and histogram samples in the same way.
+
+ -
+
avg_over_time() and sum_over_time() act on histogram samples in a way that
+ corresponds to the respective aggregation operators. If a series contains a mix of float samples and histogram
+ samples within the range, the corresponding result is removed entirely from the output vector. Such a removal
+ is flagged by a warn-level annotation.
+
+ -
+ All other functions ignore histogram samples in the following way: Input ranges containing only histogram
+ samples are silently removed from the output. For ranges with a mix of histogram and float samples, only the
+ float samples are processed and the omission of the histogram samples is flagged by an info-level annotation.
+
+
+
- avg_over_time, sum_over_time, count_over_time, last_over_time,
- and
- present_over_time handle native histograms as expected. All other functions ignore histogram samples.
+ first_over_time(m[1m]) differs from m offset 1m in that the former will select the
+ first sample of m within the 1m range, where m offset 1m will select the most
+ recent sample within the lookback interval outside and prior to the 1m offset. This is particularly
+ useful with first_over_time(m[step()])
+ in range queries (available when --enable-feature=promql-duration-expr is set) to ensure that the
+ sample selected is within the range step.
>
),
max_over_time: (
<>
- The following functions allow aggregating each series of a given range vector over time and return an instant vector
- with per-series aggregation results:
+ The following functions allow aggregating each series of a given range vector over time and return an instant
+ vector with per-series aggregation results:
-
-
avg_over_time(range-vector): the average value of all points in the specified interval.
+ avg_over_time(range-vector): the average value of all float or histogram samples in the specified
+ interval (see details below).
-
-
min_over_time(range-vector): the minimum value of all points in the specified interval.
+ min_over_time(range-vector): the minimum value of all float samples in the specified interval.
-
-
max_over_time(range-vector): the maximum value of all points in the specified interval.
+ max_over_time(range-vector): the maximum value of all float samples in the specified interval.
-
-
sum_over_time(range-vector): the sum of all values in the specified interval.
+ sum_over_time(range-vector): the sum of all float or histogram samples in the specified interval
+ (see details below).
-
-
count_over_time(range-vector): the count of all values in the specified interval.
+ count_over_time(range-vector): the count of all samples in the specified interval.
-
-
quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified
- interval.
+ quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the
+ specified interval.
-
-
stddev_over_time(range-vector): the population standard deviation of the values in the specified
- interval.
+ stddev_over_time(range-vector): the population standard deviation of all float samples in the
+ specified interval.
-
-
stdvar_over_time(range-vector): the population standard variance of the values in the specified
- interval.
+ stdvar_over_time(range-vector): the population standard variance of all float samples in the
+ specified interval.
-
-
last_over_time(range-vector): the most recent point value in the specified interval.
+ last_over_time(range-vector): the most recent sample in the specified interval.
-
present_over_time(range-vector): the value 1 for any series in the specified interval.
@@ -1654,64 +2199,108 @@ const funcDocs: Record = {
If the feature flag
- --enable-feature=promql-experimental-functions is set, the following additional functions are available:
+ --enable-feature=promql-experimental-functions is set, the following additional functions are
+ available:
-
-
mad_over_time(range-vector): the median absolute deviation of all points in the specified interval.
+ mad_over_time(range-vector): the median absolute deviation of all float samples in the specified
+ interval.
+
+ -
+
ts_of_min_over_time(range-vector): the timestamp of the last float sample that has the minimum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_max_over_time(range-vector): the timestamp of the last float sample that has the maximum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_last_over_time(range-vector): the timestamp of the last sample in the specified interval.
+
+ -
+
first_over_time(range-vector): the oldest sample in the specified interval.
+
+ -
+
ts_of_first_over_time(range-vector): the timestamp of the earliest sample in the specified interval.
- Note that all values in the specified interval have the same weight in the aggregation even if the values are not
- equally spaced throughout the interval.
+ Note that all values in the specified interval have the same weight in the aggregation even if the values are
+ not equally spaced throughout the interval.
+ These functions act on histograms in the following way:
+
+
+ -
+
count_over_time, first_over_time, last_over_time, and
+ present_over_time() act on float and histogram samples in the same way.
+
+ -
+
avg_over_time() and sum_over_time() act on histogram samples in a way that
+ corresponds to the respective aggregation operators. If a series contains a mix of float samples and histogram
+ samples within the range, the corresponding result is removed entirely from the output vector. Such a removal
+ is flagged by a warn-level annotation.
+
+ -
+ All other functions ignore histogram samples in the following way: Input ranges containing only histogram
+ samples are silently removed from the output. For ranges with a mix of histogram and float samples, only the
+ float samples are processed and the omission of the histogram samples is flagged by an info-level annotation.
+
+
+
- avg_over_time, sum_over_time, count_over_time, last_over_time,
- and
- present_over_time handle native histograms as expected. All other functions ignore histogram samples.
+ first_over_time(m[1m]) differs from m offset 1m in that the former will select the
+ first sample of m within the 1m range, whereas m offset 1m will select the most
+ recent sample within the lookback interval outside and prior to the 1m offset. This is particularly
+ useful with first_over_time(m[step()])
+ in range queries (available when --enable-feature=promql-duration-expr is set) to ensure that the
+ sample selected is within the range step.
>
),
min_over_time: (
<>
- The following functions allow aggregating each series of a given range vector over time and return an instant vector
- with per-series aggregation results:
+ The following functions allow aggregating each series of a given range vector over time and return an instant
+ vector with per-series aggregation results:
-
-
avg_over_time(range-vector): the average value of all points in the specified interval.
+ avg_over_time(range-vector): the average value of all float or histogram samples in the specified
+ interval (see details below).
-
-
min_over_time(range-vector): the minimum value of all points in the specified interval.
+ min_over_time(range-vector): the minimum value of all float samples in the specified interval.
-
-
max_over_time(range-vector): the maximum value of all points in the specified interval.
+ max_over_time(range-vector): the maximum value of all float samples in the specified interval.
-
-
sum_over_time(range-vector): the sum of all values in the specified interval.
+ sum_over_time(range-vector): the sum of all float or histogram samples in the specified interval
+ (see details below).
-
-
count_over_time(range-vector): the count of all values in the specified interval.
+ count_over_time(range-vector): the count of all samples in the specified interval.
-
-
quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified
- interval.
+ quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the
+ specified interval.
-
-
stddev_over_time(range-vector): the population standard deviation of the values in the specified
- interval.
+ stddev_over_time(range-vector): the population standard deviation of all float samples in the
+ specified interval.
-
-
stdvar_over_time(range-vector): the population standard variance of the values in the specified
- interval.
+ stdvar_over_time(range-vector): the population standard variance of all float samples in the
+ specified interval.
-
-
last_over_time(range-vector): the most recent point value in the specified interval.
+ last_over_time(range-vector): the most recent sample in the specified interval.
-
present_over_time(range-vector): the value 1 for any series in the specified interval.
@@ -1720,95 +2309,140 @@ const funcDocs: Record = {
If the feature flag
- --enable-feature=promql-experimental-functions is set, the following additional functions are available:
+ --enable-feature=promql-experimental-functions is set, the following additional functions are
+ available:
-
-
mad_over_time(range-vector): the median absolute deviation of all points in the specified interval.
+ mad_over_time(range-vector): the median absolute deviation of all float samples in the specified
+ interval.
+
+ -
+
ts_of_min_over_time(range-vector): the timestamp of the last float sample that has the minimum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_max_over_time(range-vector): the timestamp of the last float sample that has the maximum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_last_over_time(range-vector): the timestamp of the last sample in the specified interval.
+
+ -
+
first_over_time(range-vector): the oldest sample in the specified interval.
+
+ -
+
ts_of_first_over_time(range-vector): the timestamp of the earliest sample in the specified interval.
- Note that all values in the specified interval have the same weight in the aggregation even if the values are not
- equally spaced throughout the interval.
+ Note that all values in the specified interval have the same weight in the aggregation even if the values are
+ not equally spaced throughout the interval.
+ These functions act on histograms in the following way:
+
+
+ -
+
count_over_time, first_over_time, last_over_time, and
+ present_over_time() act on float and histogram samples in the same way.
+
+ -
+
avg_over_time() and sum_over_time() act on histogram samples in a way that
+ corresponds to the respective aggregation operators. If a series contains a mix of float samples and histogram
+ samples within the range, the corresponding result is removed entirely from the output vector. Such a removal
+ is flagged by a warn-level annotation.
+
+ -
+ All other functions ignore histogram samples in the following way: Input ranges containing only histogram
+ samples are silently removed from the output. For ranges with a mix of histogram and float samples, only the
+ float samples are processed and the omission of the histogram samples is flagged by an info-level annotation.
+
+
+
- avg_over_time, sum_over_time, count_over_time, last_over_time,
- and
- present_over_time handle native histograms as expected. All other functions ignore histogram samples.
+ first_over_time(m[1m]) differs from m offset 1m in that the former will select the
+ first sample of m within the 1m range, whereas m offset 1m will select the most
+ recent sample within the lookback interval outside and prior to the 1m offset. This is particularly
+ useful with first_over_time(m[step()])
+ in range queries (available when --enable-feature=promql-duration-expr is set) to ensure that the
+ sample selected is within the range step.
>
),
minute: (
<>
- minute(v=vector(time()) instant-vector) returns the minute of the hour for each of the given times in
- UTC. Returned values are from 0 to 59.
+ minute(v=vector(time()) instant-vector) interprets float samples in v as timestamps
+ (number of seconds since January 1, 1970 UTC) and returns the minute of the hour (in UTC) for each of those
+ timestamps. Returned values are from 0 to 59. Histogram samples in the input vector are ignored silently.
>
),
month: (
<>
- month(v=vector(time()) instant-vector) returns the month of the year for each of the given times in UTC.
- Returned values are from 1 to 12, where 1 means January etc.
+ month(v=vector(time()) instant-vector) interprets float samples in v as timestamps
+ (number of seconds since January 1, 1970 UTC) and returns the month of the year (in UTC) for each of those
+ timestamps. Returned values are from 1 to 12, where 1 means January etc. Histogram samples in the input vector
+ are ignored silently.
>
),
pi: (
<>
- The trigonometric functions work in radians:
+ The trigonometric functions work in radians. They ignore histogram samples in the input vector.
-
-
acos(v instant-vector): calculates the arccosine of all elements in v (
+ acos(v instant-vector): calculates the arccosine of all float samples in v (
special cases).
-
-
acosh(v instant-vector): calculates the inverse hyperbolic cosine of all elements in v (
- special cases).
+ acosh(v instant-vector): calculates the inverse hyperbolic cosine of all float samples in{" "}
+ v (special cases).
-
-
asin(v instant-vector): calculates the arcsine of all elements in v (
+ asin(v instant-vector): calculates the arcsine of all float samples in v (
special cases).
-
-
asinh(v instant-vector): calculates the inverse hyperbolic sine of all elements in v (
- special cases).
+ asinh(v instant-vector): calculates the inverse hyperbolic sine of all float samples in{" "}
+ v (special cases).
-
-
atan(v instant-vector): calculates the arctangent of all elements in v (
+ atan(v instant-vector): calculates the arctangent of all float samples in v (
special cases).
-
-
atanh(v instant-vector): calculates the inverse hyperbolic tangent of all elements in v (
- special cases).
+ atanh(v instant-vector): calculates the inverse hyperbolic tangent of all float samples in{" "}
+ v (special cases).
-
-
cos(v instant-vector): calculates the cosine of all elements in v (
+ cos(v instant-vector): calculates the cosine of all float samples in v (
special cases).
-
-
cosh(v instant-vector): calculates the hyperbolic cosine of all elements in v (
+ cosh(v instant-vector): calculates the hyperbolic cosine of all float samples in v (
special cases).
-
-
sin(v instant-vector): calculates the sine of all elements in v (
+ sin(v instant-vector): calculates the sine of all float samples in v (
special cases).
-
-
sinh(v instant-vector): calculates the hyperbolic sine of all elements in v (
+ sinh(v instant-vector): calculates the hyperbolic sine of all float samples in v (
special cases).
-
-
tan(v instant-vector): calculates the tangent of all elements in v (
+ tan(v instant-vector): calculates the tangent of all float samples in v (
special cases).
-
-
tanh(v instant-vector): calculates the hyperbolic tangent of all elements in v (
- special cases).
+ tanh(v instant-vector): calculates the hyperbolic tangent of all float samples in v{" "}
+ (special cases).
@@ -1816,13 +2450,13 @@ const funcDocs: Record = {
-
-
deg(v instant-vector): converts radians to degrees for all elements in v.
+ deg(v instant-vector): converts radians to degrees for all float samples in v.
-
pi(): returns pi.
-
-
rad(v instant-vector): converts degrees to radians for all elements in v.
+ rad(v instant-vector): converts degrees to radians for all float samples in v.
>
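A small illustrative sketch of the radian/degree helpers (the vector() wrapper just turns the scalar literals into single-element vectors):

  deg(vector(pi()))     # 180
  rad(vector(180))      # pi
  sin(rad(vector(90)))  # 1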
@@ -1831,54 +2465,58 @@ const funcDocs: Record = {
<>
predict_linear(v range-vector, t scalar) predicts the value of time series
- t seconds from now, based on the range vector v, using{' '}
- simple linear regression. The range vector must
- have at least two samples in order to perform the calculation. When +Inf or -Inf are found
- in the range vector, the slope and offset value calculated will be NaN.
+ t seconds from now, based on the range vector v, using{" "}
+ simple linear regression. The range vector
+ must have at least two float samples in order to perform the calculation. When +Inf or{" "}
+ -Inf are found in the range vector, the predicted value will be NaN.
- predict_linear should only be used with gauges.
+ predict_linear should only be used with gauges and only works for float samples. Elements in the
+ range vector that contain only histogram samples are ignored entirely. For elements that contain a mix of float
+ and histogram samples, only the float samples are used as input, which is flagged by an info-level annotation.
>
),
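Illustrative predict_linear sketch (node_filesystem_avail_bytes is assumed to be a node exporter gauge; treat the selector as an example only):

  # Predicted free bytes four hours from now, based on the last hour of float samples.
  predict_linear(node_filesystem_avail_bytes{mountpoint="/"}[1h], 4 * 3600)

  # Typical alerting use: fire when the prediction drops below zero.
  predict_linear(node_filesystem_avail_bytes{mountpoint="/"}[1h], 4 * 3600) < 0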
present_over_time: (
<>
- The following functions allow aggregating each series of a given range vector over time and return an instant vector
- with per-series aggregation results:
+ The following functions allow aggregating each series of a given range vector over time and return an instant
+ vector with per-series aggregation results:
-
-
avg_over_time(range-vector): the average value of all points in the specified interval.
+ avg_over_time(range-vector): the average value of all float or histogram samples in the specified
+ interval (see details below).
-
-
min_over_time(range-vector): the minimum value of all points in the specified interval.
+ min_over_time(range-vector): the minimum value of all float samples in the specified interval.
-
-
max_over_time(range-vector): the maximum value of all points in the specified interval.
+ max_over_time(range-vector): the maximum value of all float samples in the specified interval.
-
-
sum_over_time(range-vector): the sum of all values in the specified interval.
+ sum_over_time(range-vector): the sum of all float or histogram samples in the specified interval
+ (see details below).
-
-
count_over_time(range-vector): the count of all values in the specified interval.
+ count_over_time(range-vector): the count of all samples in the specified interval.
-
-
quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified
- interval.
+ quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the
+ specified interval.
-
-
stddev_over_time(range-vector): the population standard deviation of the values in the specified
- interval.
+ stddev_over_time(range-vector): the population standard deviation of all float samples in the
+ specified interval.
-
-
stdvar_over_time(range-vector): the population standard variance of the values in the specified
- interval.
+ stdvar_over_time(range-vector): the population standard variance of all float samples in the
+ specified interval.
-
-
last_over_time(range-vector): the most recent point value in the specified interval.
+ last_over_time(range-vector): the most recent sample in the specified interval.
-
present_over_time(range-vector): the value 1 for any series in the specified interval.
@@ -1887,64 +2525,108 @@ const funcDocs: Record = {
If the feature flag
- --enable-feature=promql-experimental-functions is set, the following additional functions are available:
+ --enable-feature=promql-experimental-functions is set, the following additional functions are
+ available:
-
-
mad_over_time(range-vector): the median absolute deviation of all points in the specified interval.
+ mad_over_time(range-vector): the median absolute deviation of all float samples in the specified
+ interval.
+
+ -
+
ts_of_min_over_time(range-vector): the timestamp of the last float sample that has the minimum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_max_over_time(range-vector): the timestamp of the last float sample that has the maximum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_last_over_time(range-vector): the timestamp of the last sample in the specified interval.
+
+ -
+
first_over_time(range-vector): the oldest sample in the specified interval.
+
+ -
+
ts_of_first_over_time(range-vector): the timestamp of the earliest sample in the specified interval.
- Note that all values in the specified interval have the same weight in the aggregation even if the values are not
- equally spaced throughout the interval.
+ Note that all values in the specified interval have the same weight in the aggregation even if the values are
+ not equally spaced throughout the interval.
+ These functions act on histograms in the following way:
+
+
+ -
+
count_over_time, first_over_time, last_over_time, and
+ present_over_time() act on float and histogram samples in the same way.
+
+ -
+
avg_over_time() and sum_over_time() act on histogram samples in a way that
+ corresponds to the respective aggregation operators. If a series contains a mix of float samples and histogram
+ samples within the range, the corresponding result is removed entirely from the output vector. Such a removal
+ is flagged by a warn-level annotation.
+
+ -
+ All other functions ignore histogram samples in the following way: Input ranges containing only histogram
+ samples are silently removed from the output. For ranges with a mix of histogram and float samples, only the
+ float samples are processed and the omission of the histogram samples is flagged by an info-level annotation.
+
+
+
- avg_over_time, sum_over_time, count_over_time, last_over_time,
- and
- present_over_time handle native histograms as expected. All other functions ignore histogram samples.
+ first_over_time(m[1m]) differs from m offset 1m in that the former will select the
+ first sample of m within the 1m range, whereas m offset 1m will select the most
+ recent sample within the lookback interval outside and prior to the 1m offset. This is particularly
+ useful with first_over_time(m[step()])
+ in range queries (available when --enable-feature=promql-duration-expr is set) to ensure that the
+ sample selected is within the range step.
>
),
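A few illustrative _over_time queries (demo_request_duration_seconds is a hypothetical gauge):

  # Per-series average of all samples of the last 10 minutes.
  avg_over_time(demo_request_duration_seconds[10m])

  # Per-series 95th percentile of the float samples of the last 10 minutes.
  quantile_over_time(0.95, demo_request_duration_seconds[10m])

  # 1 for every series that had at least one sample in the last 10 minutes.
  present_over_time(demo_request_duration_seconds[10m])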
quantile_over_time: (
<>
- The following functions allow aggregating each series of a given range vector over time and return an instant vector
- with per-series aggregation results:
+ The following functions allow aggregating each series of a given range vector over time and return an instant
+ vector with per-series aggregation results:
-
-
avg_over_time(range-vector): the average value of all points in the specified interval.
+ avg_over_time(range-vector): the average value of all float or histogram samples in the specified
+ interval (see details below).
-
-
min_over_time(range-vector): the minimum value of all points in the specified interval.
+ min_over_time(range-vector): the minimum value of all float samples in the specified interval.
-
-
max_over_time(range-vector): the maximum value of all points in the specified interval.
+ max_over_time(range-vector): the maximum value of all float samples in the specified interval.
-
-
sum_over_time(range-vector): the sum of all values in the specified interval.
+ sum_over_time(range-vector): the sum of all float or histogram samples in the specified interval
+ (see details below).
-
-
count_over_time(range-vector): the count of all values in the specified interval.
+ count_over_time(range-vector): the count of all samples in the specified interval.
-
-
quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified
- interval.
+ quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the
+ specified interval.
-
-
stddev_over_time(range-vector): the population standard deviation of the values in the specified
- interval.
+ stddev_over_time(range-vector): the population standard deviation of all float samples in the
+ specified interval.
-
-
stdvar_over_time(range-vector): the population standard variance of the values in the specified
- interval.
+ stdvar_over_time(range-vector): the population standard variance of all float samples in the
+ specified interval.
-
-
last_over_time(range-vector): the most recent point value in the specified interval.
+ last_over_time(range-vector): the most recent sample in the specified interval.
-
present_over_time(range-vector): the value 1 for any series in the specified interval.
@@ -1953,79 +2635,121 @@ const funcDocs: Record = {
If the feature flag
- --enable-feature=promql-experimental-functions is set, the following additional functions are available:
+ --enable-feature=promql-experimental-functions is set, the following additional functions are
+ available:
-
-
mad_over_time(range-vector): the median absolute deviation of all points in the specified interval.
+ mad_over_time(range-vector): the median absolute deviation of all float samples in the specified
+ interval.
+
+ -
+
ts_of_min_over_time(range-vector): the timestamp of the last float sample that has the minimum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_max_over_time(range-vector): the timestamp of the last float sample that has the maximum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_last_over_time(range-vector): the timestamp of the last sample in the specified interval.
+
+ -
+
first_over_time(range-vector): the oldest sample in the specified interval.
+
+ -
+
ts_of_first_over_time(range-vector): the timestamp of the earliest sample in the specified interval.
- Note that all values in the specified interval have the same weight in the aggregation even if the values are not
- equally spaced throughout the interval.
+ Note that all values in the specified interval have the same weight in the aggregation even if the values are
+ not equally spaced throughout the interval.
+ These functions act on histograms in the following way:
+
+
+ -
+
count_over_time, first_over_time, last_over_time, and
+ present_over_time() act on float and histogram samples in the same way.
+
+ -
+
avg_over_time() and sum_over_time() act on histogram samples in a way that
+ corresponds to the respective aggregation operators. If a series contains a mix of float samples and histogram
+ samples within the range, the corresponding result is removed entirely from the output vector. Such a removal
+ is flagged by a warn-level annotation.
+
+ -
+ All other functions ignore histogram samples in the following way: Input ranges containing only histogram
+ samples are silently removed from the output. For ranges with a mix of histogram and float samples, only the
+ float samples are processed and the omission of the histogram samples is flagged by an info-level annotation.
+
+
+
- avg_over_time, sum_over_time, count_over_time, last_over_time,
- and
- present_over_time handle native histograms as expected. All other functions ignore histogram samples.
+ first_over_time(m[1m]) differs from m offset 1m in that the former will select the
+ first sample of m within the 1m range, whereas m offset 1m will select the most
+ recent sample within the lookback interval outside and prior to the 1m offset. This is particularly
+ useful with first_over_time(m[step()])
+ in range queries (available when --enable-feature=promql-duration-expr is set) to ensure that the
+ sample selected is within the range step.
>
),
rad: (
<>
- The trigonometric functions work in radians:
+ The trigonometric functions work in radians. They ignore histogram samples in the input vector.
-
-
acos(v instant-vector): calculates the arccosine of all elements in v (
+ acos(v instant-vector): calculates the arccosine of all float samples in v (
special cases).
-
-
acosh(v instant-vector): calculates the inverse hyperbolic cosine of all elements in v (
- special cases).
+ acosh(v instant-vector): calculates the inverse hyperbolic cosine of all float samples in{" "}
+ v (special cases).
-
-
asin(v instant-vector): calculates the arcsine of all elements in v (
+ asin(v instant-vector): calculates the arcsine of all float samples in v (
special cases).
-
-
asinh(v instant-vector): calculates the inverse hyperbolic sine of all elements in v (
- special cases).
+ asinh(v instant-vector): calculates the inverse hyperbolic sine of all float samples in{" "}
+ v (special cases).
-
-
atan(v instant-vector): calculates the arctangent of all elements in v (
+ atan(v instant-vector): calculates the arctangent of all float samples in v (
special cases).
-
-
atanh(v instant-vector): calculates the inverse hyperbolic tangent of all elements in v (
- special cases).
+ atanh(v instant-vector): calculates the inverse hyperbolic tangent of all float samples in{" "}
+ v (special cases).
-
-
cos(v instant-vector): calculates the cosine of all elements in v (
+ cos(v instant-vector): calculates the cosine of all float samples in v (
special cases).
-
-
cosh(v instant-vector): calculates the hyperbolic cosine of all elements in v (
+ cosh(v instant-vector): calculates the hyperbolic cosine of all float samples in v (
special cases).
-
-
sin(v instant-vector): calculates the sine of all elements in v (
+ sin(v instant-vector): calculates the sine of all float samples in v (
special cases).
-
-
sinh(v instant-vector): calculates the hyperbolic sine of all elements in v (
+ sinh(v instant-vector): calculates the hyperbolic sine of all float samples in v (
special cases).
-
-
tan(v instant-vector): calculates the tangent of all elements in v (
+ tan(v instant-vector): calculates the tangent of all float samples in v (
special cases).
-
-
tanh(v instant-vector): calculates the hyperbolic tangent of all elements in v (
- special cases).
+ tanh(v instant-vector): calculates the hyperbolic tangent of all float samples in v{" "}
+ (special cases).
@@ -2033,13 +2757,13 @@ const funcDocs: Record = {
-
-
deg(v instant-vector): converts radians to degrees for all elements in v.
+ deg(v instant-vector): converts radians to degrees for all float samples in v.
-
pi(): returns pi.
-
-
rad(v instant-vector): converts degrees to radians for all elements in v.
+ rad(v instant-vector): converts degrees to radians for all float samples in v.
>
@@ -2047,40 +2771,40 @@ const funcDocs: Record = {
rate: (
<>
- rate(v range-vector) calculates the per-second average rate of increase of the time series in the range
- vector. Breaks in monotonicity (such as counter resets due to target restarts) are automatically adjusted for. Also,
- the calculation extrapolates to the ends of the time range, allowing for missed scrapes or imperfect alignment of
- scrape cycles with the range’s time period.
+ rate(v range-vector) calculates the per-second average rate of increase of the time series in the
+ range vector. Breaks in monotonicity (such as counter resets due to target restarts) are automatically adjusted
+ for. Also, the calculation extrapolates to the ends of the time range, allowing for missed scrapes or imperfect
+ alignment of scrape cycles with the range’s time period.
- The following example expression returns the per-second rate of HTTP requests as measured over the last 5 minutes,
+ The following example expression returns the per-second average rate of HTTP requests over the last 5 minutes,
per time series in the range vector:
- rate(http_requests_total{'{'}job="api-server"{'}'}[5m])
+ rate(http_requests_total{"{"}job="api-server"{"}"}[5m])
- rate acts on native histograms by calculating a new histogram where each component (sum and count of
- observations, buckets) is the rate of increase between the respective component in the first and last native
- histogram in
- v. However, each element in v that contains a mix of float and native histogram samples
- within the range, will be missing from the result vector.
+ rate acts on native histograms by calculating a new histogram where each component (sum and count
+ of observations, buckets) is the rate of increase between the respective component in the first and last native
+ histogram in v. However, each element in v that contains a mix of float and native
+ histogram samples within the range will be omitted from the result vector, flagged by a warn-level annotation.
- rate should only be used with counters and native histograms where the components behave like counters.
- It is best suited for alerting, and for graphing of slow-moving counters.
+ rate should only be used with counters (for both floats and histograms). It is best suited for
+ alerting, and for graphing of slow-moving counters.
- Note that when combining rate() with an aggregation operator (e.g. sum()) or a function
- aggregating over time (any function ending in _over_time), always take a rate() first, then
- aggregate. Otherwise rate() cannot detect counter resets when your target restarts.
+ Note that when combining rate() with an aggregation operator (e.g. sum()) or a
+ function aggregating over time (any function ending in _over_time), always take a{" "}
+ rate() first, then aggregate. Otherwise rate() cannot detect counter resets when your
+ target restarts.
>
),
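Illustrative sketch of the aggregation ordering note, reusing the http_requests_total counter from the example above:

  # Correct: take the per-series rate first, then aggregate.
  sum by (job) (rate(http_requests_total[5m]))

  # Incorrect: summing the raw counters first hides per-series counter resets,
  # so rate() can no longer adjust for them.
  # rate(sum by (job) (http_requests_total)[5m:])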
@@ -2089,102 +2813,104 @@ const funcDocs: Record = {
For each input time series, resets(v range-vector) returns the number of counter resets within the
provided time range as an instant vector. Any decrease in the value between two consecutive float samples is
- interpreted as a counter reset. A reset in a native histogram is detected in a more complex way: Any decrease in any
- bucket, including the zero bucket, or in the count of observation constitutes a counter reset, but also the
- disappearance of any previously populated bucket, an increase in bucket resolution, or a decrease of the zero-bucket
- width.
+ interpreted as a counter reset. A reset in a native histogram is detected in a more complex way: Any decrease in
+ any bucket, including the zero bucket, or in the count of observations constitutes a counter reset, but also the
+ disappearance of any previously populated bucket, a decrease of the zero-bucket width, or any schema change that
+ is not a compatible decrease of resolution.
- resets should only be used with counters and counter-like native histograms.
+ resets should only be used with counters (for both floats and histograms).
- If the range vector contains a mix of float and histogram samples for the same series, counter resets are detected
- separately and their numbers added up. The change from a float to a histogram sample is not considered a
- counter reset. Each float sample is compared to the next float sample, and each histogram is comprared to the next
- histogram.
+ A float sample followed by a histogram sample, or vice versa, counts as a reset. A counter histogram sample
+ followed by a gauge histogram sample, or vice versa, also counts as a reset (but note that resets{" "}
+ should not be used on gauges in the first place, see above).
>
),
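Illustrative resets query, reusing the counter from the rate example (the 1h range is arbitrary):

  # Number of counter resets per series over the last hour,
  # e.g. to spot frequently restarting targets.
  resets(http_requests_total[1h])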
round: (
<>
- round(v instant-vector, to_nearest=1 scalar) rounds the sample values of all elements in v{' '}
- to the nearest integer. Ties are resolved by rounding up. The optional to_nearest argument allows
- specifying the nearest multiple to which the sample values should be rounded. This multiple may also be a fraction.
+ round(v instant-vector, to_nearest=1 scalar) rounds the sample values of all elements in{" "}
+ v to the nearest integer. Ties are resolved by rounding up. The optional to_nearest{" "}
+ argument allows specifying the nearest multiple to which the sample values should be rounded. This multiple may
+ also be a fraction. Histogram samples in the input vector are ignored silently.
>
),
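Small illustrative round examples (demo_temperature_celsius is a hypothetical gauge):

  round(demo_temperature_celsius)       # nearest integer, ties round up
  round(demo_temperature_celsius, 0.5)  # nearest multiple of 0.5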
scalar: (
<>
- Given a single-element input vector, scalar(v instant-vector) returns the sample value of that single
- element as a scalar. If the input vector does not have exactly one element, scalar will return{' '}
- NaN.
+ Given an input vector that contains only one element with a float sample,
+ scalar(v instant-vector) returns the sample value of that float sample as a scalar. If the input
+ vector does not have exactly one element with a float sample, scalar will return NaN.
+ Histogram samples in the input vector are ignored silently.
>
),
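Illustrative scalar sketch (node_load1 and the job label value are assumptions in the style of common node exporter setups):

  # count(...) yields a single-element vector; scalar() turns it into a scalar
  # so it can be compared against every series on the left-hand side.
  node_load1 > scalar(count(up{job="node"}))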
sgn: (
<>
- sgn(v instant-vector) returns a vector with all sample values converted to their sign, defined as this:
- 1 if v is positive, -1 if v is negative and 0 if v is equal to zero.
+ sgn(v instant-vector) returns a vector with all float sample values converted to their sign,
+ defined as follows: 1 if v is positive, -1 if v is negative, and 0 if v is equal to zero. Histogram samples in the
+ input vector are ignored silently.
>
),
sin: (
<>
- The trigonometric functions work in radians:
+ The trigonometric functions work in radians. They ignore histogram samples in the input vector.
-
-
acos(v instant-vector): calculates the arccosine of all elements in v (
+ acos(v instant-vector): calculates the arccosine of all float samples in v (
special cases).
-
-
acosh(v instant-vector): calculates the inverse hyperbolic cosine of all elements in v (
- special cases).
+ acosh(v instant-vector): calculates the inverse hyperbolic cosine of all float samples in{" "}
+ v (special cases).
-
-
asin(v instant-vector): calculates the arcsine of all elements in v (
+ asin(v instant-vector): calculates the arcsine of all float samples in v (
special cases).
-
-
asinh(v instant-vector): calculates the inverse hyperbolic sine of all elements in v (
- special cases).
+ asinh(v instant-vector): calculates the inverse hyperbolic sine of all float samples in{" "}
+ v (special cases).
-
-
atan(v instant-vector): calculates the arctangent of all elements in v (
+ atan(v instant-vector): calculates the arctangent of all float samples in v (
special cases).
-
-
atanh(v instant-vector): calculates the inverse hyperbolic tangent of all elements in v (
- special cases).
+ atanh(v instant-vector): calculates the inverse hyperbolic tangent of all float samples in{" "}
+ v (special cases).
-
-
cos(v instant-vector): calculates the cosine of all elements in v (
+ cos(v instant-vector): calculates the cosine of all float samples in v (
special cases).
-
-
cosh(v instant-vector): calculates the hyperbolic cosine of all elements in v (
+ cosh(v instant-vector): calculates the hyperbolic cosine of all float samples in v (
special cases).
-
-
sin(v instant-vector): calculates the sine of all elements in v (
+ sin(v instant-vector): calculates the sine of all float samples in v (
special cases).
-
-
sinh(v instant-vector): calculates the hyperbolic sine of all elements in v (
+ sinh(v instant-vector): calculates the hyperbolic sine of all float samples in v (
special cases).
-
-
tan(v instant-vector): calculates the tangent of all elements in v (
+ tan(v instant-vector): calculates the tangent of all float samples in v (
special cases).
-
-
tanh(v instant-vector): calculates the hyperbolic tangent of all elements in v (
- special cases).
+ tanh(v instant-vector): calculates the hyperbolic tangent of all float samples in v{" "}
+ (special cases).
@@ -2192,69 +2918,69 @@ const funcDocs: Record = {
-
-
deg(v instant-vector): converts radians to degrees for all elements in v.
+ deg(v instant-vector): converts radians to degrees for all float samples in v.
-
pi(): returns pi.
-
-
rad(v instant-vector): converts degrees to radians for all elements in v.
+ rad(v instant-vector): converts degrees to radians for all float samples in v.
>
),
sinh: (
<>
- The trigonometric functions work in radians:
+ The trigonometric functions work in radians. They ignore histogram samples in the input vector.
-
-
acos(v instant-vector): calculates the arccosine of all elements in v (
+ acos(v instant-vector): calculates the arccosine of all float samples in v (
special cases).
-
-
acosh(v instant-vector): calculates the inverse hyperbolic cosine of all elements in v (
- special cases).
+ acosh(v instant-vector): calculates the inverse hyperbolic cosine of all float samples in{" "}
+ v (special cases).
-
-
asin(v instant-vector): calculates the arcsine of all elements in v (
+ asin(v instant-vector): calculates the arcsine of all float samples in v (
special cases).
-
-
asinh(v instant-vector): calculates the inverse hyperbolic sine of all elements in v (
- special cases).
+ asinh(v instant-vector): calculates the inverse hyperbolic sine of all float samples in{" "}
+ v (special cases).
-
-
atan(v instant-vector): calculates the arctangent of all elements in v (
+ atan(v instant-vector): calculates the arctangent of all float samples in v (
special cases).
-
-
atanh(v instant-vector): calculates the inverse hyperbolic tangent of all elements in v (
- special cases).
+ atanh(v instant-vector): calculates the inverse hyperbolic tangent of all float samples in{" "}
+ v (special cases).
-
-
cos(v instant-vector): calculates the cosine of all elements in v (
+ cos(v instant-vector): calculates the cosine of all float samples in v (
special cases).
-
-
cosh(v instant-vector): calculates the hyperbolic cosine of all elements in v (
+ cosh(v instant-vector): calculates the hyperbolic cosine of all float samples in v (
special cases).
-
-
sin(v instant-vector): calculates the sine of all elements in v (
+ sin(v instant-vector): calculates the sine of all float samples in v (
special cases).
-
-
sinh(v instant-vector): calculates the hyperbolic sine of all elements in v (
+ sinh(v instant-vector): calculates the hyperbolic sine of all float samples in v (
special cases).
-
-
tan(v instant-vector): calculates the tangent of all elements in v (
+ tan(v instant-vector): calculates the tangent of all float samples in v (
special cases).
-
-
tanh(v instant-vector): calculates the hyperbolic tangent of all elements in v (
- special cases).
+ tanh(v instant-vector): calculates the hyperbolic tangent of all float samples in v{" "}
+ (special cases).
@@ -2262,13 +2988,13 @@ const funcDocs: Record = {
-
-
deg(v instant-vector): converts radians to degrees for all elements in v.
+ deg(v instant-vector): converts radians to degrees for all float samples in v.
-
pi(): returns pi.
-
-
rad(v instant-vector): converts degrees to radians for all elements in v.
+ rad(v instant-vector): converts degrees to radians for all float samples in v.
>
@@ -2276,13 +3002,13 @@ const funcDocs: Record = {
sort: (
<>
- sort(v instant-vector) returns vector elements sorted by their sample values, in ascending order. Native
- histograms are sorted by their sum of observations.
+ sort(v instant-vector) returns vector elements sorted by their float sample values, in ascending
+ order. Histogram samples in the input vector are ignored silently.
- Please note that sort only affects the results of instant queries, as range query results always have a
- fixed output ordering.
+ Please note that sort only affects the results of instant queries, as range query results always
+ have a fixed output ordering.
>
),
@@ -2290,24 +3016,27 @@ const funcDocs: Record = {
<>
- This function has to be enabled via the{' '}
- feature flag{' '}
+ This function has to be enabled via the{" "}
+ feature flag
--enable-feature=promql-experimental-functions.
- sort_by_label(v instant-vector, label string, ...) returns vector elements sorted by the values of the
- given labels in ascending order. In case these label values are equal, elements are sorted by their full label sets.
+ sort_by_label(v instant-vector, label string, ...) returns vector elements sorted by the values of
+ the given labels in ascending order. In case these label values are equal, elements are sorted by their full
+ label sets.
+ sort_by_label acts on float and histogram samples in the same way.
- Please note that the sort by label functions only affect the results of instant queries, as range query results
+ Please note that sort_by_label only affects the results of instant queries, as range query results
always have a fixed output ordering.
- This function uses natural sort order.
+ sort_by_label uses{" "}
+ natural sort order.
>
),
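Illustrative sort_by_label query (requires --enable-feature=promql-experimental-functions; instance and job are the usual target labels):

  # Sort instant-query results by instance, then job, in natural sort order.
  sort_by_label(up, "instance", "job")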
@@ -2315,8 +3044,8 @@ const funcDocs: Record = {
<>
- This function has to be enabled via the{' '}
- feature flag{' '}
+ This function has to be enabled via the{" "}
+ feature flag
--enable-feature=promql-experimental-functions.
@@ -2324,15 +3053,6 @@ const funcDocs: Record = {
Same as sort_by_label, but sorts in descending order.
-
-
- Please note that the sort by label functions only affect the results of instant queries, as range query results
- always have a fixed output ordering.
-
-
-
- This function uses natural sort order.
-
>
),
sort_desc: (
@@ -2340,57 +3060,55 @@ const funcDocs: Record = {
Same as sort, but sorts in descending order.
-
-
- Like sort, sort_desc only affects the results of instant queries, as range query results
- always have a fixed output ordering.
-
>
),
sqrt: (
<>
- sqrt(v instant-vector) calculates the square root of all elements in v.
+ sqrt(v instant-vector) calculates the square root of all float samples in
+ v. Histogram samples in the input vector are ignored silently.
>
),
stddev_over_time: (
<>
- The following functions allow aggregating each series of a given range vector over time and return an instant vector
- with per-series aggregation results:
+ The following functions allow aggregating each series of a given range vector over time and return an instant
+ vector with per-series aggregation results:
-
-
avg_over_time(range-vector): the average value of all points in the specified interval.
+ avg_over_time(range-vector): the average value of all float or histogram samples in the specified
+ interval (see details below).
-
-
min_over_time(range-vector): the minimum value of all points in the specified interval.
+ min_over_time(range-vector): the minimum value of all float samples in the specified interval.
-
-
max_over_time(range-vector): the maximum value of all points in the specified interval.
+ max_over_time(range-vector): the maximum value of all float samples in the specified interval.
-
-
sum_over_time(range-vector): the sum of all values in the specified interval.
+ sum_over_time(range-vector): the sum of all float or histogram samples in the specified interval
+ (see details below).
-
-
count_over_time(range-vector): the count of all values in the specified interval.
+ count_over_time(range-vector): the count of all samples in the specified interval.
-
-
quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified
- interval.
+ quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the
+ specified interval.
-
-
stddev_over_time(range-vector): the population standard deviation of the values in the specified
- interval.
+ stddev_over_time(range-vector): the population standard deviation of all float samples in the
+ specified interval.
-
-
stdvar_over_time(range-vector): the population standard variance of the values in the specified
- interval.
+ stdvar_over_time(range-vector): the population standard variance of all float samples in the
+ specified interval.
-
-
last_over_time(range-vector): the most recent point value in the specified interval.
+ last_over_time(range-vector): the most recent sample in the specified interval.
-
present_over_time(range-vector): the value 1 for any series in the specified interval.
@@ -2399,64 +3117,108 @@ const funcDocs: Record = {
If the feature flag
- --enable-feature=promql-experimental-functions is set, the following additional functions are available:
+ --enable-feature=promql-experimental-functions is set, the following additional functions are
+ available:
-
-
mad_over_time(range-vector): the median absolute deviation of all points in the specified interval.
+ mad_over_time(range-vector): the median absolute deviation of all float samples in the specified
+ interval.
+
+ -
+
ts_of_min_over_time(range-vector): the timestamp of the last float sample that has the minimum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_max_over_time(range-vector): the timestamp of the last float sample that has the maximum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_last_over_time(range-vector): the timestamp of the last sample in the specified interval.
+
+ -
+
first_over_time(range-vector): the oldest sample in the specified interval.
+
+ -
+
ts_of_first_over_time(range-vector): the timestamp of the earliest sample in the specified interval.
- Note that all values in the specified interval have the same weight in the aggregation even if the values are not
- equally spaced throughout the interval.
+ Note that all values in the specified interval have the same weight in the aggregation even if the values are
+ not equally spaced throughout the interval.
+ These functions act on histograms in the following way:
+
+
+ -
+
count_over_time, first_over_time, last_over_time, and
+ present_over_time() act on float and histogram samples in the same way.
+
+ -
+
avg_over_time() and sum_over_time() act on histogram samples in a way that
+ corresponds to the respective aggregation operators. If a series contains a mix of float samples and histogram
+ samples within the range, the corresponding result is removed entirely from the output vector. Such a removal
+ is flagged by a warn-level annotation.
+
+ -
+ All other functions ignore histogram samples in the following way: Input ranges containing only histogram
+ samples are silently removed from the output. For ranges with a mix of histogram and float samples, only the
+ float samples are processed and the omission of the histogram samples is flagged by an info-level annotation.
+
+
+
- avg_over_time, sum_over_time, count_over_time, last_over_time,
- and
- present_over_time handle native histograms as expected. All other functions ignore histogram samples.
+ first_over_time(m[1m]) differs from m offset 1m in that the former will select the
+ first sample of m within the 1m range, whereas m offset 1m will select the most
+ recent sample within the lookback interval outside and prior to the 1m offset. This is particularly
+ useful with first_over_time(m[step()])
+ in range queries (available when --enable-feature=promql-duration-expr is set) to ensure that the
+ sample selected is within the range step.
>
),
stdvar_over_time: (
<>
- The following functions allow aggregating each series of a given range vector over time and return an instant vector
- with per-series aggregation results:
+ The following functions allow aggregating each series of a given range vector over time and return an instant
+ vector with per-series aggregation results:
-
-
avg_over_time(range-vector): the average value of all points in the specified interval.
+ avg_over_time(range-vector): the average value of all float or histogram samples in the specified
+ interval (see details below).
-
-
min_over_time(range-vector): the minimum value of all points in the specified interval.
+ min_over_time(range-vector): the minimum value of all float samples in the specified interval.
-
-
max_over_time(range-vector): the maximum value of all points in the specified interval.
+ max_over_time(range-vector): the maximum value of all float samples in the specified interval.
-
-
sum_over_time(range-vector): the sum of all values in the specified interval.
+ sum_over_time(range-vector): the sum of all float or histogram samples in the specified interval
+ (see details below).
-
-
count_over_time(range-vector): the count of all values in the specified interval.
+ count_over_time(range-vector): the count of all samples in the specified interval.
-
-
quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified
- interval.
+ quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the
+ specified interval.
-
-
stddev_over_time(range-vector): the population standard deviation of the values in the specified
- interval.
+ stddev_over_time(range-vector): the population standard deviation of all float samples in the
+ specified interval.
-
-
stdvar_over_time(range-vector): the population standard variance of the values in the specified
- interval.
+ stdvar_over_time(range-vector): the population standard variance of all float samples in the
+ specified interval.
-
-
last_over_time(range-vector): the most recent point value in the specified interval.
+ last_over_time(range-vector): the most recent sample in the specified interval.
-
present_over_time(range-vector): the value 1 for any series in the specified interval.
@@ -2465,64 +3227,108 @@ const funcDocs: Record = {
If the feature flag
- --enable-feature=promql-experimental-functions is set, the following additional functions are available:
+ --enable-feature=promql-experimental-functions is set, the following additional functions are
+ available:
-
-
mad_over_time(range-vector): the median absolute deviation of all points in the specified interval.
+ mad_over_time(range-vector): the median absolute deviation of all float samples in the specified
+ interval.
+
+ -
+
ts_of_min_over_time(range-vector): the timestamp of the last float sample that has the minimum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_max_over_time(range-vector): the timestamp of the last float sample that has the maximum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_last_over_time(range-vector): the timestamp of the last sample in the specified interval.
+
+ -
+
first_over_time(range-vector): the oldest sample in the specified interval.
+
+ -
+
ts_of_first_over_time(range-vector): the timestamp of the earliest sample in the specified interval.
- Note that all values in the specified interval have the same weight in the aggregation even if the values are not
- equally spaced throughout the interval.
+ Note that all values in the specified interval have the same weight in the aggregation even if the values are
+ not equally spaced throughout the interval.
+ These functions act on histograms in the following way:
+
+
+ -
+
count_over_time, first_over_time, last_over_time, and
+ present_over_time() act on float and histogram samples in the same way.
+
+ -
+
avg_over_time() and sum_over_time() act on histogram samples in a way that
+ corresponds to the respective aggregation operators. If a series contains a mix of float samples and histogram
+ samples within the range, the corresponding result is removed entirely from the output vector. Such a removal
+ is flagged by a warn-level annotation.
+
+ -
+ All other functions ignore histogram samples in the following way: Input ranges containing only histogram
+ samples are silently removed from the output. For ranges with a mix of histogram and float samples, only the
+ float samples are processed and the omission of the histogram samples is flagged by an info-level annotation.
+
+
+
- avg_over_time, sum_over_time, count_over_time, last_over_time,
- and
- present_over_time handle native histograms as expected. All other functions ignore histogram samples.
+ first_over_time(m[1m]) differs from m offset 1m in that the former will select the
+ first sample of m within the 1m range, whereas m offset 1m will select the most
+ recent sample within the lookback interval outside and prior to the 1m offset. This is particularly
+ useful with first_over_time(m[step()])
+ in range queries (available when --enable-feature=promql-duration-expr is set) to ensure that the
+ sample selected is within the range step.
>
),
sum_over_time: (
<>
- The following functions allow aggregating each series of a given range vector over time and return an instant vector
- with per-series aggregation results:
+ The following functions allow aggregating each series of a given range vector over time and return an instant
+ vector with per-series aggregation results:
-
-
avg_over_time(range-vector): the average value of all points in the specified interval.
+ avg_over_time(range-vector): the average value of all float or histogram samples in the specified
+ interval (see details below).
-
-
min_over_time(range-vector): the minimum value of all points in the specified interval.
+ min_over_time(range-vector): the minimum value of all float samples in the specified interval.
-
-
max_over_time(range-vector): the maximum value of all points in the specified interval.
+ max_over_time(range-vector): the maximum value of all float samples in the specified interval.
-
-
sum_over_time(range-vector): the sum of all values in the specified interval.
+ sum_over_time(range-vector): the sum of all float or histogram samples in the specified interval
+ (see details below).
-
-
count_over_time(range-vector): the count of all values in the specified interval.
+ count_over_time(range-vector): the count of all samples in the specified interval.
-
-
quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified
- interval.
+ quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the
+ specified interval.
-
-
stddev_over_time(range-vector): the population standard deviation of the values in the specified
- interval.
+ stddev_over_time(range-vector): the population standard deviation of all float samples in the
+ specified interval.
-
-
stdvar_over_time(range-vector): the population standard variance of the values in the specified
- interval.
+ stdvar_over_time(range-vector): the population standard variance of all float samples in the
+ specified interval.
-
-
last_over_time(range-vector): the most recent point value in the specified interval.
+ last_over_time(range-vector): the most recent sample in the specified interval.
-
present_over_time(range-vector): the value 1 for any series in the specified interval.
@@ -2531,79 +3337,121 @@ const funcDocs: Record = {
If the feature flag
- --enable-feature=promql-experimental-functions is set, the following additional functions are available:
+ --enable-feature=promql-experimental-functions is set, the following additional functions are
+ available:
-
-
mad_over_time(range-vector): the median absolute deviation of all points in the specified interval.
+ mad_over_time(range-vector): the median absolute deviation of all float samples in the specified
+ interval.
+
+ -
+
ts_of_min_over_time(range-vector): the timestamp of the last float sample that has the minimum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_max_over_time(range-vector): the timestamp of the last float sample that has the maximum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_last_over_time(range-vector): the timestamp of the last sample in the specified interval.
+
+ -
+
first_over_time(range-vector): the oldest sample in the specified interval.
+
+ -
+
ts_of_first_over_time(range-vector): the timestamp of the earliest sample in the specified interval.
- Note that all values in the specified interval have the same weight in the aggregation even if the values are not
- equally spaced throughout the interval.
+ Note that all values in the specified interval have the same weight in the aggregation even if the values are
+ not equally spaced throughout the interval.
+ These functions act on histograms in the following way:
+
+
+ -
+
count_over_time, first_over_time, last_over_time, and
+ present_over_time() act on float and histogram samples in the same way.
+
+ -
+
avg_over_time() and sum_over_time() act on histogram samples in a way that
+ corresponds to the respective aggregation operators. If a series contains a mix of float samples and histogram
+ samples within the range, the corresponding result is removed entirely from the output vector. Such a removal
+ is flagged by a warn-level annotation.
+
+ -
+ All other functions ignore histogram samples in the following way: Input ranges containing only histogram
+ samples are silently removed from the output. For ranges with a mix of histogram and float samples, only the
+ float samples are processed and the omission of the histogram samples is flagged by an info-level annotation.
+
+
+
- avg_over_time, sum_over_time, count_over_time, last_over_time,
- and
- present_over_time handle native histograms as expected. All other functions ignore histogram samples.
+ first_over_time(m[1m]) differs from m offset 1m in that the former will select the
+ first sample of m within the 1m range, whereas m offset 1m will select the most
+ recent sample within the lookback interval outside and prior to the 1m offset. This is particularly
+ useful with first_over_time(m[step()])
+ in range queries (available when --enable-feature=promql-duration-expr is set) to ensure that the
+ sample selected is within the range step.
>
),
tan: (
<>
- The trigonometric functions work in radians:
+ The trigonometric functions work in radians. They ignore histogram samples in the input vector.
-
-
acos(v instant-vector): calculates the arccosine of all elements in v (
+ acos(v instant-vector): calculates the arccosine of all float samples in v (
special cases).
-
-
acosh(v instant-vector): calculates the inverse hyperbolic cosine of all elements in v (
- special cases).
+ acosh(v instant-vector): calculates the inverse hyperbolic cosine of all float samples in{" "}
+ v (special cases).
-
-
asin(v instant-vector): calculates the arcsine of all elements in v (
+ asin(v instant-vector): calculates the arcsine of all float samples in v (
special cases).
-
-
asinh(v instant-vector): calculates the inverse hyperbolic sine of all elements in v (
- special cases).
+ asinh(v instant-vector): calculates the inverse hyperbolic sine of all float samples in{" "}
+ v (special cases).
-
-
atan(v instant-vector): calculates the arctangent of all elements in v (
+ atan(v instant-vector): calculates the arctangent of all float samples in v (
special cases).
-
-
atanh(v instant-vector): calculates the inverse hyperbolic tangent of all elements in v (
- special cases).
+ atanh(v instant-vector): calculates the inverse hyperbolic tangent of all float samples in{" "}
+ v (special cases).
-
-
cos(v instant-vector): calculates the cosine of all elements in v (
+ cos(v instant-vector): calculates the cosine of all float samples in v (
special cases).
-
-
cosh(v instant-vector): calculates the hyperbolic cosine of all elements in v (
+ cosh(v instant-vector): calculates the hyperbolic cosine of all float samples in v (
special cases).
-
-
sin(v instant-vector): calculates the sine of all elements in v (
+ sin(v instant-vector): calculates the sine of all float samples in v (
special cases).
-
-
sinh(v instant-vector): calculates the hyperbolic sine of all elements in v (
+ sinh(v instant-vector): calculates the hyperbolic sine of all float samples in v (
special cases).
-
-
tan(v instant-vector): calculates the tangent of all elements in v (
+ tan(v instant-vector): calculates the tangent of all float samples in v (
special cases).
-
-
tanh(v instant-vector): calculates the hyperbolic tangent of all elements in v (
- special cases).
+ tanh(v instant-vector): calculates the hyperbolic tangent of all float samples in v{" "}
+ (special cases).
@@ -2611,69 +3459,69 @@ const funcDocs: Record = {
-
-
deg(v instant-vector): converts radians to degrees for all elements in v.
+ deg(v instant-vector): converts radians to degrees for all float samples in v.
-
pi(): returns pi.
-
-
rad(v instant-vector): converts degrees to radians for all elements in v.
+ rad(v instant-vector): converts degrees to radians for all float samples in v.
>
),
tanh: (
<>
- The trigonometric functions work in radians:
+ The trigonometric functions work in radians. They ignore histogram samples in the input vector.
-
-
acos(v instant-vector): calculates the arccosine of all elements in v (
+ acos(v instant-vector): calculates the arccosine of all float samples in v (
special cases).
-
-
acosh(v instant-vector): calculates the inverse hyperbolic cosine of all elements in v (
- special cases).
+ acosh(v instant-vector): calculates the inverse hyperbolic cosine of all float samples in{" "}
+ v (special cases).
-
-
asin(v instant-vector): calculates the arcsine of all elements in v (
+ asin(v instant-vector): calculates the arcsine of all float samples in v (
special cases).
-
-
asinh(v instant-vector): calculates the inverse hyperbolic sine of all elements in v (
- special cases).
+ asinh(v instant-vector): calculates the inverse hyperbolic sine of all float samples in{" "}
+ v (special cases).
-
-
atan(v instant-vector): calculates the arctangent of all elements in v (
+ atan(v instant-vector): calculates the arctangent of all float samples in v (
special cases).
-
-
atanh(v instant-vector): calculates the inverse hyperbolic tangent of all elements in v (
- special cases).
+ atanh(v instant-vector): calculates the inverse hyperbolic tangent of all float samples in{" "}
+ v (special cases).
-
-
cos(v instant-vector): calculates the cosine of all elements in v (
+ cos(v instant-vector): calculates the cosine of all float samples in v (
special cases).
-
-
cosh(v instant-vector): calculates the hyperbolic cosine of all elements in v (
+ cosh(v instant-vector): calculates the hyperbolic cosine of all float samples in v (
special cases).
-
-
sin(v instant-vector): calculates the sine of all elements in v (
+ sin(v instant-vector): calculates the sine of all float samples in v (
special cases).
-
-
sinh(v instant-vector): calculates the hyperbolic sine of all elements in v (
+ sinh(v instant-vector): calculates the hyperbolic sine of all float samples in v (
special cases).
-
-
tan(v instant-vector): calculates the tangent of all elements in v (
+ tan(v instant-vector): calculates the tangent of all float samples in v (
special cases).
-
-
tanh(v instant-vector): calculates the hyperbolic tangent of all elements in v (
- special cases).
+ tanh(v instant-vector): calculates the hyperbolic tangent of all float samples in v{" "}
+ (special cases).
@@ -2681,13 +3529,13 @@ const funcDocs: Record = {
-
-
deg(v instant-vector): converts radians to degrees for all elements in v.
+ deg(v instant-vector): converts radians to degrees for all float samples in v.
-
pi(): returns pi.
-
-
rad(v instant-vector): converts degrees to radians for all elements in v.
+ rad(v instant-vector): converts degrees to radians for all float samples in v.
>
@@ -2695,8 +3543,8 @@ const funcDocs: Record = {
time: (
<>
- time() returns the number of seconds since January 1, 1970 UTC. Note that this does not actually return
- the current time, but the time at which the expression is to be evaluated.
+ time() returns the number of seconds since January 1, 1970 UTC. Note that this does not actually
+ return the current time, but the time at which the expression is to be evaluated.
>
),
@@ -2704,14 +3552,455 @@ const funcDocs: Record = {
<>
timestamp(v instant-vector) returns the timestamp of each of the samples of the given vector as the
- number of seconds since January 1, 1970 UTC. It also works with histogram samples.
+ number of seconds since January 1, 1970 UTC. It acts on float and histogram samples in the same way.
+
+ >
+ ),
+ ts_of_first_over_time: (
+ <>
+
+ The following functions allow aggregating each series of a given range vector over time and return an instant
+ vector with per-series aggregation results:
+
+
+
+ -
+
avg_over_time(range-vector): the average value of all float or histogram samples in the specified
+ interval (see details below).
+
+ -
+
min_over_time(range-vector): the minimum value of all float samples in the specified interval.
+
+ -
+
max_over_time(range-vector): the maximum value of all float samples in the specified interval.
+
+ -
+
sum_over_time(range-vector): the sum of all float or histogram samples in the specified interval
+ (see details below).
+
+ -
+
count_over_time(range-vector): the count of all samples in the specified interval.
+
+ -
+
quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the
+ specified interval.
+
+ -
+
stddev_over_time(range-vector): the population standard deviation of all float samples in the
+ specified interval.
+
+ -
+
stdvar_over_time(range-vector): the population standard variance of all float samples in the
+ specified interval.
+
+ -
+
last_over_time(range-vector): the most recent sample in the specified interval.
+
+ -
+
present_over_time(range-vector): the value 1 for any series in the specified interval.
+
+
+
+
+ If the feature flag
+ --enable-feature=promql-experimental-functions is set, the following additional functions are
+ available:
+
+
+
+ -
+
mad_over_time(range-vector): the median absolute deviation of all float samples in the specified
+ interval.
+
+ -
+
ts_of_min_over_time(range-vector): the timestamp of the last float sample that has the minimum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_max_over_time(range-vector): the timestamp of the last float sample that has the maximum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_last_over_time(range-vector): the timestamp of the last sample in the specified interval.
+
+ -
+
first_over_time(range-vector): the oldest sample in the specified interval.
+
+ -
+
ts_of_first_over_time(range-vector): the timestamp of the earliest sample in the specified interval.
+
+
+
+
+ Note that all values in the specified interval have the same weight in the aggregation even if the values are
+ not equally spaced throughout the interval.
+
+
+ These functions act on histograms in the following way:
+
+
+ -
+
count_over_time, first_over_time, last_over_time, and
+ present_over_time() act on float and histogram samples in the same way.
+
+ -
+
avg_over_time() and sum_over_time() act on histogram samples in a way that
+ corresponds to the respective aggregation operators. If a series contains a mix of float samples and histogram
+ samples within the range, the corresponding result is removed entirely from the output vector. Such a removal
+ is flagged by a warn-level annotation.
+
+ -
+ All other functions ignore histogram samples in the following way: Input ranges containing only histogram
+ samples are silently removed from the output. For ranges with a mix of histogram and float samples, only the
+ float samples are processed and the omission of the histogram samples is flagged by an info-level annotation.
+
+
+
+
+ first_over_time(m[1m]) differs from m offset 1m in that the former will select the
+ first sample of m within the 1m range, whereas m offset 1m will select the most
+ recent sample within the lookback interval outside and prior to the 1m offset. This is particularly
+ useful with first_over_time(m[step()])
+ in range queries (available when --enable-feature=promql-duration-expr is set) to ensure that the
+ sample selected is within the range step.
+
+ >
+ ),
+ ts_of_last_over_time: (
+ <>
+
+ The following functions allow aggregating each series of a given range vector over time and return an instant
+ vector with per-series aggregation results:
+
+
+
+ -
+
avg_over_time(range-vector): the average value of all float or histogram samples in the specified
+ interval (see details below).
+
+ -
+
min_over_time(range-vector): the minimum value of all float samples in the specified interval.
+
+ -
+
max_over_time(range-vector): the maximum value of all float samples in the specified interval.
+
+ -
+
sum_over_time(range-vector): the sum of all float or histogram samples in the specified interval
+ (see details below).
+
+ -
+
count_over_time(range-vector): the count of all samples in the specified interval.
+
+ -
+
quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the
+ specified interval.
+
+ -
+
stddev_over_time(range-vector): the population standard deviation of all float samples in the
+ specified interval.
+
+ -
+
stdvar_over_time(range-vector): the population standard variance of all float samples in the
+ specified interval.
+
+ -
+
last_over_time(range-vector): the most recent sample in the specified interval.
+
+ -
+
present_over_time(range-vector): the value 1 for any series in the specified interval.
+
+
+
+
+ If the feature flag
+ --enable-feature=promql-experimental-functions is set, the following additional functions are
+ available:
+
+
+
+ -
+
mad_over_time(range-vector): the median absolute deviation of all float samples in the specified
+ interval.
+
+ -
+
ts_of_min_over_time(range-vector): the timestamp of the last float sample that has the minimum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_max_over_time(range-vector): the timestamp of the last float sample that has the maximum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_last_over_time(range-vector): the timestamp of the last sample in the specified interval.
+
+ -
+
first_over_time(range-vector): the oldest sample in the specified interval.
+
+ -
+
ts_of_first_over_time(range-vector): the timestamp of the earliest sample in the specified interval.
+
+
+
+
+ Note that all values in the specified interval have the same weight in the aggregation even if the values are
+ not equally spaced throughout the interval.
+
+
+ These functions act on histograms in the following way:
+
+
+ -
+
count_over_time, first_over_time, last_over_time, and
+ present_over_time() act on float and histogram samples in the same way.
+
+ -
+
avg_over_time() and sum_over_time() act on histogram samples in a way that
+ corresponds to the respective aggregation operators. If a series contains a mix of float samples and histogram
+ samples within the range, the corresponding result is removed entirely from the output vector. Such a removal
+ is flagged by a warn-level annotation.
+
+ -
+ All other functions ignore histogram samples in the following way: Input ranges containing only histogram
+ samples are silently removed from the output. For ranges with a mix of histogram and float samples, only the
+ float samples are processed and the omission of the histogram samples is flagged by an info-level annotation.
+
+
+
+
+ first_over_time(m[1m]) differs from m offset 1m in that the former will select the
+ first sample of m within the 1m range, whereas m offset 1m will select the most
+ recent sample within the lookback interval outside and prior to the 1m offset. This is particularly
+ useful with first_over_time(m[step()])
+ in range queries (available when --enable-feature=promql-duration-expr is set) to ensure that the
+ sample selected is within the range step.
+
+ >
+ ),
+ ts_of_max_over_time: (
+ <>
+
+ The following functions allow aggregating each series of a given range vector over time and return an instant
+ vector with per-series aggregation results:
+
+
+
+ -
+
avg_over_time(range-vector): the average value of all float or histogram samples in the specified
+ interval (see details below).
+
+ -
+
min_over_time(range-vector): the minimum value of all float samples in the specified interval.
+
+ -
+
max_over_time(range-vector): the maximum value of all float samples in the specified interval.
+
+ -
+
sum_over_time(range-vector): the sum of all float or histogram samples in the specified interval
+ (see details below).
+
+ -
+
count_over_time(range-vector): the count of all samples in the specified interval.
+
+ -
+
quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the
+ specified interval.
+
+ -
+
stddev_over_time(range-vector): the population standard deviation of all float samples in the
+ specified interval.
+
+ -
+
stdvar_over_time(range-vector): the population standard variance of all float samples in the
+ specified interval.
+
+ -
+
last_over_time(range-vector): the most recent sample in the specified interval.
+
+ -
+
present_over_time(range-vector): the value 1 for any series in the specified interval.
+
+
+
+
+ If the feature flag
+ --enable-feature=promql-experimental-functions is set, the following additional functions are
+ available:
+
+
+
+ -
+
mad_over_time(range-vector): the median absolute deviation of all float samples in the specified
+ interval.
+
+ -
+
ts_of_min_over_time(range-vector): the timestamp of the last float sample that has the minimum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_max_over_time(range-vector): the timestamp of the last float sample that has the maximum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_last_over_time(range-vector): the timestamp of the last sample in the specified interval.
+
+ -
+
first_over_time(range-vector): the oldest sample in the specified interval.
+
+ -
+
ts_of_first_over_time(range-vector): the timestamp of the earliest sample in the specified interval.
+
+
+
+
+ Note that all values in the specified interval have the same weight in the aggregation even if the values are
+ not equally spaced throughout the interval.
+
+
+ These functions act on histograms in the following way:
+
+
+ -
+
count_over_time, first_over_time, last_over_time, and
+ present_over_time() act on float and histogram samples in the same way.
+
+ -
+
avg_over_time() and sum_over_time() act on histogram samples in a way that
+ corresponds to the respective aggregation operators. If a series contains a mix of float samples and histogram
+ samples within the range, the corresponding result is removed entirely from the output vector. Such a removal
+ is flagged by a warn-level annotation.
+
+ -
+ All other functions ignore histogram samples in the following way: Input ranges containing only histogram
+ samples are silently removed from the output. For ranges with a mix of histogram and float samples, only the
+ float samples are processed and the omission of the histogram samples is flagged by an info-level annotation.
+
+
+
+
+ first_over_time(m[1m]) differs from m offset 1m in that the former will select the
+ first sample of m within the 1m range, whereas m offset 1m will select the most
+ recent sample within the lookback interval outside and prior to the 1m offset. This is particularly
+ useful with first_over_time(m[step()])
+ in range queries (available when --enable-feature=promql-duration-expr is set) to ensure that the
+ sample selected is within the range step.
+
+ >
+ ),
+ ts_of_min_over_time: (
+ <>
+
+ The following functions allow aggregating each series of a given range vector over time and return an instant
+ vector with per-series aggregation results:
+
+
+
+ -
+
avg_over_time(range-vector): the average value of all float or histogram samples in the specified
+ interval (see details below).
+
+ -
+
min_over_time(range-vector): the minimum value of all float samples in the specified interval.
+
+ -
+
max_over_time(range-vector): the maximum value of all float samples in the specified interval.
+
+ -
+
sum_over_time(range-vector): the sum of all float or histogram samples in the specified interval
+ (see details below).
+
+ -
+
count_over_time(range-vector): the count of all samples in the specified interval.
+
+ -
+
quantile_over_time(scalar, range-vector): the φ-quantile (0 ≤ φ ≤ 1) of all float samples in the
+ specified interval.
+
+ -
+
stddev_over_time(range-vector): the population standard deviation of all float samples in the
+ specified interval.
+
+ -
+
stdvar_over_time(range-vector): the population standard variance of all float samples in the
+ specified interval.
+
+ -
+
last_over_time(range-vector): the most recent sample in the specified interval.
+
+ -
+
present_over_time(range-vector): the value 1 for any series in the specified interval.
+
+
+
+
+ If the feature flag
+ --enable-feature=promql-experimental-functions is set, the following additional functions are
+ available:
+
+
+
+ -
+
mad_over_time(range-vector): the median absolute deviation of all float samples in the specified
+ interval.
+
+ -
+
ts_of_min_over_time(range-vector): the timestamp of the last float sample that has the minimum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_max_over_time(range-vector): the timestamp of the last float sample that has the maximum
+ value of all float samples in the specified interval.
+
+ -
+
ts_of_last_over_time(range-vector): the timestamp of the last sample in the specified interval.
+
+ -
+
first_over_time(range-vector): the oldest sample in the specified interval.
+
+ -
+
ts_of_first_over_time(range-vector): the timestamp of the earliest sample in the specified interval.
+
+
+
+
+ Note that all values in the specified interval have the same weight in the aggregation even if the values are
+ not equally spaced throughout the interval.
+
+
+ These functions act on histograms in the following way:
+
+
+ -
+
count_over_time, first_over_time, last_over_time, and
+ present_over_time() act on float and histogram samples in the same way.
+
+ -
+
avg_over_time() and sum_over_time() act on histogram samples in a way that
+ corresponds to the respective aggregation operators. If a series contains a mix of float samples and histogram
+ samples within the range, the corresponding result is removed entirely from the output vector. Such a removal
+ is flagged by a warn-level annotation.
+
+ -
+ All other functions ignore histogram samples in the following way: Input ranges containing only histogram
+ samples are silently removed from the output. For ranges with a mix of histogram and float samples, only the
+ float samples are processed and the omission of the histogram samples is flagged by an info-level annotation.
+
+
+
+
+ first_over_time(m[1m]) differs from m offset 1m in that the former will select the
+ first sample of m within the 1m range, whereas m offset 1m will select the most
+ recent sample within the lookback interval outside and prior to the 1m offset. This is particularly
+ useful with first_over_time(m[step()])
+ in range queries (available when --enable-feature=promql-duration-expr is set) to ensure that the
+ sample selected is within the range step.
>
),
vector: (
<>
- vector(s scalar) returns the scalar s as a vector with no labels.
+ vector(s scalar) converts the scalar s to a float sample and returns it as a
+ single-element instant vector with no labels.
>
),
@@ -2719,6 +4008,7 @@ const funcDocs: Record = {
<>
year(v=vector(time()) instant-vector) returns the year for each of the given times in UTC.
+ Histogram samples in the input vector are ignored silently.
>
),
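The first_over_time-versus-offset behaviour documented above is subtle, so here is a minimal standalone Go sketch of it. This is not Prometheus engine code; it assumes the default 5m lookback delta for instant selectors and left-open/right-closed range boundaries, and only illustrates which sample each form would pick for one series.

```go
// Illustrative sketch only: which sample does first_over_time(m[1m]) pick
// versus m offset 1m at evaluation time t, for one series of samples?
// Assumptions (not taken from this diff): default 5m lookback delta,
// (t-range, t] boundaries for range selectors.
package main

import (
	"fmt"
	"time"
)

// firstOverTime returns the oldest sample timestamp within (t-rng, t].
func firstOverTime(samples []time.Time, t time.Time, rng time.Duration) (time.Time, bool) {
	for _, s := range samples { // samples assumed sorted ascending
		if s.After(t.Add(-rng)) && !s.After(t) {
			return s, true
		}
	}
	return time.Time{}, false
}

// offsetSelect returns the newest sample within the lookback window ending at t-offset.
func offsetSelect(samples []time.Time, t time.Time, offset, lookback time.Duration) (time.Time, bool) {
	end := t.Add(-offset)
	for i := len(samples) - 1; i >= 0; i-- {
		if s := samples[i]; !s.After(end) && s.After(end.Add(-lookback)) {
			return s, true
		}
	}
	return time.Time{}, false
}

func main() {
	t := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC)
	samples := []time.Time{t.Add(-90 * time.Second), t.Add(-45 * time.Second), t.Add(-15 * time.Second)}

	f, _ := firstOverTime(samples, t, time.Minute)
	o, _ := offsetSelect(samples, t, time.Minute, 5*time.Minute)
	fmt.Println("first_over_time(m[1m]) picks:", f) // t-45s: oldest sample inside the 1m range
	fmt.Println("m offset 1m picks:           ", o) // t-90s: newest sample at or before t-1m
}
```

Under those assumptions the range form always stays inside the current step, while the offset form can reach back through the full lookback window, which is why the docs single out first_over_time(m[step()]) for range queries.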
diff --git a/web/ui/mantine-ui/src/promql/functionSignatures.ts b/web/ui/mantine-ui/src/promql/functionSignatures.ts
index 472d54ac5a..da21a2d4aa 100644
--- a/web/ui/mantine-ui/src/promql/functionSignatures.ts
+++ b/web/ui/mantine-ui/src/promql/functionSignatures.ts
@@ -1,140 +1,196 @@
-import { valueType, Func } from './ast';
+import { valueType, Func } from "./ast";
export const functionSignatures: Record<string, Func> = {
- abs: { name: 'abs', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- absent: { name: 'absent', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- absent_over_time: { name: 'absent_over_time', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
- acos: { name: 'acos', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- acosh: { name: 'acosh', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- asin: { name: 'asin', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- asinh: { name: 'asinh', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- atan: { name: 'atan', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- atanh: { name: 'atanh', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- avg_over_time: { name: 'avg_over_time', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
- ceil: { name: 'ceil', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- changes: { name: 'changes', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
+ abs: { name: "abs", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ absent: { name: "absent", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ absent_over_time: {
+ name: "absent_over_time",
+ argTypes: [valueType.matrix],
+ variadic: 0,
+ returnType: valueType.vector,
+ },
+ acos: { name: "acos", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ acosh: { name: "acosh", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ asin: { name: "asin", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ asinh: { name: "asinh", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ atan: { name: "atan", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ atanh: { name: "atanh", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ avg_over_time: { name: "avg_over_time", argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
+ ceil: { name: "ceil", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ changes: { name: "changes", argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
clamp: {
- name: 'clamp',
+ name: "clamp",
argTypes: [valueType.vector, valueType.scalar, valueType.scalar],
variadic: 0,
returnType: valueType.vector,
},
clamp_max: {
- name: 'clamp_max',
+ name: "clamp_max",
argTypes: [valueType.vector, valueType.scalar],
variadic: 0,
returnType: valueType.vector,
},
clamp_min: {
- name: 'clamp_min',
+ name: "clamp_min",
argTypes: [valueType.vector, valueType.scalar],
variadic: 0,
returnType: valueType.vector,
},
- cos: { name: 'cos', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- cosh: { name: 'cosh', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- count_over_time: { name: 'count_over_time', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
- day_of_month: { name: 'day_of_month', argTypes: [valueType.vector], variadic: 1, returnType: valueType.vector },
- day_of_week: { name: 'day_of_week', argTypes: [valueType.vector], variadic: 1, returnType: valueType.vector },
- day_of_year: { name: 'day_of_year', argTypes: [valueType.vector], variadic: 1, returnType: valueType.vector },
- days_in_month: { name: 'days_in_month', argTypes: [valueType.vector], variadic: 1, returnType: valueType.vector },
- deg: { name: 'deg', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- delta: { name: 'delta', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
- deriv: { name: 'deriv', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
- exp: { name: 'exp', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- floor: { name: 'floor', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- histogram_avg: { name: 'histogram_avg', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- histogram_count: { name: 'histogram_count', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ cos: { name: "cos", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ cosh: { name: "cosh", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ count_over_time: { name: "count_over_time", argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
+ day_of_month: { name: "day_of_month", argTypes: [valueType.vector], variadic: 1, returnType: valueType.vector },
+ day_of_week: { name: "day_of_week", argTypes: [valueType.vector], variadic: 1, returnType: valueType.vector },
+ day_of_year: { name: "day_of_year", argTypes: [valueType.vector], variadic: 1, returnType: valueType.vector },
+ days_in_month: { name: "days_in_month", argTypes: [valueType.vector], variadic: 1, returnType: valueType.vector },
+ deg: { name: "deg", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ delta: { name: "delta", argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
+ deriv: { name: "deriv", argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
+ double_exponential_smoothing: {
+ name: "double_exponential_smoothing",
+ argTypes: [valueType.matrix, valueType.scalar, valueType.scalar],
+ variadic: 0,
+ returnType: valueType.vector,
+ },
+ exp: { name: "exp", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ first_over_time: { name: "first_over_time", argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
+ floor: { name: "floor", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ histogram_avg: { name: "histogram_avg", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ histogram_count: { name: "histogram_count", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
histogram_fraction: {
- name: 'histogram_fraction',
+ name: "histogram_fraction",
argTypes: [valueType.scalar, valueType.scalar, valueType.vector],
variadic: 0,
returnType: valueType.vector,
},
histogram_quantile: {
- name: 'histogram_quantile',
+ name: "histogram_quantile",
argTypes: [valueType.scalar, valueType.vector],
variadic: 0,
returnType: valueType.vector,
},
- histogram_stddev: { name: 'histogram_stddev', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- histogram_stdvar: { name: 'histogram_stdvar', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- histogram_sum: { name: 'histogram_sum', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- double_exponential_smoothing: {
- name: 'double_exponential_smoothing',
- argTypes: [valueType.matrix, valueType.scalar, valueType.scalar],
+ histogram_stddev: {
+ name: "histogram_stddev",
+ argTypes: [valueType.vector],
variadic: 0,
returnType: valueType.vector,
},
- hour: { name: 'hour', argTypes: [valueType.vector], variadic: 1, returnType: valueType.vector },
- idelta: { name: 'idelta', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
- increase: { name: 'increase', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
- irate: { name: 'irate', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
+ histogram_stdvar: {
+ name: "histogram_stdvar",
+ argTypes: [valueType.vector],
+ variadic: 0,
+ returnType: valueType.vector,
+ },
+ histogram_sum: { name: "histogram_sum", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ hour: { name: "hour", argTypes: [valueType.vector], variadic: 1, returnType: valueType.vector },
+ idelta: { name: "idelta", argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
+ increase: { name: "increase", argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
+ info: { name: "info", argTypes: [valueType.vector, valueType.vector], variadic: 1, returnType: valueType.vector },
+ irate: { name: "irate", argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
label_join: {
- name: 'label_join',
+ name: "label_join",
argTypes: [valueType.vector, valueType.string, valueType.string, valueType.string],
variadic: -1,
returnType: valueType.vector,
},
label_replace: {
- name: 'label_replace',
+ name: "label_replace",
argTypes: [valueType.vector, valueType.string, valueType.string, valueType.string, valueType.string],
variadic: 0,
returnType: valueType.vector,
},
- last_over_time: { name: 'last_over_time', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
- ln: { name: 'ln', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- log10: { name: 'log10', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- log2: { name: 'log2', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- mad_over_time: { name: 'mad_over_time', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
- max_over_time: { name: 'max_over_time', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
- min_over_time: { name: 'min_over_time', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
- minute: { name: 'minute', argTypes: [valueType.vector], variadic: 1, returnType: valueType.vector },
- month: { name: 'month', argTypes: [valueType.vector], variadic: 1, returnType: valueType.vector },
- pi: { name: 'pi', argTypes: [], variadic: 0, returnType: valueType.scalar },
+ last_over_time: { name: "last_over_time", argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
+ ln: { name: "ln", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ log10: { name: "log10", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ log2: { name: "log2", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ mad_over_time: { name: "mad_over_time", argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
+ max_over_time: { name: "max_over_time", argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
+ min_over_time: { name: "min_over_time", argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
+ minute: { name: "minute", argTypes: [valueType.vector], variadic: 1, returnType: valueType.vector },
+ month: { name: "month", argTypes: [valueType.vector], variadic: 1, returnType: valueType.vector },
+ pi: { name: "pi", argTypes: [], variadic: 0, returnType: valueType.scalar },
predict_linear: {
- name: 'predict_linear',
+ name: "predict_linear",
argTypes: [valueType.matrix, valueType.scalar],
variadic: 0,
returnType: valueType.vector,
},
- present_over_time: { name: 'present_over_time', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
+ present_over_time: {
+ name: "present_over_time",
+ argTypes: [valueType.matrix],
+ variadic: 0,
+ returnType: valueType.vector,
+ },
quantile_over_time: {
- name: 'quantile_over_time',
+ name: "quantile_over_time",
argTypes: [valueType.scalar, valueType.matrix],
variadic: 0,
returnType: valueType.vector,
},
- rad: { name: 'rad', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- rate: { name: 'rate', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
- resets: { name: 'resets', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
- round: { name: 'round', argTypes: [valueType.vector, valueType.scalar], variadic: 1, returnType: valueType.vector },
- scalar: { name: 'scalar', argTypes: [valueType.vector], variadic: 0, returnType: valueType.scalar },
- sgn: { name: 'sgn', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- sin: { name: 'sin', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- sinh: { name: 'sinh', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- sort: { name: 'sort', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ rad: { name: "rad", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ rate: { name: "rate", argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
+ resets: { name: "resets", argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
+ round: { name: "round", argTypes: [valueType.vector, valueType.scalar], variadic: 1, returnType: valueType.vector },
+ scalar: { name: "scalar", argTypes: [valueType.vector], variadic: 0, returnType: valueType.scalar },
+ sgn: { name: "sgn", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ sin: { name: "sin", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ sinh: { name: "sinh", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ sort: { name: "sort", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
sort_by_label: {
- name: 'sort_by_label',
+ name: "sort_by_label",
argTypes: [valueType.vector, valueType.string],
variadic: -1,
returnType: valueType.vector,
},
sort_by_label_desc: {
- name: 'sort_by_label_desc',
+ name: "sort_by_label_desc",
argTypes: [valueType.vector, valueType.string],
variadic: -1,
returnType: valueType.vector,
},
- sort_desc: { name: 'sort_desc', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- sqrt: { name: 'sqrt', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- stddev_over_time: { name: 'stddev_over_time', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
- stdvar_over_time: { name: 'stdvar_over_time', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
- sum_over_time: { name: 'sum_over_time', argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
- tan: { name: 'tan', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- tanh: { name: 'tanh', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- time: { name: 'time', argTypes: [], variadic: 0, returnType: valueType.scalar },
- timestamp: { name: 'timestamp', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
- vector: { name: 'vector', argTypes: [valueType.scalar], variadic: 0, returnType: valueType.vector },
- year: { name: 'year', argTypes: [valueType.vector], variadic: 1, returnType: valueType.vector },
+ sort_desc: { name: "sort_desc", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ sqrt: { name: "sqrt", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ stddev_over_time: {
+ name: "stddev_over_time",
+ argTypes: [valueType.matrix],
+ variadic: 0,
+ returnType: valueType.vector,
+ },
+ stdvar_over_time: {
+ name: "stdvar_over_time",
+ argTypes: [valueType.matrix],
+ variadic: 0,
+ returnType: valueType.vector,
+ },
+ sum_over_time: { name: "sum_over_time", argTypes: [valueType.matrix], variadic: 0, returnType: valueType.vector },
+ tan: { name: "tan", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ tanh: { name: "tanh", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ time: { name: "time", argTypes: [], variadic: 0, returnType: valueType.scalar },
+ timestamp: { name: "timestamp", argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
+ ts_of_first_over_time: {
+ name: "ts_of_first_over_time",
+ argTypes: [valueType.matrix],
+ variadic: 0,
+ returnType: valueType.vector,
+ },
+ ts_of_last_over_time: {
+ name: "ts_of_last_over_time",
+ argTypes: [valueType.matrix],
+ variadic: 0,
+ returnType: valueType.vector,
+ },
+ ts_of_max_over_time: {
+ name: "ts_of_max_over_time",
+ argTypes: [valueType.matrix],
+ variadic: 0,
+ returnType: valueType.vector,
+ },
+ ts_of_min_over_time: {
+ name: "ts_of_min_over_time",
+ argTypes: [valueType.matrix],
+ variadic: 0,
+ returnType: valueType.vector,
+ },
+ vector: { name: "vector", argTypes: [valueType.scalar], variadic: 0, returnType: valueType.vector },
+ year: { name: "year", argTypes: [valueType.vector], variadic: 1, returnType: valueType.vector },
};
diff --git a/web/ui/mantine-ui/src/promql/serialize.ts b/web/ui/mantine-ui/src/promql/serialize.ts
index bbccede708..584e1ae9ff 100644
--- a/web/ui/mantine-ui/src/promql/serialize.ts
+++ b/web/ui/mantine-ui/src/promql/serialize.ts
@@ -136,11 +136,14 @@ const serializeNode = (
let matching = "";
let grouping = "";
const vm = node.matching;
- if (vm !== null && (vm.labels.length > 0 || vm.on)) {
- if (vm.on) {
- matching = ` on(${labelNameList(vm.labels)})`;
- } else {
- matching = ` ignoring(${labelNameList(vm.labels)})`;
+ if (vm !== null) {
+ if (
+ vm.labels.length > 0 ||
+ vm.on ||
+ vm.card === vectorMatchCardinality.manyToOne ||
+ vm.card === vectorMatchCardinality.oneToMany
+ ) {
+ matching = ` ${vm.on ? "on" : "ignoring"}(${labelNameList(vm.labels)})`;
}
if (
diff --git a/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts b/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts
index 62b10cd781..a3734d311f 100644
--- a/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts
+++ b/web/ui/mantine-ui/src/promql/serializeAndFormat.test.ts
@@ -192,8 +192,7 @@ describe("serializeNode and formatNode", () => {
anchored: false,
smoothed: false,
},
- output:
- '{label1="value1"}',
+ output: '{label1="value1"}',
},
// Anchored and smoothed modifiers.
@@ -722,6 +721,46 @@ describe("serializeNode and formatNode", () => {
output: "… + ignoring(label1, label2) …",
prettyOutput: ` …
+ ignoring(label1, label2)
+ …`,
+ },
+ {
+ // Empty ignoring() without group modifiers can be stripped away.
+ node: {
+ type: nodeType.binaryExpr,
+ op: binaryOperatorType.add,
+ lhs: { type: nodeType.placeholder, children: [] },
+ rhs: { type: nodeType.placeholder, children: [] },
+ matching: {
+ card: vectorMatchCardinality.oneToOne,
+ labels: [],
+ on: false,
+ include: [],
+ },
+ bool: false,
+ },
+ output: "… + …",
+ prettyOutput: ` …
++
+ …`,
+ },
+ {
+ // Empty ignoring() with group modifiers may not be stripped away.
+ node: {
+ type: nodeType.binaryExpr,
+ op: binaryOperatorType.add,
+ lhs: { type: nodeType.placeholder, children: [] },
+ rhs: { type: nodeType.placeholder, children: [] },
+ matching: {
+ card: vectorMatchCardinality.manyToOne,
+ labels: [],
+ on: false,
+ include: ["__name__"],
+ },
+ bool: false,
+ },
+ output: "… + ignoring() group_left(__name__) …",
+ prettyOutput: ` …
++ ignoring() group_left(__name__)
…`,
},
{
diff --git a/web/ui/mantine-ui/src/promql/tools/gen_functions_docs/main.go b/web/ui/mantine-ui/src/promql/tools/gen_functions_docs/main.go
index 89545c1e5e..74e8ac0354 100644
--- a/web/ui/mantine-ui/src/promql/tools/gen_functions_docs/main.go
+++ b/web/ui/mantine-ui/src/promql/tools/gen_functions_docs/main.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -18,7 +18,7 @@ import (
"fmt"
"io"
"log"
- "net/http"
+ "os"
"sort"
"strings"
@@ -26,20 +26,23 @@ import (
"github.com/russross/blackfriday/v2"
)
-var funcDocsRe = regexp.MustCompile("^## `(.+)\\(\\)`\n$|^## (Trigonometric Functions)\n$")
+var funcDocsRe = regexp.MustCompile("^## `([^)]+)\\(\\)` and `([^)]+)\\(\\)`\n$|^## `(.+)\\(\\)`\n$|^## (Trigonometric Functions)\n$")
func main() {
- resp, err := http.Get("https://raw.githubusercontent.com/prometheus/prometheus/master/docs/querying/functions.md")
+ // Read from local file instead of fetching from upstream.
+ if len(os.Args) < 2 {
+ log.Fatalln("Usage: gen_functions_docs ")
+ }
+ functionsPath := os.Args[1]
+ file, err := os.Open(functionsPath)
if err != nil {
- log.Fatalln("Failed to fetch function docs:", err)
- }
- if resp.StatusCode != 200 {
- log.Fatalln("Bad status code while fetching function docs:", resp.Status)
+ log.Fatalln("Failed to open function docs:", err)
}
+ defer file.Close()
funcDocs := map[string]string{}
- r := bufio.NewReader(resp.Body)
+ r := bufio.NewReader(file)
currentFunc := ""
currentDocs := ""
@@ -58,6 +61,11 @@ func main() {
"last_over_time",
"present_over_time",
"mad_over_time",
+ "first_over_time",
+ "ts_of_first_over_time",
+ "ts_of_last_over_time",
+ "ts_of_max_over_time",
+ "ts_of_min_over_time",
} {
funcDocs[fn] = currentDocs
}
@@ -81,6 +89,12 @@ func main() {
} {
funcDocs[fn] = currentDocs
}
+ case "histogram_count_and_histogram_sum":
+ funcDocs["histogram_count"] = currentDocs
+ funcDocs["histogram_sum"] = currentDocs
+ case "histogram_stddev_and_histogram_stdvar":
+ funcDocs["histogram_stddev"] = currentDocs
+ funcDocs["histogram_stdvar"] = currentDocs
default:
funcDocs[currentFunc] = currentDocs
}
@@ -103,10 +117,16 @@ func main() {
}
currentDocs = ""
- currentFunc = string(matches[1])
- if matches[2] != "" {
- // This is the case for "## Trigonometric Functions"
- currentFunc = matches[2]
+ if matches[1] != "" && matches[2] != "" {
+ // Combined functions: "## `function1()` and `function2()`"
+ // Store as "function1_and_function2" and handle in saveCurrent.
+ currentFunc = matches[1] + "_and_" + matches[2]
+ } else if matches[3] != "" {
+ // Single function: "## `function_name()`"
+ currentFunc = string(matches[3])
+ } else if matches[4] != "" {
+ // Special section: "## Trigonometric Functions"
+ currentFunc = matches[4]
}
} else {
currentDocs += line
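As a quick sanity check on the widened funcDocsRe above, the standalone snippet below (relying on Go's leftmost-first alternation, so the combined form is tried before the single-function form) shows which capture groups fire for each of the three heading shapes the generator now distinguishes:

```go
// Standalone check of the widened heading regexp used by gen_functions_docs.
// The pattern is copied from funcDocsRe above; which capture group fires
// tells the tool whether it saw a combined, single, or trigonometric heading.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile("^## `([^)]+)\\(\\)` and `([^)]+)\\(\\)`\n$|^## `(.+)\\(\\)`\n$|^## (Trigonometric Functions)\n$")

	for _, line := range []string{
		"## `histogram_count()` and `histogram_sum()`\n", // combined heading: groups 1 and 2
		"## `ts_of_first_over_time()`\n",                 // single function: group 3
		"## Trigonometric Functions\n",                   // special section: group 4
	} {
		m := re.FindStringSubmatch(line)
		fmt.Printf("%q -> %q\n", line, m[1:])
	}
}
```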
diff --git a/web/ui/mantine-ui/src/promql/tools/gen_functions_list/main.go b/web/ui/mantine-ui/src/promql/tools/gen_functions_list/main.go
index f479b6d36a..6b77f368c8 100644
--- a/web/ui/mantine-ui/src/promql/tools/gen_functions_list/main.go
+++ b/web/ui/mantine-ui/src/promql/tools/gen_functions_list/main.go
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -41,10 +41,10 @@ func main() {
sort.Strings(fnNames)
fmt.Println(`import { valueType, Func } from './ast';
- export const functionSignatures: Record<string, Func> = {`)
+export const functionSignatures: Record<string, Func> = {`)
for _, fnName := range fnNames {
fn := parser.Functions[fnName]
fmt.Printf(" %s: { name: '%s', argTypes: [%s], variadic: %d, returnType: %s },\n", fn.Name, fn.Name, formatValueTypes(fn.ArgTypes), fn.Variadic, formatValueType(fn.ReturnType))
}
- fmt.Println("}")
+ fmt.Println("};")
}
diff --git a/web/ui/mantine-ui/src/promql/tools/go.mod b/web/ui/mantine-ui/src/promql/tools/go.mod
index 6983cf4fe6..32b64019e9 100644
--- a/web/ui/mantine-ui/src/promql/tools/go.mod
+++ b/web/ui/mantine-ui/src/promql/tools/go.mod
@@ -1,10 +1,10 @@
module github.com/prometheus/prometheus/web/ui/mantine-ui/src/promql/tools
-go 1.24.0
+go 1.24.9
require (
- github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
- github.com/prometheus/prometheus v0.54.1
+ github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853
+ github.com/prometheus/prometheus v0.308.1
github.com/russross/blackfriday/v2 v2.1.0
)
@@ -12,15 +12,15 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/dennwc/varint v1.0.0 // indirect
- github.com/go-kit/log v0.2.1 // indirect
- github.com/go-logfmt/logfmt v0.6.0 // indirect
+ github.com/kr/text v0.2.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
- github.com/prometheus/client_golang v1.19.1 // indirect
- github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.55.0 // indirect
- github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/prometheus/client_golang v1.23.2 // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
+ github.com/prometheus/common v0.67.4 // indirect
+ github.com/prometheus/procfs v0.16.1 // indirect
go.uber.org/atomic v1.11.0 // indirect
- golang.org/x/sys v0.22.0 // indirect
- golang.org/x/text v0.16.0 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ go.yaml.in/yaml/v2 v2.4.3 // indirect
+ golang.org/x/sys v0.37.0 // indirect
+ golang.org/x/text v0.30.0 // indirect
+ google.golang.org/protobuf v1.36.10 // indirect
)
diff --git a/web/ui/mantine-ui/src/promql/tools/go.sum b/web/ui/mantine-ui/src/promql/tools/go.sum
index e7ed7cec79..40c792d93d 100644
--- a/web/ui/mantine-ui/src/promql/tools/go.sum
+++ b/web/ui/mantine-ui/src/promql/tools/go.sum
@@ -1,47 +1,86 @@
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
-github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg=
-github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
-github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI=
-github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4=
+cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ=
+cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
+cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
+cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
+cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 h1:wL5IEG5zb7BVv1Kv0Xm92orq+5hB5Nipn3B5tn4Rqfk=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
+github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0=
+github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
+github.com/aws/aws-sdk-go-v2 v1.39.6 h1:2JrPCVgWJm7bm83BDwY5z8ietmeJUbh3O2ACnn+Xsqk=
+github.com/aws/aws-sdk-go-v2 v1.39.6/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
+github.com/aws/aws-sdk-go-v2/config v1.31.17 h1:QFl8lL6RgakNK86vusim14P2k8BFSxjvUkcWLDjgz9Y=
+github.com/aws/aws-sdk-go-v2/config v1.31.17/go.mod h1:V8P7ILjp/Uef/aX8TjGk6OHZN6IKPM5YW6S78QnRD5c=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.21 h1:56HGpsgnmD+2/KpG0ikvvR8+3v3COCwaF4r+oWwOeNA=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.21/go.mod h1:3YELwedmQbw7cXNaII2Wywd+YY58AmLPwX4LzARgmmA=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 h1:T1brd5dR3/fzNFAQch/iBKeX07/ffu/cLu+q+RuzEWk=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13/go.mod h1:Peg/GBAQ6JDt+RoBf4meB1wylmAipb7Kg2ZFakZTlwk=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 h1:a+8/MLcWlIxo1lF9xaGt3J/u3yOZx+CdSveSNwjhD40=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13/go.mod h1:oGnKwIYZ4XttyU2JWxFrwvhF6YKiK/9/wmE3v3Iu9K8=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 h1:HBSI2kDkMdWz4ZM7FjwE7e/pWDEZ+nR95x8Ztet1ooY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13/go.mod h1:YE94ZoDArI7awZqJzBAZ3PDD2zSfuP7w6P2knOzIn8M=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 h1:kDqdFvMY4AtKoACfzIGD8A0+hbT41KTKF//gq7jITfM=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13/go.mod h1:lmKuogqSU3HzQCwZ9ZtcqOc5XGMqtDK7OIc2+DxiUEg=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 h1:0JPwLz1J+5lEOfy/g0SURC9cxhbQ1lIMHMa+AHZSzz0=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.1/go.mod h1:fKvyjJcz63iL/ftA6RaM8sRCtN4r4zl4tjL3qw5ec7k=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 h1:OWs0/j2UYR5LOGi88sD5/lhN6TDLG6SfA7CqsQO9zF0=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5/go.mod h1:klO+ejMvYsB4QATfEOIXk8WAEwN4N0aBfJpvC+5SZBo=
+github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 h1:mLlUgHn02ue8whiR4BmxxGJLR2gwU6s6ZzJ5wDamBUs=
+github.com/aws/aws-sdk-go-v2/service/sts v1.39.1/go.mod h1:E19xDjpzPZC7LS2knI9E6BaRFDK43Eul7vd6rSq2HWk=
+github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
+github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
-github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
-github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
-github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
-github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
-github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
+github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
+github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
+github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
-github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
-github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
+github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
+github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
+github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853 h1:cLN4IBkmkYZNnk7EAJ0BHIethd+J6LqxFNw5mSiI2bM=
+github.com/grafana/regexp v0.0.0-20250905093917-f7b3be9d1853/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
+github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
@@ -49,59 +88,85 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
+github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
-github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
-github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/prometheus/prometheus v0.54.1 h1:vKuwQNjnYN2/mDoWfHXDhAsz/68q/dQDb+YbcEqU7MQ=
-github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY=
+github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
+github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
+github.com/prometheus/client_golang/exp v0.0.0-20251212205219-7ba246a648ca h1:BOxmsLoL2ymn8lXJtorca7N/m+2vDQUDoEtPjf0iAxA=
+github.com/prometheus/client_golang/exp v0.0.0-20251212205219-7ba246a648ca/go.mod h1:gndBHh3ZdjBozGcGrjUYjN3UJLRS3l2drALtu4lUt+k=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
+github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
+github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos=
+github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
+github.com/prometheus/prometheus v0.308.1 h1:ApMNI/3/es3Ze90Z7CMb+wwU2BsSYur0m5VKeqHj7h4=
+github.com/prometheus/prometheus v0.308.1/go.mod h1:aHjYCDz9zKRyoUXvMWvu13K9XHOkBB12XrEqibs3e0A=
+github.com/prometheus/sigv4 v0.3.0 h1:QIG7nTbu0JTnNidGI1Uwl5AGVIChWUACxn2B/BQ1kms=
+github.com/prometheus/sigv4 v0.3.0/go.mod h1:fKtFYDus2M43CWKMNtGvFNHGXnAJJEGZbiYCmVp/F8I=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
+go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
+go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
+go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
+go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
+go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
+go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
-golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
-golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
-golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
-golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
-golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
-golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
-golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
-golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
-go.yaml.in/yaml/v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-go.yaml.in/yaml/v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
+go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
+golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
+golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
+golang.org/x/exp v0.0.0-20250808145144-a408d31f581a h1:Y+7uR/b1Mw2iSXZ3G//1haIiSElDQZ8KWh0h+sZPG90=
+golang.org/x/exp v0.0.0-20250808145144-a408d31f581a/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg=
+golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
+golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
+golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
+golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
+golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
+golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
+golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
+golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
+golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
+golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
+google.golang.org/api v0.252.0 h1:xfKJeAJaMwb8OC9fesr369rjciQ704AjU/psjkKURSI=
+google.golang.org/api v0.252.0/go.mod h1:dnHOv81x5RAmumZ7BWLShB/u7JZNeyalImxHmtTHxqw=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 h1:CirRxTOwnRWVLKzDNrs0CXAaVozJoR4G9xvdRecrdpk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ=
+google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A=
+google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c=
+google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
+google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
-k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
-k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg=
-k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0=
+k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
+k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
+k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
+k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
diff --git a/web/ui/mantine-ui/src/state/queryPageSlice.ts b/web/ui/mantine-ui/src/state/queryPageSlice.ts
index 4cf483e2b6..7a4f7b257a 100644
--- a/web/ui/mantine-ui/src/state/queryPageSlice.ts
+++ b/web/ui/mantine-ui/src/state/queryPageSlice.ts
@@ -115,6 +115,19 @@ export const queryPageSlice = createSlice({
state.panels.push(newDefaultPanel());
updateURL(state.panels);
},
+ duplicatePanel: (
+ state,
+ { payload }: PayloadAction<{ idx: number; expr: string }>
+ ) => {
+ const newPanel = {
+ ...state.panels[payload.idx],
+ id: randomId(),
+ expr: payload.expr,
+ };
+ // Insert the duplicated panel just below the original panel.
+ state.panels.splice(payload.idx + 1, 0, newPanel);
+ updateURL(state.panels);
+ },
 removePanel: (state, { payload }: PayloadAction<number>) => {
state.panels.splice(payload, 1);
updateURL(state.panels);
@@ -153,6 +166,7 @@ export const {
setPanels,
addPanel,
removePanel,
+ duplicatePanel,
setExpr,
addQueryToHistory,
setShowTree,
diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json
index f850342728..06b75f735c 100644
--- a/web/ui/module/codemirror-promql/package.json
+++ b/web/ui/module/codemirror-promql/package.json
@@ -1,6 +1,6 @@
{
"name": "@prometheus-io/codemirror-promql",
- "version": "0.307.3",
+ "version": "0.309.1",
"description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js",
@@ -29,7 +29,7 @@
},
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
- "@prometheus-io/lezer-promql": "0.307.3",
+ "@prometheus-io/lezer-promql": "0.309.1",
"lru-cache": "^11.2.2"
},
"devDependencies": {
diff --git a/web/ui/module/codemirror-promql/src/client/prometheus.test.ts b/web/ui/module/codemirror-promql/src/client/prometheus.test.ts
new file mode 100644
index 0000000000..c872edbb69
--- /dev/null
+++ b/web/ui/module/codemirror-promql/src/client/prometheus.test.ts
@@ -0,0 +1,97 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import { HTTPPrometheusClient, CachedPrometheusClient } from './prometheus';
+
+describe('HTTPPrometheusClient destroy', () => {
+ it('should be safe to call destroy multiple times', () => {
+ const client = new HTTPPrometheusClient({ url: 'http://localhost:8080' });
+ // First call
+ client.destroy();
+ // Second call should not throw
+ expect(() => client.destroy()).not.toThrow();
+ });
+
+ it('should abort in-flight requests when destroy is called', async () => {
+ let abortSignal: AbortSignal | null | undefined;
+
+ const mockFetch = (_url: RequestInfo, init?: RequestInit): Promise<Response> => {
+ abortSignal = init?.signal;
+ // Return a promise that never resolves to simulate an in-flight request
+ return new Promise(() => {});
+ };
+
+ const client = new HTTPPrometheusClient({
+ url: 'http://localhost:8080',
+ fetchFn: mockFetch,
+ });
+
+ // Start a request (don't await it)
+ client.labelNames();
+
+ // Verify the signal was captured and not aborted yet
+ expect(abortSignal).toBeDefined();
+ expect(abortSignal?.aborted).toBe(false);
+
+ // Destroy the client
+ client.destroy();
+
+ // Verify the request was aborted
+ expect(abortSignal?.aborted).toBe(true);
+ });
+});
+
+describe('CachedPrometheusClient destroy', () => {
+ it('should be safe to call destroy multiple times', () => {
+ const httpClient = new HTTPPrometheusClient({ url: 'http://localhost:8080' });
+ const cachedClient = new CachedPrometheusClient(httpClient);
+
+ // First call
+ cachedClient.destroy();
+ // Second call should not throw
+ expect(() => cachedClient.destroy()).not.toThrow();
+ });
+
+ it('should call destroy on the underlying HTTPPrometheusClient', () => {
+ const httpClient = new HTTPPrometheusClient({ url: 'http://localhost:8080' });
+
+ let destroyCalled = false;
+ const originalDestroy = httpClient.destroy.bind(httpClient);
+ httpClient.destroy = () => {
+ destroyCalled = true;
+ originalDestroy();
+ };
+
+ const cachedClient = new CachedPrometheusClient(httpClient);
+ cachedClient.destroy();
+
+ expect(destroyCalled).toBe(true);
+ });
+
+ it('should handle underlying clients without destroy method', () => {
+ // Create a minimal PrometheusClient without destroy
+ const minimalClient = {
+ labelNames: () => Promise.resolve([]),
+ labelValues: () => Promise.resolve([]),
+ metricMetadata: () => Promise.resolve({}),
+ series: () => Promise.resolve([]),
+ metricNames: () => Promise.resolve([]),
+ flags: () => Promise.resolve({}),
+ };
+
+ const cachedClient = new CachedPrometheusClient(minimalClient);
+
+ // Should not throw even though underlying client has no destroy
+ expect(() => cachedClient.destroy()).not.toThrow();
+ });
+});
diff --git a/web/ui/module/codemirror-promql/src/client/prometheus.ts b/web/ui/module/codemirror-promql/src/client/prometheus.ts
index 165549ac82..91de148f3c 100644
--- a/web/ui/module/codemirror-promql/src/client/prometheus.ts
+++ b/web/ui/module/codemirror-promql/src/client/prometheus.ts
@@ -39,6 +39,9 @@ export interface PrometheusClient {
// flags returns flag values that prometheus was configured with.
flags(): Promise>;
+
+ // destroy is called to release all resources held by this client
+ destroy?(): void;
}
export interface CacheConfig {
@@ -88,6 +91,7 @@ export class HTTPPrometheusClient implements PrometheusClient {
// when calling it, thus the indirection via another function wrapper.
 private readonly fetchFn: FetchFn = (input: RequestInfo, init?: RequestInit): Promise<Response> => fetch(input, init);
private requestHeaders: Headers = new Headers();
+ private readonly abortControllers: Set<AbortController> = new Set();
constructor(config: PrometheusConfig) {
this.url = config.url ? config.url : '';
@@ -199,11 +203,22 @@ export class HTTPPrometheusClient implements PrometheusClient {
});
}
+ destroy(): void {
+ for (const controller of this.abortControllers) {
+ controller.abort();
+ }
+ this.abortControllers.clear();
+ }
+
 private fetchAPI<T>(resource: string, init?: RequestInit): Promise<T> {
+ const controller = new AbortController();
+ this.abortControllers.add(controller);
+
if (init) {
init.headers = this.requestHeaders;
+ init.signal = controller.signal;
} else {
- init = { headers: this.requestHeaders };
+ init = { headers: this.requestHeaders, signal: controller.signal };
}
return this.fetchFn(this.url + resource, init)
.then((res) => {
@@ -221,6 +236,9 @@ export class HTTPPrometheusClient implements PrometheusClient {
throw new Error('missing "data" field in response JSON');
}
return apiRes.data;
+ })
+ .finally(() => {
+ this.abortControllers.delete(controller);
});
}
@@ -448,4 +466,8 @@ export class CachedPrometheusClient implements PrometheusClient {
return flags;
});
}
+
+ destroy(): void {
+ this.client.destroy?.();
+ }
}
diff --git a/web/ui/module/codemirror-promql/src/complete/hybrid.test.ts b/web/ui/module/codemirror-promql/src/complete/hybrid.test.ts
index 526a5ce4f8..facda35ac8 100644
--- a/web/ui/module/codemirror-promql/src/complete/hybrid.test.ts
+++ b/web/ui/module/codemirror-promql/src/complete/hybrid.test.ts
@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-import { analyzeCompletion, computeStartCompletePosition, ContextKind } from './hybrid';
+import { analyzeCompletion, computeStartCompletePosition, computeEndCompletePosition, ContextKind, durationWithUnitRegexp } from './hybrid';
import { createEditorState, mockedMetricsTerms, mockPrometheusServer } from '../test/utils-test';
import { Completion, CompletionContext } from '@codemirror/autocomplete';
import {
@@ -29,6 +29,7 @@ import {
import { EqlSingle, Neq } from '@prometheus-io/lezer-promql';
import { syntaxTree } from '@codemirror/language';
import { newCompleteStrategy } from './index';
+import nock from 'nock';
describe('analyzeCompletion test', () => {
const testCases = [
@@ -559,6 +560,18 @@ describe('analyzeCompletion test', () => {
pos: 28,
expectedContext: [{ kind: ContextKind.Duration }],
},
+ {
+ title: 'do not autocomplete duration when unit already present in matrixSelector',
+ expr: 'rate(foo[5m])',
+ pos: 10,
+ expectedContext: [],
+ },
+ {
+ title: 'do not autocomplete duration when multi char unit already present in matrixSelector',
+ expr: 'rate(foo[5ms])',
+ pos: 10,
+ expectedContext: [],
+ },
{
title: 'autocomplete duration for a subQuery',
expr: 'go[5d:5]',
@@ -625,7 +638,43 @@ describe('analyzeCompletion test', () => {
const state = createEditorState(value.expr);
const node = syntaxTree(state).resolve(value.pos, -1);
const result = analyzeCompletion(state, node, value.pos);
- expect(value.expectedContext).toEqual(result);
+ expect(result).toEqual(value.expectedContext);
+ });
+ });
+});
+
+describe('durationWithUnitRegexp test', () => {
+ it('should match complete durations with units', () => {
+ const testCases = [
+ { input: '5m', expected: true },
+ { input: '30s', expected: true },
+ { input: '1h', expected: true },
+ { input: '500ms', expected: true },
+ { input: '2d', expected: true },
+ { input: '1w', expected: true },
+ { input: '1y', expected: true },
+ { input: '1d2h', expected: true },
+ { input: '2h30m', expected: true },
+ { input: '1h2m3s', expected: true },
+ { input: '250ms2s', expected: true },
+ { input: '2h3m4s5ms', expected: true },
+ { input: '5', expected: false },
+ { input: '5m5', expected: false },
+ { input: 'm', expected: false },
+ { input: 'd', expected: false },
+ { input: '', expected: false },
+ { input: '1hms', expected: false },
+ { input: '2x', expected: false },
+ ];
+ testCases.forEach(({ input, expected }) => {
+ expect(durationWithUnitRegexp.test(input)).toBe(expected);
+ });
+ });
+
+ it('should not match durations without units or partial units', () => {
+ const testCases = ['5', '30', '100', '5m5', 'm', 'd'];
+ testCases.forEach((input) => {
+ expect(durationWithUnitRegexp.test(input)).toBe(false);
});
});
});
@@ -812,7 +861,146 @@ describe('computeStartCompletePosition test', () => {
const state = createEditorState(value.expr);
const node = syntaxTree(state).resolve(value.pos, -1);
const result = computeStartCompletePosition(state, node, value.pos);
- expect(value.expectedStart).toEqual(result);
+ expect(result).toEqual(value.expectedStart);
+ });
+ });
+});
+
+describe('computeEndCompletePosition test', () => {
+ const testCases = [
+ {
+ title: 'cursor at end of metric name',
+ expr: 'metric_name',
+ pos: 11, // cursor is at the end
+ expectedEnd: 11,
+ },
+ {
+ title: 'cursor in middle of metric name - should extend to end',
+ expr: 'coredns_cache_hits_total',
+ pos: 14, // cursor is after 'coredns_cache_' (before 'hits')
+ expectedEnd: 24, // should extend to end of 'coredns_cache_hits_total'
+ },
+ {
+ title: 'cursor in middle of metric name inside rate() - should extend to end',
+ expr: 'rate(coredns_cache_hits_total[2m])',
+ pos: 19, // cursor is after 'coredns_cache_' (before 'hits')
+ expectedEnd: 29, // should extend to end of 'coredns_cache_hits_total'
+ },
+ {
+ title: 'cursor in middle of metric name inside sum(rate()) - should extend to end',
+ expr: 'sum(rate(coredns_cache_hits_total[2m]))',
+ pos: 24, // cursor is after 'coredns_cache_' (before 'hits')
+ expectedEnd: 33, // should extend to end of 'coredns_cache_hits_total'
+ },
+ {
+ title: 'cursor at beginning of metric name - should extend to end',
+ expr: 'metric_name',
+ pos: 1, // cursor after 'm'
+ expectedEnd: 11,
+ },
+ {
+ title: 'cursor in middle of incomplete function name - should extend to end',
+ expr: 'sum_ov',
+ pos: 4, // cursor after 'sum_' (before 'ov')
+ expectedEnd: 6, // should extend to end of 'sum_ov'
+ },
+ {
+ title: 'cursor in middle of incomplete function name within aggregator - should extend to end',
+ expr: 'sum(sum_ov(foo[5m]))',
+ pos: 8, // cursor after 'sum_' (before 'ov')
+ expectedEnd: 10, // should extend to end of 'sum_ov'
+ },
+ {
+ title: 'empty bracket - ends before the closing bracket',
+ expr: '{}',
+ pos: 1,
+ expectedEnd: 1,
+ },
+ {
+ title: 'cursor in label matchers - ends before the closing bracket',
+ expr: 'metric_name{label="value"}',
+ pos: 12, // cursor after '{'
+ expectedEnd: 25,
+ },
+ {
+ title: 'cursor in middle of label name in grouping clause - should extend to end',
+ expr: 'sum by (instance_name)',
+ pos: 12, // cursor after 'inst' (before 'ance')
+ expectedEnd: 21, // should extend to end of 'instance_name'
+ },
+ {
+ title: 'cursor in middle of label name in label matcher - should extend to end',
+ expr: 'metric{instance_name="value"}',
+ pos: 11, // cursor after 'inst' (before 'ance')
+ expectedEnd: 20, // should extend to end of 'instance_name'
+ },
+ {
+ title: 'cursor in middle of label name in on() modifier - should extend to end',
+ expr: 'a / on(instance_name) b',
+ pos: 11, // cursor after 'inst' (before 'ance')
+ expectedEnd: 20, // should extend to end of 'instance_name'
+ },
+ {
+ title: 'cursor in middle of label name in ignoring() modifier - should extend to end',
+ expr: 'a / ignoring(instance_name) b',
+ pos: 17, // cursor after 'inst' (before 'ance')
+ expectedEnd: 26, // should extend to end of 'instance_name'
+ },
+ {
+ title: 'cursor in middle of function name rate - should extend to end',
+ expr: 'rate(foo[5m])',
+ pos: 2, // cursor after 'ra' (before 'te')
+ expectedEnd: 4, // should extend to end of 'rate'
+ },
+ {
+ title: 'cursor in middle of function name histogram_quantile - should extend to end',
+ expr: 'histogram_quantile(0.9, rate(foo[5m]))',
+ pos: 10, // cursor after 'histogram_' (before 'quantile')
+ expectedEnd: 18, // should extend to end of 'histogram_quantile'
+ },
+ {
+ title: 'cursor in middle of aggregator sum - should extend to end',
+ expr: 'sum(rate(foo[5m]))',
+ pos: 2, // cursor after 'su' (before 'm')
+ expectedEnd: 3, // should extend to end of 'sum'
+ },
+ {
+ title: 'cursor in middle of aggregator count_values - should extend to end',
+ expr: 'count_values("label", foo)',
+ pos: 6, // cursor after 'count_' (before 'values')
+ expectedEnd: 12, // should extend to end of 'count_values'
+ },
+ {
+ title: 'cursor in middle of nested function - should extend to end',
+ expr: 'sum(rate(foo[5m]))',
+ pos: 6, // cursor after 'ra' inside rate (before 'te')
+ expectedEnd: 8, // should extend to end of 'rate'
+ },
+ {
+ title: 'cursor at beginning of aggregator - should extend to end',
+ expr: 'avg by (instance) (rate(foo[5m]))',
+ pos: 1, // cursor after 'a' (before 'vg')
+ expectedEnd: 3, // should extend to end of 'avg'
+ },
+ {
+ title: 'cursor in middle of function name with binary op - should extend to end',
+ expr: 'rate(foo[5m]) / irate(bar[5m])',
+ pos: 17, // cursor after 'ir' inside irate (before 'ate')
+ expectedEnd: 21, // should extend to end of 'irate'
+ },
+ {
+ title: 'error node - returns pos (cursor position)',
+ expr: 'metric_name !',
+ pos: 13, // cursor at '!' (error node)
+ expectedEnd: 13, // error node returns pos
+ },
+ ];
+ testCases.forEach((value) => {
+ it(value.title, () => {
+ const state = createEditorState(value.expr);
+ const node = syntaxTree(state).resolve(value.pos, -1);
+ const result = computeEndCompletePosition(node, value.pos);
+ expect(result).toEqual(value.expectedEnd);
});
});
});
@@ -866,6 +1054,28 @@ describe('autocomplete promQL test', () => {
validFor: /^[a-zA-Z0-9_:]+$/,
},
},
+ {
+ title: 'cursor in middle of metric name - to should extend to end (issue #15839)',
+ expr: 'sum(coredns_cache_hits_total)',
+ pos: 18, // cursor is after 'coredns_cache_' (before 'hits')
+ expectedResult: {
+ options: ([] as Completion[]).concat(functionIdentifierTerms, aggregateOpTerms, snippets),
+ from: 4,
+ to: 28, // should extend to end of 'coredns_cache_hits_total'
+ validFor: /^[a-zA-Z0-9_:]+$/,
+ },
+ },
+ {
+ title: 'cursor in middle of metric name inside rate() - to should extend to end (issue #15839)',
+ expr: 'rate(coredns_cache_hits_total[2m])',
+ pos: 19, // cursor is after 'coredns_cache_' (before 'hits')
+ expectedResult: {
+ options: ([] as Completion[]).concat(functionIdentifierTerms, aggregateOpTerms, snippets),
+ from: 5,
+ to: 29, // should extend to end of 'coredns_cache_hits_total'
+ validFor: /^[a-zA-Z0-9_:]+$/,
+ },
+ },
{
title: 'offline function/aggregation autocompletion in aggregation 3',
expr: 'sum(rate())',
@@ -1229,6 +1439,28 @@ describe('autocomplete promQL test', () => {
validFor: undefined,
},
},
+ {
+ title: 'offline do not autocomplete duration when unit already present in matrixSelector',
+ expr: 'rate(foo[5m])',
+ pos: 10,
+ expectedResult: {
+ options: [],
+ from: 10,
+ to: 11,
+ validFor: /^[a-zA-Z0-9_:]+$/,
+ },
+ },
+ {
+ title: 'offline do not autocomplete duration when multi char unit already present in matrixSelector',
+ expr: 'rate(foo[5ms])',
+ pos: 10,
+ expectedResult: {
+ options: [],
+ from: 10,
+ to: 12,
+ validFor: /^[a-zA-Z0-9_:]+$/,
+ },
+ },
{
title: 'offline autocomplete duration for a subQuery',
expr: 'go[5d:5]',
@@ -1380,7 +1612,39 @@ describe('autocomplete promQL test', () => {
const context = new CompletionContext(state, value.pos, true);
const completion = newCompleteStrategy(value.conf);
const result = await completion.promQL(context);
- expect(value.expectedResult).toEqual(result);
+ expect(result).toEqual(value.expectedResult);
});
});
+
+ it('online autocomplete of openmetrics counter', async () => {
+ const metricName = 'direct_notifications_total';
+ const baseMetricName = 'direct_notifications';
+ nock('http://localhost:8080')
+ .get('/api/v1/label/__name__/values')
+ .query(true)
+ .reply(200, { status: 'success', data: [metricName] });
+ nock('http://localhost:8080')
+ .get('/api/v1/metadata')
+ .query(true)
+ .reply(200, {
+ status: 'success',
+ data: {
+ [baseMetricName]: [
+ {
+ type: 'counter',
+ help: 'Number of direct notifications.',
+ unit: '',
+ },
+ ],
+ },
+ });
+ const state = createEditorState(metricName);
+ const context = new CompletionContext(state, metricName.length, true);
+ const completion = newCompleteStrategy({ remote: { url: 'http://localhost:8080' } });
+ const result = await completion.promQL(context);
+ // nock only mocks the HTTP endpoints; this test just ensures remote completion works
+ // when metadata for an OpenMetrics _total counter is stored under its base metric name.
+ expect(result).not.toBeNull();
+ expect((result as NonNullable<typeof result>).options.length).toBeGreaterThan(0);
+ });
});
diff --git a/web/ui/module/codemirror-promql/src/complete/hybrid.ts b/web/ui/module/codemirror-promql/src/complete/hybrid.ts
index 76efc34442..84c101b43c 100644
--- a/web/ui/module/codemirror-promql/src/complete/hybrid.ts
+++ b/web/ui/module/codemirror-promql/src/complete/hybrid.ts
@@ -166,6 +166,49 @@ function arrayToCompletionResult(data: Completion[], from: number, to: number, i
} as CompletionResult;
}
+// computeEndCompletePosition calculates the end position for autocompletion replacement.
+// When the cursor is in the middle of a token, this ensures the entire token is replaced,
+// not just the portion before the cursor. This fixes issue #15839.
+// Note: this method is exported only for testing purposes.
+export function computeEndCompletePosition(node: SyntaxNode, pos: number): number {
+ // For error nodes, use the cursor position as the end position
+ if (node.type.id === 0) {
+ return pos;
+ }
+
+ if (
+ node.type.id === LabelMatchers ||
+ node.type.id === GroupingLabels ||
+ node.type.id === FunctionCallBody ||
+ node.type.id === MatrixSelector ||
+ node.type.id === SubqueryExpr
+ ) {
+ // When we're inside empty brackets, we want to replace up to just before the closing bracket.
+ return node.to - 1;
+ }
+
+ if (node.type.id === StringLiteral && (node.parent?.type.id === UnquotedLabelMatcher || node.parent?.type.id === QuotedLabelMatcher)) {
+ // For label values, we want to replace all content inside the quotes.
+ return node.parent.to - 1;
+ }
+
+ // For all other nodes, extend the end position to include the entire token.
+ return node.to;
+}
+
+// Matches complete PromQL durations, including compound units (e.g., 5m, 1d2h, 1h30m).
+// Duration units are a fixed, safe set (no regex metacharacters), so no escaping is needed.
+export const durationWithUnitRegexp = new RegExp(`^(\\d+(${durationTerms.map((term) => term.label).join('|')}))+$`);
+
+// Determines if a duration already has a complete time unit to prevent autocomplete insertion (issue #15452)
+function hasCompleteDurationUnit(state: EditorState, node: SyntaxNode): boolean {
+ if (node.from >= node.to) {
+ return false;
+ }
+ const nodeContent = state.sliceDoc(node.from, node.to);
+ return durationWithUnitRegexp.test(nodeContent);
+}
+
// computeStartCompleteLabelPositionInLabelMatcherOrInGroupingLabel calculates the start position only when the node is a LabelMatchers or a GroupingLabels
function computeStartCompleteLabelPositionInLabelMatcherOrInGroupingLabel(node: SyntaxNode, pos: number): number {
// Here we can have two different situations:
@@ -477,12 +520,18 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode, pos: num
// Duration, Duration, ⚠(NumberLiteral)
// )
// So we should continue to autocomplete a duration
- result.push({ kind: ContextKind.Duration });
+ if (!hasCompleteDurationUnit(state, node)) {
+ result.push({ kind: ContextKind.Duration });
+ }
} else {
result.push({ kind: ContextKind.Number });
}
break;
case NumberDurationLiteralInDurationContext:
+ if (!hasCompleteDurationUnit(state, node)) {
+ result.push({ kind: ContextKind.Duration });
+ }
+ break;
case OffsetExpr:
result.push({ kind: ContextKind.Duration });
break;
@@ -556,6 +605,10 @@ export class HybridComplete implements CompleteStrategy {
return this.prometheusClient;
}
+ destroy(): void {
+ this.prometheusClient?.destroy?.();
+ }
+
 promQL(context: CompletionContext): Promise<CompletionResult | null> | CompletionResult | null {
const { state, pos } = context;
const tree = syntaxTree(state).resolve(pos, -1);
@@ -644,7 +697,13 @@ export class HybridComplete implements CompleteStrategy {
}
}
return asyncResult.then((result) => {
- return arrayToCompletionResult(result, computeStartCompletePosition(state, tree, pos), pos, completeSnippet, span);
+ return arrayToCompletionResult(
+ result,
+ computeStartCompletePosition(state, tree, pos),
+ computeEndCompletePosition(tree, pos),
+ completeSnippet,
+ span
+ );
});
}
@@ -670,11 +729,10 @@ export class HybridComplete implements CompleteStrategy {
.then((metricMetadata) => {
if (metricMetadata) {
for (const [metricName, node] of metricCompletion) {
- // First check if the full metric name has metadata (even if it has one of the
- // histogram/summary suffixes, it may be a metric that is not following naming
- // conventions, see https://github.com/prometheus/prometheus/issues/16907).
- // Then fall back to the base metric name if full metadata doesn't exist.
- const metadata = metricMetadata[metricName] ?? metricMetadata[metricName.replace(/(_count|_sum|_bucket)$/, '')];
+ // First check if the full metric name has metadata (even if it has one of the histogram/summary/OpenMetrics suffixes,
+ // it may be a metric that is not following naming conventions).
+ // Then fall back to the base metric name if full metadata doesn't exist.
+ const metadata = metricMetadata[metricName] ?? metricMetadata[metricName.replace(/(_count|_sum|_bucket|_total)$/, '')];
if (metadata) {
if (metadata.length > 1) {
// it means the metricName has different possible helper and type
diff --git a/web/ui/module/codemirror-promql/src/complete/index.ts b/web/ui/module/codemirror-promql/src/complete/index.ts
index b3902c3b6b..dd73857639 100644
--- a/web/ui/module/codemirror-promql/src/complete/index.ts
+++ b/web/ui/module/codemirror-promql/src/complete/index.ts
@@ -19,6 +19,7 @@ import { CompletionContext, CompletionResult } from '@codemirror/autocomplete';
// Every different completion mode must implement this interface.
export interface CompleteStrategy {
 promQL(context: CompletionContext): Promise<CompletionResult | null> | CompletionResult | null;
+ destroy?(): void;
}
// CompleteConfiguration should be used to customize the autocompletion.
diff --git a/web/ui/module/codemirror-promql/src/promql.test.ts b/web/ui/module/codemirror-promql/src/promql.test.ts
new file mode 100644
index 0000000000..787747cc5e
--- /dev/null
+++ b/web/ui/module/codemirror-promql/src/promql.test.ts
@@ -0,0 +1,58 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import { PromQLExtension } from './promql';
+import { CompleteStrategy } from './complete';
+import { CompletionResult } from '@codemirror/autocomplete';
+
+describe('PromQLExtension destroy', () => {
+ it('should be safe to call destroy multiple times', () => {
+ const extension = new PromQLExtension();
+ // First call
+ extension.destroy();
+ // Second call should not throw
+ expect(() => extension.destroy()).not.toThrow();
+ });
+
+ it('should call destroy on the complete strategy if available', () => {
+ const extension = new PromQLExtension();
+
+ // Set up a mock complete strategy with destroy
+ let destroyCalled = false;
+ const mockCompleteStrategy: CompleteStrategy = {
+ promQL: (): CompletionResult | null => null,
+ destroy: () => {
+ destroyCalled = true;
+ },
+ };
+
+ extension.setComplete({ completeStrategy: mockCompleteStrategy });
+ extension.destroy();
+
+ expect(destroyCalled).toBe(true);
+ });
+
+ it('should handle complete strategies without destroy method', () => {
+ const extension = new PromQLExtension();
+
+ // Set up a mock complete strategy without destroy
+ const mockCompleteStrategy: CompleteStrategy = {
+ promQL: (): CompletionResult | null => null,
+ };
+
+ extension.setComplete({ completeStrategy: mockCompleteStrategy });
+
+ // Should not throw even though complete strategy has no destroy
+ expect(() => extension.destroy()).not.toThrow();
+ });
+});
diff --git a/web/ui/module/codemirror-promql/src/promql.ts b/web/ui/module/codemirror-promql/src/promql.ts
index 506cd1348b..859442559f 100644
--- a/web/ui/module/codemirror-promql/src/promql.ts
+++ b/web/ui/module/codemirror-promql/src/promql.ts
@@ -79,6 +79,10 @@ export class PromQLExtension {
return this;
}
+ destroy(): void {
+ this.complete.destroy?.();
+ }
+
asExtension(languageType = LanguageType.PromQL): Extension {
const language = promQLLanguage(languageType);
let extension: Extension = [language];
diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json
index 05511c2b89..eccae9a163 100644
--- a/web/ui/module/lezer-promql/package.json
+++ b/web/ui/module/lezer-promql/package.json
@@ -1,6 +1,6 @@
{
"name": "@prometheus-io/lezer-promql",
- "version": "0.307.3",
+ "version": "0.309.1",
"description": "lezer-based PromQL grammar",
"main": "dist/index.cjs",
"type": "module",
diff --git a/web/ui/module/lezer-promql/src/highlight.js b/web/ui/module/lezer-promql/src/highlight.js
index 9c1b5601a3..b452373345 100644
--- a/web/ui/module/lezer-promql/src/highlight.js
+++ b/web/ui/module/lezer-promql/src/highlight.js
@@ -1,4 +1,4 @@
-// Copyright 2022 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/web/ui/module/lezer-promql/src/tokens.js b/web/ui/module/lezer-promql/src/tokens.js
index 1695ae1d87..523c306ae9 100644
--- a/web/ui/module/lezer-promql/src/tokens.js
+++ b/web/ui/module/lezer-promql/src/tokens.js
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json
index 2631802e53..764fd87820 100644
--- a/web/ui/package-lock.json
+++ b/web/ui/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "prometheus-io",
- "version": "0.307.3",
+ "version": "0.309.1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "prometheus-io",
- "version": "0.307.3",
+ "version": "0.309.1",
"workspaces": [
"mantine-ui",
"module/*"
@@ -24,7 +24,7 @@
},
"mantine-ui": {
"name": "@prometheus-io/mantine-ui",
- "version": "0.307.3",
+ "version": "0.309.1",
"dependencies": {
"@codemirror/autocomplete": "^6.19.1",
"@codemirror/language": "^6.11.3",
@@ -42,7 +42,7 @@
"@microsoft/fetch-event-source": "^2.0.1",
"@nexucis/fuzzy": "^0.5.1",
"@nexucis/kvsearch": "^0.9.1",
- "@prometheus-io/codemirror-promql": "0.307.3",
+ "@prometheus-io/codemirror-promql": "0.309.1",
"@reduxjs/toolkit": "^2.10.1",
"@tabler/icons-react": "^3.35.0",
"@tanstack/react-query": "^5.90.7",
@@ -88,10 +88,10 @@
},
"module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql",
- "version": "0.307.3",
+ "version": "0.309.1",
"license": "Apache-2.0",
"dependencies": {
- "@prometheus-io/lezer-promql": "0.307.3",
+ "@prometheus-io/lezer-promql": "0.309.1",
"lru-cache": "^11.2.2"
},
"devDependencies": {
@@ -121,7 +121,7 @@
},
"module/lezer-promql": {
"name": "@prometheus-io/lezer-promql",
- "version": "0.307.3",
+ "version": "0.309.1",
"license": "Apache-2.0",
"devDependencies": {
"@lezer/generator": "^1.8.0",
@@ -7551,9 +7551,9 @@
}
},
"node_modules/prettier": {
- "version": "3.6.2",
- "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz",
- "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==",
+ "version": "3.7.4",
+ "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.7.4.tgz",
+ "integrity": "sha512-v6UNi1+3hSlVvv8fSaoUbggEM5VErKmmpGA7Pl3HF8V6uKY7rvClBOJlH6yNwQtfTueNkGVpOv/mtWL9L4bgRA==",
"dev": true,
"license": "MIT",
"bin": {
@@ -8693,10 +8693,11 @@
}
},
"node_modules/ts-jest": {
- "version": "29.4.5",
- "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.4.5.tgz",
- "integrity": "sha512-HO3GyiWn2qvTQA4kTgjDcXiMwYQt68a1Y8+JuLRVpdIzm+UOLSHgl/XqR4c6nzJkq5rOkjc02O2I7P7l/Yof0Q==",
+ "version": "29.4.6",
+ "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.4.6.tgz",
+ "integrity": "sha512-fSpWtOO/1AjSNQguk43hb/JCo16oJDnMJf3CdEGNkqsEX3t0KX96xvyX1D7PfLCpVoKu4MfVrqUkFyblYoY4lA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"bs-logger": "^0.2.6",
"fast-json-stable-stringify": "^2.1.0",
diff --git a/web/ui/package.json b/web/ui/package.json
index e237294df8..e634652b41 100644
--- a/web/ui/package.json
+++ b/web/ui/package.json
@@ -1,7 +1,7 @@
{
"name": "prometheus-io",
"description": "Monorepo for the Prometheus UI",
- "version": "0.307.3",
+ "version": "0.309.1",
"private": true,
"scripts": {
"build": "bash build_ui.sh --all",
diff --git a/web/ui/ui.go b/web/ui/ui.go
index 2585951d4d..c427dcf119 100644
--- a/web/ui/ui.go
+++ b/web/ui/ui.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
diff --git a/web/web.go b/web/web.go
index d7b647e3db..afe78e4255 100644
--- a/web/web.go
+++ b/web/web.go
@@ -1,4 +1,4 @@
-// Copyright 2013 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -57,6 +57,7 @@ import (
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/template"
+ "github.com/prometheus/prometheus/util/features"
"github.com/prometheus/prometheus/util/httputil"
"github.com/prometheus/prometheus/util/netconnlimit"
"github.com/prometheus/prometheus/util/notifications"
@@ -300,8 +301,9 @@ type Options struct {
AcceptRemoteWriteProtoMsgs remoteapi.MessageTypes
- Gatherer prometheus.Gatherer
- Registerer prometheus.Registerer
+ Gatherer prometheus.Gatherer
+ Registerer prometheus.Registerer
+ FeatureRegistry features.Collector
}
// New initializes a new web Handler.
@@ -399,8 +401,27 @@ func New(logger *slog.Logger, o *Options) *Handler {
o.EnableTypeAndUnitLabels,
o.AppendMetadata,
nil,
+ o.FeatureRegistry,
)
+ if r := o.FeatureRegistry; r != nil {
+ // Set dynamic API features (based on configuration).
+ r.Set(features.API, "lifecycle", o.EnableLifecycle)
+ r.Set(features.API, "admin", o.EnableAdminAPI)
+ r.Set(features.API, "remote_write_receiver", o.EnableRemoteWriteReceiver)
+ r.Set(features.API, "otlp_write_receiver", o.EnableOTLPWriteReceiver)
+ r.Set(features.OTLPReceiver, "delta_conversion", o.ConvertOTLPDelta)
+ r.Set(features.OTLPReceiver, "native_delta_ingestion", o.NativeOTLPDeltaIngestion)
+ r.Enable(features.API, "label_values_match") // match[] parameter for label values endpoint.
+ r.Enable(features.API, "query_warnings") // warnings in query responses.
+ r.Enable(features.API, "query_stats") // stats parameter for query endpoints.
+ r.Enable(features.API, "time_range_series") // start/end parameters for /series endpoint.
+ r.Enable(features.API, "time_range_labels") // start/end parameters for /labels endpoints.
+ r.Enable(features.API, "exclude_alerts") // exclude_alerts parameter for /rules endpoint.
+ r.Set(features.UI, "ui_v3", !o.UseOldUI)
+ r.Set(features.UI, "ui_v2", o.UseOldUI)
+ }
+
if o.RoutePrefix != "/" {
// If the prefix is missing for the root path, prepend it.
router.Get("/", func(w http.ResponseWriter, r *http.Request) {
@@ -434,13 +455,6 @@ func New(logger *slog.Logger, o *Options) *Handler {
reactAssetsRoot = "/static/react-app"
}
- // The console library examples at 'console_libraries/prom.lib' still depend on old asset files being served under `classic`.
- router.Get("/classic/static/*filepath", func(w http.ResponseWriter, r *http.Request) {
- r.URL.Path = path.Join("/static", route.Param(r.Context(), "filepath"))
- fs := server.StaticFileServer(ui.Assets)
- fs.ServeHTTP(w, r)
- })
-
router.Get("/version", h.version)
router.Get("/metrics", promhttp.Handler().ServeHTTP)
diff --git a/web/web_test.go b/web/web_test.go
index b07e26cfa8..ae7d532f1f 100644
--- a/web/web_test.go
+++ b/web/web_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at