Mirror of https://github.com/prometheus/prometheus.git (synced 2026-02-03 20:39:32 -05:00)
Merge branch 'main' into codesome/stale-series-compaction
Some checks failed
CI / Go tests (push) Has been cancelled
CI / More Go tests (push) Has been cancelled
CI / Go tests with previous Go version (push) Has been cancelled
CI / UI tests (push) Has been cancelled
CI / Go tests on Windows (push) Has been cancelled
CI / Mixins tests (push) Has been cancelled
CI / Build Prometheus for common architectures (push) Has been cancelled
CI / Build Prometheus for all architectures (push) Has been cancelled
CI / Check generated parser (push) Has been cancelled
CI / golangci-lint (push) Has been cancelled
CI / fuzzing (push) Has been cancelled
CI / codeql (push) Has been cancelled
CI / Report status of build Prometheus for all architectures (push) Has been cancelled
CI / Publish main branch artifacts (push) Has been cancelled
CI / Publish release artefacts (push) Has been cancelled
CI / Publish UI on npm Registry (push) Has been cancelled
Signed-off-by: Ganesh Vernekar <ganesh.vernekar@reddit.com>
This commit is contained in commit 3f51be0f54.
717 changed files with 28823 additions and 6844 deletions.
.github/CODEOWNERS (vendored, file deleted): 10 changes

@@ -1,10 +0,0 @@
-/web/ui @juliusv
-/web/ui/module @juliusv @nexucis
-/storage/remote @cstyan @bwplotka @tomwilkie
-/storage/remote/otlptranslator @aknuds1 @jesusvazquez
-/discovery/kubernetes @brancz
-/tsdb @jesusvazquez
-/promql @roidelapluie
-/cmd/promtool @dgl
-/documentation/prometheus-mixin @metalmatze
.github/dependabot.yml (vendored, file deleted): 33 changes

@@ -1,33 +0,0 @@
-version: 2
-updates:
-  - package-ecosystem: "docker"
-    directory: "/"
-    schedule:
-      interval: "monthly"
-  - package-ecosystem: "github-actions"
-    directories:
-      - "/"
-      - "/scripts"
-    schedule:
-      interval: "monthly"
-  - package-ecosystem: "gomod"
-    directories:
-      - "/"
-      - "/documentation/examples/remote_storage"
-      - "/internal/tools"
-    schedule:
-      interval: "monthly"
-    groups:
-      aws:
-        patterns:
-          - "github.com/aws/*"
-      azure:
-        patterns:
-          - "github.com/Azure/*"
-      k8s.io:
-        patterns:
-          - "k8s.io/*"
-      go.opentelemetry.io:
-        patterns:
-          - "go.opentelemetry.io/*"
-    open-pull-requests-limit: 20
.github/workflows/buf-lint.yml (vendored): 2 changes

@@ -12,7 +12,7 @@ jobs:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
       - uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0
.github/workflows/buf.yml (vendored): 4 changes

@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository_owner == 'prometheus'
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
       - uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0

@@ -25,7 +25,7 @@ jobs:
         with:
           input: 'prompb'
           against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD~1,subdir=prompb'
-      - uses: bufbuild/buf-push-action@a654ff18effe4641ebea4a4ce242c49800728459 # v1.1.1
+      - uses: bufbuild/buf-push-action@a654ff18effe4641ebea4a4ce242c49800728459 # v1.2.0
         with:
           input: 'prompb'
           buf_token: ${{ secrets.BUF_TOKEN }}
.github/workflows/check_release_notes.yml (vendored): 2 changes

@@ -20,7 +20,7 @@ jobs:
     # Don't run it on dependabot PRs either as humans would take control in case a bump introduces a breaking change.
     if: (github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community') && github.event.pull_request.user.login != 'dependabot[bot]'
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - env:
           PR_DESCRIPTION: ${{ github.event.pull_request.body }}
         run: |
.github/workflows/ci.yml (vendored): 72 changes

@@ -16,10 +16,10 @@ jobs:
       # should also be updated.
       image: quay.io/prometheus/golang-builder:1.25-base
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
-      - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
+      - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
       - uses: ./.github/promci/actions/setup_environment
         with:
           enable_npm: true

@@ -34,10 +34,10 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.25-base
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
-      - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
+      - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
       - uses: ./.github/promci/actions/setup_environment
       - run: go test --tags=dedupelabels ./...
       - run: go test --tags=slicelabels -race ./cmd/prometheus ./model/textparse ./prompb/...

@@ -57,9 +57,9 @@ jobs:
       GOEXPERIMENT: synctest
     container:
       # The go version in this image should be N-1 wrt test_go.
-      image: quay.io/prometheus/golang-builder:1.24-base
+      image: quay.io/prometheus/golang-builder:1.25-base
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
       - run: make build

@@ -78,10 +78,10 @@ jobs:
       image: quay.io/prometheus/golang-builder:1.25-base

     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
-      - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
+      - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
       - uses: ./.github/promci/actions/setup_environment
         with:
           enable_go: false

@@ -97,10 +97,10 @@ jobs:
     name: Go tests on Windows
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
-      - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
+      - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
         with:
           go-version: 1.25.x
       - run: |

@@ -116,7 +116,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.25-base
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
       - run: go install ./cmd/promtool/.

@@ -143,10 +143,10 @@ jobs:
       matrix:
         thread: [ 0, 1, 2 ]
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
-      - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
+      - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
       - uses: ./.github/promci/actions/build
         with:
           promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"

@@ -170,10 +170,10 @@ jobs:
       # Whenever the Go version is updated here, .promu.yml
       # should also be updated.
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
-      - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
+      - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
       - uses: ./.github/promci/actions/build
         with:
           parallelism: 12

@@ -202,30 +202,32 @@ jobs:
         if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }}
         run: exit 1
   check_generated_parser:
     # Checks generated parser and UI functions list. Not renaming as it is a required check.
     name: Check generated parser
     runs-on: ubuntu-latest
     container:
       image: quay.io/prometheus/golang-builder:1.25-base
     steps:
       - name: Checkout repository
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
-      - name: Install Go
-        uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
+      - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+      - uses: ./.github/promci/actions/setup_environment
         with:
-          cache: false
-          go-version: 1.25.x
-      - name: Run goyacc and check for diff
-        run: make install-goyacc check-generated-parser
+          enable_npm: true
+      - run: make install-goyacc check-generated-parser
+      - run: make check-generated-promql-functions
   golangci:
     name: golangci-lint
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
       - name: Install Go
-        uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
+        uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
         with:
           go-version: 1.25.x
       - name: Install snmp_exporter/generator dependencies

@@ -235,18 +237,18 @@ jobs:
         id: golangci-lint-version
         run: echo "version=$(make print-golangci-lint-version)" >> $GITHUB_OUTPUT
       - name: Lint
-        uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
+        uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
         with:
           args: --verbose
           version: ${{ steps.golangci-lint-version.outputs.version }}
       - name: Lint with slicelabels
-        uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
+        uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
         with:
           # goexperiment.synctest to ensure we don't miss files that depend on it.
           args: --verbose --build-tags=slicelabels,goexperiment.synctest
           version: ${{ steps.golangci-lint-version.outputs.version }}
       - name: Lint with dedupelabels
-        uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
+        uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
         with:
           args: --verbose --build-tags=dedupelabels
           version: ${{ steps.golangci-lint-version.outputs.version }}

@@ -265,10 +267,10 @@ jobs:
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
-      - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
+      - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
       - uses: ./.github/promci/actions/publish_main
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}

@@ -284,10 +286,10 @@ jobs:
       ||
       (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
-      - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
+      - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
       - uses: ./.github/promci/actions/publish_release
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}

@@ -301,16 +303,16 @@ jobs:
     needs: [test_ui, codeql]
     steps:
       - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
-      - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
+      - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
       - name: Install nodejs
-        uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
+        uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
         with:
           node-version-file: "web/ui/.nvmrc"
           registry-url: "https://registry.npmjs.org"
-      - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
+      - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
         with:
           path: ~/.npm
           key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
.github/workflows/codeql-analysis.yml (vendored): 8 changes

@@ -24,17 +24,17 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false

       - name: Initialize CodeQL
-        uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
+        uses: github/codeql-action/init@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
         with:
           languages: ${{ matrix.language }}

       - name: Autobuild
-        uses: github/codeql-action/autobuild@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
+        uses: github/codeql-action/autobuild@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9

       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
+        uses: github/codeql-action/analyze@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
.github/workflows/container_description.yml (vendored): 4 changes

@@ -18,7 +18,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
       - name: Set docker hub repo name

@@ -42,7 +42,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
       - name: Set quay.io org name
.github/workflows/fuzzing.yml (vendored): 6 changes

@@ -10,12 +10,12 @@ jobs:
     steps:
       - name: Build Fuzzers
         id: build
-        uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@cafd7a0eb8ecb4e007c56897996a9b65c49c972f # master
+        uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@4bf20ff8dfda18ad651583ebca9fb17a7ce1940a # master
         with:
           oss-fuzz-project-name: "prometheus"
           dry-run: false
       - name: Run Fuzzers
-        uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@cafd7a0eb8ecb4e007c56897996a9b65c49c972f # master
+        uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@4bf20ff8dfda18ad651583ebca9fb17a7ce1940a # master
         # Note: Regularly check for updates to the pinned commit hash at:
         # https://github.com/google/oss-fuzz/tree/master/infra/cifuzz/actions/run_fuzzers
         with:

@@ -23,7 +23,7 @@ jobs:
           fuzz-seconds: 600
           dry-run: false
       - name: Upload Crash
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
         if: failure() && steps.build.outcome == 'success'
         with:
           name: artifacts
.github/workflows/lock.yml (vendored): 2 changes

@@ -16,7 +16,7 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository_owner == 'prometheus'
     steps:
-      - uses: dessant/lock-threads@1bf7ec25051fe7c00bdd17e6a7cf3d7bfb7dc771 # v5.0.1
+      - uses: dessant/lock-threads@7266a7ce5c1df01b1c6db85bf8cd86c737dadbe7 # v6.0.0
         with:
           process-only: 'issues'
           issue-inactive-days: '180'
.github/workflows/repo_sync.yml (vendored): 2 changes

@@ -14,7 +14,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder
     steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
         with:
           persist-credentials: false
       - run: ./scripts/sync_repo_files.sh
.github/workflows/scorecards.yml (vendored): 6 changes

@@ -21,7 +21,7 @@ jobs:

     steps:
       - name: "Checkout code"
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # tag=v6.0.1
         with:
           persist-credentials: false

@@ -37,7 +37,7 @@ jobs:
       # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
       # format to the repository Actions tab.
       - name: "Upload artifact"
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # tag=v5.0.0
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # tag=v6.0.0
         with:
           name: SARIF file
           path: results.sarif

@@ -45,6 +45,6 @@ jobs:

       # Upload the results to GitHub's code scanning dashboard.
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # tag=v4.31.2
+        uses: github/codeql-action/upload-sarif@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
         with:
           sarif_file: results.sarif
.github/workflows/stale.yml (vendored): 2 changes

@@ -11,7 +11,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
+      - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           # opt out of defaults to avoid marking issues as stale and closing them
.gitignore (vendored): 1 change

@@ -26,6 +26,7 @@ npm_licenses.tar.bz2

 /vendor
 /.build
+/go.work.sum

 /**/node_modules
.golangci.yml:

@@ -31,6 +31,7 @@ linters:
     - govet
     - loggercheck
     - misspell
+    - modernize
     - nilnesserr
     # TODO(bwplotka): Enable once https://github.com/golangci/golangci-lint/issues/3228 is fixed.
     # - nolintlint

@@ -117,6 +118,12 @@ linters:
         - shadow
         - fieldalignment
       enable-all: true
+    modernize:
+      disable:
+        # Suggest replacing omitempty with omitzero for struct fields.
+        # Disable this check for now since it introduces too many changes in our existing codebase.
+        # See https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#hdr-Analyzer_omitzero for more details.
+        - omitzero
     perfsprint:
       # Optimizes even if it requires an int or uint type cast.
       int-conversion: true

@@ -175,6 +182,11 @@ linters:
       - name: unused-receiver
       - name: var-declaration
       - name: var-naming
+        # TODO(SuperQ): See: https://github.com/prometheus/prometheus/issues/17766
+        arguments:
+          - []
+          - []
+          - - skip-package-name-checks: true
     testifylint:
       disable:
         - float-compare
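A note on the newly disabled `omitzero` check: it refers to the `encoding/json` struct-tag option introduced in Go 1.24, which the `modernize` analyzer suggests in place of `omitempty`. A minimal illustration of the difference the check is about (the type below is hypothetical, not from the Prometheus codebase):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// sample is a hypothetical type for illustration only.
type sample struct {
	// omitempty keys off "emptiness", so a zero time.Time (a struct)
	// is still marshalled as "0001-01-01T00:00:00Z".
	Timestamp time.Time `json:"timestamp,omitempty"`
	// omitzero (Go 1.24+) omits any zero value, including zero structs.
	Received time.Time `json:"received,omitzero"`
}

func main() {
	b, _ := json.Marshal(sample{})
	fmt.Println(string(b)) // {"timestamp":"0001-01-01T00:00:00Z"}
}
```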
CHANGELOG.md: 94 changes

@@ -1,9 +1,87 @@
 # Changelog

-## main / unreleased
-
-* [FEATURE] Templates: Add urlQueryEscape to template functions. #17403
-* [BUGFIX] TSDB: Register `prometheus_tsdb_sample_ooo_delta` metric properly. #17477
+## 3.9.1 / 2026-01-07
+
+- [BUGFIX] Agent: fix crash shortly after startup from invalid type of object. #17802
+- [BUGFIX] Scraping: fix relabel keep/drop not working. #17807
+
+## 3.9.0 / 2026-01-06
+
+- [CHANGE] Native Histograms are no longer experimental! Make the `native-histogram` feature flag a no-op. Use `scrape_native_histograms` config option instead. #17528
+- [CHANGE] API: Add maximum limit of 10,000 sets of statistics to TSDB status endpoint. #17647
+- [FEATURE] API: Add /api/v1/features for clients to understand which features are supported. #17427
+- [FEATURE] Promtool: Add `start_timestamp` field for unit tests. #17636
+- [FEATURE] Promtool: Add `--format seriesjson` option to `tsdb dump` to output just series labels in JSON format. #13409
+- [FEATURE] Add `--storage.tsdb.delay-compact-file.path` flag for better interoperability with Thanos. #17435
+- [FEATURE] UI: Add an option on the query drop-down menu to duplicate that query panel. #17714
+- [ENHANCEMENT] TSDB: add flag `--storage.tsdb.block-reload-interval` to configure TSDB Block Reload Interval. #16728
+- [ENHANCEMENT] UI: Add graph option to start the chart's Y axis at zero. #17565
+- [ENHANCEMENT] Scraping: Classic protobuf format no longer requires the unit in the metric name. #16834
+- [ENHANCEMENT] PromQL, Rules, SD, Scraping: Add native histograms to complement existing summaries. #17374
+- [ENHANCEMENT] Notifications: Add a histogram `prometheus_notifications_latency_histogram_seconds` to complement the existing summary. #16637
+- [ENHANCEMENT] Remote-write: Add custom scope support for AzureAD authentication. #17483
+- [ENHANCEMENT] SD: add a `config` label with job name for most `prometheus_sd_refresh` metrics. #17138
+- [ENHANCEMENT] TSDB: New histogram `prometheus_tsdb_sample_ooo_delta`, the distribution of out-of-order samples in seconds. Collected for all samples, accepted or not. #17477
+- [ENHANCEMENT] Remote-read: Validate histograms received via remote-read. #17561
+- [PERF] TSDB: Small optimizations to postings index. #17439
+- [PERF] Scraping: Speed up relabelling of series. #17530
+- [PERF] PromQL: Small optimisations in binary operators. #17524, #17519.
+- [BUGFIX] UI: PromQL autocomplete now shows the correct type and HELP text for OpenMetrics counters whose samples end in `_total`. #17682
+- [BUGFIX] UI: Fixed codemirror-promql incorrectly showing label completion suggestions after the closing curly brace of a vector selector. #17602
+- [BUGFIX] UI: Query editor no longer suggests a duration unit if one is already present after a number. #17605
+- [BUGFIX] PromQL: Fix some "vector cannot contain metrics with the same labelset" errors when experimental delayed name removal is enabled. #17678
+- [BUGFIX] PromQL: Fix possible corruption of PromQL text if the query had an empty `ignoring()` and non-empty grouping. #17643
+- [BUGFIX] PromQL: Fix resets/changes to return empty results for anchored selectors when all samples are outside the range. #17479
+- [BUGFIX] PromQL: Check more consistently for many-to-one matching in filter binary operators. #17668
+- [BUGFIX] PromQL: Fix collision in unary negation with non-overlapping series. #17708
+- [BUGFIX] PromQL: Fix collision in label_join and label_replace with non-overlapping series. #17703
+- [BUGFIX] PromQL: Fix bug with inconsistent results for queries with OR expression when experimental delayed name removal is enabled. #17161
+- [BUGFIX] PromQL: Ensure that `rate`/`increase`/`delta` of histograms results in a gauge histogram. #17608
+- [BUGFIX] PromQL: Do not panic while iterating over invalid histograms. #17559
+- [BUGFIX] TSDB: Reject chunk files whose encoded chunk length overflows int. #17533
+- [BUGFIX] TSDB: Do not panic during resolution reduction of invalid histograms. #17561
+- [BUGFIX] Remote-write Receive: Avoid duplicate labels when experimental type-and-unit-label feature is enabled. #17546
+- [BUGFIX] OTLP Receiver: Only write metadata to disk when experimental metadata-wal-records feature is enabled. #17472
+
+## 3.8.1 / 2025-12-16
+
+* [BUGFIX] remote: Fix Remote Write receiver, so it does not send wrong response headers for v1 flow and cause Prometheus senders to emit false partial error log and metrics. #17683
+
+## 3.8.0 / 2025-11-28
+
+* [CHANGE] Remote-write: Update receiving to [2.0-rc.4 spec](https://github.com/prometheus/docs/blob/60c24e450010df38cfcb4f65df874f6f9b26dbcb/docs/specs/prw/remote_write_spec_2_0.md). "created timestamp" (CT) is now called "start timestamp" (ST). #17411
+* [CHANGE] TSDB: Native Histogram Custom Bounds with a NaN threshold are now rejected. #17287
+* [FEATURE] OAuth2: support jwt-bearer grant-type (RFC7523 3.1). #17592
+* [FEATURE] Dockerfile: Add OpenContainers spec labels to Dockerfile. #16483
+* [FEATURE] SD: Add unified AWS service discovery for ec2, lightsail and ecs services. #17406
+* [FEATURE] Native histograms are now a stable, but optional feature, use the `scrape_native_histogram` config setting. #17232 #17315
+* [FEATURE] UI: Support anchored and smoothed keyword in promql editor. #17239
+* [FEATURE] UI: Show detailed relabeling steps for each discovered target. #17337
+* [FEATURE] Alerting: Add urlQueryEscape to template functions. #17403
+* [FEATURE] Promtool: Add Remote-Write 2.0 support to `promtool push metrics` via the `--protobuf_message` flag. #17417
+* [ENHANCEMENT] Clarify the docs about handling negative native histograms. #17249
+* [ENHANCEMENT] Mixin: Add static UID to the remote-write dashboard. #17256
+* [ENHANCEMENT] PromQL: Reconcile mismatched NHCB bounds in `Add` and `Sub`. #17278
+* [ENHANCEMENT] Alerting: Add "unknown" state for alerting rules that haven't been evaluated yet. #17282
+* [ENHANCEMENT] Scrape: Allow simultaneous use of classic histogram → NHCB conversion and zero-timestamp ingestion. #17305
+* [ENHANCEMENT] UI: Add smoothed/anchored in explain. #17334
+* [ENHANCEMENT] OTLP: De-duplicate any `target_info` samples with the same timestamp for the same series. #17400
+* [ENHANCEMENT] Document `use_fips_sts_endpoint` in `sigv4` config sections. #17304
+* [ENHANCEMENT] Document Prometheus Agent. #14519
+* [PERF] PromQL: Speed up parsing of variadic functions. #17316
+* [PERF] UI: Speed up alerts/rules/... pages by not rendering collapsed content. #17485
+* [PERF] UI: Performance improvement when getting label name and values in promql editor. #17194
+* [PERF] UI: Speed up /alerts for many firing alerts via virtual scrolling. #17254
+* [BUGFIX] PromQL: Fix slice indexing bug in info function on churning series. #17199
+* [BUGFIX] API: Reduce lock contention on `/api/v1/targets`. #17306
+* [BUGFIX] PromQL: Consistent handling of gauge vs. counter histograms in aggregations. #17312
+* [BUGFIX] TSDB: Allow NHCB with -Inf as the first custom value. #17320
+* [BUGFIX] UI: Fix duplicate loading of data from the API speed up rendering of some pages. #17357
+* [BUGFIX] Old UI: Fix createExpressionLink to correctly build /graph URLs so links from Alerts/Rules work again. #17365
+* [BUGFIX] PromQL: Avoid panic when parsing malformed `info` call. #17379
+* [BUGFIX] PromQL: Include histograms when enforcing sample_limit. #17390
+* [BUGFIX] Config: Fix panic if TLS CA file is absent. #17418
+* [BUGFIX] PromQL: Fix `histogram_fraction` for classic histograms and NHCB if lower bound is in the first bucket. #17424

 ## 3.7.3 / 2025-10-29

@@ -201,7 +279,7 @@

 ## 3.2.1 / 2025-02-25

-* [BUGFIX] Don't send Accept` header `escape=allow-utf-8` when `metric_name_validation_scheme: legacy` is configured. #16061
+* [BUGFIX] Don't send `Accept` header `escape=allow-utf-8` when `metric_name_validation_scheme: legacy` is configured. #16061

 ## 3.2.0 / 2025-02-17

@@ -212,10 +290,10 @@
 * [ENHANCEMENT] scrape: Add metadata for automatic metrics to WAL for `metadata-wal-records` feature. #15837
 * [ENHANCEMENT] promtool: Support linting of scrape interval, through lint option `too-long-scrape-interval`. #15719
 * [ENHANCEMENT] promtool: Add --ignore-unknown-fields option. #15706
-* [ENHANCEMENT] ui: Make "hide empty rules" and hide empty rules" persistent #15807
+* [ENHANCEMENT] ui: Make "hide empty rules" and "hide empty rules" persistent #15807
 * [ENHANCEMENT] web/api: Add a limit parameter to `/query` and `/query_range`. #15552
 * [ENHANCEMENT] api: Add fields Node and ServerTime to `/status`. #15784
-* [PERF] Scraping: defer computing labels for dropped targets until they are needed by the UI. #15261
+* [PERF] Scraping: defer computing labels for dropped targets until they are needed by the UI. #15261
 * [BUGFIX] remotewrite2: Fix invalid metadata bug for metrics without metadata. #15829
 * [BUGFIX] remotewrite2: Fix the unit field propagation. #15825
 * [BUGFIX] scrape: Fix WAL metadata for histograms and summaries. #15832

@@ -232,9 +310,9 @@
 * [ENHANCEMENT] TSDB: Improve calculation of space used by labels. #13880
 * [ENHANCEMENT] Rules: new metric rule_group_last_rule_duration_sum_seconds. #15672
 * [ENHANCEMENT] Observability: Export 'go_sync_mutex_wait_total_seconds_total' metric. #15339
-* [ENHANCEMEN] Remote-Write: optionally use a DNS resolver that picks a random IP. #15329
+* [ENHANCEMENT] Remote-Write: optionally use a DNS resolver that picks a random IP. #15329
 * [PERF] Optimize `l=~".+"` matcher. #15474, #15684
-* [PERF] TSDB: Cache all symbols for compaction . #15455
+* [PERF] TSDB: Cache all symbols for compaction. #15455
 * [PERF] TSDB: MemPostings: keep a map of label values slices. #15426
 * [PERF] Remote-Write: Remove interning hook. #15456
 * [PERF] Scrape: optimize string manipulation for experimental native histograms with custom buckets. #15453
CODEOWNERS: 32 changes

@@ -1,10 +1,26 @@
 # Prometheus team members are members of the "default maintainers" github team.
 # They are code owners by default for the whole repo.
 * @prometheus/default-maintainers
+#
+# Please keep this file in sync with the MAINTAINERS.md file!
+#

-# Example adding a dedicated maintainer for AWS SD, and also "default
-# maintainers" so that they do not need to bypass codeowners check to merge
-# something.
-# Example comes from
+# Subsystems.
+/Makefile @simonpasquier @SuperQ
+/cmd/promtool @dgl
+/documentation/prometheus-mixin @metalmatze
+/model/histogram @beorn7 @krajorama
+/web/ui @juliusv
+/web/ui/module @juliusv @nexucis
+/promql @roidelapluie
+/storage/remote @cstyan @bwplotka @tomwilkie @npazosmendez @alexgreenbank
+/storage/remote/otlptranslator @aknuds1 @jesusvazquez @ArthurSens
+/tsdb @jesusvazquez @codesome @bwplotka @krajorama
+
+# Service discovery.
 /discovery/kubernetes @brancz
+/discovery/stackit @jkroepke
 # Pending
 # https://github.com/prometheus/prometheus/pull/17105#issuecomment-3248209452
-# /discovery/aws/ @matt-gp @prometheus/default-maintainers
+# /discovery/aws/ @matt-gp @sysadmind
+# https://github.com/prometheus/prometheus/pull/15212#issuecomment-3575225179
+# /discovery/aliyun @KeyOfSpectator
+# https://github.com/prometheus/prometheus/pull/14108#issuecomment-2639515421
+# /discovery/nomad @jaloren @jrasell
CONTRIBUTING.md:

@@ -14,7 +14,7 @@ Prometheus uses GitHub to manage reviews of pull requests.
   of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on.

 * Relevant coding style guidelines are the [Go Code Review
-  Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+  Comments](https://go.dev/wiki/CodeReviewComments)
   and the _Formatting and style_ section of Peter Bourgon's [Go: Best
   Practices for Production
   Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style).

@@ -78,8 +78,7 @@ go get example.com/some/module/pkg@vX.Y.Z
 Tidy up the `go.mod` and `go.sum` files:

 ```bash
-# The GO111MODULE variable can be omitted when the code isn't located in GOPATH.
-GO111MODULE=on go mod tidy
+go mod tidy
 ```

 You have to commit the changes to `go.mod` and `go.sum` before submitting the pull request.
MAINTAINERS.md:

@@ -1,5 +1,7 @@
 # Maintainers

+## Please keep this file in sync with the CODEOWNERS file!
+
 General maintainers:
 * Bryan Boreham (bjboreham@gmail.com / @bboreham)
 * Ayoub Mrini (ayoubmrini424@gmail.com / @machine424)

@@ -16,12 +18,12 @@ Maintainers for specific parts of the codebase:
 * `model/histogram` and other code related to native histograms: Björn Rabenstein (<beorn@grafana.com> / @beorn7),
   George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
 * `storage`
-  * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos ( <npazosmendez@gmail.com> / @npazosmendez), Alex Greenbank ( <alexgreenbank@yahoo.com> / @alexgreenbank)
+  * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos (<npazosmendez@gmail.com> / @npazosmendez), Alex Greenbank (<alexgreenbank@yahoo.com> / @alexgreenbank)
   * `otlptranslator`: Arthur Silva Sens (<arthursens2005@gmail.com> / @ArthurSens), Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
-  * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
+  * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez), George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
 * `web`
   * `ui`: Julius Volz (<julius.volz@gmail.com> / @juliusv)
-  * `module`: Augustin Husson (<husson.augustin@gmail.com> @nexucis)
+  * `module`: Augustin Husson (<husson.augustin@gmail.com> / @nexucis)
 * `Makefile` and related build configuration: Simon Pasquier (<pasquier.simon@gmail.com> / @simonpasquier), Ben Kochie (<superq@gmail.com> / @SuperQ)

 For the sake of brevity, not all subtrees are explicitly listed. Due to the
Makefile: 47 changes

@@ -1,4 +1,4 @@
-# Copyright 2018 The Prometheus Authors
+# Copyright The Prometheus Authors
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at

@@ -79,6 +79,20 @@ ui-lint:
 # new Mantine-based UI is fully integrated and the old app can be removed.
 	cd $(UI_PATH)/react-app && npm run lint

+.PHONY: generate-promql-functions
+generate-promql-functions: ui-install
+	@echo ">> generating PromQL function signatures"
+	@cd $(UI_PATH)/mantine-ui/src/promql/tools && $(GO) run ./gen_functions_list > ../functionSignatures.ts
+	@echo ">> generating PromQL function documentation"
+	@cd $(UI_PATH)/mantine-ui/src/promql/tools && $(GO) run ./gen_functions_docs $(CURDIR)/docs/querying/functions.md > ../functionDocs.tsx
+	@echo ">> formatting generated files"
+	@cd $(UI_PATH)/mantine-ui && npx prettier --write --print-width 120 src/promql/functionSignatures.ts src/promql/functionDocs.tsx
+
+.PHONY: check-generated-promql-functions
+check-generated-promql-functions: generate-promql-functions
+	@echo ">> checking generated PromQL functions"
+	@git diff --exit-code -- $(UI_PATH)/mantine-ui/src/promql/functionSignatures.ts $(UI_PATH)/mantine-ui/src/promql/functionDocs.tsx || (echo "Generated PromQL function files are out of date. Please run 'make generate-promql-functions' and commit the changes." && false)
+
 .PHONY: assets
 ifndef SKIP_UI_BUILD
 assets: check-node-version ui-install ui-build

@@ -152,15 +166,8 @@ tarball: npm_licenses common-tarball
 .PHONY: docker
 docker: npm_licenses common-docker

-plugins/plugins.go: plugins.yml plugins/generate.go
-	@echo ">> creating plugins list"
-	$(GO) generate -tags plugins ./plugins
-
-.PHONY: plugins
-plugins: plugins/plugins.go
-
 .PHONY: build
-build: assets npm_licenses assets-compress plugins common-build
+build: assets npm_licenses assets-compress common-build

 .PHONY: bench_tsdb
 bench_tsdb: $(PROMU)

@@ -184,14 +191,26 @@ check-go-mod-version:
 	@echo ">> checking go.mod version matching"
 	@./scripts/check-go-mod-version.sh

+.PHONY: update-features-testdata
+update-features-testdata:
+	@echo ">> updating features testdata"
+	@$(GO) test ./cmd/prometheus -run TestFeaturesAPI -update-features
+
+GO_SUBMODULE_DIRS := documentation/examples/remote_storage internal/tools web/ui/mantine-ui/src/promql/tools
+
 .PHONY: update-all-go-deps
-update-all-go-deps:
-	@$(MAKE) update-go-deps
-	@echo ">> updating Go dependencies in ./documentation/examples/remote_storage/"
-	@cd ./documentation/examples/remote_storage/ && for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
+update-all-go-deps: update-go-deps
+	$(foreach dir,$(GO_SUBMODULE_DIRS),$(MAKE) update-go-deps-in-dir DIR=$(dir);)
+	@echo ">> syncing Go workspace"
+	@$(GO) work sync
+
+.PHONY: update-go-deps-in-dir
+update-go-deps-in-dir:
+	@echo ">> updating Go dependencies in ./$(DIR)/"
+	@cd ./$(DIR) && for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
 		$(GO) get $$m; \
 	done
-	@cd ./documentation/examples/remote_storage/ && $(GO) mod tidy
+	@cd ./$(DIR) && $(GO) mod tidy

 .PHONY: check-node-version
 check-node-version:
Makefile.common:

@@ -1,4 +1,4 @@
-# Copyright 2018 The Prometheus Authors
+# Copyright The Prometheus Authors
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at

@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v2.6.0
+GOLANGCI_LINT_VERSION ?= v2.7.2
 GOLANGCI_FMT_OPTS ?=
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.

@@ -129,6 +129,12 @@ common-check_license:
 		echo "license header checking failed:"; echo "$${licRes}"; \
 		exit 1; \
 	fi
+	@echo ">> checking for copyright years 2026 or later"
+	@futureYearRes=$$(git grep -E 'Copyright (202[6-9]|20[3-9][0-9])' -- '*.go' ':!:vendor/*' || true); \
+	if [ -n "$${futureYearRes}" ]; then \
+		echo "Files with copyright year 2026 or later found (should use 'Copyright The Prometheus Authors'):"; echo "$${futureYearRes}"; \
+		exit 1; \
+	fi

 .PHONY: common-deps
 common-deps:
README.md: 43 changes

@@ -69,7 +69,7 @@ To build Prometheus from source code, You need:

 * Go: Version specified in [go.mod](./go.mod) or greater.
 * NodeJS: Version specified in [.nvmrc](./web/ui/.nvmrc) or greater.
-* npm: Version 8 or greater (check with `npm --version` and [here](https://www.npmjs.com/)).
+* npm: Version 10 or greater (check with `npm --version` and [here](https://www.npmjs.com/)).

 Start by cloning the repository:

@@ -82,15 +82,15 @@ You can use the `go` tool to build and install the `prometheus`
 and `promtool` binaries into your `GOPATH`:

 ```bash
-GO111MODULE=on go install github.com/prometheus/prometheus/cmd/...
+go install github.com/prometheus/prometheus/cmd/...
 prometheus --config.file=your_config.yml
 ```

 *However*, when using `go install` to build Prometheus, Prometheus will expect to be able to
-read its web assets from local filesystem directories under `web/ui/static` and
-`web/ui/templates`. In order for these assets to be found, you will have to run Prometheus
-from the root of the cloned repository. Note also that these directories do not include the
-React UI unless it has been built explicitly using `make assets` or `make build`.
+read its web assets from local filesystem directories under `web/ui/static`. In order for
+these assets to be found, you will have to run Prometheus from the root of the cloned
+repository. Note also that this directory does not include the React UI unless it has been
+built explicitly using `make assets` or `make build`.

 An example of the above configuration file can be found [here.](https://github.com/prometheus/prometheus/blob/main/documentation/examples/prometheus.yml)

@@ -113,16 +113,31 @@ The Makefile provides several targets:

 ### Service discovery plugins

-Prometheus is bundled with many service discovery plugins.
-When building Prometheus from source, you can edit the [plugins.yml](./plugins.yml)
-file to disable some service discoveries. The file is a yaml-formatted list of go
-import path that will be built into the Prometheus binary.
+Prometheus is bundled with many service discovery plugins. You can customize
+which service discoveries are included in your build using Go build tags.

-After you have changed the file, you
-need to run `make build` again.
+To exclude service discoveries when building with `make build`, add the desired
+tags to the `.promu.yml` file under `build.tags.all`:

-If you are using another method to compile Prometheus, `make plugins` will
-generate the plugins file accordingly.
+```yaml
+build:
+  tags:
+    all:
+      - netgo
+      - builtinassets
+      - remove_all_sd # Exclude all optional SDs
+      - enable_kubernetes_sd # Re-enable only kubernetes
+```
+
+Then run `make build` as usual. Alternatively, when using `go build` directly:
+
+```bash
+go build -tags "remove_all_sd,enable_kubernetes_sd" ./cmd/prometheus
+```
+
+Available build tags:
+* `remove_all_sd` - Exclude all optional service discoveries (keeps file_sd, static_sd, and http_sd)
+* `enable_<name>_sd` - Re-enable a specific SD when using `remove_all_sd`

 If you add out-of-tree plugins, which we do not endorse at the moment,
 additional steps might be needed to adjust the `go.mod` and `go.sum` files. As
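For context, the tag mechanism the README section above describes rests on standard Go build constraints. A minimal sketch of how a discovery file can opt in and out under those tags; the package body and the `registerSD` helper are illustrative assumptions, not the actual Prometheus source:

```go
//go:build !remove_all_sd || enable_kubernetes_sd

// With no tags, !remove_all_sd satisfies the constraint and this file
// compiles in. Building with -tags remove_all_sd drops it, and adding
// enable_kubernetes_sd re-enables just this discovery.
package kubernetes

var registry = map[string]bool{}

// registerSD is a hypothetical stand-in for whatever registration hook
// the real discovery packages use.
func registerSD(name string) { registry[name] = true }

func init() {
	registerSD("kubernetes")
}
```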
RELEASE.md:

@@ -18,7 +18,8 @@ Please see [the v2.55 RELEASE.md](https://github.com/prometheus/prometheus/blob/
 | v3.6 | 2025-08-01 | Ayoub Mrini (Github: @machine424) |
 | v3.7 | 2025-09-25 | Arthur Sens and George Krajcsovits (Github: @ArthurSens and @krajorama)|
 | v3.8 | 2025-11-06 | Jan Fajerski (GitHub: @jan--f) |
-| v3.9 | 2025-12-18 | **volunteer welcome** |
+| v3.9 | 2025-12-18 | Bryan Boreham (GitHub: @bboreham) |
+| v3.10 | 2026-02-05 | **volunteer welcome** |

 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
VERSION: 2 changes

@@ -1 +1 @@
-3.7.3
+3.9.1
cmd/prometheus/features_test.go (new file): 125 changes

@@ -0,0 +1,125 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/util/testutil"
+)
+
+var updateFeatures = flag.Bool("update-features", false, "update features.json golden file")
+
+func TestFeaturesAPI(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping test in short mode.")
+	}
+	t.Parallel()
+
+	tmpDir := t.TempDir()
+	configFile := filepath.Join(tmpDir, "prometheus.yml")
+	require.NoError(t, os.WriteFile(configFile, []byte{}, 0o644))
+
+	port := testutil.RandomUnprivilegedPort(t)
+	prom := prometheusCommandWithLogging(
+		t,
+		configFile,
+		port,
+		fmt.Sprintf("--storage.tsdb.path=%s", tmpDir),
+	)
+	require.NoError(t, prom.Start())
+
+	baseURL := fmt.Sprintf("http://127.0.0.1:%d", port)
+
+	// Wait for Prometheus to be ready.
+	require.Eventually(t, func() bool {
+		resp, err := http.Get(baseURL + "/-/ready")
+		if err != nil {
+			return false
+		}
+		defer resp.Body.Close()
+		return resp.StatusCode == http.StatusOK
+	}, 10*time.Second, 100*time.Millisecond, "Prometheus didn't become ready in time")
+
+	// Fetch features from the API.
+	resp, err := http.Get(baseURL + "/api/v1/features")
+	require.NoError(t, err)
+	defer resp.Body.Close()
+	require.Equal(t, http.StatusOK, resp.StatusCode)
+
+	body, err := io.ReadAll(resp.Body)
+	require.NoError(t, err)
+
+	// Parse API response.
+	var apiResponse struct {
+		Status string                     `json:"status"`
+		Data   map[string]map[string]bool `json:"data"`
+	}
+	require.NoError(t, json.Unmarshal(body, &apiResponse))
+	require.Equal(t, "success", apiResponse.Status)
+
+	goldenPath := filepath.Join("testdata", "features.json")
+
+	// If update flag is set, write the current features to the golden file.
+	if *updateFeatures {
+		var buf bytes.Buffer
+		encoder := json.NewEncoder(&buf)
+		encoder.SetEscapeHTML(false)
+		encoder.SetIndent("", "  ")
+		require.NoError(t, encoder.Encode(apiResponse.Data))
+		// Ensure testdata directory exists.
+		require.NoError(t, os.MkdirAll(filepath.Dir(goldenPath), 0o755))
+		require.NoError(t, os.WriteFile(goldenPath, buf.Bytes(), 0o644))
+		t.Logf("Updated golden file: %s", goldenPath)
+		return
+	}
+
+	// Load golden file.
+	goldenData, err := os.ReadFile(goldenPath)
+	require.NoError(t, err, "Failed to read golden file %s. Run 'make update-features-testdata' to generate it.", goldenPath)
+
+	var expectedFeatures map[string]map[string]bool
+	require.NoError(t, json.Unmarshal(goldenData, &expectedFeatures))
+
+	// The labels implementation depends on build tags (stringlabels, slicelabels, or dedupelabels).
+	// We need to update the expected features to match the current build.
+	if prometheusFeatures, ok := expectedFeatures["prometheus"]; ok {
+		// Remove all label implementation features from expected.
+		delete(prometheusFeatures, "stringlabels")
+		delete(prometheusFeatures, "slicelabels")
+		delete(prometheusFeatures, "dedupelabels")
+		// Add the current implementation.
+		if actualPrometheus, ok := apiResponse.Data["prometheus"]; ok {
+			for _, impl := range []string{"stringlabels", "slicelabels", "dedupelabels"} {
+				if actualPrometheus[impl] {
+					prometheusFeatures[impl] = true
+				}
+			}
+		}
+	}
+
+	// Compare the features data with the golden file.
+	require.Equal(t, expectedFeatures, apiResponse.Data, "Features mismatch. Run 'make update-features-testdata' to update the golden file.")
+}
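For reference, a small client of the new `/api/v1/features` endpoint can reuse exactly the response shape the test above decodes (`status` plus a `map[string]map[string]bool` under `data`). The sketch below assumes a local Prometheus on the default port and is illustrative only:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Endpoint from the diff; host/port are assumptions for the example.
	resp, err := http.Get("http://127.0.0.1:9090/api/v1/features")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var apiResponse struct {
		Status string                     `json:"status"`
		Data   map[string]map[string]bool `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&apiResponse); err != nil {
		panic(err)
	}
	// Print every advertised feature, e.g. "prometheus/agent_mode = false".
	for group, feats := range apiResponse.Data {
		for name, enabled := range feats {
			fmt.Printf("%s/%s = %v\n", group, name, enabled)
		}
	}
}
```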
@ -1,4 +1,4 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Copyright The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
|
@ -16,6 +16,7 @@ package main
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
|
|
@ -72,11 +73,13 @@ import (
|
|||
"github.com/prometheus/prometheus/scrape"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
"github.com/prometheus/prometheus/storage/remote"
|
||||
"github.com/prometheus/prometheus/template"
|
||||
"github.com/prometheus/prometheus/tracing"
|
||||
"github.com/prometheus/prometheus/tsdb"
|
||||
"github.com/prometheus/prometheus/tsdb/agent"
|
||||
"github.com/prometheus/prometheus/util/compression"
|
||||
"github.com/prometheus/prometheus/util/documentcli"
|
||||
"github.com/prometheus/prometheus/util/features"
|
||||
"github.com/prometheus/prometheus/util/logging"
|
||||
"github.com/prometheus/prometheus/util/notifications"
|
||||
prom_runtime "github.com/prometheus/prometheus/util/runtime"
|
||||
|
|
@ -230,11 +233,14 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
|
|||
c.tsdb.EnableMemorySnapshotOnShutdown = true
|
||||
logger.Info("Experimental memory snapshot on shutdown enabled")
|
||||
case "extra-scrape-metrics":
|
||||
c.scrape.ExtraMetrics = true
|
||||
logger.Info("Experimental additional scrape metrics enabled")
|
||||
t := true
|
||||
config.DefaultConfig.GlobalConfig.ExtraScrapeMetrics = &t
|
||||
config.DefaultGlobalConfig.ExtraScrapeMetrics = &t
|
||||
logger.Warn("This option for --enable-feature is being phased out. It currently changes the default for the extra_scrape_metrics config setting to true, but will become a no-op in a future version. Stop using this option and set extra_scrape_metrics in the config instead.", "option", o)
|
||||
case "metadata-wal-records":
|
||||
c.scrape.AppendMetadata = true
|
||||
c.web.AppendMetadata = true
|
||||
features.Enable(features.TSDB, "metadata_wal_records")
|
||||
logger.Info("Experimental metadata records in WAL enabled")
|
||||
case "promql-per-step-stats":
|
||||
c.enablePerStepStats = true
|
||||
|
|
@ -261,6 +267,7 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
|
|||
case "created-timestamp-zero-ingestion":
|
||||
c.scrape.EnableStartTimestampZeroIngestion = true
|
||||
c.web.STZeroIngestionEnabled = true
|
||||
c.agent.EnableSTAsZeroSample = true
|
||||
// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
|
||||
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
|
||||
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
|
||||
|
|
@@ -341,10 +348,14 @@ func main() {
        Registerer: prometheus.DefaultRegisterer,
    },
    web: web.Options{
-       Registerer: prometheus.DefaultRegisterer,
-       Gatherer:   prometheus.DefaultGatherer,
+       Registerer:      prometheus.DefaultRegisterer,
+       Gatherer:        prometheus.DefaultGatherer,
+       FeatureRegistry: features.DefaultRegistry,
    },
    promslogConfig: promslog.Config{},
+   scrape: scrape.Options{
+       FeatureRegistry: features.DefaultRegistry,
+   },
 }

 a := kingpin.New(filepath.Base(os.Args[0]), "The Prometheus monitoring server").UsageWriter(os.Stdout)

@@ -456,8 +467,9 @@ func main() {
    Default("true").Hidden().BoolVar(&cfg.tsdb.EnableOverlappingCompaction)

 var (
-   tsdbWALCompression     bool
-   tsdbWALCompressionType string
+   tsdbWALCompression       bool
+   tsdbWALCompressionType   string
+   tsdbDelayCompactFilePath string
 )
 serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL. If false, the --storage.tsdb.wal-compression-type flag is ignored.").
    Hidden().Default("true").BoolVar(&tsdbWALCompression)

@@ -474,6 +486,12 @@ func main() {
 serverOnlyFlag(a, "storage.tsdb.delayed-compaction.max-percent", "Sets the upper limit for the random compaction delay, specified as a percentage of the head chunk range. 100 means the compaction can be delayed by up to the entire head chunk range. Only effective when the delayed-compaction feature flag is enabled.").
    Default("10").Hidden().IntVar(&cfg.tsdb.CompactionDelayMaxPercent)

+serverOnlyFlag(a, "storage.tsdb.delay-compact-file.path", "Path to a JSON file with uploaded TSDB blocks, e.g. a Thanos shipper meta file. If set, TSDB will only compact level-1 blocks that are marked as uploaded in that file, improving external storage integrations, e.g. with the Thanos sidecar. Compactions of blocks above level 1 won't be delayed.").
+   Default("").StringVar(&tsdbDelayCompactFilePath)
+
+serverOnlyFlag(a, "storage.tsdb.block-reload-interval", "Interval at which to check for new or removed blocks in storage. Users who manually backfill or drop blocks must wait up to this duration before changes become available.").
+   Default("1m").Hidden().SetValue(&cfg.tsdb.BlockReloadInterval)
+
 agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage.").
    Default("data-agent/").StringVar(&cfg.agentStoragePath)
@@ -665,6 +683,10 @@ func main() {
        }
        cfg.tsdb.MaxExemplars = cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars
    }
+   if cfg.tsdb.BlockReloadInterval < model.Duration(1*time.Second) {
+       logger.Warn("The option --storage.tsdb.block-reload-interval is set to a value less than 1s. Setting it to 1s to avoid overload.")
+       cfg.tsdb.BlockReloadInterval = model.Duration(1 * time.Second)
+   }
    if cfgFile.StorageConfig.TSDBConfig != nil {
        cfg.tsdb.OutOfOrderTimeWindow = cfgFile.StorageConfig.TSDBConfig.OutOfOrderTimeWindow
        cfg.tsdb.StaleSeriesCompactionThreshold = cfgFile.StorageConfig.TSDBConfig.StaleSeriesCompactionThreshold

@@ -704,6 +726,12 @@ func main() {
        }
    }

+   if tsdbDelayCompactFilePath != "" {
+       logger.Info("Compactions will be delayed for blocks not marked as uploaded in the file tracking uploads", "path", tsdbDelayCompactFilePath)
+       cfg.tsdb.BlockCompactionExcludeFunc = exludeBlocksPendingUpload(
+           logger, tsdbDelayCompactFilePath)
+   }
+
    // Now that the validity of the config is established, set the config
    // success metrics accordingly, although the config isn't really loaded
    // yet. This will happen later (including setting these metrics again),
@@ -787,6 +815,12 @@ func main() {
    "vm_limits", prom_runtime.VMLimits(),
 )

+features.Set(features.Prometheus, "agent_mode", agentMode)
+features.Set(features.Prometheus, "server_mode", !agentMode)
+features.Set(features.Prometheus, "auto_reload_config", cfg.enableAutoReload)
+features.Enable(features.Prometheus, labels.ImplementationName)
+template.RegisterFeatures(features.DefaultRegistry)
+
 var (
    localStorage = &readyStorage{stats: tsdb.NewDBStats()}
    scraper      = &readyScrapeManager{}

@@ -823,13 +857,13 @@ func main() {
        os.Exit(1)
    }

-   discoveryManagerScrape = discovery.NewManager(ctxScrape, logger.With("component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"))
+   discoveryManagerScrape = discovery.NewManager(ctxScrape, logger.With("component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"), discovery.FeatureRegistry(features.DefaultRegistry))
    if discoveryManagerScrape == nil {
        logger.Error("failed to create a discovery manager scrape")
        os.Exit(1)
    }

-   discoveryManagerNotify = discovery.NewManager(ctxNotify, logger.With("component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"))
+   discoveryManagerNotify = discovery.NewManager(ctxNotify, logger.With("component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"), discovery.FeatureRegistry(features.DefaultRegistry))
    if discoveryManagerNotify == nil {
        logger.Error("failed to create a discovery manager notify")
        os.Exit(1)
@@ -870,6 +904,7 @@ func main() {
    EnablePerStepStats:       cfg.enablePerStepStats,
    EnableDelayedNameRemoval: cfg.promqlEnableDelayedNameRemoval,
    EnableTypeAndUnitLabels:  cfg.scrape.EnableTypeAndUnitLabels,
+   FeatureRegistry:          features.DefaultRegistry,
 }

 queryEngine = promql.NewEngine(opts)

@@ -892,6 +927,7 @@ func main() {
        DefaultRuleQueryOffset: func() time.Duration {
            return time.Duration(cfgFile.GlobalConfig.RuleQueryOffset)
        },
+       FeatureRegistry: features.DefaultRegistry,
    })
 }

@@ -1328,6 +1364,7 @@ func main() {
    "RetentionDuration", cfg.tsdb.RetentionDuration,
    "WALSegmentSize", cfg.tsdb.WALSegmentSize,
    "WALCompressionType", cfg.tsdb.WALCompressionType,
+   "BlockReloadInterval", cfg.tsdb.BlockReloadInterval,
 )

 startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000)

@@ -1384,6 +1421,7 @@ func main() {
    "MinWALTime", cfg.agent.MinWALTime,
    "MaxWALTime", cfg.agent.MaxWALTime,
    "OutOfOrderTimeWindow", cfg.agent.OutOfOrderTimeWindow,
+   "EnableSTAsZeroSample", cfg.agent.EnableSTAsZeroSample,
 )

 localStorage.Set(db, 0)
@@ -1884,6 +1922,8 @@ type tsdbOptions struct {
    CompactionDelayMaxPercent      int
    EnableOverlappingCompaction    bool
    UseUncachedIO                  bool
+   BlockCompactionExcludeFunc     tsdb.BlockExcludeFilterFunc
+   BlockReloadInterval            model.Duration
    StaleSeriesCompactionThreshold float64
 }

@@ -1908,6 +1948,9 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
    CompactionDelayMaxPercent:      opts.CompactionDelayMaxPercent,
    EnableOverlappingCompaction:    opts.EnableOverlappingCompaction,
    UseUncachedIO:                  opts.UseUncachedIO,
+   BlockCompactionExcludeFunc:     opts.BlockCompactionExcludeFunc,
+   BlockReloadInterval:            time.Duration(opts.BlockReloadInterval),
+   FeatureRegistry:                features.DefaultRegistry,
    StaleSeriesCompactionThreshold: opts.StaleSeriesCompactionThreshold,
 }
 }

@@ -1921,7 +1964,8 @@ type agentOptions struct {
    TruncateFrequency      model.Duration
    MinWALTime, MaxWALTime model.Duration
    NoLockfile             bool
-   OutOfOrderTimeWindow   int64
+   OutOfOrderTimeWindow   int64 // TODO(bwplotka): Unused option, fix it or remove.
+   EnableSTAsZeroSample   bool
 }

 func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Options {

@@ -1937,6 +1981,7 @@ func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Option
    MaxWALTime:           durationToInt64Millis(time.Duration(opts.MaxWALTime)),
    NoLockfile:           opts.NoLockfile,
    OutOfOrderTimeWindow: outOfOrderTimeWindow,
+   EnableSTAsZeroSample: opts.EnableSTAsZeroSample,
 }
 }
@@ -1973,3 +2018,48 @@ func (p *rwProtoMsgFlagParser) Set(opt string) error {
    *p.msgs = append(*p.msgs, t)
    return nil
 }
+
+type UploadMeta struct {
+   Uploaded []string `json:"uploaded"`
+}
+
+// Cache the last read UploadMeta.
+var (
+   tsdbDelayCompactLastMeta     *UploadMeta // The content of uploadMetaPath from the last time we opened it.
+   tsdbDelayCompactLastMetaTime time.Time   // The timestamp at which we last stored tsdbDelayCompactLastMeta.
+)
+
+func exludeBlocksPendingUpload(logger *slog.Logger, uploadMetaPath string) tsdb.BlockExcludeFilterFunc {
+   return func(meta *tsdb.BlockMeta) bool {
+       if meta.Compaction.Level > 1 {
+           // Blocks with level > 1 are assumed to be not uploaded, thus no need to delay those.
+           // See the `storage.tsdb.delay-compact-file.path` flag for details.
+           return false
+       }
+
+       // If we have cached uploadMetaPath content that was stored within the last minute, then use it.
+       if tsdbDelayCompactLastMeta != nil &&
+           tsdbDelayCompactLastMetaTime.After(time.Now().UTC().Add(time.Minute*-1)) {
+           return !slices.Contains(tsdbDelayCompactLastMeta.Uploaded, meta.ULID.String())
+       }
+
+       // We don't have anything cached, or it's older than a minute. Try to open and parse the uploadMetaPath file.
+       data, err := os.ReadFile(uploadMetaPath)
+       if err != nil {
+           logger.Warn("cannot open TSDB upload meta file", slog.String("path", uploadMetaPath), slog.Any("err", err))
+           return false
+       }
+
+       var uploadMeta UploadMeta
+       if err = json.Unmarshal(data, &uploadMeta); err != nil {
+           logger.Warn("cannot parse TSDB upload meta file", slog.String("path", uploadMetaPath), slog.Any("err", err))
+           return false
+       }
+
+       // We have parsed the uploadMetaPath file, cache it.
+       tsdbDelayCompactLastMeta = &uploadMeta
+       tsdbDelayCompactLastMetaTime = time.Now().UTC()
+
+       return !slices.Contains(uploadMeta.Uploaded, meta.ULID.String())
+   }
+}
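For context, the upload meta file read above follows the layout implied by the UploadMeta struct: a single JSON object with an "uploaded" list of block ULIDs, matching the Thanos shipper meta format. A minimal sketch (the ULIDs are placeholders, not real blocks):

    {
      "uploaded": [
        "01HXYZJ0000000000000000000",
        "01HXYZJ0000000000000000001"
      ]
    }

Level-1 blocks whose ULIDs appear in this list stay eligible for compaction; other level-1 blocks are excluded until the file lists them, with the file re-read at most once per minute.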
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -979,6 +979,7 @@ remote_write:
 // | dataPending   | 0   | 1228.8 |
 // | desiredShards | 0.6 | 369.2  |.
 func TestRemoteWrite_ReshardingWithoutDeadlock(t *testing.T) {
+   t.Skip("flaky test, see https://github.com/prometheus/prometheus/issues/17489")
    t.Parallel()

    tmpDir := t.TempDir()

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
cmd/prometheus/testdata/features.json (vendored, new file, 249 lines)

@@ -0,0 +1,249 @@
{
  "api": {
    "admin": false,
    "exclude_alerts": true,
    "label_values_match": true,
    "lifecycle": false,
    "otlp_write_receiver": false,
    "query_stats": true,
    "query_warnings": true,
    "remote_write_receiver": false,
    "time_range_labels": true,
    "time_range_series": true
  },
  "otlp_receiver": {
    "delta_conversion": false,
    "native_delta_ingestion": false
  },
  "prometheus": {
    "agent_mode": false,
    "auto_reload_config": false,
    "server_mode": true,
    "stringlabels": true
  },
  "promql": {
    "anchored": false,
    "at_modifier": true,
    "bool": true,
    "by": true,
    "delayed_name_removal": false,
    "duration_expr": false,
    "group_left": true,
    "group_right": true,
    "ignoring": true,
    "negative_offset": true,
    "offset": true,
    "on": true,
    "per_query_lookback_delta": true,
    "per_step_stats": false,
    "smoothed": false,
    "subqueries": true,
    "type_and_unit_labels": false,
    "without": true
  },
  "promql_functions": {
    "abs": true,
    "absent": true,
    "absent_over_time": true,
    "acos": true,
    "acosh": true,
    "asin": true,
    "asinh": true,
    "atan": true,
    "atanh": true,
    "avg_over_time": true,
    "ceil": true,
    "changes": true,
    "clamp": true,
    "clamp_max": true,
    "clamp_min": true,
    "cos": true,
    "cosh": true,
    "count_over_time": true,
    "day_of_month": true,
    "day_of_week": true,
    "day_of_year": true,
    "days_in_month": true,
    "deg": true,
    "delta": true,
    "deriv": true,
    "double_exponential_smoothing": false,
    "exp": true,
    "first_over_time": false,
    "floor": true,
    "histogram_avg": true,
    "histogram_count": true,
    "histogram_fraction": true,
    "histogram_quantile": true,
    "histogram_stddev": true,
    "histogram_stdvar": true,
    "histogram_sum": true,
    "hour": true,
    "idelta": true,
    "increase": true,
    "info": false,
    "irate": true,
    "label_join": true,
    "label_replace": true,
    "last_over_time": true,
    "ln": true,
    "log10": true,
    "log2": true,
    "mad_over_time": false,
    "max_over_time": true,
    "min_over_time": true,
    "minute": true,
    "month": true,
    "pi": true,
    "predict_linear": true,
    "present_over_time": true,
    "quantile_over_time": true,
    "rad": true,
    "rate": true,
    "resets": true,
    "round": true,
    "scalar": true,
    "sgn": true,
    "sin": true,
    "sinh": true,
    "sort": true,
    "sort_by_label": false,
    "sort_by_label_desc": false,
    "sort_desc": true,
    "sqrt": true,
    "stddev_over_time": true,
    "stdvar_over_time": true,
    "sum_over_time": true,
    "tan": true,
    "tanh": true,
    "time": true,
    "timestamp": true,
    "ts_of_first_over_time": false,
    "ts_of_last_over_time": false,
    "ts_of_max_over_time": false,
    "ts_of_min_over_time": false,
    "vector": true,
    "year": true
  },
  "promql_operators": {
    "!=": true,
    "!~": true,
    "%": true,
    "*": true,
    "+": true,
    "-": true,
    "/": true,
    "<": true,
    "<=": true,
    "==": true,
    "=~": true,
    ">": true,
    ">=": true,
    "@": true,
    "^": true,
    "and": true,
    "atan2": true,
    "avg": true,
    "bottomk": true,
    "count": true,
    "count_values": true,
    "group": true,
    "limit_ratio": false,
    "limitk": false,
    "max": true,
    "min": true,
    "or": true,
    "quantile": true,
    "stddev": true,
    "stdvar": true,
    "sum": true,
    "topk": true,
    "unless": true
  },
  "rules": {
    "concurrent_rule_eval": false,
    "keep_firing_for": true,
    "query_offset": true
  },
  "scrape": {
    "extra_scrape_metrics": true,
    "start_timestamp_zero_ingestion": false,
    "type_and_unit_labels": false
  },
  "service_discovery_providers": {
    "aws": true,
    "azure": true,
    "consul": true,
    "digitalocean": true,
    "dns": true,
    "docker": true,
    "dockerswarm": true,
    "ec2": true,
    "ecs": true,
    "eureka": true,
    "file": true,
    "gce": true,
    "hetzner": true,
    "http": true,
    "ionos": true,
    "kubernetes": true,
    "kuma": true,
    "lightsail": true,
    "linode": true,
    "marathon": true,
    "nerve": true,
    "nomad": true,
    "openstack": true,
    "ovhcloud": true,
    "puppetdb": true,
    "scaleway": true,
    "serverset": true,
    "stackit": true,
    "static": true,
    "triton": true,
    "uyuni": true,
    "vultr": true
  },
  "templating_functions": {
    "args": true,
    "externalURL": true,
    "first": true,
    "graphLink": true,
    "humanize": true,
    "humanize1024": true,
    "humanizeDuration": true,
    "humanizePercentage": true,
    "humanizeTimestamp": true,
    "label": true,
    "match": true,
    "now": true,
    "parseDuration": true,
    "pathPrefix": true,
    "query": true,
    "reReplaceAll": true,
    "safeHtml": true,
    "sortByLabel": true,
    "stripDomain": true,
    "stripPort": true,
    "strvalue": true,
    "tableLink": true,
    "title": true,
    "toDuration": true,
    "toLower": true,
    "toTime": true,
    "toUpper": true,
    "urlQueryEscape": true,
    "value": true
  },
  "tsdb": {
    "delayed_compaction": false,
    "exemplar_storage": false,
    "isolation": true,
    "native_histograms": true,
    "use_uncached_io": false
  },
  "ui": {
    "ui_v2": false,
    "ui_v3": true
  }
}
cmd/prometheus/upload_test.go (new file, 144 lines)

@@ -0,0 +1,144 @@
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
    "encoding/json"
    "os"
    "path"
    "testing"
    "time"

    "github.com/oklog/ulid/v2"
    "github.com/prometheus/common/promslog"
    "github.com/stretchr/testify/require"

    "github.com/prometheus/prometheus/tsdb"
)

func TestBlockExcludeFilter(t *testing.T) {
    for _, test := range []struct {
        summary    string         // Description of the test case.
        uploaded   []ulid.ULID    // List of blocks marked as uploaded inside the shipper file.
        setupFn    func(string)   // Optional function to run before the test, takes the path to the shipper file.
        meta       tsdb.BlockMeta // Meta of the block we're checking.
        isExcluded bool           // What we expect to be returned.
    }{
        {
            summary: "missing file",
            setupFn: func(path string) {
                // Delete shipper file to test error handling.
                os.Remove(path)
            },
            meta:       tsdb.BlockMeta{ULID: ulid.MustNew(1, nil)},
            isExcluded: false,
        },
        {
            summary: "corrupt file",
            setupFn: func(path string) {
                // Overwrite the shipper file content with invalid JSON.
                os.WriteFile(path, []byte("{["), 0o644)
            },
            meta:       tsdb.BlockMeta{ULID: ulid.MustNew(1, nil)},
            isExcluded: false,
        },
        {
            summary:    "empty uploaded list",
            uploaded:   []ulid.ULID{},
            meta:       tsdb.BlockMeta{ULID: ulid.MustNew(1, nil)},
            isExcluded: true,
        },
        {
            summary:  "block meta not present in the uploaded list, level=1",
            uploaded: []ulid.ULID{ulid.MustNew(1, nil), ulid.MustNew(3, nil)},
            meta: tsdb.BlockMeta{
                ULID:       ulid.MustNew(2, nil),
                Compaction: tsdb.BlockMetaCompaction{Level: 1},
            },
            isExcluded: true,
        },
        {
            summary:  "block meta not present in the uploaded list, level=2",
            uploaded: []ulid.ULID{ulid.MustNew(1, nil), ulid.MustNew(3, nil)},
            meta: tsdb.BlockMeta{
                ULID:       ulid.MustNew(2, nil),
                Compaction: tsdb.BlockMetaCompaction{Level: 2},
            },
            isExcluded: false,
        },
        {
            summary:    "block meta present in the uploaded list",
            uploaded:   []ulid.ULID{ulid.MustNew(1, nil), ulid.MustNew(2, nil), ulid.MustNew(3, nil)},
            meta:       tsdb.BlockMeta{ULID: ulid.MustNew(2, nil)},
            isExcluded: false,
        },
        {
            summary: "don't read the file if there's valid cache",
            setupFn: func(path string) {
                // Remove the shipper file, cache should be used instead.
                require.NoError(t, os.Remove(path))
                // Set cached values.
                tsdbDelayCompactLastMeta = &UploadMeta{
                    Uploaded: []string{
                        ulid.MustNew(1, nil).String(),
                        ulid.MustNew(2, nil).String(),
                        ulid.MustNew(3, nil).String(),
                    },
                }
                tsdbDelayCompactLastMetaTime = time.Now().UTC().Add(time.Second * -1)
            },
            uploaded:   []ulid.ULID{},
            meta:       tsdb.BlockMeta{ULID: ulid.MustNew(2, nil)},
            isExcluded: false,
        },
        {
            summary: "read the file if there's cache but expired",
            setupFn: func(_ string) {
                // Set the cache but make it too old.
                tsdbDelayCompactLastMeta = &UploadMeta{
                    Uploaded: []string{},
                }
                tsdbDelayCompactLastMetaTime = time.Now().UTC().Add(time.Second * -61)
            },
            uploaded:   []ulid.ULID{ulid.MustNew(1, nil), ulid.MustNew(2, nil), ulid.MustNew(3, nil)},
            meta:       tsdb.BlockMeta{ULID: ulid.MustNew(2, nil)},
            isExcluded: false,
        },
    } {
        t.Run(test.summary, func(t *testing.T) {
            dir := t.TempDir()
            shipperPath := path.Join(dir, "shipper.json")

            uploaded := make([]string, 0, len(test.uploaded))
            for _, ul := range test.uploaded {
                uploaded = append(uploaded, ul.String())
            }
            ts := UploadMeta{Uploaded: uploaded}
            data, err := json.Marshal(ts)
            require.NoError(t, err, "failed to marshal upload meta file")
            require.NoError(t, os.WriteFile(shipperPath, data, 0o644), "failed to write upload meta file")

            tsdbDelayCompactLastMeta = nil
            tsdbDelayCompactLastMetaTime = time.Time{}

            if test.setupFn != nil {
                test.setupFn(shipperPath)
            }

            fn := exludeBlocksPendingUpload(promslog.NewNopLogger(), shipperPath)
            isExcluded := fn(&test.meta)
            require.Equal(t, test.isExcluded, isExcluded)
        })
    }
}
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -162,7 +162,11 @@ func main() {
 checkRulesIgnoreUnknownFields := checkRulesCmd.Flag("ignore-unknown-fields", "Ignore unknown fields in the rule files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default.").Default("false").Bool()

 checkMetricsCmd := checkCmd.Command("metrics", checkMetricsUsage)
-checkMetricsExtended := checkCmd.Flag("extended", "Print extended information related to the cardinality of the metrics.").Bool()
+checkMetricsExtended := checkMetricsCmd.Flag("extended", "Print extended information related to the cardinality of the metrics.").Bool()
+checkMetricsLint := checkMetricsCmd.Flag(
+   "lint",
+   "Linting checks to apply for metrics. Available options are: all, none. Use --lint=none to disable metrics linting.",
+).Default(lintOptionAll).String()
 agentMode := checkConfigCmd.Flag("agent", "Check config file for Prometheus in Agent mode.").Bool()

 queryCmd := app.Command("query", "Run query against a Prometheus server.")
@@ -257,12 +261,13 @@ func main() {
 listHumanReadable := tsdbListCmd.Flag("human-readable", "Print human readable values.").Short('r').Bool()
 listPath := tsdbListCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()

-tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.")
+tsdbDumpCmd := tsdbCmd.Command("dump", "Dump data (series+samples or optionally just series) from a TSDB.")
 dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
 dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end.").String()
 dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
 dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
 dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
+dumpFormat := tsdbDumpCmd.Flag("format", "Output format of the dump (prom (default) or seriesjson).").Default("prom").Enum("prom", "seriesjson")

 tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.")
 dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
@@ -374,7 +379,7 @@ func main() {
    os.Exit(CheckRules(newRulesLintConfig(*checkRulesLint, *checkRulesLintFatal, *checkRulesIgnoreUnknownFields, model.UTF8Validation), *ruleFiles...))

 case checkMetricsCmd.FullCommand():
-   os.Exit(CheckMetrics(*checkMetricsExtended))
+   os.Exit(CheckMetrics(*checkMetricsExtended, *checkMetricsLint))

 case pushMetricsCmd.FullCommand():
    os.Exit(PushMetrics(remoteWriteURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *pushMetricsProtoMsg, *pushMetricsLabels, *metricFiles...))
@@ -428,9 +433,14 @@ func main() {
    os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))

 case tsdbDumpCmd.FullCommand():
-   os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpSandboxDirRoot, *dumpMinTime, *dumpMaxTime, *dumpMatch, formatSeriesSet)))
+   format := formatSeriesSet
+   if *dumpFormat == "seriesjson" {
+       format = formatSeriesSetLabelsToJSON
+   }
+   os.Exit(checkErr(dumpTSDBData(ctx, *dumpPath, *dumpSandboxDirRoot, *dumpMinTime, *dumpMaxTime, *dumpMatch, format)))

 case tsdbDumpOpenMetricsCmd.FullCommand():
-   os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
+   os.Exit(checkErr(dumpTSDBData(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
 // TODO(aSquare14): Work on adding support for custom block size.
 case openMetricsImportCmd.FullCommand():
    os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration, *openMetricsLabels))
@@ -1012,36 +1022,53 @@ func ruleMetric(rule rulefmt.Rule) string {
 }

 var checkMetricsUsage = strings.TrimSpace(`
-Pass Prometheus metrics over stdin to lint them for consistency and correctness.
+Pass Prometheus metrics over stdin to lint them for consistency and correctness, and optionally perform cardinality analysis.

 examples:

 $ cat metrics.prom | promtool check metrics

-$ curl -s http://localhost:9090/metrics | promtool check metrics
+$ curl -s http://localhost:9090/metrics | promtool check metrics --extended
+
+$ curl -s http://localhost:9100/metrics | promtool check metrics --extended --lint=none
 `)

 // CheckMetrics performs a linting pass on input metrics.
-func CheckMetrics(extended bool) int {
-   var buf bytes.Buffer
-   tee := io.TeeReader(os.Stdin, &buf)
-   l := promlint.New(tee)
-   problems, err := l.Lint()
-   if err != nil {
-       fmt.Fprintln(os.Stderr, "error while linting:", err)
+func CheckMetrics(extended bool, lint string) int {
+   // Validate that at least one feature is enabled.
+   if !extended && lint == lintOptionNone {
+       fmt.Fprintln(os.Stderr, "error: at least one of --extended or linting must be enabled")
+       fmt.Fprintln(os.Stderr, "Use --extended for cardinality analysis, or remove --lint=none to enable linting")
        return failureExitCode
    }

-   for _, p := range problems {
-       fmt.Fprintln(os.Stderr, p.Metric, p.Text)
+   var buf bytes.Buffer
+   var (
+       problems []promlint.Problem
+       reader   io.Reader
+       err      error
+   )
+
+   if lint != lintOptionNone {
+       tee := io.TeeReader(os.Stdin, &buf)
+       l := promlint.New(tee)
+       problems, err = l.Lint()
+       if err != nil {
+           fmt.Fprintln(os.Stderr, "error while linting:", err)
+           return failureExitCode
+       }
+       for _, p := range problems {
+           fmt.Fprintln(os.Stderr, p.Metric, p.Text)
+       }
+       reader = &buf
+   } else {
+       reader = os.Stdin
    }

-   if len(problems) > 0 {
-       return lintErrExitCode
-   }
+   hasLintProblems := len(problems) > 0

    if extended {
-       stats, total, err := checkMetricsExtended(&buf)
+       stats, total, err := checkMetricsExtended(reader)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return failureExitCode

@@ -1055,6 +1082,10 @@ func CheckMetrics(extended bool) int {
        w.Flush()
    }

+   if hasLintProblems {
+       return lintErrExitCode
+   }
+
    return successExitCode
 }
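A short sketch of the resulting CLI behavior (the endpoints are illustrative):

    # Lint only (previous behavior, still the default):
    $ curl -s http://localhost:9090/metrics | promtool check metrics

    # Cardinality analysis only, linting switched off:
    $ curl -s http://localhost:9090/metrics | promtool check metrics --extended --lint=none

    # Disabling both (--lint=none without --extended) is rejected with failureExitCode,
    # as validated at the top of CheckMetrics.

Note that lint problems now surface as lintErrExitCode only after the cardinality table is printed, so --extended output is no longer lost when linting fails.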
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -18,6 +18,7 @@ import (
    "context"
    "errors"
    "fmt"
+   "io"
    "net/http"
    "net/http/httptest"
    "net/url"
@@ -402,6 +403,99 @@ func TestCheckMetricsExtended(t *testing.T) {
    }, stats)
 }

+func TestCheckMetricsLintOptions(t *testing.T) {
+   if runtime.GOOS == "windows" {
+       t.Skip("Skipping on windows")
+   }
+
+   const testMetrics = `
+# HELP testMetric_CamelCase A test metric with camelCase
+# TYPE testMetric_CamelCase gauge
+testMetric_CamelCase{label="value1"} 1
+`
+
+   tests := []struct {
+       name        string
+       lint        string
+       extended    bool
+       wantErrCode int
+       wantLint    bool
+       wantCard    bool
+   }{
+       {
+           name:        "default_all_with_extended",
+           lint:        lintOptionAll,
+           extended:    true,
+           wantErrCode: lintErrExitCode,
+           wantLint:    true,
+           wantCard:    true,
+       },
+       {
+           name:        "lint_none_with_extended",
+           lint:        lintOptionNone,
+           extended:    true,
+           wantErrCode: successExitCode,
+           wantLint:    false,
+           wantCard:    true,
+       },
+       {
+           name:        "both_disabled_fails",
+           lint:        lintOptionNone,
+           extended:    false,
+           wantErrCode: failureExitCode,
+           wantLint:    false,
+           wantCard:    false,
+       },
+   }
+
+   for _, tt := range tests {
+       t.Run(tt.name, func(t *testing.T) {
+           r, w, err := os.Pipe()
+           require.NoError(t, err)
+           _, err = w.WriteString(testMetrics)
+           require.NoError(t, err)
+           w.Close()
+
+           oldStdin := os.Stdin
+           os.Stdin = r
+           defer func() { os.Stdin = oldStdin }()
+
+           oldStdout := os.Stdout
+           oldStderr := os.Stderr
+           rOut, wOut, err := os.Pipe()
+           require.NoError(t, err)
+           rErr, wErr, err := os.Pipe()
+           require.NoError(t, err)
+           os.Stdout = wOut
+           os.Stderr = wErr
+
+           code := CheckMetrics(tt.extended, tt.lint)
+
+           wOut.Close()
+           wErr.Close()
+           os.Stdout = oldStdout
+           os.Stderr = oldStderr
+
+           var outBuf, errBuf bytes.Buffer
+           _, _ = io.Copy(&outBuf, rOut)
+           _, _ = io.Copy(&errBuf, rErr)
+
+           require.Equal(t, tt.wantErrCode, code)
+           if tt.wantLint {
+               require.Contains(t, errBuf.String(), "testMetric_CamelCase")
+           } else {
+               require.NotContains(t, errBuf.String(), "testMetric_CamelCase")
+           }
+
+           if tt.wantCard {
+               require.Contains(t, outBuf.String(), "Cardinality")
+           } else {
+               require.NotContains(t, outBuf.String(), "Cardinality")
+           }
+       })
+   }
+}
+
 func TestExitCodes(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping test in short mode.")
@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2025 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2023 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
cmd/promtool/testdata/dump-series-1.prom (vendored, new file, 3 lines)

@@ -0,0 +1,3 @@
{"__name__":"heavy_metric","foo":"bar"}
{"__name__":"heavy_metric","foo":"foo"}
{"__name__":"metric","baz":"abc","foo":"bar"}

cmd/promtool/testdata/dump-series-2.prom (vendored, new file, 2 lines)

@@ -0,0 +1,2 @@
{"__name__":"heavy_metric","foo":"foo"}
{"__name__":"metric","baz":"abc","foo":"bar"}

cmd/promtool/testdata/dump-series-3.prom (vendored, new file, 1 line)

@@ -0,0 +1 @@
{"__name__":"metric","baz":"abc","foo":"bar"}
cmd/promtool/testdata/start-time-test.yml (vendored, new file, 76 lines)

@@ -0,0 +1,76 @@
rule_files:
  - rules.yml

evaluation_interval: 1m

tests:
  # Test with default start_time (0 / Unix epoch).
  - name: default_start_time
    interval: 1m
    promql_expr_test:
      - expr: time()
        eval_time: 0m
        exp_samples:
          - value: 0
      - expr: time()
        eval_time: 5m
        exp_samples:
          - value: 300

  # Test with RFC3339 start_timestamp.
  - name: rfc3339_start_timestamp
    interval: 1m
    start_timestamp: "2024-01-01T00:00:00Z"
    promql_expr_test:
      - expr: time()
        eval_time: 0m
        exp_samples:
          - value: 1704067200
      - expr: time()
        eval_time: 5m
        exp_samples:
          - value: 1704067500

  # Test with Unix timestamp start_timestamp.
  - name: unix_timestamp_start_timestamp
    interval: 1m
    start_timestamp: 1609459200
    input_series:
      - series: test_metric
        values: "1 1 1"
    promql_expr_test:
      - expr: time()
        eval_time: 0m
        exp_samples:
          - value: 1609459200
      - expr: time()
        eval_time: 10m
        exp_samples:
          - value: 1609459800

  # Test that input series samples are correctly timestamped with custom start_timestamp.
  - name: samples_with_start_timestamp
    interval: 1m
    start_timestamp: "2024-01-01T00:00:00Z"
    input_series:
      - series: 'my_metric{label="test"}'
        values: "10+10x15"
    promql_expr_test:
      # Query at absolute timestamp (start_timestamp = 1704067200).
      - expr: my_metric@1704067200
        eval_time: 5m
        exp_samples:
          - labels: 'my_metric{label="test"}'
            value: 10
      # Query at 2 minutes after start_timestamp (1704067200 + 120 = 1704067320).
      - expr: my_metric@1704067320
        eval_time: 5m
        exp_samples:
          - labels: 'my_metric{label="test"}'
            value: 30
      # Verify timestamp() function returns the absolute timestamp.
      - expr: timestamp(my_metric)
        eval_time: 5m
        exp_samples:
          - labels: '{label="test"}'
            value: 1704067500
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -17,6 +17,7 @@ import (
    "bufio"
    "bytes"
    "context"
+   "encoding/json"
    "errors"
    "fmt"
    "io"
@@ -706,7 +707,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.

 type SeriesSetFormatter func(series storage.SeriesSet) error

-func dumpSamples(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
+func dumpTSDBData(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
    db, err := tsdb.OpenDBReadOnly(dbDir, sandboxDirRoot, nil)
    if err != nil {
        return err

@@ -794,6 +795,30 @@ func CondensedString(ls labels.Labels) string {
    return b.String()
 }

+func formatSeriesSetLabelsToJSON(ss storage.SeriesSet) error {
+   seriesCache := make(map[string]struct{})
+   for ss.Next() {
+       series := ss.At()
+       lbs := series.Labels()
+
+       b, err := json.Marshal(lbs)
+       if err != nil {
+           return err
+       }
+
+       if len(b) == 0 {
+           continue
+       }
+
+       s := string(b)
+       if _, ok := seriesCache[s]; !ok {
+           fmt.Println(s)
+           seriesCache[s] = struct{}{}
+       }
+   }
+   return nil
+}
+
 func formatSeriesSetOpenMetrics(ss storage.SeriesSet) error {
    for ss.Next() {
        series := ss.At()
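As a usage sketch (assuming a local TSDB directory; the output mirrors the dump-series testdata above, one JSON label set per unique series):

    $ promtool tsdb dump --format=seriesjson /path/to/data
    {"__name__":"heavy_metric","foo":"bar"}
    {"__name__":"heavy_metric","foo":"foo"}
    {"__name__":"metric","baz":"abc","foo":"bar"}

The default --format=prom keeps the existing samples dump unchanged.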
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -63,7 +63,7 @@ func getDumpedSamples(t *testing.T, databasePath, sandboxDirRoot string, mint, m
    r, w, _ := os.Pipe()
    os.Stdout = w

-   err := dumpSamples(
+   err := dumpTSDBData(
        context.Background(),
        databasePath,
        sandboxDirRoot,

@@ -106,13 +106,15 @@ func TestTSDBDump(t *testing.T) {
        sandboxDirRoot string
        match          []string
        expectedDump   string
+       expectedSeries string
    }{
        {
-           name:         "default match",
-           mint:         math.MinInt64,
-           maxt:         math.MaxInt64,
-           match:        []string{"{__name__=~'(?s:.*)'}"},
-           expectedDump: "testdata/dump-test-1.prom",
+           name:           "default match",
+           mint:           math.MinInt64,
+           maxt:           math.MaxInt64,
+           match:          []string{"{__name__=~'(?s:.*)'}"},
+           expectedDump:   "testdata/dump-test-1.prom",
+           expectedSeries: "testdata/dump-series-1.prom",
        },
        {
            name: "default match with sandbox dir root set",

@@ -121,41 +123,47 @@ func TestTSDBDump(t *testing.T) {
            sandboxDirRoot: t.TempDir(),
            match:          []string{"{__name__=~'(?s:.*)'}"},
            expectedDump:   "testdata/dump-test-1.prom",
+           expectedSeries: "testdata/dump-series-1.prom",
        },
        {
-           name:         "same matcher twice",
-           mint:         math.MinInt64,
-           maxt:         math.MaxInt64,
-           match:        []string{"{foo=~'.+'}", "{foo=~'.+'}"},
-           expectedDump: "testdata/dump-test-1.prom",
+           name:           "same matcher twice",
+           mint:           math.MinInt64,
+           maxt:           math.MaxInt64,
+           match:          []string{"{foo=~'.+'}", "{foo=~'.+'}"},
+           expectedDump:   "testdata/dump-test-1.prom",
+           expectedSeries: "testdata/dump-series-1.prom",
        },
        {
-           name:         "no duplication",
-           mint:         math.MinInt64,
-           maxt:         math.MaxInt64,
-           match:        []string{"{__name__=~'(?s:.*)'}", "{baz='abc'}"},
-           expectedDump: "testdata/dump-test-1.prom",
+           name:           "no duplication",
+           mint:           math.MinInt64,
+           maxt:           math.MaxInt64,
+           match:          []string{"{__name__=~'(?s:.*)'}", "{baz='abc'}"},
+           expectedDump:   "testdata/dump-test-1.prom",
+           expectedSeries: "testdata/dump-series-1.prom",
        },
        {
-           name:         "well merged",
-           mint:         math.MinInt64,
-           maxt:         math.MaxInt64,
-           match:        []string{"{__name__='heavy_metric'}", "{baz='abc'}"},
-           expectedDump: "testdata/dump-test-1.prom",
+           name:           "well merged",
+           mint:           math.MinInt64,
+           maxt:           math.MaxInt64,
+           match:          []string{"{__name__='heavy_metric'}", "{baz='abc'}"},
+           expectedDump:   "testdata/dump-test-1.prom",
+           expectedSeries: "testdata/dump-series-1.prom",
        },
        {
-           name:         "multi matchers",
-           mint:         math.MinInt64,
-           maxt:         math.MaxInt64,
-           match:        []string{"{__name__='heavy_metric',foo='foo'}", "{__name__='metric'}"},
-           expectedDump: "testdata/dump-test-2.prom",
+           name:           "multi matchers",
+           mint:           math.MinInt64,
+           maxt:           math.MaxInt64,
+           match:          []string{"{__name__='heavy_metric',foo='foo'}", "{__name__='metric'}"},
+           expectedDump:   "testdata/dump-test-2.prom",
+           expectedSeries: "testdata/dump-series-2.prom",
        },
        {
-           name:         "with reduced mint and maxt",
-           mint:         int64(60000),
-           maxt:         int64(120000),
-           match:        []string{"{__name__='metric'}"},
-           expectedDump: "testdata/dump-test-3.prom",
+           name:           "with reduced mint and maxt",
+           mint:           int64(60000),
+           maxt:           int64(120000),
+           match:          []string{"{__name__='metric'}"},
+           expectedDump:   "testdata/dump-test-3.prom",
+           expectedSeries: "testdata/dump-series-3.prom",
        },
    }
    for _, tt := range tests {

@@ -166,6 +174,12 @@ func TestTSDBDump(t *testing.T) {
        expectedMetrics = normalizeNewLine(expectedMetrics)
        // Sort both, because Prometheus does not guarantee the output order.
        require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics))
+
+       dumpedSeries := getDumpedSamples(t, storage.Dir(), tt.sandboxDirRoot, tt.mint, tt.maxt, tt.match, formatSeriesSetLabelsToJSON)
+       expectedSeries, err := os.ReadFile(tt.expectedSeries)
+       require.NoError(t, err)
+       expectedSeries = normalizeNewLine(expectedSeries)
+       require.Equal(t, sortLines(string(expectedSeries)), sortLines(dumpedSeries))
    })
    }
 }
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -188,15 +188,37 @@ func resolveAndGlobFilepaths(baseDir string, utf *unitTestFile) error {
    return nil
 }

+// testStartTimestamp wraps time.Time to support custom YAML unmarshaling.
+// It can parse both RFC3339 timestamps and Unix timestamps.
+type testStartTimestamp struct {
+   time.Time
+}
+
+// UnmarshalYAML implements custom YAML unmarshaling for testStartTimestamp.
+// It accepts both RFC3339 formatted strings and numeric Unix timestamps.
+func (t *testStartTimestamp) UnmarshalYAML(unmarshal func(any) error) error {
+   var s string
+   if err := unmarshal(&s); err != nil {
+       return err
+   }
+   parsed, err := parseTime(s)
+   if err != nil {
+       return err
+   }
+   t.Time = parsed
+   return nil
+}
+
 // testGroup is a group of input series and tests associated with it.
 type testGroup struct {
-   Interval        model.Duration   `yaml:"interval"`
-   InputSeries     []series         `yaml:"input_series"`
-   AlertRuleTests  []alertTestCase  `yaml:"alert_rule_test,omitempty"`
-   PromqlExprTests []promqlTestCase `yaml:"promql_expr_test,omitempty"`
-   ExternalLabels  labels.Labels    `yaml:"external_labels,omitempty"`
-   ExternalURL     string           `yaml:"external_url,omitempty"`
-   TestGroupName   string           `yaml:"name,omitempty"`
+   Interval        model.Duration     `yaml:"interval"`
+   InputSeries     []series           `yaml:"input_series"`
+   AlertRuleTests  []alertTestCase    `yaml:"alert_rule_test,omitempty"`
+   PromqlExprTests []promqlTestCase   `yaml:"promql_expr_test,omitempty"`
+   ExternalLabels  labels.Labels      `yaml:"external_labels,omitempty"`
+   ExternalURL     string             `yaml:"external_url,omitempty"`
+   TestGroupName   string             `yaml:"name,omitempty"`
+   StartTimestamp  testStartTimestamp `yaml:"start_timestamp,omitempty"`
 }

 // test performs the unit tests.
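Both accepted forms of the new field, as exercised by start-time-test.yml above (the values are examples):

    tests:
      - name: rfc3339_form
        interval: 1m
        start_timestamp: "2024-01-01T00:00:00Z"
      - name: unix_form
        interval: 1m
        start_timestamp: 1609459200

When start_timestamp is omitted, evaluation still starts at the Unix epoch, as the mint fallback in the next hunks shows.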
@@ -209,6 +231,8 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
    }()
 }
 // Setup testing suite.
+// Set the start time from the test group.
+queryOpts.StartTime = tg.StartTimestamp.Time
 suite, err := promqltest.NewLazyLoader(tg.seriesLoadingString(), queryOpts)
 if err != nil {
    return []error{err}
@@ -237,7 +261,12 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
 groups := orderedGroups(groupsMap, groupOrderMap)

 // Bounds for evaluating the rules.
-mint := time.Unix(0, 0).UTC()
+var mint time.Time
+if tg.StartTimestamp.IsZero() {
+   mint = time.Unix(0, 0).UTC()
+} else {
+   mint = tg.StartTimestamp.Time
+}
 maxt := mint.Add(tg.maxEvalTime())

 // Optional floating point compare fuzzing.
@@ -631,13 +660,14 @@ func (la labelsAndAnnotations) String() string {
    if len(la) == 0 {
        return "[]"
    }
-   s := "[\n0:" + indentLines("\n"+la[0].String(), "  ")
+   var s strings.Builder
+   s.WriteString("[\n0:" + indentLines("\n"+la[0].String(), "  "))
    for i, l := range la[1:] {
-       s += ",\n" + strconv.Itoa(i+1) + ":" + indentLines("\n"+l.String(), "  ")
+       s.WriteString(",\n" + strconv.Itoa(i+1) + ":" + indentLines("\n"+l.String(), "  "))
    }
-   s += "\n]"
+   s.WriteString("\n]")

-   return s
+   return s.String()
 }

 type labelAndAnnotation struct {
@@ -688,11 +718,12 @@ func parsedSamplesString(pss []parsedSample) string {
    if len(pss) == 0 {
        return "nil"
    }
-   s := pss[0].String()
+   var s strings.Builder
+   s.WriteString(pss[0].String())
    for _, ps := range pss[1:] {
-       s += ", " + ps.String()
+       s.WriteString(", " + ps.String())
    }
-   return s
+   return s.String()
 }

 func (ps *parsedSample) String() string {
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -129,6 +129,16 @@ func TestRulesUnitTest(t *testing.T) {
        },
        want: 0,
    },
+   {
+       name: "Start time tests",
+       args: args{
+           files: []string{"./testdata/start-time-test.yml"},
+       },
+       queryOpts: promqltest.LazyLoaderOpts{
+           EnableAtModifier: true,
+       },
+       want: 0,
+   },
 }
 reuseFiles := []string{}
 reuseCount := [2]int{}
@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -149,6 +149,10 @@ func LoadFile(filename string, agentMode bool, logger *slog.Logger) (*Config, er
    return cfg, nil
 }

+func boolPtr(b bool) *bool {
+   return &b
+}
+
 // The defaults applied before parsing the respective config sections.
 var (
    // DefaultConfig is the default top-level configuration.
@@ -158,7 +162,6 @@ var (
        OTLPConfig: DefaultOTLPConfig,
    }

-   f bool
    // DefaultGlobalConfig is the default global configuration.
    DefaultGlobalConfig = GlobalConfig{
        ScrapeInterval: model.Duration(1 * time.Minute),
@@ -173,9 +176,10 @@ var (
        ScrapeProtocols: nil,
        // When the native histogram feature flag is enabled,
        // ScrapeNativeHistograms default changes to true.
-       ScrapeNativeHistograms:         &f,
+       ScrapeNativeHistograms:         boolPtr(false),
        ConvertClassicHistogramsToNHCB: false,
        AlwaysScrapeClassicHistograms:  false,
+       ExtraScrapeMetrics:             boolPtr(false),
        MetricNameValidationScheme:     model.UTF8Validation,
        MetricNameEscapingScheme:       model.AllowUTF8,
    }
@@ -513,6 +517,10 @@ type GlobalConfig struct {
    ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
    // Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
    AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"`
+   // Whether to enable additional scrape metrics.
+   // When enabled, Prometheus stores samples for scrape_timeout_seconds,
+   // scrape_sample_limit, and scrape_body_size_bytes.
+   ExtraScrapeMetrics *bool `yaml:"extra_scrape_metrics,omitempty"`
 }

 // ScrapeProtocol represents supported protocol for scraping metrics.
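A minimal sketch of enabling the new setting in a config file (the job name and target are hypothetical; the per-job value overrides the global one via the Validate inheritance shown below):

    global:
      extra_scrape_metrics: true

    scrape_configs:
      - job_name: node
        extra_scrape_metrics: false
        static_configs:
          - targets: ["localhost:9100"]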
@@ -652,6 +660,9 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(any) error) error {
    if gc.ScrapeNativeHistograms == nil {
        gc.ScrapeNativeHistograms = DefaultGlobalConfig.ScrapeNativeHistograms
    }
+   if gc.ExtraScrapeMetrics == nil {
+       gc.ExtraScrapeMetrics = DefaultGlobalConfig.ExtraScrapeMetrics
+   }
    if gc.ScrapeProtocols == nil {
        if DefaultGlobalConfig.ScrapeProtocols != nil {
            // This is the case where the defaults are set due to a feature flag.
@@ -687,7 +698,17 @@ func (c *GlobalConfig) isZero() bool {
        c.ScrapeProtocols == nil &&
        c.ScrapeNativeHistograms == nil &&
        !c.ConvertClassicHistogramsToNHCB &&
-       !c.AlwaysScrapeClassicHistograms
+       !c.AlwaysScrapeClassicHistograms &&
+       c.BodySizeLimit == 0 &&
+       c.SampleLimit == 0 &&
+       c.TargetLimit == 0 &&
+       c.LabelLimit == 0 &&
+       c.LabelNameLengthLimit == 0 &&
+       c.LabelValueLengthLimit == 0 &&
+       c.KeepDroppedTargets == 0 &&
+       c.MetricNameValidationScheme == model.UnsetValidation &&
+       c.MetricNameEscapingScheme == "" &&
+       c.ExtraScrapeMetrics == nil
 }

 const DefaultGoGCPercentage = 75
@@ -796,6 +817,11 @@ type ScrapeConfig struct {
	// blank in config files but must have a value if a ScrapeConfig is created
	// programmatically.
	MetricNameEscapingScheme string `yaml:"metric_name_escaping_scheme,omitempty"`
+	// Whether to enable additional scrape metrics.
+	// When enabled, Prometheus stores samples for scrape_timeout_seconds,
+	// scrape_sample_limit, and scrape_body_size_bytes.
+	// If not set (nil), inherits the value from the global configuration.
+	ExtraScrapeMetrics *bool `yaml:"extra_scrape_metrics,omitempty"`

	// We cannot do proper Go type embedding below as the parser will then parse
	// values arbitrarily into the overflow maps of further-down types.
@@ -897,6 +923,9 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
	if c.ScrapeNativeHistograms == nil {
		c.ScrapeNativeHistograms = globalConfig.ScrapeNativeHistograms
	}
+	if c.ExtraScrapeMetrics == nil {
+		c.ExtraScrapeMetrics = globalConfig.ExtraScrapeMetrics
+	}

	if c.ScrapeProtocols == nil {
		switch {
@@ -1022,7 +1051,7 @@ func ToEscapingScheme(s string, v model.ValidationScheme) (model.EscapingScheme,
	case model.LegacyValidation:
		return model.UnderscoreEscaping, nil
	case model.UnsetValidation:
-		return model.NoEscaping, fmt.Errorf("v is unset: %s", v)
+		return model.NoEscaping, fmt.Errorf("ValidationScheme is unset: %s", v)
	default:
		panic(fmt.Errorf("unhandled validation scheme: %s", v))
	}
@@ -1045,6 +1074,11 @@ func (c *ScrapeConfig) AlwaysScrapeClassicHistogramsEnabled() bool {
	return c.AlwaysScrapeClassicHistograms != nil && *c.AlwaysScrapeClassicHistograms
}

+// ExtraScrapeMetricsEnabled returns whether to enable extra scrape metrics.
+func (c *ScrapeConfig) ExtraScrapeMetricsEnabled() bool {
+	return c.ExtraScrapeMetrics != nil && *c.ExtraScrapeMetrics
+}
+
// StorageConfig configures runtime reloadable configuration options.
type StorageConfig struct {
	TSDBConfig *TSDBConfig `yaml:"tsdb,omitempty"`
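Aside: taken together, `boolPtr`, the `*bool` fields, and `ExtraScrapeMetricsEnabled` above implement a tri-state setting: a job-level value wins if set, otherwise the global value applies (it is copied in during `Validate`), otherwise the built-in default of false holds. A minimal standalone sketch of that resolution chain, with illustrative names rather than the actual Prometheus API:

    package main

    import "fmt"

    func boolPtr(b bool) *bool { return &b }

    // resolveBool mirrors the inheritance above: an explicit job-level value
    // wins, then the global value, then the built-in default.
    func resolveBool(job, global *bool, def bool) bool {
    	switch {
    	case job != nil:
    		return *job
    	case global != nil:
    		return *global
    	default:
    		return def
    	}
    }

    func main() {
    	fmt.Println(resolveBool(boolPtr(true), boolPtr(false), false)) // job override: true
    	fmt.Println(resolveBool(nil, boolPtr(true), false))            // inherit global: true
    	fmt.Println(resolveBool(nil, nil, false))                      // default: false
    }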
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -74,10 +74,6 @@ func mustParseURL(u string) *config.URL {
	return &config.URL{URL: parsed}
}

-func boolPtr(b bool) *bool {
-	return &b
-}
-
const (
	globBodySizeLimit = 15 * units.MiB
	globSampleLimit   = 1500
@@ -109,6 +105,7 @@ var expectedConf = &Config{
		ScrapeNativeHistograms:         boolPtr(false),
		AlwaysScrapeClassicHistograms:  false,
		ConvertClassicHistogramsToNHCB: false,
+		ExtraScrapeMetrics:             boolPtr(false),
		MetricNameValidationScheme:     model.UTF8Validation,
	},
@@ -236,6 +233,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -360,6 +358,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			HTTPClientConfig: config.HTTPClientConfig{
				BasicAuth: &config.BasicAuth{

@@ -470,6 +469,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -532,6 +532,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: "/metrics",
			Scheme:      "http",

@@ -571,6 +572,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -616,6 +618,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -661,6 +664,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -696,6 +700,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -739,6 +744,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -779,6 +785,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -826,6 +833,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -863,6 +871,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -903,6 +912,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -936,6 +946,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -972,6 +983,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: "/federate",
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1008,6 +1020,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1044,6 +1057,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1077,6 +1091,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1118,6 +1133,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1158,6 +1174,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1195,6 +1212,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1231,6 +1249,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1271,6 +1290,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1314,6 +1334,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(true),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1377,6 +1398,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1410,6 +1432,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			HTTPClientConfig: config.DefaultHTTPClientConfig,
			MetricsPath:      DefaultScrapeConfig.MetricsPath,

@@ -1454,6 +1477,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			HTTPClientConfig: config.DefaultHTTPClientConfig,
			MetricsPath:      DefaultScrapeConfig.MetricsPath,

@@ -1504,6 +1528,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1544,6 +1569,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1585,6 +1611,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			HTTPClientConfig: config.DefaultHTTPClientConfig,
			MetricsPath:      DefaultScrapeConfig.MetricsPath,

@@ -1621,6 +1648,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -1659,6 +1687,7 @@ var expectedConf = &Config{
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,
@@ -2663,12 +2692,87 @@ func TestAgentMode(t *testing.T) {
	)
}

-func TestEmptyGlobalBlock(t *testing.T) {
-	c, err := Load("global:\n", promslog.NewNopLogger())
-	require.NoError(t, err)
-	exp := DefaultConfig
-	exp.loaded = true
-	require.Equal(t, exp, *c)
+func TestGlobalConfig(t *testing.T) {
+	t.Run("empty block restores defaults", func(t *testing.T) {
+		c, err := Load("global:\n", promslog.NewNopLogger())
+		require.NoError(t, err)
+		exp := DefaultConfig
+		exp.loaded = true
+		require.Equal(t, exp, *c)
+	})
+
+	// Verify that isZero() correctly identifies non-zero configurations for all
+	// fields in GlobalConfig. This is important because isZero() is used during
+	// YAML unmarshaling to detect empty global blocks that should be replaced
+	// with defaults.
+	t.Run("isZero", func(t *testing.T) {
+		for _, tc := range []struct {
+			name       string
+			config     GlobalConfig
+			expectZero bool
+		}{
+			{
+				name:       "empty GlobalConfig",
+				config:     GlobalConfig{},
+				expectZero: true,
+			},
+			{
+				name:       "ScrapeInterval set",
+				config:     GlobalConfig{ScrapeInterval: model.Duration(30 * time.Second)},
+				expectZero: false,
+			},
+			{
+				name:       "BodySizeLimit set",
+				config:     GlobalConfig{BodySizeLimit: 1 * units.MiB},
+				expectZero: false,
+			},
+			{
+				name:       "SampleLimit set",
+				config:     GlobalConfig{SampleLimit: 1000},
+				expectZero: false,
+			},
+			{
+				name:       "TargetLimit set",
+				config:     GlobalConfig{TargetLimit: 500},
+				expectZero: false,
+			},
+			{
+				name:       "LabelLimit set",
+				config:     GlobalConfig{LabelLimit: 100},
+				expectZero: false,
+			},
+			{
+				name:       "LabelNameLengthLimit set",
+				config:     GlobalConfig{LabelNameLengthLimit: 50},
+				expectZero: false,
+			},
+			{
+				name:       "LabelValueLengthLimit set",
+				config:     GlobalConfig{LabelValueLengthLimit: 200},
+				expectZero: false,
+			},
+			{
+				name:       "KeepDroppedTargets set",
+				config:     GlobalConfig{KeepDroppedTargets: 10},
+				expectZero: false,
+			},
+			{
+				name:       "MetricNameValidationScheme set",
+				config:     GlobalConfig{MetricNameValidationScheme: model.LegacyValidation},
+				expectZero: false,
+			},
+			{
+				name:       "MetricNameEscapingScheme set",
+				config:     GlobalConfig{MetricNameEscapingScheme: model.EscapeUnderscores},
+				expectZero: false,
+			},
+		} {
+			t.Run(tc.name, func(t *testing.T) {
+				result := tc.config.isZero()
+				require.Equal(t, tc.expectZero, result)
+			})
+		}
+	})
}

// ScrapeConfigOptions contains options for creating a scrape config.
@@ -2680,6 +2784,7 @@ type ScrapeConfigOptions struct {
	ScrapeNativeHistograms        bool
	AlwaysScrapeClassicHistograms bool
	ConvertClassicHistToNHCB      bool
+	ExtraScrapeMetrics            bool
}

func TestGetScrapeConfigs(t *testing.T) {
@@ -2713,6 +2818,7 @@ func TestGetScrapeConfigs(t *testing.T) {
			ScrapeNativeHistograms:         boolPtr(opts.ScrapeNativeHistograms),
			AlwaysScrapeClassicHistograms:  boolPtr(opts.AlwaysScrapeClassicHistograms),
			ConvertClassicHistogramsToNHCB: boolPtr(opts.ConvertClassicHistToNHCB),
+			ExtraScrapeMetrics:             boolPtr(opts.ExtraScrapeMetrics),
		}
		if opts.ScrapeProtocols == nil {
			sc.ScrapeProtocols = DefaultScrapeProtocols
@@ -2796,6 +2902,7 @@ func TestGetScrapeConfigs(t *testing.T) {
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			MetricsPath: DefaultScrapeConfig.MetricsPath,
			Scheme:      DefaultScrapeConfig.Scheme,

@@ -2834,6 +2941,7 @@ func TestGetScrapeConfigs(t *testing.T) {
			ScrapeNativeHistograms:         boolPtr(false),
			AlwaysScrapeClassicHistograms:  boolPtr(false),
			ConvertClassicHistogramsToNHCB: boolPtr(false),
+			ExtraScrapeMetrics:             boolPtr(false),

			HTTPClientConfig: config.HTTPClientConfig{
				TLSConfig: config.TLSConfig{
@@ -2946,6 +3054,26 @@ func TestGetScrapeConfigs(t *testing.T) {
			configFile:     "testdata/global_scrape_protocols_and_local_disable_scrape_native_hist.good.yml",
			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ScrapeNativeHistograms: false, ScrapeProtocols: []ScrapeProtocol{PrometheusText0_0_4}})},
		},
+		{
+			name:           "A global config that enables extra scrape metrics",
+			configFile:     "testdata/global_enable_extra_scrape_metrics.good.yml",
+			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: true})},
+		},
+		{
+			name:           "A global config that disables extra scrape metrics",
+			configFile:     "testdata/global_disable_extra_scrape_metrics.good.yml",
+			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: false})},
+		},
+		{
+			name:           "A global config that disables extra scrape metrics and scrape config that enables it",
+			configFile:     "testdata/local_enable_extra_scrape_metrics.good.yml",
+			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: true})},
+		},
+		{
+			name:           "A global config that enables extra scrape metrics and scrape config that disables it",
+			configFile:     "testdata/local_disable_extra_scrape_metrics.good.yml",
+			expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: false})},
+		},
	}

	for _, tc := range testCases {
@@ -2962,6 +3090,99 @@ func TestGetScrapeConfigs(t *testing.T) {
		}
	}
}

+func TestExtraScrapeMetrics(t *testing.T) {
+	tests := []struct {
+		name          string
+		config        string
+		expectGlobal  *bool
+		expectEnabled bool
+	}{
+		{
+			name: "default values (not set)",
+			config: `
+scrape_configs:
+  - job_name: test
+    static_configs:
+      - targets: ['localhost:9090']
+`,
+			expectGlobal:  boolPtr(false), // inherits from DefaultGlobalConfig
+			expectEnabled: false,
+		},
+		{
+			name: "global enabled",
+			config: `
+global:
+  extra_scrape_metrics: true
+scrape_configs:
+  - job_name: test
+    static_configs:
+      - targets: ['localhost:9090']
+`,
+			expectGlobal:  boolPtr(true),
+			expectEnabled: true,
+		},
+		{
+			name: "global disabled",
+			config: `
+global:
+  extra_scrape_metrics: false
+scrape_configs:
+  - job_name: test
+    static_configs:
+      - targets: ['localhost:9090']
+`,
+			expectGlobal:  boolPtr(false),
+			expectEnabled: false,
+		},
+		{
+			name: "scrape override enabled",
+			config: `
+global:
+  extra_scrape_metrics: false
+scrape_configs:
+  - job_name: test
+    extra_scrape_metrics: true
+    static_configs:
+      - targets: ['localhost:9090']
+`,
+			expectGlobal:  boolPtr(false),
+			expectEnabled: true,
+		},
+		{
+			name: "scrape override disabled",
+			config: `
+global:
+  extra_scrape_metrics: true
+scrape_configs:
+  - job_name: test
+    extra_scrape_metrics: false
+    static_configs:
+      - targets: ['localhost:9090']
+`,
+			expectGlobal:  boolPtr(true),
+			expectEnabled: false,
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			cfg, err := Load(tc.config, promslog.NewNopLogger())
+			require.NoError(t, err)
+
+			// Check global config
+			require.Equal(t, tc.expectGlobal, cfg.GlobalConfig.ExtraScrapeMetrics)
+
+			// Check scrape config
+			scfgs, err := cfg.GetScrapeConfigs()
+			require.NoError(t, err)
+			require.Len(t, scfgs, 1)
+
+			// Check the effective value via the helper method
+			require.Equal(t, tc.expectEnabled, scfgs[0].ExtraScrapeMetricsEnabled())
+		})
+	}
+}
+
func kubernetesSDHostURL() config.URL {
	tURL, _ := url.Parse("https://localhost:1234")
	return config.URL{URL: tURL}
@@ -1,4 +1,4 @@
-// Copyright 2017 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
6 config/testdata/global_disable_extra_scrape_metrics.good.yml vendored Normal file
@@ -0,0 +1,6 @@
+global:
+  extra_scrape_metrics: false
+scrape_configs:
+  - job_name: prometheus
+    static_configs:
+      - targets: ['localhost:8080']

6 config/testdata/global_enable_extra_scrape_metrics.good.yml vendored Normal file
@@ -0,0 +1,6 @@
+global:
+  extra_scrape_metrics: true
+scrape_configs:
+  - job_name: prometheus
+    static_configs:
+      - targets: ['localhost:8080']

7 config/testdata/local_disable_extra_scrape_metrics.good.yml vendored Normal file
@@ -0,0 +1,7 @@
+global:
+  extra_scrape_metrics: true
+scrape_configs:
+  - job_name: prometheus
+    static_configs:
+      - targets: ['localhost:8080']
+    extra_scrape_metrics: false

7 config/testdata/local_enable_extra_scrape_metrics.good.yml vendored Normal file
@@ -0,0 +1,7 @@
+global:
+  extra_scrape_metrics: false
+scrape_configs:
+  - job_name: prometheus
+    static_configs:
+      - targets: ['localhost:8080']
+    extra_scrape_metrics: true
@@ -50,7 +50,7 @@ file for use with `file_sd`.
The general principle with SD is to extract all the potentially useful
information we can out of the SD, and let the user choose what they need of it
using
-[relabelling](https://prometheus.io/docs/operating/configuration/#<relabel_config>).
+[relabelling](https://prometheus.io/docs/operating/configuration/#relabel_config).
This information is generally termed metadata.

Metadata is exposed as a set of key/value pairs (labels) per target. The keys
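As an illustration of that principle, once discovery runs, each target arrives as a `model.LabelSet` full of `__meta_`-prefixed labels that relabelling can keep, drop, or rewrite. A hedged sketch using the real `targetgroup` and `model` types (the metadata keys and values here are made up):

    package main

    import (
    	"fmt"

    	"github.com/prometheus/common/model"
    	"github.com/prometheus/prometheus/discovery/targetgroup"
    )

    func main() {
    	// One discovered target; every piece of SD metadata is a label.
    	tg := targetgroup.Group{
    		Source: "example",
    		Targets: []model.LabelSet{{
    			model.AddressLabel:       "10.0.1.50:80",
    			"__meta_example_cluster": "test-cluster", // hypothetical metadata key
    			"__meta_example_region":  "us-west-2",    // hypothetical metadata key
    		}},
    	}
    	fmt.Println(tg.Targets[0])
    }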
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -28,6 +28,7 @@ import (
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	"github.com/aws/aws-sdk-go-v2/service/ec2"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
	"github.com/aws/aws-sdk-go-v2/service/sts"
@@ -44,31 +45,37 @@ import (
)

const (
-	ecsLabel                 = model.MetaLabelPrefix + "ecs_"
-	ecsLabelCluster          = ecsLabel + "cluster"
-	ecsLabelClusterARN       = ecsLabel + "cluster_arn"
-	ecsLabelService          = ecsLabel + "service"
-	ecsLabelServiceARN       = ecsLabel + "service_arn"
-	ecsLabelServiceStatus    = ecsLabel + "service_status"
-	ecsLabelTaskGroup        = ecsLabel + "task_group"
-	ecsLabelTaskARN          = ecsLabel + "task_arn"
-	ecsLabelTaskDefinition   = ecsLabel + "task_definition"
-	ecsLabelRegion           = ecsLabel + "region"
-	ecsLabelAvailabilityZone = ecsLabel + "availability_zone"
-	ecsLabelAZID             = ecsLabel + "availability_zone_id"
-	ecsLabelSubnetID         = ecsLabel + "subnet_id"
-	ecsLabelIPAddress        = ecsLabel + "ip_address"
-	ecsLabelLaunchType       = ecsLabel + "launch_type"
-	ecsLabelDesiredStatus    = ecsLabel + "desired_status"
-	ecsLabelLastStatus       = ecsLabel + "last_status"
-	ecsLabelHealthStatus     = ecsLabel + "health_status"
-	ecsLabelPlatformFamily   = ecsLabel + "platform_family"
-	ecsLabelPlatformVersion  = ecsLabel + "platform_version"
-	ecsLabelTag              = ecsLabel + "tag_"
-	ecsLabelTagCluster       = ecsLabelTag + "cluster_"
-	ecsLabelTagService       = ecsLabelTag + "service_"
-	ecsLabelTagTask          = ecsLabelTag + "task_"
-	ecsLabelSeparator        = ","
+	ecsLabel                     = model.MetaLabelPrefix + "ecs_"
+	ecsLabelCluster              = ecsLabel + "cluster"
+	ecsLabelClusterARN           = ecsLabel + "cluster_arn"
+	ecsLabelService              = ecsLabel + "service"
+	ecsLabelServiceARN           = ecsLabel + "service_arn"
+	ecsLabelServiceStatus        = ecsLabel + "service_status"
+	ecsLabelTaskGroup            = ecsLabel + "task_group"
+	ecsLabelTaskARN              = ecsLabel + "task_arn"
+	ecsLabelTaskDefinition       = ecsLabel + "task_definition"
+	ecsLabelRegion               = ecsLabel + "region"
+	ecsLabelAvailabilityZone     = ecsLabel + "availability_zone"
+	ecsLabelSubnetID             = ecsLabel + "subnet_id"
+	ecsLabelIPAddress            = ecsLabel + "ip_address"
+	ecsLabelLaunchType           = ecsLabel + "launch_type"
+	ecsLabelDesiredStatus        = ecsLabel + "desired_status"
+	ecsLabelLastStatus           = ecsLabel + "last_status"
+	ecsLabelHealthStatus         = ecsLabel + "health_status"
+	ecsLabelPlatformFamily       = ecsLabel + "platform_family"
+	ecsLabelPlatformVersion      = ecsLabel + "platform_version"
+	ecsLabelTag                  = ecsLabel + "tag_"
+	ecsLabelTagCluster           = ecsLabelTag + "cluster_"
+	ecsLabelTagService           = ecsLabelTag + "service_"
+	ecsLabelTagTask              = ecsLabelTag + "task_"
+	ecsLabelTagEC2               = ecsLabelTag + "ec2_"
+	ecsLabelNetworkMode          = ecsLabel + "network_mode"
+	ecsLabelContainerInstanceARN = ecsLabel + "container_instance_arn"
+	ecsLabelEC2InstanceID        = ecsLabel + "ec2_instance_id"
+	ecsLabelEC2InstanceType      = ecsLabel + "ec2_instance_type"
+	ecsLabelEC2InstancePrivateIP = ecsLabel + "ec2_instance_private_ip"
+	ecsLabelEC2InstancePublicIP  = ecsLabel + "ec2_instance_public_ip"
+	ecsLabelPublicIP             = ecsLabel + "public_ip"
)

// DefaultECSSDConfig is the default ECS SD configuration.
@@ -122,7 +129,7 @@ func (c *ECSSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery
}

// UnmarshalYAML implements the yaml.Unmarshaler interface for the ECS Config.
-func (c *ECSSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *ECSSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
	*c = DefaultECSSDConfig
	type plain ECSSDConfig
	err := unmarshal((*plain)(c))
@@ -153,6 +160,12 @@ type ecsClient interface {
	DescribeServices(context.Context, *ecs.DescribeServicesInput, ...func(*ecs.Options)) (*ecs.DescribeServicesOutput, error)
	ListTasks(context.Context, *ecs.ListTasksInput, ...func(*ecs.Options)) (*ecs.ListTasksOutput, error)
	DescribeTasks(context.Context, *ecs.DescribeTasksInput, ...func(*ecs.Options)) (*ecs.DescribeTasksOutput, error)
+	DescribeContainerInstances(context.Context, *ecs.DescribeContainerInstancesInput, ...func(*ecs.Options)) (*ecs.DescribeContainerInstancesOutput, error)
+}
+
+type ecsEC2Client interface {
+	DescribeInstances(context.Context, *ec2.DescribeInstancesInput, ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error)
+	DescribeNetworkInterfaces(context.Context, *ec2.DescribeNetworkInterfacesInput, ...func(*ec2.Options)) (*ec2.DescribeNetworkInterfacesOutput, error)
}

// ECSDiscovery periodically performs ECS-SD requests. It implements
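One design note worth making explicit: `ecsClient` and `ecsEC2Client` are deliberately narrow interfaces rather than the concrete SDK clients, which is what lets the tests further down substitute in-memory fakes. A sketch of the kind of test double this enables (hypothetical, not the actual test code):

    // fakeEC2 satisfies ecsEC2Client by returning canned responses
    // instead of calling AWS.
    type fakeEC2 struct {
    	instancesOut *ec2.DescribeInstancesOutput
    	enisOut      *ec2.DescribeNetworkInterfacesOutput
    }

    func (f *fakeEC2) DescribeInstances(_ context.Context, _ *ec2.DescribeInstancesInput, _ ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error) {
    	return f.instancesOut, nil
    }

    func (f *fakeEC2) DescribeNetworkInterfaces(_ context.Context, _ *ec2.DescribeNetworkInterfacesInput, _ ...func(*ec2.Options)) (*ec2.DescribeNetworkInterfacesOutput, error) {
    	return f.enisOut, nil
    }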
@@ -162,6 +175,7 @@ type ECSDiscovery struct {
	logger *slog.Logger
	cfg    *ECSSDConfig
	ecs    ecsClient
+	ec2    ecsEC2Client
}

// NewECSDiscovery returns a new ECSDiscovery which periodically refreshes its targets.
@@ -191,7 +205,7 @@ func NewECSDiscovery(conf *ECSSDConfig, opts discovery.DiscovererOptions) (*ECSD
}

func (d *ECSDiscovery) initEcsClient(ctx context.Context) error {
-	if d.ecs != nil {
+	if d.ecs != nil && d.ec2 != nil {
		return nil
	}

@@ -240,6 +254,10 @@ func (d *ECSDiscovery) initEcsClient(ctx context.Context) error {
		options.HTTPClient = client
	})

+	d.ec2 = ec2.NewFromConfig(cfg, func(options *ec2.Options) {
+		options.HTTPClient = client
+	})
+
	// Test credentials by making a simple API call
	testCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
@@ -458,13 +476,117 @@ func (d *ECSDiscovery) describeTasks(ctx context.Context, clusterARN string, tas
	return tasks, errg.Wait()
}

+// describeContainerInstances returns a map of container instance ARN to EC2 instance ID.
+// Uses batching to respect AWS API limits (100 container instances per request).
+func (d *ECSDiscovery) describeContainerInstances(ctx context.Context, clusterARN string, containerInstanceARNs []string) (map[string]string, error) {
+	if len(containerInstanceARNs) == 0 {
+		return make(map[string]string), nil
+	}
+
+	containerInstToEC2 := make(map[string]string)
+	batchSize := 100 // AWS API limit
+
+	for _, batch := range batchSlice(containerInstanceARNs, batchSize) {
+		resp, err := d.ecs.DescribeContainerInstances(ctx, &ecs.DescribeContainerInstancesInput{
+			Cluster:            aws.String(clusterARN),
+			ContainerInstances: batch,
+		})
+		if err != nil {
+			return nil, fmt.Errorf("could not describe container instances: %w", err)
+		}
+
+		for _, ci := range resp.ContainerInstances {
+			if ci.ContainerInstanceArn != nil && ci.Ec2InstanceId != nil {
+				containerInstToEC2[*ci.ContainerInstanceArn] = *ci.Ec2InstanceId
+			}
+		}
+	}
+
+	return containerInstToEC2, nil
+}
+
+// ec2InstanceInfo holds information retrieved from EC2 DescribeInstances.
+type ec2InstanceInfo struct {
+	privateIP    string
+	publicIP     string
+	subnetID     string
+	instanceType string
+	tags         map[string]string
+}
+
+// describeEC2Instances returns a map of EC2 instance ID to instance information.
+func (d *ECSDiscovery) describeEC2Instances(ctx context.Context, instanceIDs []string) (map[string]ec2InstanceInfo, error) {
+	if len(instanceIDs) == 0 {
+		return make(map[string]ec2InstanceInfo), nil
+	}
+
+	instanceInfo := make(map[string]ec2InstanceInfo)
+
+	resp, err := d.ec2.DescribeInstances(ctx, &ec2.DescribeInstancesInput{
+		InstanceIds: instanceIDs,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("could not describe EC2 instances: %w", err)
+	}
+
+	for _, reservation := range resp.Reservations {
+		for _, instance := range reservation.Instances {
+			if instance.InstanceId != nil && instance.PrivateIpAddress != nil {
+				info := ec2InstanceInfo{
+					privateIP: *instance.PrivateIpAddress,
+					tags:      make(map[string]string),
+				}
+				if instance.PublicIpAddress != nil {
+					info.publicIP = *instance.PublicIpAddress
+				}
+				if instance.SubnetId != nil {
+					info.subnetID = *instance.SubnetId
+				}
+				if instance.InstanceType != "" {
+					info.instanceType = string(instance.InstanceType)
+				}
+				// Collect EC2 instance tags
+				for _, tag := range instance.Tags {
+					if tag.Key != nil && tag.Value != nil {
+						info.tags[*tag.Key] = *tag.Value
+					}
+				}
+				instanceInfo[*instance.InstanceId] = info
+			}
+		}
+	}
+
+	return instanceInfo, nil
+}
+
+// describeNetworkInterfaces returns a map of ENI ID to public IP address.
+func (d *ECSDiscovery) describeNetworkInterfaces(ctx context.Context, eniIDs []string) (map[string]string, error) {
+	if len(eniIDs) == 0 {
+		return make(map[string]string), nil
+	}
+
+	eniToPublicIP := make(map[string]string)
+
+	resp, err := d.ec2.DescribeNetworkInterfaces(ctx, &ec2.DescribeNetworkInterfacesInput{
+		NetworkInterfaceIds: eniIDs,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("could not describe network interfaces: %w", err)
+	}
+
+	for _, eni := range resp.NetworkInterfaces {
+		if eni.NetworkInterfaceId != nil && eni.Association != nil && eni.Association.PublicIp != nil {
+			eniToPublicIP[*eni.NetworkInterfaceId] = *eni.Association.PublicIp
+		}
+	}
+
+	return eniToPublicIP, nil
+}
+
func batchSlice[T any](a []T, size int) [][]T {
	batches := make([][]T, 0, len(a)/size+1)
	for i := 0; i < len(a); i += size {
-		end := i + size
-		if end > len(a) {
-			end = len(a)
-		}
+		end := min(i+size, len(a))
		batches = append(batches, a[i:end])
	}
	return batches
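The rewritten loop body uses Go 1.21's built-in `min`, and the helper is what keeps each DescribeContainerInstances call under the 100-item API limit. A quick standalone illustration of its behavior:

    package main

    import "fmt"

    // batchSlice, as above: split a slice into chunks of at most size elements.
    func batchSlice[T any](a []T, size int) [][]T {
    	batches := make([][]T, 0, len(a)/size+1)
    	for i := 0; i < len(a); i += size {
    		end := min(i+size, len(a))
    		batches = append(batches, a[i:end])
    	}
    	return batches
    }

    func main() {
    	arns := []string{"a", "b", "c", "d", "e"}
    	fmt.Println(batchSlice(arns, 2)) // [[a b] [c d] [e]]
    }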
@@ -557,8 +679,76 @@ func (d *ECSDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
		if tasks, exists := serviceTaskMap[serviceArn]; exists {
			var serviceTargets []model.LabelSet

+			// Collect container instance ARNs for all EC2 tasks to get instance type
+			var containerInstanceARNs []string
+			taskToContainerInstance := make(map[string]string)
+			// Collect ENI IDs for awsvpc tasks to get public IPs
+			var eniIDs []string
+			taskToENI := make(map[string]string)
+
			for _, task := range tasks {
-				// Find the ENI attachment to get the private IP address
+				// Collect container instance ARN for any task running on EC2
+				if task.ContainerInstanceArn != nil {
+					containerInstanceARNs = append(containerInstanceARNs, *task.ContainerInstanceArn)
+					taskToContainerInstance[*task.TaskArn] = *task.ContainerInstanceArn
+				}
+
+				// Collect ENI IDs from awsvpc tasks
+				for _, attachment := range task.Attachments {
+					if attachment.Type != nil && *attachment.Type == "ElasticNetworkInterface" {
+						for _, detail := range attachment.Details {
+							if detail.Name != nil && *detail.Name == "networkInterfaceId" && detail.Value != nil {
+								eniIDs = append(eniIDs, *detail.Value)
+								taskToENI[*task.TaskArn] = *detail.Value
+								break
+							}
+						}
+						break
+					}
+				}
+			}
+
+			// Batch describe container instances and EC2 instances to get instance type and other metadata
+			var containerInstToEC2 map[string]string
+			var ec2InstInfo map[string]ec2InstanceInfo
+			if len(containerInstanceARNs) > 0 {
+				var err error
+				containerInstToEC2, err = d.describeContainerInstances(ctx, clusterArn, containerInstanceARNs)
+				if err != nil {
+					d.logger.Error("Failed to describe container instances", "cluster", clusterArn, "error", err)
+					// Continue processing tasks
+				} else {
+					// Collect unique EC2 instance IDs
+					ec2InstanceIDs := make([]string, 0, len(containerInstToEC2))
+					for _, ec2ID := range containerInstToEC2 {
+						ec2InstanceIDs = append(ec2InstanceIDs, ec2ID)
+					}
+
+					// Batch describe EC2 instances
+					ec2InstInfo, err = d.describeEC2Instances(ctx, ec2InstanceIDs)
+					if err != nil {
+						d.logger.Error("Failed to describe EC2 instances", "cluster", clusterArn, "error", err)
+					}
+				}
+			}
+
+			// Batch describe ENIs to get public IPs for awsvpc tasks
+			var eniToPublicIP map[string]string
+			if len(eniIDs) > 0 {
+				var err error
+				eniToPublicIP, err = d.describeNetworkInterfaces(ctx, eniIDs)
+				if err != nil {
+					d.logger.Error("Failed to describe network interfaces", "cluster", clusterArn, "error", err)
+					// Continue processing without ENI public IPs
+				}
+			}
+
+			for _, task := range tasks {
+				var ipAddress, subnetID, publicIP string
+				var networkMode string
+				var ec2InstanceID, ec2InstanceType, ec2InstancePrivateIP, ec2InstancePublicIP string
+
+				// Try to get IP from ENI attachment (awsvpc mode)
				var eniAttachment *types.Attachment
				for _, attachment := range task.Attachments {
					if attachment.Type != nil && *attachment.Type == "ElasticNetworkInterface" {
@@ -566,19 +756,65 @@ func (d *ECSDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
						break
					}
				}
-				if eniAttachment == nil {
-					continue
-				}
-
-				var ipAddress, subnetID string
-				for _, detail := range eniAttachment.Details {
-					switch *detail.Name {
-					case "privateIPv4Address":
-						ipAddress = *detail.Value
-					case "subnetId":
-						subnetID = *detail.Value
+				if eniAttachment != nil {
+					// awsvpc networking mode - get IP from ENI
+					networkMode = "awsvpc"
+					for _, detail := range eniAttachment.Details {
+						switch *detail.Name {
+						case "privateIPv4Address":
+							ipAddress = *detail.Value
+						case "subnetId":
+							subnetID = *detail.Value
+						}
+					}
+					// Get public IP from ENI if available
+					if eniID, ok := taskToENI[*task.TaskArn]; ok {
+						if eniPublicIP, ok := eniToPublicIP[eniID]; ok {
+							publicIP = eniPublicIP
+						}
+					}
+				} else if task.ContainerInstanceArn != nil {
+					// bridge/host networking mode - need to get EC2 instance IP and subnet
+					networkMode = "bridge"
+					containerInstARN, ok := taskToContainerInstance[*task.TaskArn]
+					if ok {
+						ec2InstanceID, ok = containerInstToEC2[containerInstARN]
+						if ok {
+							info, ok := ec2InstInfo[ec2InstanceID]
+							if ok {
+								ipAddress = info.privateIP
+								publicIP = info.publicIP
+								subnetID = info.subnetID
+								ec2InstanceType = info.instanceType
+								ec2InstancePrivateIP = info.privateIP
+								ec2InstancePublicIP = info.publicIP
+							} else {
+								d.logger.Debug("EC2 instance info not found", "instance", ec2InstanceID, "task", *task.TaskArn)
+							}
+						} else {
+							d.logger.Debug("Container instance not found in map", "arn", containerInstARN, "task", *task.TaskArn)
+						}
+					}
+				}
+
+				// Get EC2 instance metadata for awsvpc tasks running on EC2
+				// We want the instance type and the host IPs for advanced use cases
+				if networkMode == "awsvpc" && task.ContainerInstanceArn != nil {
+					containerInstARN, ok := taskToContainerInstance[*task.TaskArn]
+					if ok {
+						ec2InstanceID, ok = containerInstToEC2[containerInstARN]
+						if ok {
+							info, ok := ec2InstInfo[ec2InstanceID]
+							if ok {
+								ec2InstanceType = info.instanceType
+								ec2InstancePrivateIP = info.privateIP
+								ec2InstancePublicIP = info.publicIP
+							}
+						}
+					}
				}

				if ipAddress == "" {
					continue
				}
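The branching above boils down to a small decision table: a task with an ENI attachment is awsvpc and takes its IPs from the ENI; a task with only a container instance is bridge/host and inherits the host EC2 instance's addresses; anything left without an IP is skipped. A condensed sketch of just that selection rule (not the actual function, which also fills in the EC2 metadata labels):

    // resolveTaskAddress condenses the selection above: prefer the task ENI
    // (awsvpc), fall back to the host EC2 instance (bridge/host), else skip.
    func resolveTaskAddress(eniIP, hostIP string, hasENI, hasInstance bool) (ip, mode string, ok bool) {
    	switch {
    	case hasENI:
    		return eniIP, "awsvpc", eniIP != ""
    	case hasInstance:
    		return hostIP, "bridge", hostIP != ""
    	default:
    		return "", "", false
    	}
    }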
@@ -592,13 +828,38 @@ func (d *ECSDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
					ecsLabelTaskARN:          model.LabelValue(*task.TaskArn),
					ecsLabelTaskDefinition:   model.LabelValue(*task.TaskDefinitionArn),
					ecsLabelIPAddress:        model.LabelValue(ipAddress),
-					ecsLabelSubnetID:         model.LabelValue(subnetID),
					ecsLabelRegion:           model.LabelValue(d.cfg.Region),
					ecsLabelLaunchType:       model.LabelValue(task.LaunchType),
					ecsLabelAvailabilityZone: model.LabelValue(*task.AvailabilityZone),
					ecsLabelDesiredStatus:    model.LabelValue(*task.DesiredStatus),
					ecsLabelLastStatus:       model.LabelValue(*task.LastStatus),
					ecsLabelHealthStatus:     model.LabelValue(task.HealthStatus),
+					ecsLabelNetworkMode:      model.LabelValue(networkMode),
				}

+				// Add subnet ID when available (awsvpc mode from ENI, bridge/host from EC2 instance)
+				if subnetID != "" {
+					labels[ecsLabelSubnetID] = model.LabelValue(subnetID)
+				}
+
+				// Add container instance and EC2 instance info for EC2 launch type
+				if task.ContainerInstanceArn != nil {
+					labels[ecsLabelContainerInstanceARN] = model.LabelValue(*task.ContainerInstanceArn)
+				}
+				if ec2InstanceID != "" {
+					labels[ecsLabelEC2InstanceID] = model.LabelValue(ec2InstanceID)
+				}
+				if ec2InstanceType != "" {
+					labels[ecsLabelEC2InstanceType] = model.LabelValue(ec2InstanceType)
+				}
+				if ec2InstancePrivateIP != "" {
+					labels[ecsLabelEC2InstancePrivateIP] = model.LabelValue(ec2InstancePrivateIP)
+				}
+				if ec2InstancePublicIP != "" {
+					labels[ecsLabelEC2InstancePublicIP] = model.LabelValue(ec2InstancePublicIP)
+				}
+				if publicIP != "" {
+					labels[ecsLabelPublicIP] = model.LabelValue(publicIP)
+				}
+
				if task.PlatformFamily != nil {
@@ -637,6 +898,15 @@ func (d *ECSDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
					}
				}

+				// Add EC2 instance tags (if running on EC2)
+				if ec2InstanceID != "" {
+					if info, ok := ec2InstInfo[ec2InstanceID]; ok {
+						for tagKey, tagValue := range info.tags {
+							labels[model.LabelName(ecsLabelTagEC2+strutil.SanitizeLabelName(tagKey))] = model.LabelValue(tagValue)
+						}
+					}
+				}
+
				serviceTargets = append(serviceTargets, labels)
			}
@@ -17,6 +17,8 @@ import (
	"context"
	"testing"

+	"github.com/aws/aws-sdk-go-v2/service/ec2"
+	ec2Types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
	ecsTypes "github.com/aws/aws-sdk-go-v2/service/ecs/types"
	"github.com/prometheus/common/model"
@@ -29,9 +31,12 @@ import (
type ecsDataStore struct {
	region string

-	clusters []ecsTypes.Cluster
-	services []ecsTypes.Service
-	tasks    []ecsTypes.Task
+	clusters           []ecsTypes.Cluster
+	services           []ecsTypes.Service
+	tasks              []ecsTypes.Task
+	containerInstances []ecsTypes.ContainerInstance
+	ec2Instances       map[string]ec2InstanceInfo // EC2 instance ID to instance info
+	eniPublicIPs       map[string]string          // ENI ID to public IP
}

func TestECSDiscoveryListClusterARNs(t *testing.T) {
@@ -716,6 +721,7 @@ func TestECSDiscoveryRefresh(t *testing.T) {
							Details: []ecsTypes.KeyValuePair{
								{Name: strptr("subnetId"), Value: strptr("subnet-12345")},
								{Name: strptr("privateIPv4Address"), Value: strptr("10.0.1.100")},
+								{Name: strptr("networkInterfaceId"), Value: strptr("eni-fargate-123")},
							},
						},
					},
@@ -724,6 +730,9 @@ func TestECSDiscoveryRefresh(t *testing.T) {
						},
					},
				},
+				eniPublicIPs: map[string]string{
+					"eni-fargate-123": "52.1.2.3",
+				},
			},
			expected: []*targetgroup.Group{
				{
@@ -749,6 +758,8 @@ func TestECSDiscoveryRefresh(t *testing.T) {
						"__meta_ecs_health_status":    model.LabelValue("HEALTHY"),
						"__meta_ecs_platform_family":  model.LabelValue("Linux"),
						"__meta_ecs_platform_version": model.LabelValue("1.4.0"),
+						"__meta_ecs_network_mode":     model.LabelValue("awsvpc"),
+						"__meta_ecs_public_ip":        model.LabelValue("52.1.2.3"),
						"__meta_ecs_tag_cluster_Environment": model.LabelValue("test"),
						"__meta_ecs_tag_service_App":         model.LabelValue("web"),
						"__meta_ecs_tag_task_Version":        model.LabelValue("v1.0"),
@@ -825,14 +836,345 @@ func TestECSDiscoveryRefresh(t *testing.T) {
				},
			},
		},
+		{
+			name: "TaskWithBridgeNetworking",
+			ecsData: &ecsDataStore{
+				region: "us-west-2",
+				clusters: []ecsTypes.Cluster{
+					{
+						ClusterName: strptr("test-cluster"),
+						ClusterArn:  strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+						Status:      strptr("ACTIVE"),
+					},
+				},
+				services: []ecsTypes.Service{
+					{
+						ServiceName: strptr("bridge-service"),
+						ServiceArn:  strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/bridge-service"),
+						ClusterArn:  strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+						Status:      strptr("ACTIVE"),
+					},
+				},
+				tasks: []ecsTypes.Task{
+					{
+						TaskArn:              strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-bridge"),
+						ClusterArn:           strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+						TaskDefinitionArn:    strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/bridge-task:1"),
+						Group:                strptr("service:bridge-service"),
+						LaunchType:           ecsTypes.LaunchTypeEc2,
+						LastStatus:           strptr("RUNNING"),
+						DesiredStatus:        strptr("RUNNING"),
+						HealthStatus:         ecsTypes.HealthStatusHealthy,
+						AvailabilityZone:     strptr("us-west-2a"),
+						ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"),
+						Attachments:          []ecsTypes.Attachment{},
+					},
+				},
+				containerInstances: []ecsTypes.ContainerInstance{
+					{
+						ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"),
+						Ec2InstanceId:        strptr("i-1234567890abcdef0"),
+						Status:               strptr("ACTIVE"),
+					},
+				},
+				ec2Instances: map[string]ec2InstanceInfo{
+					"i-1234567890abcdef0": {
+						privateIP:    "10.0.1.50",
+						publicIP:     "54.1.2.3",
+						subnetID:     "subnet-bridge-1",
+						instanceType: "t3.medium",
+						tags: map[string]string{
+							"Name":        "ecs-host-1",
+							"Environment": "production",
+						},
+					},
+				},
+			},
+			expected: []*targetgroup.Group{
+				{
+					Source: "us-west-2",
+					Targets: []model.LabelSet{
+						{
+							model.AddressLabel:                   model.LabelValue("10.0.1.50:80"),
+							"__meta_ecs_cluster":                 model.LabelValue("test-cluster"),
+							"__meta_ecs_cluster_arn":             model.LabelValue("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
+							"__meta_ecs_service":                 model.LabelValue("bridge-service"),
+							"__meta_ecs_service_arn":             model.LabelValue("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/bridge-service"),
+							"__meta_ecs_service_status":          model.LabelValue("ACTIVE"),
+							"__meta_ecs_task_group":              model.LabelValue("service:bridge-service"),
+							"__meta_ecs_task_arn":                model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-bridge"),
+							"__meta_ecs_task_definition":         model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task-definition/bridge-task:1"),
+							"__meta_ecs_region":                  model.LabelValue("us-west-2"),
+							"__meta_ecs_availability_zone":       model.LabelValue("us-west-2a"),
+							"__meta_ecs_ip_address":              model.LabelValue("10.0.1.50"),
+							"__meta_ecs_subnet_id":               model.LabelValue("subnet-bridge-1"),
+							"__meta_ecs_launch_type":             model.LabelValue("EC2"),
+							"__meta_ecs_desired_status":          model.LabelValue("RUNNING"),
+							"__meta_ecs_last_status":             model.LabelValue("RUNNING"),
+							"__meta_ecs_health_status":           model.LabelValue("HEALTHY"),
+							"__meta_ecs_network_mode":            model.LabelValue("bridge"),
+							"__meta_ecs_container_instance_arn":  model.LabelValue("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"),
+							"__meta_ecs_ec2_instance_id":         model.LabelValue("i-1234567890abcdef0"),
+							"__meta_ecs_ec2_instance_type":       model.LabelValue("t3.medium"),
+							"__meta_ecs_ec2_instance_private_ip": model.LabelValue("10.0.1.50"),
+							"__meta_ecs_ec2_instance_public_ip":  model.LabelValue("54.1.2.3"),
+							"__meta_ecs_public_ip":               model.LabelValue("54.1.2.3"),
+							"__meta_ecs_tag_ec2_Name":            model.LabelValue("ecs-host-1"),
+							"__meta_ecs_tag_ec2_Environment":     model.LabelValue("production"),
+						},
+					},
+				},
+			},
+		},
+		{
+			name: "MixedNetworkingModes",
+			ecsData: &ecsDataStore{
+				region: "us-west-2",
+				clusters: []ecsTypes.Cluster{
+					{
+						ClusterName: strptr("mixed-cluster"),
+						ClusterArn:  strptr("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
+						Status:      strptr("ACTIVE"),
+					},
+				},
+				services: []ecsTypes.Service{
+					{
+						ServiceName: strptr("mixed-service"),
+						ServiceArn:  strptr("arn:aws:ecs:us-west-2:123456789012:service/mixed-cluster/mixed-service"),
+						ClusterArn:  strptr("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
+						Status:      strptr("ACTIVE"),
+					},
+				},
+				tasks: []ecsTypes.Task{
+					{
+						TaskArn:           strptr("arn:aws:ecs:us-west-2:123456789012:task/mixed-cluster/task-awsvpc"),
+						ClusterArn:        strptr("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
+						TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/awsvpc-task:1"),
+						Group:             strptr("service:mixed-service"),
+						LaunchType:        ecsTypes.LaunchTypeFargate,
+						LastStatus:        strptr("RUNNING"),
+						DesiredStatus:     strptr("RUNNING"),
+						HealthStatus:      ecsTypes.HealthStatusHealthy,
+						AvailabilityZone:  strptr("us-west-2a"),
+						Attachments: []ecsTypes.Attachment{
+							{
+								Type: strptr("ElasticNetworkInterface"),
+								Details: []ecsTypes.KeyValuePair{
+									{Name: strptr("subnetId"), Value: strptr("subnet-12345")},
+									{Name: strptr("privateIPv4Address"), Value: strptr("10.0.2.100")},
+									{Name: strptr("networkInterfaceId"), Value: strptr("eni-mixed-awsvpc")},
+								},
+							},
+						},
+					},
+					{
+						TaskArn:              strptr("arn:aws:ecs:us-west-2:123456789012:task/mixed-cluster/task-bridge"),
+						ClusterArn:           strptr("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
+						TaskDefinitionArn:    strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/bridge-task:1"),
+						Group:                strptr("service:mixed-service"),
+						LaunchType:           ecsTypes.LaunchTypeEc2,
+						LastStatus:           strptr("RUNNING"),
+						DesiredStatus:        strptr("RUNNING"),
+						HealthStatus:         ecsTypes.HealthStatusHealthy,
+						AvailabilityZone:     strptr("us-west-2b"),
+						ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/mixed-cluster/xyz789"),
+						Attachments:          []ecsTypes.Attachment{},
+					},
+				},
+				containerInstances: []ecsTypes.ContainerInstance{
+					{
+						ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/mixed-cluster/xyz789"),
+						Ec2InstanceId:        strptr("i-0987654321fedcba0"),
+						Status:               strptr("ACTIVE"),
+					},
+				},
+				ec2Instances: map[string]ec2InstanceInfo{
+					"i-0987654321fedcba0": {
+						privateIP:    "10.0.1.75",
+						publicIP:     "54.2.3.4",
+						subnetID:     "subnet-bridge-2",
+						instanceType: "t3.large",
+						tags: map[string]string{
+							"Name": "mixed-host",
+							"Team": "platform",
+						},
+					},
+				},
+				eniPublicIPs: map[string]string{
+					"eni-mixed-awsvpc": "52.2.3.4",
+				},
+			},
+			expected: []*targetgroup.Group{
+				{
+					Source: "us-west-2",
+					Targets: []model.LabelSet{
+						{
+							model.AddressLabel:             model.LabelValue("10.0.2.100:80"),
+							"__meta_ecs_cluster":           model.LabelValue("mixed-cluster"),
+							"__meta_ecs_cluster_arn":       model.LabelValue("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
+							"__meta_ecs_service":           model.LabelValue("mixed-service"),
+							"__meta_ecs_service_arn":       model.LabelValue("arn:aws:ecs:us-west-2:123456789012:service/mixed-cluster/mixed-service"),
+							"__meta_ecs_service_status":    model.LabelValue("ACTIVE"),
+							"__meta_ecs_task_group":        model.LabelValue("service:mixed-service"),
+							"__meta_ecs_task_arn":          model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task/mixed-cluster/task-awsvpc"),
+							"__meta_ecs_task_definition":   model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task-definition/awsvpc-task:1"),
+							"__meta_ecs_region":            model.LabelValue("us-west-2"),
+							"__meta_ecs_availability_zone": model.LabelValue("us-west-2a"),
+							"__meta_ecs_ip_address":        model.LabelValue("10.0.2.100"),
+							"__meta_ecs_subnet_id":         model.LabelValue("subnet-12345"),
+							"__meta_ecs_launch_type":       model.LabelValue("FARGATE"),
+							"__meta_ecs_desired_status":    model.LabelValue("RUNNING"),
+							"__meta_ecs_last_status":       model.LabelValue("RUNNING"),
+							"__meta_ecs_health_status":     model.LabelValue("HEALTHY"),
+							"__meta_ecs_network_mode":      model.LabelValue("awsvpc"),
+							"__meta_ecs_public_ip":         model.LabelValue("52.2.3.4"),
+						},
+						{
+							model.AddressLabel:                   model.LabelValue("10.0.1.75:80"),
+							"__meta_ecs_cluster":                 model.LabelValue("mixed-cluster"),
+							"__meta_ecs_cluster_arn":             model.LabelValue("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
+							"__meta_ecs_service":                 model.LabelValue("mixed-service"),
+							"__meta_ecs_service_arn":             model.LabelValue("arn:aws:ecs:us-west-2:123456789012:service/mixed-cluster/mixed-service"),
+							"__meta_ecs_service_status":          model.LabelValue("ACTIVE"),
+							"__meta_ecs_task_group":              model.LabelValue("service:mixed-service"),
+							"__meta_ecs_task_arn":                model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task/mixed-cluster/task-bridge"),
+							"__meta_ecs_task_definition":         model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task-definition/bridge-task:1"),
+							"__meta_ecs_region":                  model.LabelValue("us-west-2"),
+							"__meta_ecs_availability_zone":       model.LabelValue("us-west-2b"),
+							"__meta_ecs_ip_address":              model.LabelValue("10.0.1.75"),
+							"__meta_ecs_subnet_id":               model.LabelValue("subnet-bridge-2"),
+							"__meta_ecs_launch_type":             model.LabelValue("EC2"),
+							"__meta_ecs_desired_status":          model.LabelValue("RUNNING"),
+							"__meta_ecs_last_status":             model.LabelValue("RUNNING"),
+							"__meta_ecs_health_status":           model.LabelValue("HEALTHY"),
+							"__meta_ecs_network_mode":            model.LabelValue("bridge"),
+							"__meta_ecs_container_instance_arn":  model.LabelValue("arn:aws:ecs:us-west-2:123456789012:container-instance/mixed-cluster/xyz789"),
+							"__meta_ecs_ec2_instance_id":         model.LabelValue("i-0987654321fedcba0"),
+							"__meta_ecs_ec2_instance_type":       model.LabelValue("t3.large"),
+							"__meta_ecs_ec2_instance_private_ip": model.LabelValue("10.0.1.75"),
+							"__meta_ecs_ec2_instance_public_ip":  model.LabelValue("54.2.3.4"),
+							"__meta_ecs_public_ip":               model.LabelValue("54.2.3.4"),
+							"__meta_ecs_tag_ec2_Name":            model.LabelValue("mixed-host"),
+							"__meta_ecs_tag_ec2_Team":            model.LabelValue("platform"),
+						},
+					},
+				},
+			},
+		},
+		{
+			name: "EC2WithAwsvpcNetworking",
+			ecsData: &ecsDataStore{
+				region: "us-west-2",
+				clusters: []ecsTypes.Cluster{
+					{
+						ClusterName: strptr("ec2-awsvpc-cluster"),
+						ClusterArn:  strptr("arn:aws:ecs:us-west-2:123456789012:cluster/ec2-awsvpc-cluster"),
|
||||
Status: strptr("ACTIVE"),
|
||||
},
|
||||
},
|
||||
services: []ecsTypes.Service{
|
||||
{
|
||||
ServiceName: strptr("ec2-awsvpc-service"),
|
||||
ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/ec2-awsvpc-cluster/ec2-awsvpc-service"),
|
||||
ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/ec2-awsvpc-cluster"),
|
||||
Status: strptr("ACTIVE"),
|
||||
},
|
||||
},
|
||||
tasks: []ecsTypes.Task{
|
||||
{
|
||||
TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/ec2-awsvpc-cluster/task-ec2-awsvpc"),
|
||||
ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/ec2-awsvpc-cluster"),
|
||||
TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/ec2-awsvpc-task:1"),
|
||||
Group: strptr("service:ec2-awsvpc-service"),
|
||||
LaunchType: ecsTypes.LaunchTypeEc2,
|
||||
LastStatus: strptr("RUNNING"),
|
||||
DesiredStatus: strptr("RUNNING"),
|
||||
HealthStatus: ecsTypes.HealthStatusHealthy,
|
||||
AvailabilityZone: strptr("us-west-2c"),
|
||||
ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/ec2-awsvpc-cluster/def456"),
|
||||
// Has BOTH ENI attachment AND container instance ARN - should use ENI
|
||||
Attachments: []ecsTypes.Attachment{
|
||||
{
|
||||
Type: strptr("ElasticNetworkInterface"),
|
||||
Details: []ecsTypes.KeyValuePair{
|
||||
{Name: strptr("subnetId"), Value: strptr("subnet-99999")},
|
||||
{Name: strptr("privateIPv4Address"), Value: strptr("10.0.3.200")},
|
||||
{Name: strptr("networkInterfaceId"), Value: strptr("eni-ec2-awsvpc")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
eniPublicIPs: map[string]string{
|
||||
"eni-ec2-awsvpc": "52.3.4.5",
|
||||
},
|
||||
// Container instance data - IP should NOT be used, but instance type SHOULD be used
|
||||
containerInstances: []ecsTypes.ContainerInstance{
|
||||
{
|
||||
ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/ec2-awsvpc-cluster/def456"),
|
||||
Ec2InstanceId: strptr("i-ec2awsvpcinstance"),
|
||||
Status: strptr("ACTIVE"),
|
||||
},
|
||||
},
|
||||
ec2Instances: map[string]ec2InstanceInfo{
|
||||
"i-ec2awsvpcinstance": {
|
||||
privateIP: "10.0.9.99", // This IP should NOT be used (ENI IP is used instead)
|
||||
publicIP: "54.3.4.5", // This public IP SHOULD be exposed
|
||||
subnetID: "subnet-wrong", // This subnet should NOT be used (ENI subnet is used instead)
|
||||
instanceType: "c5.2xlarge", // This instance type SHOULD be used
|
||||
tags: map[string]string{
|
||||
"Name": "ec2-awsvpc-host",
|
||||
"Owner": "team-a",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []*targetgroup.Group{
|
||||
{
|
||||
Source: "us-west-2",
|
||||
Targets: []model.LabelSet{
|
||||
{
|
||||
model.AddressLabel: model.LabelValue("10.0.3.200:80"),
|
||||
"__meta_ecs_cluster": model.LabelValue("ec2-awsvpc-cluster"),
|
||||
"__meta_ecs_cluster_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:cluster/ec2-awsvpc-cluster"),
|
||||
"__meta_ecs_service": model.LabelValue("ec2-awsvpc-service"),
|
||||
"__meta_ecs_service_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:service/ec2-awsvpc-cluster/ec2-awsvpc-service"),
|
||||
"__meta_ecs_service_status": model.LabelValue("ACTIVE"),
|
||||
"__meta_ecs_task_group": model.LabelValue("service:ec2-awsvpc-service"),
|
||||
"__meta_ecs_task_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task/ec2-awsvpc-cluster/task-ec2-awsvpc"),
|
||||
"__meta_ecs_task_definition": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task-definition/ec2-awsvpc-task:1"),
|
||||
"__meta_ecs_region": model.LabelValue("us-west-2"),
|
||||
"__meta_ecs_availability_zone": model.LabelValue("us-west-2c"),
|
||||
"__meta_ecs_ip_address": model.LabelValue("10.0.3.200"),
|
||||
"__meta_ecs_subnet_id": model.LabelValue("subnet-99999"),
|
||||
"__meta_ecs_launch_type": model.LabelValue("EC2"),
|
||||
"__meta_ecs_desired_status": model.LabelValue("RUNNING"),
|
||||
"__meta_ecs_last_status": model.LabelValue("RUNNING"),
|
||||
"__meta_ecs_health_status": model.LabelValue("HEALTHY"),
|
||||
"__meta_ecs_network_mode": model.LabelValue("awsvpc"),
|
||||
"__meta_ecs_container_instance_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:container-instance/ec2-awsvpc-cluster/def456"),
|
||||
"__meta_ecs_ec2_instance_id": model.LabelValue("i-ec2awsvpcinstance"),
|
||||
"__meta_ecs_ec2_instance_type": model.LabelValue("c5.2xlarge"),
|
||||
"__meta_ecs_ec2_instance_private_ip": model.LabelValue("10.0.9.99"),
|
||||
"__meta_ecs_ec2_instance_public_ip": model.LabelValue("54.3.4.5"),
|
||||
"__meta_ecs_public_ip": model.LabelValue("52.3.4.5"),
|
||||
"__meta_ecs_tag_ec2_Name": model.LabelValue("ec2-awsvpc-host"),
|
||||
"__meta_ecs_tag_ec2_Owner": model.LabelValue("team-a"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
-			client := newMockECSClient(tt.ecsData)
+			ecsClient := newMockECSClient(tt.ecsData)
+			ec2Client := newMockECSEC2Client(tt.ecsData.ec2Instances, tt.ecsData.eniPublicIPs)

 			d := &ECSDiscovery{
-				ecs: client,
+				ecs: ecsClient,
+				ec2: ec2Client,
cfg: &ECSSDConfig{
|
||||
Region: tt.ecsData.region,
|
||||
Port: 80,
|
||||
|
|
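The MixedNetworkingModes and EC2WithAwsvpcNetworking fixtures above pin down an address-resolution precedence: a task with an ElasticNetworkInterface attachment takes its IP and subnet from the ENI (awsvpc mode), even when it also has a container instance, while a task without an ENI falls back to its container instance's EC2 private IP. A minimal sketch of that precedence, using a hypothetical resolveTaskIP helper and an injected ARN-to-IP lookup, not the actual discovery code:

	// Sketch only - resolveTaskIP is a hypothetical helper illustrating the
	// precedence the fixtures above assert; the real logic lives in the ECS
	// discovery implementation and may differ.
	package ecssketch

	import (
		ecsTypes "github.com/aws/aws-sdk-go-v2/service/ecs/types"
	)

	// ec2PrivateIP maps a container instance ARN to its EC2 private IP; in the
	// discoverer this goes through DescribeContainerInstances and DescribeInstances.
	func resolveTaskIP(task ecsTypes.Task, ec2PrivateIP func(containerInstanceARN string) string) (ip, networkMode string) {
		// An ENI attachment wins regardless of launch type, which is why the
		// EC2WithAwsvpcNetworking case expects the ENI's 10.0.3.200 rather
		// than the instance's 10.0.9.99.
		for _, att := range task.Attachments {
			if att.Type != nil && *att.Type == "ElasticNetworkInterface" {
				for _, kv := range att.Details {
					if kv.Name != nil && *kv.Name == "privateIPv4Address" && kv.Value != nil {
						return *kv.Value, "awsvpc"
					}
				}
			}
		}
		// Otherwise fall back to the host's private IP. The sketch labels this
		// "bridge"; distinguishing bridge from host mode is elided here.
		if task.ContainerInstanceArn != nil {
			return ec2PrivateIP(*task.ContainerInstanceArn), "bridge"
		}
		return "", ""
	}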
@@ -951,3 +1293,91 @@ func (m *mockECSClient) DescribeTasks(_ context.Context, input *ecs.DescribeTask
		Tasks: tasks,
	}, nil
}

func (m *mockECSClient) DescribeContainerInstances(_ context.Context, input *ecs.DescribeContainerInstancesInput, _ ...func(*ecs.Options)) (*ecs.DescribeContainerInstancesOutput, error) {
	var containerInstances []ecsTypes.ContainerInstance
	for _, ciArn := range input.ContainerInstances {
		for _, ci := range m.ecsData.containerInstances {
			if *ci.ContainerInstanceArn == ciArn {
				containerInstances = append(containerInstances, ci)
				break
			}
		}
	}

	return &ecs.DescribeContainerInstancesOutput{
		ContainerInstances: containerInstances,
	}, nil
}

// Mock EC2 client wrapper for ECS tests.
type mockECSEC2Client struct {
	ec2Instances map[string]ec2InstanceInfo
	eniPublicIPs map[string]string
}

func newMockECSEC2Client(ec2Instances map[string]ec2InstanceInfo, eniPublicIPs map[string]string) *mockECSEC2Client {
	return &mockECSEC2Client{
		ec2Instances: ec2Instances,
		eniPublicIPs: eniPublicIPs,
	}
}

func (m *mockECSEC2Client) DescribeInstances(_ context.Context, input *ec2.DescribeInstancesInput, _ ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error) {
	var reservations []ec2Types.Reservation

	for _, instanceID := range input.InstanceIds {
		if info, ok := m.ec2Instances[instanceID]; ok {
			instance := ec2Types.Instance{
				InstanceId: &instanceID,
				PrivateIpAddress: &info.privateIP,
			}
			if info.publicIP != "" {
				instance.PublicIpAddress = &info.publicIP
			}
			if info.subnetID != "" {
				instance.SubnetId = &info.subnetID
			}
			if info.instanceType != "" {
				instance.InstanceType = ec2Types.InstanceType(info.instanceType)
			}
			// Add tags
			for tagKey, tagValue := range info.tags {
				instance.Tags = append(instance.Tags, ec2Types.Tag{
					Key: &tagKey,
					Value: &tagValue,
				})
			}
			reservation := ec2Types.Reservation{
				Instances: []ec2Types.Instance{instance},
			}
			reservations = append(reservations, reservation)
		}
	}

	return &ec2.DescribeInstancesOutput{
		Reservations: reservations,
	}, nil
}

func (m *mockECSEC2Client) DescribeNetworkInterfaces(_ context.Context, input *ec2.DescribeNetworkInterfacesInput, _ ...func(*ec2.Options)) (*ec2.DescribeNetworkInterfacesOutput, error) {
	var networkInterfaces []ec2Types.NetworkInterface

	for _, eniID := range input.NetworkInterfaceIds {
		if publicIP, ok := m.eniPublicIPs[eniID]; ok {
			eni := ec2Types.NetworkInterface{
				NetworkInterfaceId: &eniID,
			}
			if publicIP != "" {
				eni.Association = &ec2Types.NetworkInterfaceAssociation{
					PublicIp: &publicIP,
				}
			}
			networkInterfaces = append(networkInterfaces, eni)
		}
	}

	return &ec2.DescribeNetworkInterfacesOutput{
		NetworkInterfaces: networkInterfaces,
	}, nil
}
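The fixtures and mocks above lean on a strptr helper that is defined earlier in the test file, outside this excerpt; it is presumably the usual one-liner:

	// Presumed definition of strptr as used by the fixtures above; the real
	// helper lives earlier in the same test file.
	func strptr(s string) *string { return &s }

Note also that the mocks take the address of range variables (&instanceID, &tagKey, &tagValue, &eniID); this is only correct on Go 1.22 and later, where loop variables are scoped per iteration.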
@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -63,8 +63,9 @@ type DiscovererOptions struct {
 // We define them here in the "discovery" package in order to avoid a cyclic dependency between
 // "discovery" and "refresh".
 type RefreshMetrics struct {
-	Failures prometheus.Counter
-	Duration prometheus.Observer
+	Failures          prometheus.Counter
+	Duration          prometheus.Observer
+	DurationHistogram prometheus.Observer
 }

 // RefreshMetricsInstantiator instantiates the metrics used by the "refresh" package.
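The hunk above adds a DurationHistogram observer alongside the existing Duration one, so a refresh can be recorded into two observers at once. A sketch of how a refresh loop might feed the struct - timedRefresh and its wiring are assumptions for illustration, not Prometheus's actual refresh code:

	// Sketch only: shows the two observers and the failure counter being fed
	// by a single timed refresh pass.
	package refreshsketch

	import (
		"context"
		"time"

		"github.com/prometheus/client_golang/prometheus"
	)

	type RefreshMetrics struct {
		Failures          prometheus.Counter
		Duration          prometheus.Observer
		DurationHistogram prometheus.Observer
	}

	func timedRefresh(ctx context.Context, m *RefreshMetrics, refresh func(context.Context) error) error {
		start := time.Now()
		err := refresh(ctx)
		elapsed := time.Since(start).Seconds()
		m.Duration.Observe(elapsed)          // pre-existing observer
		m.DurationHistogram.Observe(elapsed) // new histogram-backed observer
		if err != nil {
			m.Failures.Inc()
		}
		return err
	}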
@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at