Mirror of https://github.com/prometheus/prometheus.git (synced 2026-02-03 20:39:32 -05:00)

Merge branch 'main' of github.com:prometheus/prometheus into fix/delete_refresh_metrics_reload

Commit 0d5a682b30: 761 changed files with 39,587 additions and 8,984 deletions
.gitattributes (vendored, new file, 1 addition)

@@ -0,0 +1 @@
+ web/api/v1/testdata/openapi_golden.yaml linguist-generated
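The new attribute marks the API golden file as generated, so GitHub collapses it in diffs and skips it in language statistics. A quick local sanity check with plain git (nothing repo-specific assumed):

```bash
# Ask git which value linguist-generated resolves to for the golden file.
git check-attr linguist-generated -- web/api/v1/testdata/openapi_golden.yaml
# Expected: web/api/v1/testdata/openapi_golden.yaml: linguist-generated: set
```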
.github/CODEOWNERS (vendored, file deleted, 10 deletions)

@@ -1,10 +0,0 @@
- /web/ui @juliusv
- /web/ui/module @juliusv @nexucis
- /storage/remote @cstyan @bwplotka @tomwilkie
- /storage/remote/otlptranslator @aknuds1 @jesusvazquez
- /discovery/kubernetes @brancz
- /tsdb @jesusvazquez
- /promql @roidelapluie
- /cmd/promtool @dgl
- /documentation/prometheus-mixin @metalmatze
-
.github/PULL_REQUEST_TEMPLATE.md (vendored, 1 change)

@@ -28,6 +28,7 @@ If no, just write "NONE" in the release-notes block below.
  Otherwise, please describe what should be mentioned in the CHANGELOG. Use the following prefixes:
  [FEATURE] [ENHANCEMENT] [PERF] [BUGFIX] [SECURITY] [CHANGE]
  Refer to the existing CHANGELOG for inspiration: https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md
  A concrete example may look as follows (be sure to leave out the surrounding quotes): "[FEATURE] API: Add /api/v1/features for clients to understand which features are supported".
  If you need help formulating your entries, consult the reviewer(s).
  -->
  ```release-notes
.github/dependabot.yml (vendored, file deleted, 33 deletions; YAML nesting reconstructed, since the extraction flattened indentation)

@@ -1,33 +0,0 @@
- version: 2
- updates:
-   - package-ecosystem: "docker"
-     directory: "/"
-     schedule:
-       interval: "monthly"
-   - package-ecosystem: "github-actions"
-     directories:
-       - "/"
-       - "/scripts"
-     schedule:
-       interval: "monthly"
-   - package-ecosystem: "gomod"
-     directories:
-       - "/"
-       - "/documentation/examples/remote_storage"
-       - "/internal/tools"
-     schedule:
-       interval: "monthly"
-     groups:
-       aws:
-         patterns:
-           - "github.com/aws/*"
-       azure:
-         patterns:
-           - "github.com/Azure/*"
-       k8s.io:
-         patterns:
-           - "k8s.io/*"
-       go.opentelemetry.io:
-         patterns:
-           - "go.opentelemetry.io/*"
-     open-pull-requests-limit: 20
.github/workflows/buf-lint.yml (vendored, 2 changes)

@@ -12,7 +12,7 @@ jobs:
  name: lint
  runs-on: ubuntu-latest
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        persist-credentials: false
    - uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0
.github/workflows/buf.yml (vendored, 4 changes)

@@ -12,7 +12,7 @@ jobs:
  runs-on: ubuntu-latest
  if: github.repository_owner == 'prometheus'
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        persist-credentials: false
    - uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0

@@ -25,7 +25,7 @@ jobs:
      with:
        input: 'prompb'
        against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD~1,subdir=prompb'
-   - uses: bufbuild/buf-push-action@a654ff18effe4641ebea4a4ce242c49800728459 # v1.1.1
+   - uses: bufbuild/buf-push-action@a654ff18effe4641ebea4a4ce242c49800728459 # v1.2.0
      with:
        input: 'prompb'
        buf_token: ${{ secrets.BUF_TOKEN }}
.github/workflows/check_release_notes.yml (vendored, 2 changes)

@@ -20,7 +20,7 @@ jobs:
  # Don't run it on dependabot PRs either as humans would take control in case a bump introduces a breaking change.
  if: (github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community') && github.event.pull_request.user.login != 'dependabot[bot]'
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
    - env:
        PR_DESCRIPTION: ${{ github.event.pull_request.body }}
      run: |
.github/workflows/ci.yml (vendored, 72 changes)

@@ -16,10 +16,10 @@ jobs:
  # should also be updated.
  image: quay.io/prometheus/golang-builder:1.25-base
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        persist-credentials: false
-   - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+   - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
    - uses: ./.github/promci/actions/setup_environment
      with:
        enable_npm: true

@@ -34,10 +34,10 @@ jobs:
  container:
    image: quay.io/prometheus/golang-builder:1.25-base
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        persist-credentials: false
-   - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+   - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
    - uses: ./.github/promci/actions/setup_environment
    - run: go test --tags=dedupelabels ./...
    - run: go test --tags=slicelabels -race ./cmd/prometheus ./model/textparse ./prompb/...

@@ -57,9 +57,9 @@ jobs:
  GOEXPERIMENT: synctest
  container:
    # The go version in this image should be N-1 wrt test_go.
-   image: quay.io/prometheus/golang-builder:1.24-base
+   image: quay.io/prometheus/golang-builder:1.25-base
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        persist-credentials: false
    - run: make build

@@ -78,10 +78,10 @@ jobs:
  image: quay.io/prometheus/golang-builder:1.25-base

  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        persist-credentials: false
-   - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+   - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
    - uses: ./.github/promci/actions/setup_environment
      with:
        enable_go: false

@@ -97,10 +97,10 @@ jobs:
  name: Go tests on Windows
  runs-on: windows-latest
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        persist-credentials: false
-   - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
+   - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
      with:
        go-version: 1.25.x
    - run: |

@@ -116,7 +116,7 @@ jobs:
  container:
    image: quay.io/prometheus/golang-builder:1.25-base
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        persist-credentials: false
    - run: go install ./cmd/promtool/.

@@ -143,10 +143,10 @@ jobs:
  matrix:
    thread: [ 0, 1, 2 ]
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        persist-credentials: false
-   - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+   - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
    - uses: ./.github/promci/actions/build
      with:
        promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"

@@ -170,10 +170,10 @@ jobs:
  # Whenever the Go version is updated here, .promu.yml
  # should also be updated.
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        persist-credentials: false
-   - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+   - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
    - uses: ./.github/promci/actions/build
      with:
        parallelism: 12

@@ -202,30 +202,32 @@ jobs:
  if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }}
  run: exit 1
  check_generated_parser:
    # Checks generated parser and UI functions list. Not renaming as it is a required check.
    name: Check generated parser
    runs-on: ubuntu-latest
    container:
      image: quay.io/prometheus/golang-builder:1.25-base
    steps:
      - name: Checkout repository
-       uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false
-     - name: Install Go
-       uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
+     - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
+     - uses: ./.github/promci/actions/setup_environment
        with:
-         cache: false
-         go-version: 1.25.x
-     - name: Run goyacc and check for diff
-       run: make install-goyacc check-generated-parser
+         enable_npm: true
+     - run: make install-goyacc check-generated-parser
+     - run: make check-generated-promql-functions
  golangci:
    name: golangci-lint
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
-       uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+       uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
        with:
          persist-credentials: false
      - name: Install Go
-       uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
+       uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
        with:
          go-version: 1.25.x
      - name: Install snmp_exporter/generator dependencies

@@ -235,18 +237,18 @@ jobs:
  id: golangci-lint-version
  run: echo "version=$(make print-golangci-lint-version)" >> $GITHUB_OUTPUT
  - name: Lint
-   uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
+   uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
    with:
      args: --verbose
      version: ${{ steps.golangci-lint-version.outputs.version }}
  - name: Lint with slicelabels
-   uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
+   uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
    with:
      # goexperiment.synctest to ensure we don't miss files that depend on it.
      args: --verbose --build-tags=slicelabels,goexperiment.synctest
      version: ${{ steps.golangci-lint-version.outputs.version }}
  - name: Lint with dedupelabels
-   uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
+   uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
    with:
      args: --verbose --build-tags=dedupelabels
      version: ${{ steps.golangci-lint-version.outputs.version }}

@@ -265,10 +267,10 @@ jobs:
  needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
  if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        persist-credentials: false
-   - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+   - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
    - uses: ./.github/promci/actions/publish_main
      with:
        docker_hub_login: ${{ secrets.docker_hub_login }}

@@ -284,10 +286,10 @@ jobs:
  ||
  (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        persist-credentials: false
-   - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+   - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
    - uses: ./.github/promci/actions/publish_release
      with:
        docker_hub_login: ${{ secrets.docker_hub_login }}

@@ -301,16 +303,16 @@ jobs:
  needs: [test_ui, codeql]
  steps:
    - name: Checkout
-     uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+     uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        persist-credentials: false
-   - uses: prometheus/promci@c0916f0a41f13444612a8f0f5e700ea34edd7c19 # v0.5.3
+   - uses: prometheus/promci@fc721ff8497a70a93a881cd552b71af7fb3a9d53 # v0.5.4
    - name: Install nodejs
-     uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
+     uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
      with:
        node-version-file: "web/ui/.nvmrc"
        registry-url: "https://registry.npmjs.org"
-   - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
+   - uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
      with:
        path: ~/.npm
        key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
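Every action bump in this file follows the pin-by-SHA convention: the `uses:` line carries a full commit hash and the human-readable tag lives only in a trailing comment. A quick way to check that a pinned hash really corresponds to its advertised tag, shown for the checkout bump above (plain `git`, no repo-specific tooling; for an annotated tag the peeled entry carries the commit):

```bash
# List the v6.0.1 tag of actions/checkout; the "^{}" (peeled) line, if
# present, is the commit SHA that workflows should pin.
git ls-remote https://github.com/actions/checkout 'refs/tags/v6.0.1*'
# The hash pinned above is 8e8c483db84b4bee98b60c0593521ed34d9990e8.
```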
.github/workflows/codeql-analysis.yml (vendored, 8 changes)

@@ -24,17 +24,17 @@ jobs:

  steps:
    - name: Checkout repository
-     uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+     uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        persist-credentials: false

    - name: Initialize CodeQL
-     uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
+     uses: github/codeql-action/init@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
      with:
        languages: ${{ matrix.language }}

    - name: Autobuild
-     uses: github/codeql-action/autobuild@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
+     uses: github/codeql-action/autobuild@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9

    - name: Perform CodeQL Analysis
-     uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v4.31.2
+     uses: github/codeql-action/analyze@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
.github/workflows/container_description.yml (vendored, 4 changes)

@@ -18,7 +18,7 @@ jobs:
  if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
  steps:
    - name: git checkout
-     uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+     uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        persist-credentials: false
    - name: Set docker hub repo name

@@ -42,7 +42,7 @@ jobs:
  if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
  steps:
    - name: git checkout
-     uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+     uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        persist-credentials: false
    - name: Set quay.io org name
.github/workflows/fuzzing.yml (vendored, 53 changes)

@@ -1,30 +1,47 @@
- name: CIFuzz
+ name: fuzzing
  on:
    workflow_call:
  permissions:
    contents: read

  jobs:
-   Fuzzing:
+   fuzzing:
+     name: Run Go Fuzz Tests
      runs-on: ubuntu-latest
+     strategy:
+       matrix:
+         fuzz_test: [FuzzParseMetricText, FuzzParseOpenMetric, FuzzParseMetricSelector, FuzzParseExpr]
      steps:
-       - name: Build Fuzzers
-         id: build
-         uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@cafd7a0eb8ecb4e007c56897996a9b65c49c972f # master
+       - name: Checkout repository
+         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
          with:
-           oss-fuzz-project-name: "prometheus"
-           dry-run: false
-       - name: Run Fuzzers
-         uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@cafd7a0eb8ecb4e007c56897996a9b65c49c972f # master
-         # Note: Regularly check for updates to the pinned commit hash at:
-         # https://github.com/google/oss-fuzz/tree/master/infra/cifuzz/actions/run_fuzzers
+           persist-credentials: false
+       - name: Install Go
+         uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
          with:
-           oss-fuzz-project-name: "prometheus"
-           fuzz-seconds: 600
-           dry-run: false
-       - name: Upload Crash
+           go-version: 1.25.x
+       - name: Run Fuzzing
+         run: go test -fuzz=${{ matrix.fuzz_test }}$ -fuzztime=5m ./util/fuzzing
+         continue-on-error: true
+         id: fuzz
+       - name: Upload Crash Artifacts
          uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
-         if: failure() && steps.build.outcome == 'success'
+         if: failure()
          with:
-           name: artifacts
-           path: ./out/artifacts
+           name: fuzz-artifacts-${{ matrix.fuzz_test }}
+           path: util/fuzzing/testdata/fuzz/${{ matrix.fuzz_test }}
+   fuzzing_status:
+     # This status check aggregates the individual matrix jobs of the fuzzing
+     # step into a final status. Fails if a single matrix job fails, succeeds if
+     # all matrix jobs succeed.
+     name: Fuzzing
+     runs-on: ubuntu-latest
+     needs: [fuzzing]
+     if: always()
+     steps:
+       - name: Successful fuzzing
+         if: ${{ !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled')) }}
+         run: exit 0
+       - name: Failing or cancelled fuzzing
+         if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }}
+         run: exit 1
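The rewrite drops the OSS-Fuzz CIFuzz actions in favour of Go's built-in fuzzer. For reference, a local run of one matrix target looks like this (paths, target names, and flags taken straight from the workflow's `run:` line):

```bash
# Run one fuzz target for five minutes, mirroring the workflow's
# `go test -fuzz=<target>$ -fuzztime=5m ./util/fuzzing` invocation.
# The trailing $ anchors the regexp so only the named target runs.
go test -fuzz='FuzzParseExpr$' -fuzztime=5m ./util/fuzzing

# Crashing inputs are saved under util/fuzzing/testdata/fuzz/FuzzParseExpr/,
# the same per-target directory the workflow uploads as a failure artifact.
```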
.github/workflows/lock.yml (vendored, 2 changes)

@@ -16,7 +16,7 @@ jobs:
  runs-on: ubuntu-latest
  if: github.repository_owner == 'prometheus'
  steps:
-   - uses: dessant/lock-threads@1bf7ec25051fe7c00bdd17e6a7cf3d7bfb7dc771 # v5.0.1
+   - uses: dessant/lock-threads@7266a7ce5c1df01b1c6db85bf8cd86c737dadbe7 # v6.0.0
      with:
        process-only: 'issues'
        issue-inactive-days: '180'
.github/workflows/repo_sync.yml (vendored, 2 changes)

@@ -14,7 +14,7 @@ jobs:
  container:
    image: quay.io/prometheus/golang-builder
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        persist-credentials: false
    - run: ./scripts/sync_repo_files.sh
.github/workflows/scorecards.yml (vendored, 6 changes)

@@ -21,7 +21,7 @@ jobs:

  steps:
    - name: "Checkout code"
-     uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2
+     uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # tag=v6.0.1
      with:
        persist-credentials: false

@@ -37,7 +37,7 @@ jobs:
  # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
  # format to the repository Actions tab.
  - name: "Upload artifact"
-   uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # tag=v5.0.0
+   uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # tag=v6.0.0
    with:
      name: SARIF file
      path: results.sarif

@@ -45,6 +45,6 @@ jobs:

  # Upload the results to GitHub's code scanning dashboard.
  - name: "Upload to code-scanning"
-   uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # tag=v4.31.2
+   uses: github/codeql-action/upload-sarif@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
    with:
      sarif_file: results.sarif
.github/workflows/stale.yml (vendored, 2 changes)

@@ -11,7 +11,7 @@ jobs:
  if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
  runs-on: ubuntu-latest
  steps:
-   - uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
+   - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
      with:
        repo-token: ${{ secrets.GITHUB_TOKEN }}
        # opt out of defaults to avoid marking issues as stale and closing them
.gitignore (vendored, 1 addition)

@@ -26,6 +26,7 @@ npm_licenses.tar.bz2

  /vendor
  /.build
+ /go.work.sum

  /**/node_modules

.golangci.yml (file header dropped by the extraction; filename inferred from the linter configuration content)

@@ -124,6 +124,8 @@ linters:
  # Disable this check for now since it introduces too many changes in our existing codebase.
  # See https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#hdr-Analyzer_omitzero for more details.
  - omitzero
+ # Disable waitgroup check until we really move to Go 1.25.
+ - waitgroup
  perfsprint:
    # Optimizes even if it requires an int or uint type cast.
    int-conversion: true

@@ -182,6 +184,11 @@ linters:
  - name: unused-receiver
  - name: var-declaration
  - name: var-naming
+   # TODO(SuperQ): See: https://github.com/prometheus/prometheus/issues/17766
+   arguments:
+     - []
+     - []
+     - - skip-package-name-checks: true
  testifylint:
    disable:
      - float-compare
.yamllint (file header dropped by the extraction; filename inferred from the yamllint configuration content)

@@ -2,6 +2,7 @@
  extends: default
  ignore: |
    **/node_modules
+   web/api/v1/testdata/openapi_*_golden.yaml

  rules:
    braces:
CHANGELOG.md (62 changes)

@@ -1,18 +1,60 @@
  # Changelog

- ## main / unreleased
+ ## 3.9.1 / 2026-01-07

- * [FEATURE] Templates: Add urlQueryEscape to template functions. #17403
- * [BUGFIX] TSDB: Register `prometheus_tsdb_sample_ooo_delta` metric properly. #17477
+ - [BUGFIX] Agent: fix crash shortly after startup from invalid type of object. #17802
+ - [BUGFIX] Scraping: fix relabel keep/drop not working. #17807
+
+ ## 3.9.0 / 2026-01-06
+
+ - [CHANGE] Native Histograms are no longer experimental! Make the `native-histogram` feature flag a no-op. Use `scrape_native_histograms` config option instead. #17528
+ - [CHANGE] API: Add maximum limit of 10,000 sets of statistics to TSDB status endpoint. #17647
+ - [FEATURE] API: Add /api/v1/features for clients to understand which features are supported. #17427
+ - [FEATURE] Promtool: Add `start_timestamp` field for unit tests. #17636
+ - [FEATURE] Promtool: Add `--format seriesjson` option to `tsdb dump` to output just series labels in JSON format. #13409
+ - [FEATURE] Add `--storage.tsdb.delay-compact-file.path` flag for better interoperability with Thanos. #17435
+ - [FEATURE] UI: Add an option on the query drop-down menu to duplicate that query panel. #17714
+ - [ENHANCEMENT]: TSDB: add flag `--storage.tsdb.block-reload-interval` to configure TSDB Block Reload Interval. #16728
+ - [ENHANCEMENT] UI: Add graph option to start the chart's Y axis at zero. #17565
+ - [ENHANCEMENT] Scraping: Classic protobuf format no longer requires the unit in the metric name. #16834
+ - [ENHANCEMENT] PromQL, Rules, SD, Scraping: Add native histograms to complement existing summaries. #17374
+ - [ENHANCEMENT] Notifications: Add a histogram `prometheus_notifications_latency_histogram_seconds` to complement the existing summary. #16637
+ - [ENHANCEMENT] Remote-write: Add custom scope support for AzureAD authentication. #17483
+ - [ENHANCEMENT] SD: add a `config` label with job name for most `prometheus_sd_refresh` metrics. #17138
+ - [ENHANCEMENT] TSDB: New histogram `prometheus_tsdb_sample_ooo_delta`, the distribution of out-of-order samples in seconds. Collected for all samples, accepted or not. #17477
+ - [ENHANCEMENT] Remote-read: Validate histograms received via remote-read. #17561
+ - [PERF] TSDB: Small optimizations to postings index. #17439
+ - [PERF] Scraping: Speed up relabelling of series. #17530
+ - [PERF] PromQL: Small optimisations in binary operators. #17524, #17519.
+ - [BUGFIX] UI: PromQL autocomplete now shows the correct type and HELP text for OpenMetrics counters whose samples end in `_total`. #17682
+ - [BUGFIX] UI: Fixed codemirror-promql incorrectly showing label completion suggestions after the closing curly brace of a vector selector. #17602
+ - [BUGFIX] UI: Query editor no longer suggests a duration unit if one is already present after a number. #17605
+ - [BUGFIX] PromQL: Fix some "vector cannot contain metrics with the same labelset" errors when experimental delayed name removal is enabled. #17678
+ - [BUGFIX] PromQL: Fix possible corruption of PromQL text if the query had an empty `ignoring()` and non-empty grouping. #17643
+ - [BUGFIX] PromQL: Fix resets/changes to return empty results for anchored selectors when all samples are outside the range. #17479
+ - [BUGFIX] PromQL: Check more consistently for many-to-one matching in filter binary operators. #17668
+ - [BUGFIX] PromQL: Fix collision in unary negation with non-overlapping series. #17708
+ - [BUGFIX] PromQL: Fix collision in label_join and label_replace with non-overlapping series. #17703
+ - [BUGFIX] PromQL: Fix bug with inconsistent results for queries with OR expression when experimental delayed name removal is enabled. #17161
+ - [BUGFIX] PromQL: Ensure that `rate`/`increase`/`delta` of histograms results in a gauge histogram. #17608
+ - [BUGFIX] PromQL: Do not panic while iterating over invalid histograms. #17559
+ - [BUGFIX] TSDB: Reject chunk files whose encoded chunk length overflows int. #17533
+ - [BUGFIX] TSDB: Do not panic during resolution reduction of invalid histograms. #17561
+ - [BUGFIX] Remote-write Receive: Avoid duplicate labels when experimental type-and-unit-label feature is enabled. #17546
+ - [BUGFIX] OTLP Receiver: Only write metadata to disk when experimental metadata-wal-records feature is enabled. #17472

  ## 3.8.1 / 2025-12-16

  * [BUGFIX] remote: Fix Remote Write receiver, so it does not send wrong response headers for v1 flow and cause Prometheus senders to emit false partial error log and metrics. #17683

  ## 3.8.0 / 2025-11-28

- * [CHANGE] Remote-write 2 (receiving): Update to [2.0-rc.4 spec](https://github.com/prometheus/docs/blob/60c24e450010df38cfcb4f65df874f6f9b26dbcb/docs/specs/prw/remote_write_spec_2_0.md). "created timestamp" (CT) is now called "start timestamp" (ST). #17411
+ * [CHANGE] Remote-write: Update receiving to [2.0-rc.4 spec](https://github.com/prometheus/docs/blob/60c24e450010df38cfcb4f65df874f6f9b26dbcb/docs/specs/prw/remote_write_spec_2_0.md). "created timestamp" (CT) is now called "start timestamp" (ST). #17411
  * [CHANGE] TSDB: Native Histogram Custom Bounds with a NaN threshold are now rejected. #17287
  * [FEATURE] OAuth2: support jwt-bearer grant-type (RFC7523 3.1). #17592
  * [FEATURE] Dockerfile: Add OpenContainers spec labels to Dockerfile. #16483
  * [FEATURE] SD: Add unified AWS service discovery for ec2, lightsail and ecs services. #17406
- * [FEATURE] Native histograms are now a stable, but optional feature, use the `scrape_native_histogram` config setting. #17232 #17315
+ * [FEATURE] Native histograms are now a stable, but optional feature, use the `scrape_native_histograms` config setting. #17232 #17315
  * [FEATURE] UI: Support anchored and smoothed keyword in promql editor. #17239
  * [FEATURE] UI: Show detailed relabeling steps for each discovered target. #17337
  * [FEATURE] Alerting: Add urlQueryEscape to template functions. #17403

@@ -237,7 +279,7 @@

  ## 3.2.1 / 2025-02-25

- * [BUGFIX] Don't send Accept` header `escape=allow-utf-8` when `metric_name_validation_scheme: legacy` is configured. #16061
+ * [BUGFIX] Don't send `Accept` header `escape=allow-utf-8` when `metric_name_validation_scheme: legacy` is configured. #16061

  ## 3.2.0 / 2025-02-17

@@ -248,10 +290,10 @@
  * [ENHANCEMENT] scrape: Add metadata for automatic metrics to WAL for `metadata-wal-records` feature. #15837
  * [ENHANCEMENT] promtool: Support linting of scrape interval, through lint option `too-long-scrape-interval`. #15719
  * [ENHANCEMENT] promtool: Add --ignore-unknown-fields option. #15706
- * [ENHANCEMENT] ui: Make "hide empty rules" and hide empty rules" persistent #15807
+ * [ENHANCEMENT] ui: Make "hide empty rules" and "hide empty rules" persistent #15807
  * [ENHANCEMENT] web/api: Add a limit parameter to `/query` and `/query_range`. #15552
  * [ENHANCEMENT] api: Add fields Node and ServerTime to `/status`. #15784
- * [PERF] Scraping: defer computing labels for dropped targets until they are needed by the UI. #15261
+ * [PERF] Scraping: defer computing labels for dropped targets until they are needed by the UI. #15261
  * [BUGFIX] remotewrite2: Fix invalid metadata bug for metrics without metadata. #15829
  * [BUGFIX] remotewrite2: Fix the unit field propagation. #15825
  * [BUGFIX] scrape: Fix WAL metadata for histograms and summaries. #15832

@@ -268,9 +310,9 @@
  * [ENHANCEMENT] TSDB: Improve calculation of space used by labels. #13880
  * [ENHANCEMENT] Rules: new metric rule_group_last_rule_duration_sum_seconds. #15672
  * [ENHANCEMENT] Observability: Export 'go_sync_mutex_wait_total_seconds_total' metric. #15339
- * [ENHANCEMEN] Remote-Write: optionally use a DNS resolver that picks a random IP. #15329
+ * [ENHANCEMENT] Remote-Write: optionally use a DNS resolver that picks a random IP. #15329
  * [PERF] Optimize `l=~".+"` matcher. #15474, #15684
- * [PERF] TSDB: Cache all symbols for compaction . #15455
+ * [PERF] TSDB: Cache all symbols for compaction. #15455
  * [PERF] TSDB: MemPostings: keep a map of label values slices. #15426
  * [PERF] Remote-Write: Remove interning hook. #15456
  * [PERF] Scrape: optimize string manipulation for experimental native histograms with custom buckets. #15453
CODEOWNERS (31 changes)

@@ -1,10 +1,29 @@
  #
  # Please keep this file in sync with the MAINTAINERS.md file!
  #

  # Prometheus team members are members of the "default maintainers" github team.
  # They are code owners by default for the whole repo.
  * @prometheus/default-maintainers

  # Example adding a dedicated maintainer for AWS SD, and also "default
  # maintainers" so that they do not need to bypass codeowners check to merge
  # something.
  # Example comes from
  # https://github.com/prometheus/prometheus/pull/17105#issuecomment-3248209452
  # /discovery/aws/ @matt-gp @prometheus/default-maintainers

  # Subsystems.
  /Makefile @prometheus/default-maintainers @simonpasquier @SuperQ
  /cmd/promtool @prometheus/default-maintainers @dgl
  /documentation/prometheus-mixin @prometheus/default-maintainers @metalmatze
  /model/histogram @prometheus/default-maintainers @beorn7 @krajorama
  /web/ui @prometheus/default-maintainers @juliusv
  /web/ui/module @prometheus/default-maintainers @juliusv @nexucis
  /promql @prometheus/default-maintainers @roidelapluie
  /storage/remote @prometheus/default-maintainers @cstyan @bwplotka @tomwilkie @alexgreenbank
  /storage/remote/otlptranslator @prometheus/default-maintainers @aknuds1 @jesusvazquez @ArthurSens
  /tsdb @prometheus/default-maintainers @jesusvazquez @codesome @bwplotka @krajorama

  # Service discovery.
  /discovery/kubernetes @prometheus/default-maintainers @brancz
  /discovery/stackit @prometheus/default-maintainers @jkroepke
  /discovery/aws/ @prometheus/default-maintainers @matt-gp @sysadmind
  # Pending
  # https://github.com/prometheus/prometheus/pull/15212#issuecomment-3575225179
  # /discovery/aliyun @prometheus/default-maintainers @KeyOfSpectator
  # https://github.com/prometheus/prometheus/pull/14108#issuecomment-2639515421
  # /discovery/nomad @prometheus/default-maintainers @jaloren @jrasell
CONTRIBUTING.md (file header dropped by the extraction; filename inferred from the hunk context)

@@ -14,7 +14,7 @@ Prometheus uses GitHub to manage reviews of pull requests.
  of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on.

  * Relevant coding style guidelines are the [Go Code Review
-   Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+   Comments](https://go.dev/wiki/CodeReviewComments)
    and the _Formatting and style_ section of Peter Bourgon's [Go: Best
    Practices for Production
    Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style).

@@ -78,8 +78,7 @@ go get example.com/some/module/pkg@vX.Y.Z
  Tidy up the `go.mod` and `go.sum` files:

  ```bash
- # The GO111MODULE variable can be omitted when the code isn't located in GOPATH.
- GO111MODULE=on go mod tidy
+ go mod tidy
  ```

  You have to commit the changes to `go.mod` and `go.sum` before submitting the pull request.
Dockerfile (file header dropped by the extraction; filename inferred from the hunk context)

@@ -9,7 +9,8 @@ LABEL org.opencontainers.image.authors="The Prometheus Authors" \
  org.opencontainers.image.source="https://github.com/prometheus/prometheus" \
  org.opencontainers.image.url="https://github.com/prometheus/prometheus" \
  org.opencontainers.image.documentation="https://prometheus.io/docs" \
- org.opencontainers.image.licenses="Apache License 2.0"
+ org.opencontainers.image.licenses="Apache License 2.0" \
+ io.prometheus.image.variant="busybox"

  ARG ARCH="amd64"
  ARG OS="linux"
Dockerfile.distroless (new file, 29 additions)

@@ -0,0 +1,29 @@
+ ARG DISTROLESS_ARCH="amd64"
+
+ # Use DISTROLESS_ARCH for base image selection (handles armv7->arm mapping).
+ FROM gcr.io/distroless/static-debian13:nonroot-${DISTROLESS_ARCH}
+ # Base image sets USER to 65532:65532 (nonroot user).
+
+ ARG ARCH="amd64"
+ ARG OS="linux"
+
+ LABEL org.opencontainers.image.authors="The Prometheus Authors"
+ LABEL org.opencontainers.image.vendor="Prometheus"
+ LABEL org.opencontainers.image.title="Prometheus"
+ LABEL org.opencontainers.image.description="The Prometheus monitoring system and time series database"
+ LABEL org.opencontainers.image.source="https://github.com/prometheus/prometheus"
+ LABEL org.opencontainers.image.url="https://github.com/prometheus/prometheus"
+ LABEL org.opencontainers.image.documentation="https://prometheus.io/docs"
+ LABEL org.opencontainers.image.licenses="Apache License 2.0"
+ LABEL io.prometheus.image.variant="distroless"
+
+ COPY documentation/examples/prometheus.yml /etc/prometheus/prometheus.yml
+ COPY LICENSE NOTICE npm_licenses.tar.bz2 /
+ COPY .build/${OS}-${ARCH}/prometheus /bin/prometheus
+ COPY .build/${OS}-${ARCH}/promtool /bin/promtool
+
+ WORKDIR /prometheus
+ EXPOSE 9090
+ ENTRYPOINT [ "/bin/prometheus" ]
+ CMD [ "--config.file=/etc/prometheus/prometheus.yml", \
+       "--storage.tsdb.path=/prometheus" ]
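A sketch of building this variant by hand, mirroring one iteration of the loop that `common-docker-%` in Makefile.common runs below. The image tag is illustrative, and binaries are assumed to already exist under .build/linux-amd64/ (e.g. from `promu crossbuild`):

```bash
# Build the distroless image for linux/amd64. DISTROLESS_ARCH exists
# because distroless base images use "arm" where promu uses "armv7";
# for amd64 the two names coincide.
docker build -t prom/prometheus-linux-amd64:dev-distroless \
  -f Dockerfile.distroless \
  --build-arg ARCH="amd64" \
  --build-arg OS="linux" \
  --build-arg DISTROLESS_ARCH="amd64" \
  .
```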
MAINTAINERS.md (file header dropped by the extraction; filename evident from the content)

@@ -1,5 +1,7 @@
  # Maintainers

+ ## Please keep this file in sync with the CODEOWNERS file!
+
  General maintainers:
  * Bryan Boreham (bjboreham@gmail.com / @bboreham)
  * Ayoub Mrini (ayoubmrini424@gmail.com / @machine424)

@@ -16,12 +18,12 @@ Maintainers for specific parts of the codebase:
  * `model/histogram` and other code related to native histograms: Björn Rabenstein (<beorn@grafana.com> / @beorn7),
    George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
  * `storage`
-   * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos ( <npazosmendez@gmail.com> / @npazosmendez), Alex Greenbank ( <alexgreenbank@yahoo.com> / @alexgreenbank)
+   * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Alex Greenbank (<alexgreenbank@yahoo.com> / @alexgreenbank)
    * `otlptranslator`: Arthur Silva Sens (<arthursens2005@gmail.com> / @ArthurSens), Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
  * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez), George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
  * `web`
    * `ui`: Julius Volz (<julius.volz@gmail.com> / @juliusv)
-     * `module`: Augustin Husson (<husson.augustin@gmail.com> @nexucis)
+     * `module`: Augustin Husson (<husson.augustin@gmail.com> / @nexucis)
  * `Makefile` and related build configuration: Simon Pasquier (<pasquier.simon@gmail.com> / @simonpasquier), Ben Kochie (<superq@gmail.com> / @SuperQ)

  For the sake of brevity, not all subtrees are explicitly listed. Due to the
Makefile (47 changes)

@@ -1,4 +1,4 @@
- # Copyright 2018 The Prometheus Authors
+ # Copyright The Prometheus Authors
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
  # You may obtain a copy of the License at

@@ -79,6 +79,20 @@ ui-lint:
  # new Mantine-based UI is fully integrated and the old app can be removed.
  	cd $(UI_PATH)/react-app && npm run lint

+ .PHONY: generate-promql-functions
+ generate-promql-functions: ui-install
+ 	@echo ">> generating PromQL function signatures"
+ 	@cd $(UI_PATH)/mantine-ui/src/promql/tools && $(GO) run ./gen_functions_list > ../functionSignatures.ts
+ 	@echo ">> generating PromQL function documentation"
+ 	@cd $(UI_PATH)/mantine-ui/src/promql/tools && $(GO) run ./gen_functions_docs $(CURDIR)/docs/querying/functions.md > ../functionDocs.tsx
+ 	@echo ">> formatting generated files"
+ 	@cd $(UI_PATH)/mantine-ui && npx prettier --write --print-width 120 src/promql/functionSignatures.ts src/promql/functionDocs.tsx
+
+ .PHONY: check-generated-promql-functions
+ check-generated-promql-functions: generate-promql-functions
+ 	@echo ">> checking generated PromQL functions"
+ 	@git diff --exit-code -- $(UI_PATH)/mantine-ui/src/promql/functionSignatures.ts $(UI_PATH)/mantine-ui/src/promql/functionDocs.tsx || (echo "Generated PromQL function files are out of date. Please run 'make generate-promql-functions' and commit the changes." && false)

  .PHONY: assets
  ifndef SKIP_UI_BUILD
  assets: check-node-version ui-install ui-build

@@ -152,15 +166,8 @@ tarball: npm_licenses common-tarball
  .PHONY: docker
  docker: npm_licenses common-docker

- plugins/plugins.go: plugins.yml plugins/generate.go
- 	@echo ">> creating plugins list"
- 	$(GO) generate -tags plugins ./plugins
-
- .PHONY: plugins
- plugins: plugins/plugins.go
-
  .PHONY: build
- build: assets npm_licenses assets-compress plugins common-build
+ build: assets npm_licenses assets-compress common-build

  .PHONY: bench_tsdb
  bench_tsdb: $(PROMU)

@@ -189,14 +196,21 @@ update-features-testdata:
  	@echo ">> updating features testdata"
  	@$(GO) test ./cmd/prometheus -run TestFeaturesAPI -update-features

+ GO_SUBMODULE_DIRS := documentation/examples/remote_storage internal/tools web/ui/mantine-ui/src/promql/tools
+
  .PHONY: update-all-go-deps
- update-all-go-deps:
- 	@$(MAKE) update-go-deps
- 	@echo ">> updating Go dependencies in ./documentation/examples/remote_storage/"
- 	@cd ./documentation/examples/remote_storage/ && for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
+ update-all-go-deps: update-go-deps
+ 	$(foreach dir,$(GO_SUBMODULE_DIRS),$(MAKE) update-go-deps-in-dir DIR=$(dir);)
+ 	@echo ">> syncing Go workspace"
+ 	@$(GO) work sync
+
+ .PHONY: update-go-deps-in-dir
+ update-go-deps-in-dir:
+ 	@echo ">> updating Go dependencies in ./$(DIR)/"
+ 	@cd ./$(DIR) && for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
  		$(GO) get $$m; \
  	done
- 	@cd ./documentation/examples/remote_storage/ && $(GO) mod tidy
+ 	@cd ./$(DIR) && $(GO) mod tidy

  .PHONY: check-node-version
  check-node-version:

@@ -206,3 +220,8 @@ check-node-version:
  bump-go-version:
  	@echo ">> bumping Go minor version"
  	@./scripts/bump_go_version.sh
+
+ .PHONY: generate-fuzzing-seed-corpus
+ generate-fuzzing-seed-corpus:
+ 	@echo ">> Generating fuzzing seed corpus"
+ 	@$(GO) generate -tags fuzzing ./util/fuzzing/corpus_gen
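Typical usage of the reworked targets, with names taken straight from the diff above (the DIR variable is what the new per-directory target expects):

```bash
# Update direct Go dependencies in the main module plus every submodule in
# GO_SUBMODULE_DIRS, then sync the Go workspace:
make update-all-go-deps

# Update a single submodule only:
make update-go-deps-in-dir DIR=internal/tools

# Regenerate the PromQL function signature/doc files for the UI and verify
# they are committed; CI runs the check via check-generated-promql-functions:
make generate-promql-functions
make check-generated-promql-functions
```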
Makefile.common (135 changes)

@@ -1,4 +1,4 @@
- # Copyright 2018 The Prometheus Authors
+ # Copyright The Prometheus Authors
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
  # You may obtain a copy of the License at

@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
  SKIP_GOLANGCI_LINT :=
  GOLANGCI_LINT :=
  GOLANGCI_LINT_OPTS ?=
- GOLANGCI_LINT_VERSION ?= v2.6.2
+ GOLANGCI_LINT_VERSION ?= v2.7.2
  GOLANGCI_FMT_OPTS ?=
  # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
  # windows isn't included here because of the path separator being different.

@@ -82,11 +82,32 @@ endif
  PREFIX ?= $(shell pwd)
  BIN_DIR ?= $(shell pwd)
  DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
- DOCKERFILE_PATH ?= ./Dockerfile
  DOCKERBUILD_CONTEXT ?= ./
  DOCKER_REPO ?= prom

+ # Check if deprecated DOCKERFILE_PATH is set
+ ifdef DOCKERFILE_PATH
+ $(error DOCKERFILE_PATH is deprecated. Use DOCKERFILE_VARIANTS ?= $(DOCKERFILE_PATH) in the Makefile)
+ endif
+
  DOCKER_ARCHS ?= amd64
+ DOCKERFILE_VARIANTS ?= Dockerfile $(wildcard Dockerfile.*)
+
+ # Function to extract variant from Dockerfile label.
+ # Returns the variant name from io.prometheus.image.variant label, or "default" if not found.
+ define dockerfile_variant
+ $(strip $(or $(shell sed -n 's/.*io\.prometheus\.image\.variant="\([^"]*\)".*/\1/p' $(1)),default))
+ endef
+
+ # Check for duplicate variant names (including default for Dockerfiles without labels).
+ DOCKERFILE_VARIANT_NAMES := $(foreach df,$(DOCKERFILE_VARIANTS),$(call dockerfile_variant,$(df)))
+ DOCKERFILE_VARIANT_NAMES_SORTED := $(sort $(DOCKERFILE_VARIANT_NAMES))
+ ifneq ($(words $(DOCKERFILE_VARIANT_NAMES)),$(words $(DOCKERFILE_VARIANT_NAMES_SORTED)))
+ $(error Duplicate variant names found. Each Dockerfile must have a unique io.prometheus.image.variant label, and only one can be without a label (default))
+ endif
+
+ # Build variant:dockerfile pairs for shell iteration.
+ DOCKERFILE_VARIANTS_WITH_NAMES := $(foreach df,$(DOCKERFILE_VARIANTS),$(call dockerfile_variant,$(df)):$(df))
+
  BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
  PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))

@@ -129,6 +150,12 @@ common-check_license:
  	echo "license header checking failed:"; echo "$${licRes}"; \
  	exit 1; \
  	fi
+ 	@echo ">> checking for copyright years 2026 or later"
+ 	@futureYearRes=$$(git grep -E 'Copyright (202[6-9]|20[3-9][0-9])' -- '*.go' ':!:vendor/*' || true); \
+ 	if [ -n "$${futureYearRes}" ]; then \
+ 		echo "Files with copyright year 2026 or later found (should use 'Copyright The Prometheus Authors'):"; echo "$${futureYearRes}"; \
+ 		exit 1; \
+ 	fi

  .PHONY: common-deps
  common-deps:

@@ -220,28 +247,110 @@ common-docker-repo-name:
  .PHONY: common-docker $(BUILD_DOCKER_ARCHS)
  common-docker: $(BUILD_DOCKER_ARCHS)
  $(BUILD_DOCKER_ARCHS): common-docker-%:
- 	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
- 		-f $(DOCKERFILE_PATH) \
- 		--build-arg ARCH="$*" \
- 		--build-arg OS="linux" \
- 		$(DOCKERBUILD_CONTEXT)
+ 	@for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \
+ 		dockerfile=$${variant#*:}; \
+ 		variant_name=$${variant%%:*}; \
+ 		distroless_arch="$*"; \
+ 		if [ "$*" = "armv7" ]; then \
+ 			distroless_arch="arm"; \
+ 		fi; \
+ 		if [ "$$dockerfile" = "Dockerfile" ]; then \
+ 			echo "Building default variant ($$variant_name) for linux-$* using $$dockerfile"; \
+ 			docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
+ 				-f $$dockerfile \
+ 				--build-arg ARCH="$*" \
+ 				--build-arg OS="linux" \
+ 				--build-arg DISTROLESS_ARCH="$$distroless_arch" \
+ 				$(DOCKERBUILD_CONTEXT); \
+ 			if [ "$$variant_name" != "default" ]; then \
+ 				echo "Tagging default variant with $$variant_name suffix"; \
+ 				docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
+ 					"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name"; \
+ 			fi; \
+ 		else \
+ 			echo "Building $$variant_name variant for linux-$* using $$dockerfile"; \
+ 			docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" \
+ 				-f $$dockerfile \
+ 				--build-arg ARCH="$*" \
+ 				--build-arg OS="linux" \
+ 				--build-arg DISTROLESS_ARCH="$$distroless_arch" \
+ 				$(DOCKERBUILD_CONTEXT); \
+ 		fi; \
+ 	done

  .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
  common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
  $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
- 	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
+ 	@for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \
+ 		dockerfile=$${variant#*:}; \
+ 		variant_name=$${variant%%:*}; \
+ 		if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \
+ 			echo "Pushing $$variant_name variant for linux-$*"; \
+ 			docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name"; \
+ 		fi; \
+ 		if [ "$$dockerfile" = "Dockerfile" ]; then \
+ 			echo "Pushing default variant ($$variant_name) for linux-$*"; \
+ 			docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"; \
+ 		fi; \
+ 		if [ "$(DOCKER_IMAGE_TAG)" = "latest" ]; then \
+ 			if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \
+ 				echo "Pushing $$variant_name variant version tags for linux-$*"; \
+ 				docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name"; \
+ 			fi; \
+ 			if [ "$$dockerfile" = "Dockerfile" ]; then \
+ 				echo "Pushing default variant version tag for linux-$*"; \
+ 				docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"; \
+ 			fi; \
+ 		fi; \
+ 	done

  DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
  .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
  common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
  $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
- 	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
- 	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
+ 	@for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \
+ 		dockerfile=$${variant#*:}; \
+ 		variant_name=$${variant%%:*}; \
+ 		if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \
+ 			echo "Tagging $$variant_name variant for linux-$* as latest"; \
+ 			docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest-$$variant_name"; \
+ 			docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name"; \
+ 		fi; \
+ 		if [ "$$dockerfile" = "Dockerfile" ]; then \
+ 			echo "Tagging default variant ($$variant_name) for linux-$* as latest"; \
+ 			docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"; \
+ 			docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"; \
+ 		fi; \
+ 	done

  .PHONY: common-docker-manifest
  common-docker-manifest:
- 	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
- 	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
+ 	@for variant in $(DOCKERFILE_VARIANTS_WITH_NAMES); do \
+ 		dockerfile=$${variant#*:}; \
+ 		variant_name=$${variant%%:*}; \
+ 		if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \
+ 			echo "Creating manifest for $$variant_name variant"; \
+ 			DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name); \
+ 			DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)-$$variant_name"; \
+ 		fi; \
+ 		if [ "$$dockerfile" = "Dockerfile" ]; then \
+ 			echo "Creating default variant ($$variant_name) manifest"; \
+ 			DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)); \
+ 			DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"; \
+ 		fi; \
+ 		if [ "$(DOCKER_IMAGE_TAG)" = "latest" ]; then \
+ 			if [ "$$dockerfile" != "Dockerfile" ] || [ "$$variant_name" != "default" ]; then \
+ 				echo "Creating manifest for $$variant_name variant version tag"; \
+ 				DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name); \
+ 				DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)-$$variant_name"; \
+ 			fi; \
+ 			if [ "$$dockerfile" = "Dockerfile" ]; then \
+ 				echo "Creating default variant version tag manifest"; \
+ 				DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):v$(DOCKER_MAJOR_VERSION_TAG)); \
+ 				DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):v$(DOCKER_MAJOR_VERSION_TAG)"; \
+ 			fi; \
+ 		fi; \
+ 	done

  .PHONY: promu
  promu: $(PROMU)
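The variant name for each Dockerfile is read from its `io.prometheus.image.variant` label with sed, falling back to "default" when the label is absent. A standalone sketch of the same extraction, runnable against the two Dockerfiles in this commit (the `variant_of` helper is illustrative, not part of the build):

```bash
# Same sed expression as the dockerfile_variant function above.
variant_of() {
  local v
  v=$(sed -n 's/.*io\.prometheus\.image\.variant="\([^"]*\)".*/\1/p' "$1")
  echo "${v:-default}"
}

variant_of Dockerfile             # -> busybox (label added in this commit)
variant_of Dockerfile.distroless  # -> distroless
variant_of /dev/null              # -> default (no label found)
```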
41
README.md
41
README.md
|
|
@@ -69,7 +69,7 @@ To build Prometheus from source code, You need:

* Go: Version specified in [go.mod](./go.mod) or greater.
* NodeJS: Version specified in [.nvmrc](./web/ui/.nvmrc) or greater.
* npm: Version 8 or greater (check with `npm --version` and [here](https://www.npmjs.com/)).
* npm: Version 10 or greater (check with `npm --version` and [here](https://www.npmjs.com/)).

Start by cloning the repository:

@@ -87,10 +87,10 @@ prometheus --config.file=your_config.yml
```

*However*, when using `go install` to build Prometheus, Prometheus will expect to be able to
read its web assets from local filesystem directories under `web/ui/static` and
`web/ui/templates`. In order for these assets to be found, you will have to run Prometheus
from the root of the cloned repository. Note also that these directories do not include the
React UI unless it has been built explicitly using `make assets` or `make build`.
read its web assets from local filesystem directories under `web/ui/static`. In order for
these assets to be found, you will have to run Prometheus from the root of the cloned
repository. Note also that this directory does not include the React UI unless it has been
built explicitly using `make assets` or `make build`.

An example of the above configuration file can be found [here.](https://github.com/prometheus/prometheus/blob/main/documentation/examples/prometheus.yml)

@@ -113,16 +113,31 @@ The Makefile provides several targets:

### Service discovery plugins

Prometheus is bundled with many service discovery plugins.
When building Prometheus from source, you can edit the [plugins.yml](./plugins.yml)
file to disable some service discoveries. The file is a yaml-formatted list of go
import path that will be built into the Prometheus binary.
Prometheus is bundled with many service discovery plugins. You can customize
which service discoveries are included in your build using Go build tags.

After you have changed the file, you
need to run `make build` again.
To exclude service discoveries when building with `make build`, add the desired
tags to the `.promu.yml` file under `build.tags.all`:

If you are using another method to compile Prometheus, `make plugins` will
generate the plugins file accordingly.
```yaml
build:
  tags:
    all:
      - netgo
      - builtinassets
      - remove_all_sd # Exclude all optional SDs
      - enable_kubernetes_sd # Re-enable only kubernetes
```

Then run `make build` as usual. Alternatively, when using `go build` directly:

```bash
go build -tags "remove_all_sd,enable_kubernetes_sd" ./cmd/prometheus
```

Available build tags:
* `remove_all_sd` - Exclude all optional service discoveries (keeps file_sd, static_sd, and http_sd)
* `enable_<name>_sd` - Re-enable a specific SD when using `remove_all_sd`

If you add out-of-tree plugins, which we do not endorse at the moment,
additional steps might be needed to adjust the `go.mod` and `go.sum` files. As
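To make the `go install` caveat above concrete, here is a minimal sketch of a workflow that keeps the web assets findable; the config path is the example file the README links to, and `make assets` is the target the README names for building the React UI:

```bash
# Sketch: build and run from the repository root so Prometheus can find
# its web assets under web/ui/static.
git clone https://github.com/prometheus/prometheus.git
cd prometheus
make assets                  # builds the React UI into web/ui/static
go install ./cmd/prometheus
prometheus --config.file=documentation/examples/prometheus.yml
```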

RELEASE.md (26 changed lines)
@@ -7,18 +7,20 @@ This page describes the release process and the currently planned schedule for u
Release cadence of first pre-releases being cut is 6 weeks.
Please see [the v2.55 RELEASE.md](https://github.com/prometheus/prometheus/blob/release-2.55/RELEASE.md) for the v2 release series schedule.

| release series | date of first pre-release (year-month-day) | release shepherd |
|----------------|--------------------------------------------|------------------------------------|
| v3.0 | 2024-11-14 | Jan Fajerski (GitHub: @jan--f) |
| v3.1 | 2024-12-17 | Bryan Boreham (GitHub: @bboreham) |
| v3.2 | 2025-01-28 | Jan Fajerski (GitHub: @jan--f) |
| v3.3 | 2025-03-11 | Ayoub Mrini (Github: @machine424) |
| v3.4 | 2025-04-29 | Jan-Otto Kröpke (Github: @jkroepke)|
| v3.5 LTS | 2025-06-03 | Bryan Boreham (GitHub: @bboreham) |
| v3.6 | 2025-08-01 | Ayoub Mrini (Github: @machine424) |
| v3.7 | 2025-09-25 | Arthur Sens and George Krajcsovits (Github: @ArthurSens and @krajorama)|
| v3.8 | 2025-11-06 | Jan Fajerski (GitHub: @jan--f) |
| v3.9 | 2025-12-18 | **volunteer welcome** |
| release series | date of first pre-release (year-month-day) | release shepherd |
|----------------|--------------------------------------------|-------------------------------------------------------------------------|
| v3.0 | 2024-11-14 | Jan Fajerski (GitHub: @jan--f) |
| v3.1 | 2024-12-17 | Bryan Boreham (GitHub: @bboreham) |
| v3.2 | 2025-01-28 | Jan Fajerski (GitHub: @jan--f) |
| v3.3 | 2025-03-11 | Ayoub Mrini (Github: @machine424) |
| v3.4 | 2025-04-29 | Jan-Otto Kröpke (Github: @jkroepke) |
| v3.5 LTS | 2025-06-03 | Bryan Boreham (GitHub: @bboreham) |
| v3.6 | 2025-08-01 | Ayoub Mrini (Github: @machine424) |
| v3.7 | 2025-09-25 | Arthur Sens and George Krajcsovits (Github: @ArthurSens and @krajorama) |
| v3.8 | 2025-11-06 | Jan Fajerski (GitHub: @jan--f) |
| v3.9 | 2025-12-18 | Bryan Boreham (GitHub: @bboreham) |
| v3.10 | 2026-02-05 | Ganesh Vernekar (Github: @codesome) |
| v3.11 | 2026-03-19 | **volunteer welcome** |

If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.

VERSION (2 changed lines)
@@ -1 +1 @@
3.8.0
3.9.1

@@ -1,4 +1,4 @@
// Copyright 2015 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -233,8 +233,10 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
        c.tsdb.EnableMemorySnapshotOnShutdown = true
        logger.Info("Experimental memory snapshot on shutdown enabled")
    case "extra-scrape-metrics":
        c.scrape.ExtraMetrics = true
        logger.Info("Experimental additional scrape metrics enabled")
        t := true
        config.DefaultConfig.GlobalConfig.ExtraScrapeMetrics = &t
        config.DefaultGlobalConfig.ExtraScrapeMetrics = &t
        logger.Warn("This option for --enable-feature is being phased out. It currently changes the default for the extra_scrape_metrics config setting to true, but will become a no-op in a future version. Stop using this option and set extra_scrape_metrics in the config instead.", "option", o)
    case "metadata-wal-records":
        c.scrape.AppendMetadata = true
        c.web.AppendMetadata = true
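The deprecation warning above points users at a plain config setting; a minimal sketch of the replacement configuration (the `extra_scrape_metrics` field is added to `GlobalConfig` later in this diff):

```yaml
# Sketch: enable the extra scrape metrics via the config file instead of
# the deprecated --enable-feature=extra-scrape-metrics flag.
global:
  extra_scrape_metrics: true
```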
@@ -263,12 +265,26 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
    case "ooo-native-histograms":
        logger.Warn("This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o)
    case "created-timestamp-zero-ingestion":
        // NOTE(bwplotka): Once AppendableV1 is removed, there will be only the TSDB and agent flags.
        c.scrape.EnableStartTimestampZeroIngestion = true
        c.web.STZeroIngestionEnabled = true
        c.tsdb.EnableSTAsZeroSample = true
        c.agent.EnableSTAsZeroSample = true

        // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
        // This is to widen the ST support surface.
        config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
        config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
        logger.Info("Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
        logger.Info("Experimental start timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
    case "st-storage":
        // TODO(bwplotka): Implement ST Storage as per PROM-60 and document this hidden feature flag.
        c.tsdb.EnableSTStorage = true
        c.agent.EnableSTStorage = true

        // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers. This is to widen the ST support surface.
        config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
        config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
        logger.Info("Experimental start timestamp storage enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
    case "delayed-compaction":
        c.tsdb.EnableDelayedCompaction = true
        logger.Info("Experimental delayed compaction is enabled.")
@@ -278,6 +294,9 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
    case "promql-extended-range-selectors":
        parser.EnableExtendedRangeSelectors = true
        logger.Info("Experimental PromQL extended range selectors enabled.")
    case "promql-binop-fill-modifiers":
        parser.EnableBinopFillModifiers = true
        logger.Info("Experimental PromQL binary operator fill modifiers enabled.")
    case "":
        continue
    case "old-ui":
@@ -486,6 +505,9 @@ func main() {
    serverOnlyFlag(a, "storage.tsdb.delay-compact-file.path", "Path to a JSON file with uploaded TSDB blocks e.g. Thanos shipper meta file. If set TSDB will only compact 1 level blocks that are marked as uploaded in that file, improving external storage integrations e.g. with Thanos sidecar. 1+ level compactions won't be delayed.").
        Default("").StringVar(&tsdbDelayCompactFilePath)

    serverOnlyFlag(a, "storage.tsdb.block-reload-interval", "Interval at which to check for new or removed blocks in storage. Users who manually backfill or drop blocks must wait up to this duration before changes become available.").
        Default("1m").Hidden().SetValue(&cfg.tsdb.BlockReloadInterval)

    agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage.").
        Default("data-agent/").StringVar(&cfg.agentStoragePath)
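For illustration, the new flag is passed like any other server flag (a sketch; the flag is hidden, so it will not appear in --help, and values below 1s are clamped to 1s later in this diff):

```bash
# Sketch: pick up manually backfilled or dropped blocks every 30s
# instead of the default 1m.
prometheus --config.file=prometheus.yml \
  --storage.tsdb.block-reload-interval=30s
```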
@@ -572,7 +594,7 @@ func main() {
    a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
        Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)

    a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr, use-uncached-io, promql-extended-range-selectors. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
    a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative, promql-duration-expr, use-uncached-io, promql-extended-range-selectors, promql-binop-fill-modifiers. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
        Default("").StringsVar(&cfg.featureList)

    a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)
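The newly listed feature name matches the parser switch added earlier in this diff; a usage sketch:

```bash
# Sketch: turn on the experimental PromQL binary operator fill modifiers.
prometheus --config.file=prometheus.yml \
  --enable-feature=promql-binop-fill-modifiers
```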
@@ -677,8 +699,13 @@ func main() {
        }
        cfg.tsdb.MaxExemplars = cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars
    }
    if cfg.tsdb.BlockReloadInterval < model.Duration(1*time.Second) {
        logger.Warn("The option --storage.tsdb.block-reload-interval is set to a value less than 1s. Setting it to 1s to avoid overload.")
        cfg.tsdb.BlockReloadInterval = model.Duration(1 * time.Second)
    }
    if cfgFile.StorageConfig.TSDBConfig != nil {
        cfg.tsdb.OutOfOrderTimeWindow = cfgFile.StorageConfig.TSDBConfig.OutOfOrderTimeWindow
        cfg.tsdb.StaleSeriesCompactionThreshold = cfgFile.StorageConfig.TSDBConfig.StaleSeriesCompactionThreshold
        if cfgFile.StorageConfig.TSDBConfig.Retention != nil {
            if cfgFile.StorageConfig.TSDBConfig.Retention.Time > 0 {
                cfg.tsdb.RetentionDuration = cfgFile.StorageConfig.TSDBConfig.Retention.Time
@@ -859,16 +886,29 @@ func main() {
        os.Exit(1)
    }

    scrapeManager, err := scrape.NewManager(
        &cfg.scrape,
        logger.With("component", "scrape manager"),
        logging.NewJSONFileLogger,
        fanoutStorage,
        prometheus.DefaultRegisterer,
    )
    if err != nil {
        logger.Error("failed to create a scrape manager", "err", err)
        os.Exit(1)
    var scrapeManager *scrape.Manager
    {
        // TODO(bwplotka): Switch to AppendableV2 by default.
        // See: https://github.com/prometheus/prometheus/issues/17632
        var (
            scrapeAppendable   storage.Appendable = fanoutStorage
            scrapeAppendableV2 storage.AppendableV2
        )
        if cfg.tsdb.EnableSTStorage {
            scrapeAppendable = nil
            scrapeAppendableV2 = fanoutStorage
        }
        scrapeManager, err = scrape.NewManager(
            &cfg.scrape,
            logger.With("component", "scrape manager"),
            logging.NewJSONFileLogger,
            scrapeAppendable, scrapeAppendableV2,
            prometheus.DefaultRegisterer,
        )
        if err != nil {
            logger.Error("failed to create a scrape manager", "err", err)
            os.Exit(1)
        }
    }

    var (
@@ -1354,6 +1394,9 @@ func main() {
            "RetentionDuration", cfg.tsdb.RetentionDuration,
            "WALSegmentSize", cfg.tsdb.WALSegmentSize,
            "WALCompressionType", cfg.tsdb.WALCompressionType,
            "BlockReloadInterval", cfg.tsdb.BlockReloadInterval,
            "EnableSTAsZeroSample", cfg.tsdb.EnableSTAsZeroSample,
            "EnableSTStorage", cfg.tsdb.EnableSTStorage,
        )

        startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000)
@@ -1410,6 +1453,8 @@ func main() {
            "MinWALTime", cfg.agent.MinWALTime,
            "MaxWALTime", cfg.agent.MaxWALTime,
            "OutOfOrderTimeWindow", cfg.agent.OutOfOrderTimeWindow,
            "EnableSTAsZeroSample", cfg.agent.EnableSTAsZeroSample,
            "EnableSTStorage", cfg.tsdb.EnableSTStorage,
        )

        localStorage.Set(db, 0)
@@ -1561,7 +1606,7 @@ func reloadConfig(filename string, enableExemplarStorage bool, logger *slog.Logg
            logger.Error("Failed to apply configuration", "err", err)
            failed = true
        }
        timingsLogger = timingsLogger.With((rl.name), time.Since(rstart))
        timingsLogger = timingsLogger.With(rl.name, time.Since(rstart))
    }
    if failed {
        return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)
@@ -1735,6 +1780,14 @@ func (s *readyStorage) Appender(ctx context.Context) storage.Appender {
    return notReadyAppender{}
}

// AppenderV2 implements the Storage interface.
func (s *readyStorage) AppenderV2(ctx context.Context) storage.AppenderV2 {
    if x := s.get(); x != nil {
        return x.AppenderV2(ctx)
    }
    return notReadyAppenderV2{}
}

type notReadyAppender struct{}

// SetOptions does nothing in this appender implementation.
@@ -1768,6 +1821,15 @@ func (notReadyAppender) Commit() error { return tsdb.ErrNotReady }

func (notReadyAppender) Rollback() error { return tsdb.ErrNotReady }

type notReadyAppenderV2 struct{}

func (notReadyAppenderV2) Append(storage.SeriesRef, labels.Labels, int64, int64, float64, *histogram.Histogram, *histogram.FloatHistogram, storage.AOptions) (storage.SeriesRef, error) {
    return 0, tsdb.ErrNotReady
}
func (notReadyAppenderV2) Commit() error { return tsdb.ErrNotReady }

func (notReadyAppenderV2) Rollback() error { return tsdb.ErrNotReady }

// Close implements the Storage interface.
func (s *readyStorage) Close() error {
    if x := s.get(); x != nil {
@@ -1911,6 +1973,10 @@ type tsdbOptions struct {
    EnableOverlappingCompaction    bool
    UseUncachedIO                  bool
    BlockCompactionExcludeFunc     tsdb.BlockExcludeFilterFunc
    BlockReloadInterval            model.Duration
    EnableSTAsZeroSample           bool
    EnableSTStorage                bool
    StaleSeriesCompactionThreshold float64
}

func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
@@ -1935,7 +2001,11 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
        EnableOverlappingCompaction:    opts.EnableOverlappingCompaction,
        UseUncachedIO:                  opts.UseUncachedIO,
        BlockCompactionExcludeFunc:     opts.BlockCompactionExcludeFunc,
        BlockReloadInterval:            time.Duration(opts.BlockReloadInterval),
        FeatureRegistry:                features.DefaultRegistry,
        EnableSTAsZeroSample:           opts.EnableSTAsZeroSample,
        EnableSTStorage:                opts.EnableSTStorage,
        StaleSeriesCompactionThreshold: opts.StaleSeriesCompactionThreshold,
    }
}
@@ -1948,7 +2018,9 @@ type agentOptions struct {
    TruncateFrequency      model.Duration
    MinWALTime, MaxWALTime model.Duration
    NoLockfile             bool
    OutOfOrderTimeWindow   int64
    OutOfOrderTimeWindow   int64 // TODO(bwplotka): Unused option, fix it or remove.
    EnableSTAsZeroSample   bool
    EnableSTStorage        bool
}

func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Options {
@@ -1964,6 +2036,8 @@ func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Option
        MaxWALTime:           durationToInt64Millis(time.Duration(opts.MaxWALTime)),
        NoLockfile:           opts.NoLockfile,
        OutOfOrderTimeWindow: outOfOrderTimeWindow,
        EnableSTAsZeroSample: opts.EnableSTAsZeroSample,
        EnableSTStorage:      opts.EnableSTStorage,
    }
}

@@ -1,4 +1,4 @@
// Copyright 2017 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2020 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2020 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -334,7 +334,8 @@ func (p *queryLogTest) run(t *testing.T) {

    p.query(t)

    ql := readQueryLog(t, queryLogFile.Name())
    // Wait for query log entry to be written (avoid race with file I/O).
    ql := waitForQueryLog(t, queryLogFile.Name(), 1)
    qc := len(ql)
    if p.exactQueryCount() {
        require.Equal(t, 1, qc)
@@ -361,7 +362,8 @@ func (p *queryLogTest) run(t *testing.T) {
    p.query(t)
    qc++

    ql = readQueryLog(t, queryLogFile.Name())
    // Wait for query log entry to be written (avoid race with file I/O).
    ql = waitForQueryLog(t, queryLogFile.Name(), qc)
    if p.exactQueryCount() {
        require.Len(t, ql, qc)
    } else {
@@ -392,7 +394,8 @@ func (p *queryLogTest) run(t *testing.T) {

    qc++

    ql = readQueryLog(t, newFile.Name())
    // Wait for query log entry to be written (avoid race with file I/O).
    ql = waitForQueryLog(t, newFile.Name(), qc)
    if p.exactQueryCount() {
        require.Len(t, ql, qc)
    } else {
@@ -404,7 +407,8 @@ func (p *queryLogTest) run(t *testing.T) {

    p.query(t)

    ql = readQueryLog(t, queryLogFile.Name())
    // Wait for query log entry to be written (avoid race with file I/O).
    ql = waitForQueryLog(t, queryLogFile.Name(), 1)
    qc = len(ql)
    if p.exactQueryCount() {
        require.Equal(t, 1, qc)
@@ -446,6 +450,18 @@ func readQueryLog(t *testing.T, path string) []queryLogLine {
    return ql
}

// waitForQueryLog waits for the query log to contain at least minEntries entries,
// polling at regular intervals until the timeout is reached.
func waitForQueryLog(t *testing.T, path string, minEntries int) []queryLogLine {
    t.Helper()
    var ql []queryLogLine
    require.Eventually(t, func() bool {
        ql = readQueryLog(t, path)
        return len(ql) >= minEntries
    }, 5*time.Second, 100*time.Millisecond, "timed out waiting for query log to have at least %d entries, got %d", minEntries, len(ql))
    return ql
}

func TestQueryLog(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping test in short mode.")

@@ -1,4 +1,4 @@
// Copyright 2024 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2024 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

cmd/prometheus/testdata/features.json (vendored, 7 changed lines)
@@ -4,6 +4,8 @@
  "exclude_alerts": true,
  "label_values_match": true,
  "lifecycle": false,
  "openapi_3.1": true,
  "openapi_3.2": true,
  "otlp_write_receiver": false,
  "query_stats": true,
  "query_warnings": true,

@@ -28,6 +30,9 @@
  "by": true,
  "delayed_name_removal": false,
  "duration_expr": false,
  "fill": false,
  "fill_left": false,
  "fill_right": false,
  "group_left": true,
  "group_right": true,
  "ignoring": true,

@@ -166,7 +171,7 @@
    "query_offset": true
  },
  "scrape": {
    "extra_scrape_metrics": false,
    "extra_scrape_metrics": true,
    "start_timestamp_zero_ingestion": false,
    "type_and_unit_labels": false
  },

@@ -1,4 +1,4 @@
// Copyright 2025 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2023 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2023 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2015 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2020 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -27,7 +27,6 @@ import (
    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/model/textparse"
    "github.com/prometheus/prometheus/tsdb"
    tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
)

func getMinAndMaxTimestamps(p textparse.Parser) (int64, int64, error) {

@@ -94,7 +93,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
        return err
    }
    defer func() {
        returnErr = tsdb_errors.NewMulti(returnErr, db.Close()).Err()
        returnErr = errors.Join(returnErr, db.Close())
    }()

    var (

@@ -125,7 +124,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
        return fmt.Errorf("block writer: %w", err)
    }
    defer func() {
        err = tsdb_errors.NewMulti(err, w.Close()).Err()
        err = errors.Join(err, w.Close())
    }()

    ctx := context.Background()

@@ -1,4 +1,4 @@
// Copyright 2020 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2015 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2015 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -162,7 +162,11 @@ func main() {
    checkRulesIgnoreUnknownFields := checkRulesCmd.Flag("ignore-unknown-fields", "Ignore unknown fields in the rule files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default.").Default("false").Bool()

    checkMetricsCmd := checkCmd.Command("metrics", checkMetricsUsage)
    checkMetricsExtended := checkCmd.Flag("extended", "Print extended information related to the cardinality of the metrics.").Bool()
    checkMetricsExtended := checkMetricsCmd.Flag("extended", "Print extended information related to the cardinality of the metrics.").Bool()
    checkMetricsLint := checkMetricsCmd.Flag(
        "lint",
        "Linting checks to apply for metrics. Available options are: all, none. Use --lint=none to disable metrics linting.",
    ).Default(lintOptionAll).String()
    agentMode := checkConfigCmd.Flag("agent", "Check config file for Prometheus in Agent mode.").Bool()

    queryCmd := app.Command("query", "Run query against a Prometheus server.")
@@ -257,12 +261,13 @@ func main() {
    listHumanReadable := tsdbListCmd.Flag("human-readable", "Print human readable values.").Short('r').Bool()
    listPath := tsdbListCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()

    tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.")
    tsdbDumpCmd := tsdbCmd.Command("dump", "Dump data (series+samples or optionally just series) from a TSDB.")
    dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
    dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end.").String()
    dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
    dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
    dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
    dumpFormat := tsdbDumpCmd.Flag("format", "Output format of the dump (prom (default) or seriesjson).").Default("prom").Enum("prom", "seriesjson")

    tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.")
    dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
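A usage sketch of the new flag (the database path is assumed; the output shape matches the dump-series testdata files added later in this diff):

```bash
# Sketch: print only the unique series label sets, one JSON object per line,
# e.g. {"__name__":"heavy_metric","foo":"bar"}
promtool tsdb dump --format=seriesjson /path/to/data

# The default format is unchanged:
promtool tsdb dump --format=prom /path/to/data
```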
@@ -374,7 +379,7 @@ func main() {
        os.Exit(CheckRules(newRulesLintConfig(*checkRulesLint, *checkRulesLintFatal, *checkRulesIgnoreUnknownFields, model.UTF8Validation), *ruleFiles...))

    case checkMetricsCmd.FullCommand():
        os.Exit(CheckMetrics(*checkMetricsExtended))
        os.Exit(CheckMetrics(*checkMetricsExtended, *checkMetricsLint))

    case pushMetricsCmd.FullCommand():
        os.Exit(PushMetrics(remoteWriteURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *pushMetricsProtoMsg, *pushMetricsLabels, *metricFiles...))
@@ -428,9 +433,14 @@ func main() {
        os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))

    case tsdbDumpCmd.FullCommand():
        os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpSandboxDirRoot, *dumpMinTime, *dumpMaxTime, *dumpMatch, formatSeriesSet)))
        format := formatSeriesSet
        if *dumpFormat == "seriesjson" {
            format = formatSeriesSetLabelsToJSON
        }
        os.Exit(checkErr(dumpTSDBData(ctx, *dumpPath, *dumpSandboxDirRoot, *dumpMinTime, *dumpMaxTime, *dumpMatch, format)))

    case tsdbDumpOpenMetricsCmd.FullCommand():
        os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
        os.Exit(checkErr(dumpTSDBData(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
    // TODO(aSquare14): Work on adding support for custom block size.
    case openMetricsImportCmd.FullCommand():
        os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration, *openMetricsLabels))
@@ -1012,36 +1022,53 @@ func ruleMetric(rule rulefmt.Rule) string {
}

var checkMetricsUsage = strings.TrimSpace(`
Pass Prometheus metrics over stdin to lint them for consistency and correctness.
Pass Prometheus metrics over stdin to lint them for consistency and correctness, and optionally perform cardinality analysis.

examples:

$ cat metrics.prom | promtool check metrics

$ curl -s http://localhost:9090/metrics | promtool check metrics
$ curl -s http://localhost:9090/metrics | promtool check metrics --extended

$ curl -s http://localhost:9100/metrics | promtool check metrics --extended --lint=none
`)

// CheckMetrics performs a linting pass on input metrics.
func CheckMetrics(extended bool) int {
    var buf bytes.Buffer
    tee := io.TeeReader(os.Stdin, &buf)
    l := promlint.New(tee)
    problems, err := l.Lint()
    if err != nil {
        fmt.Fprintln(os.Stderr, "error while linting:", err)
func CheckMetrics(extended bool, lint string) int {
    // Validate that at least one feature is enabled.
    if !extended && lint == lintOptionNone {
        fmt.Fprintln(os.Stderr, "error: at least one of --extended or linting must be enabled")
        fmt.Fprintln(os.Stderr, "Use --extended for cardinality analysis, or remove --lint=none to enable linting")
        return failureExitCode
    }

    for _, p := range problems {
        fmt.Fprintln(os.Stderr, p.Metric, p.Text)
    var buf bytes.Buffer
    var (
        problems []promlint.Problem
        reader   io.Reader
        err      error
    )

    if lint != lintOptionNone {
        tee := io.TeeReader(os.Stdin, &buf)
        l := promlint.New(tee)
        problems, err = l.Lint()
        if err != nil {
            fmt.Fprintln(os.Stderr, "error while linting:", err)
            return failureExitCode
        }
        for _, p := range problems {
            fmt.Fprintln(os.Stderr, p.Metric, p.Text)
        }
        reader = &buf
    } else {
        reader = os.Stdin
    }

    if len(problems) > 0 {
        return lintErrExitCode
    }
    hasLintProblems := len(problems) > 0

    if extended {
        stats, total, err := checkMetricsExtended(&buf)
        stats, total, err := checkMetricsExtended(reader)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return failureExitCode

@@ -1055,6 +1082,10 @@ func CheckMetrics(extended bool) int {
        w.Flush()
    }

    if hasLintProblems {
        return lintErrExitCode
    }

    return successExitCode
}
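Putting the new flag combinations together, a sketch of the resulting CLI behaviour (exit codes follow the constants used above):

```bash
# Lint only (default, as before):
cat metrics.prom | promtool check metrics

# Lint plus cardinality analysis:
curl -s http://localhost:9090/metrics | promtool check metrics --extended

# Cardinality analysis only, skipping the linter:
curl -s http://localhost:9090/metrics | promtool check metrics --extended --lint=none

# Invalid: both features disabled, exits with failureExitCode:
promtool check metrics --lint=none
```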

@@ -1,4 +1,4 @@
// Copyright 2018 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -18,6 +18,7 @@ import (
    "context"
    "errors"
    "fmt"
    "io"
    "net/http"
    "net/http/httptest"
    "net/url"
@@ -402,6 +403,99 @@ func TestCheckMetricsExtended(t *testing.T) {
    }, stats)
}

func TestCheckMetricsLintOptions(t *testing.T) {
    if runtime.GOOS == "windows" {
        t.Skip("Skipping on windows")
    }

    const testMetrics = `
# HELP testMetric_CamelCase A test metric with camelCase
# TYPE testMetric_CamelCase gauge
testMetric_CamelCase{label="value1"} 1
`

    tests := []struct {
        name        string
        lint        string
        extended    bool
        wantErrCode int
        wantLint    bool
        wantCard    bool
    }{
        {
            name:        "default_all_with_extended",
            lint:        lintOptionAll,
            extended:    true,
            wantErrCode: lintErrExitCode,
            wantLint:    true,
            wantCard:    true,
        },
        {
            name:        "lint_none_with_extended",
            lint:        lintOptionNone,
            extended:    true,
            wantErrCode: successExitCode,
            wantLint:    false,
            wantCard:    true,
        },
        {
            name:        "both_disabled_fails",
            lint:        lintOptionNone,
            extended:    false,
            wantErrCode: failureExitCode,
            wantLint:    false,
            wantCard:    false,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            r, w, err := os.Pipe()
            require.NoError(t, err)
            _, err = w.WriteString(testMetrics)
            require.NoError(t, err)
            w.Close()

            oldStdin := os.Stdin
            os.Stdin = r
            defer func() { os.Stdin = oldStdin }()

            oldStdout := os.Stdout
            oldStderr := os.Stderr
            rOut, wOut, err := os.Pipe()
            require.NoError(t, err)
            rErr, wErr, err := os.Pipe()
            require.NoError(t, err)
            os.Stdout = wOut
            os.Stderr = wErr

            code := CheckMetrics(tt.extended, tt.lint)

            wOut.Close()
            wErr.Close()
            os.Stdout = oldStdout
            os.Stderr = oldStderr

            var outBuf, errBuf bytes.Buffer
            _, _ = io.Copy(&outBuf, rOut)
            _, _ = io.Copy(&errBuf, rErr)

            require.Equal(t, tt.wantErrCode, code)
            if tt.wantLint {
                require.Contains(t, errBuf.String(), "testMetric_CamelCase")
            } else {
                require.NotContains(t, errBuf.String(), "testMetric_CamelCase")
            }

            if tt.wantCard {
                require.Contains(t, outBuf.String(), "Cardinality")
            } else {
                require.NotContains(t, outBuf.String(), "Cardinality")
            }
        })
    }
}

func TestExitCodes(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping test in short mode.")
@@ -640,7 +734,6 @@ func TestTSDBDumpCommand(t *testing.T) {
        load 1m
            metric{foo="bar"} 1 2 3
    `)
    t.Cleanup(func() { storage.Close() })

    for _, c := range []struct {
        name string

@@ -1,4 +1,4 @@
// Copyright 2023 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2025 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2023 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2020 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -15,6 +15,7 @@ package main

import (
    "context"
    "errors"
    "fmt"
    "log/slog"
    "time"

@@ -28,7 +29,6 @@ import (
    "github.com/prometheus/prometheus/rules"
    "github.com/prometheus/prometheus/storage"
    "github.com/prometheus/prometheus/tsdb"
    tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
)

const maxSamplesInMemory = 5000

@@ -143,7 +143,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
    var closed bool
    defer func() {
        if !closed {
            err = tsdb_errors.NewMulti(err, w.Close()).Err()
            err = errors.Join(err, w.Close())
        }
    }()
    app := newMultipleAppender(ctx, w)
@@ -181,7 +181,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
    if err := app.flushAndCommit(ctx); err != nil {
        return fmt.Errorf("flush and commit: %w", err)
    }
    err = tsdb_errors.NewMulti(err, w.Close()).Err()
    err = errors.Join(err, w.Close())
    closed = true
}

@@ -1,4 +1,4 @@
// Copyright 2020 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2021 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2021 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

cmd/promtool/testdata/dump-series-1.prom (vendored, new file, 3 lines)

@@ -0,0 +1,3 @@
{"__name__":"heavy_metric","foo":"bar"}
{"__name__":"heavy_metric","foo":"foo"}
{"__name__":"metric","baz":"abc","foo":"bar"}

cmd/promtool/testdata/dump-series-2.prom (vendored, new file, 2 lines)

@@ -0,0 +1,2 @@
{"__name__":"heavy_metric","foo":"foo"}
{"__name__":"metric","baz":"abc","foo":"bar"}

cmd/promtool/testdata/dump-series-3.prom (vendored, new file, 1 line)

@@ -0,0 +1 @@
{"__name__":"metric","baz":"abc","foo":"bar"}

@@ -1,4 +1,4 @@
// Copyright 2017 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -17,6 +17,7 @@ import (
    "bufio"
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io"

@@ -42,7 +43,6 @@ import (
    "github.com/prometheus/prometheus/tsdb"
    "github.com/prometheus/prometheus/tsdb/chunkenc"
    "github.com/prometheus/prometheus/tsdb/chunks"
    tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
    "github.com/prometheus/prometheus/tsdb/fileutil"
    "github.com/prometheus/prometheus/tsdb/index"
)
@@ -338,7 +338,7 @@ func listBlocks(path string, humanReadable bool) error {
        return err
    }
    defer func() {
        err = tsdb_errors.NewMulti(err, db.Close()).Err()
        err = errors.Join(err, db.Close())
    }()
    blocks, err := db.Blocks()
    if err != nil {

@@ -424,7 +424,7 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten
        return err
    }
    defer func() {
        err = tsdb_errors.NewMulti(err, db.Close()).Err()
        err = errors.Join(err, db.Close())
    }()

    meta := block.Meta()

@@ -624,7 +624,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
        return err
    }
    defer func() {
        err = tsdb_errors.NewMulti(err, chunkr.Close()).Err()
        err = errors.Join(err, chunkr.Close())
    }()

    totalChunks := 0
@@ -706,13 +706,13 @@

type SeriesSetFormatter func(series storage.SeriesSet) error

func dumpSamples(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
func dumpTSDBData(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
    db, err := tsdb.OpenDBReadOnly(dbDir, sandboxDirRoot, nil)
    if err != nil {
        return err
    }
    defer func() {
        err = tsdb_errors.NewMulti(err, db.Close()).Err()
        err = errors.Join(err, db.Close())
    }()
    q, err := db.Querier(mint, maxt)
    if err != nil {
@@ -742,7 +742,7 @@ func dumpSamples(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt i
    }

    if ws := ss.Warnings(); len(ws) > 0 {
        return tsdb_errors.NewMulti(ws.AsErrors()...).Err()
        return errors.Join(ws.AsErrors()...)
    }

    if ss.Err() != nil {
@@ -794,6 +794,30 @@ func CondensedString(ls labels.Labels) string {
    return b.String()
}

func formatSeriesSetLabelsToJSON(ss storage.SeriesSet) error {
    seriesCache := make(map[string]struct{})
    for ss.Next() {
        series := ss.At()
        lbs := series.Labels()

        b, err := json.Marshal(lbs)
        if err != nil {
            return err
        }

        if len(b) == 0 {
            continue
        }

        s := string(b)
        if _, ok := seriesCache[s]; !ok {
            fmt.Println(s)
            seriesCache[s] = struct{}{}
        }
    }
    return nil
}

func formatSeriesSetOpenMetrics(ss storage.SeriesSet) error {
    for ss.Next() {
        series := ss.At()

@@ -1,4 +1,4 @@
// Copyright 2017 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2017 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -63,7 +63,7 @@ func getDumpedSamples(t *testing.T, databasePath, sandboxDirRoot string, mint, m
    r, w, _ := os.Pipe()
    os.Stdout = w

    err := dumpSamples(
    err := dumpTSDBData(
        context.Background(),
        databasePath,
        sandboxDirRoot,

@@ -97,7 +97,6 @@ func TestTSDBDump(t *testing.T) {
        heavy_metric{foo="bar"} 5 4 3 2 1
        heavy_metric{foo="foo"} 5 4 3 2 1
    `)
    t.Cleanup(func() { storage.Close() })

    tests := []struct {
        name string
@@ -106,13 +105,15 @@ func TestTSDBDump(t *testing.T) {
        sandboxDirRoot string
        match          []string
        expectedDump   string
        expectedSeries string
    }{
        {
            name:         "default match",
            mint:         math.MinInt64,
            maxt:         math.MaxInt64,
            match:        []string{"{__name__=~'(?s:.*)'}"},
            expectedDump: "testdata/dump-test-1.prom",
            name:           "default match",
            mint:           math.MinInt64,
            maxt:           math.MaxInt64,
            match:          []string{"{__name__=~'(?s:.*)'}"},
            expectedDump:   "testdata/dump-test-1.prom",
            expectedSeries: "testdata/dump-series-1.prom",
        },
        {
            name: "default match with sandbox dir root set",
@@ -121,41 +122,47 @@ func TestTSDBDump(t *testing.T) {
            sandboxDirRoot: t.TempDir(),
            match:          []string{"{__name__=~'(?s:.*)'}"},
            expectedDump:   "testdata/dump-test-1.prom",
            expectedSeries: "testdata/dump-series-1.prom",
        },
        {
            name:         "same matcher twice",
            mint:         math.MinInt64,
            maxt:         math.MaxInt64,
            match:        []string{"{foo=~'.+'}", "{foo=~'.+'}"},
            expectedDump: "testdata/dump-test-1.prom",
            name:           "same matcher twice",
            mint:           math.MinInt64,
            maxt:           math.MaxInt64,
            match:          []string{"{foo=~'.+'}", "{foo=~'.+'}"},
            expectedDump:   "testdata/dump-test-1.prom",
            expectedSeries: "testdata/dump-series-1.prom",
        },
        {
            name:         "no duplication",
            mint:         math.MinInt64,
            maxt:         math.MaxInt64,
            match:        []string{"{__name__=~'(?s:.*)'}", "{baz='abc'}"},
            expectedDump: "testdata/dump-test-1.prom",
            name:           "no duplication",
            mint:           math.MinInt64,
            maxt:           math.MaxInt64,
            match:          []string{"{__name__=~'(?s:.*)'}", "{baz='abc'}"},
            expectedDump:   "testdata/dump-test-1.prom",
            expectedSeries: "testdata/dump-series-1.prom",
        },
        {
            name:         "well merged",
            mint:         math.MinInt64,
            maxt:         math.MaxInt64,
            match:        []string{"{__name__='heavy_metric'}", "{baz='abc'}"},
            expectedDump: "testdata/dump-test-1.prom",
            name:           "well merged",
            mint:           math.MinInt64,
            maxt:           math.MaxInt64,
            match:          []string{"{__name__='heavy_metric'}", "{baz='abc'}"},
            expectedDump:   "testdata/dump-test-1.prom",
            expectedSeries: "testdata/dump-series-1.prom",
        },
        {
            name:         "multi matchers",
            mint:         math.MinInt64,
            maxt:         math.MaxInt64,
            match:        []string{"{__name__='heavy_metric',foo='foo'}", "{__name__='metric'}"},
            expectedDump: "testdata/dump-test-2.prom",
            name:           "multi matchers",
            mint:           math.MinInt64,
            maxt:           math.MaxInt64,
            match:          []string{"{__name__='heavy_metric',foo='foo'}", "{__name__='metric'}"},
            expectedDump:   "testdata/dump-test-2.prom",
            expectedSeries: "testdata/dump-series-2.prom",
        },
        {
            name:         "with reduced mint and maxt",
            mint:         int64(60000),
            maxt:         int64(120000),
            match:        []string{"{__name__='metric'}"},
            expectedDump: "testdata/dump-test-3.prom",
            name:           "with reduced mint and maxt",
            mint:           int64(60000),
            maxt:           int64(120000),
            match:          []string{"{__name__='metric'}"},
            expectedDump:   "testdata/dump-test-3.prom",
            expectedSeries: "testdata/dump-series-3.prom",
        },
    }
    for _, tt := range tests {
@@ -166,6 +173,12 @@ func TestTSDBDump(t *testing.T) {
            expectedMetrics = normalizeNewLine(expectedMetrics)
            // Sort both, because Prometheus does not guarantee the output order.
            require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics))

            dumpedSeries := getDumpedSamples(t, storage.Dir(), tt.sandboxDirRoot, tt.mint, tt.maxt, tt.match, formatSeriesSetLabelsToJSON)
            expectedSeries, err := os.ReadFile(tt.expectedSeries)
            require.NoError(t, err)
            expectedSeries = normalizeNewLine(expectedSeries)
            require.Equal(t, sortLines(string(expectedSeries)), sortLines(dumpedSeries))
        })
    }
}
@@ -182,7 +195,6 @@ func TestTSDBDumpOpenMetrics(t *testing.T) {
        my_counter{foo="bar", baz="abc"} 1 2 3 4 5
        my_gauge{bar="foo", abc="baz"} 9 8 0 4 7
    `)
    t.Cleanup(func() { storage.Close() })

    tests := []struct {
        name string

@@ -1,4 +1,4 @@
// Copyright 2018 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2018 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2015 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -149,6 +149,10 @@ func LoadFile(filename string, agentMode bool, logger *slog.Logger) (*Config, er
    return cfg, nil
}

func boolPtr(b bool) *bool {
    return &b
}

// The defaults applied before parsing the respective config sections.
var (
    // DefaultConfig is the default top-level configuration.
@@ -158,7 +162,6 @@ var (
        OTLPConfig: DefaultOTLPConfig,
    }

    f bool
    // DefaultGlobalConfig is the default global configuration.
    DefaultGlobalConfig = GlobalConfig{
        ScrapeInterval: model.Duration(1 * time.Minute),
@@ -173,9 +176,10 @@ var (
        ScrapeProtocols: nil,
        // When the native histogram feature flag is enabled,
        // ScrapeNativeHistograms default changes to true.
        ScrapeNativeHistograms: &f,
        ScrapeNativeHistograms:         boolPtr(false),
        ConvertClassicHistogramsToNHCB: false,
        AlwaysScrapeClassicHistograms:  false,
        ExtraScrapeMetrics:             boolPtr(false),
        MetricNameValidationScheme:     model.UTF8Validation,
        MetricNameEscapingScheme:       model.AllowUTF8,
    }
@@ -513,6 +517,10 @@ type GlobalConfig struct {
    ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
    // Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
    AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"`
    // Whether to enable additional scrape metrics.
    // When enabled, Prometheus stores samples for scrape_timeout_seconds,
    // scrape_sample_limit, and scrape_body_size_bytes.
    ExtraScrapeMetrics *bool `yaml:"extra_scrape_metrics,omitempty"`
}

// ScrapeProtocol represents supported protocol for scraping metrics.
@@ -652,6 +660,9 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(any) error) error {
    if gc.ScrapeNativeHistograms == nil {
        gc.ScrapeNativeHistograms = DefaultGlobalConfig.ScrapeNativeHistograms
    }
    if gc.ExtraScrapeMetrics == nil {
        gc.ExtraScrapeMetrics = DefaultGlobalConfig.ExtraScrapeMetrics
    }
    if gc.ScrapeProtocols == nil {
        if DefaultGlobalConfig.ScrapeProtocols != nil {
            // This is the case where the defaults are set due to a feature flag.
@@ -687,7 +698,17 @@ func (c *GlobalConfig) isZero() bool {
        c.ScrapeProtocols == nil &&
        c.ScrapeNativeHistograms == nil &&
        !c.ConvertClassicHistogramsToNHCB &&
        !c.AlwaysScrapeClassicHistograms
        !c.AlwaysScrapeClassicHistograms &&
        c.BodySizeLimit == 0 &&
        c.SampleLimit == 0 &&
        c.TargetLimit == 0 &&
        c.LabelLimit == 0 &&
        c.LabelNameLengthLimit == 0 &&
        c.LabelValueLengthLimit == 0 &&
        c.KeepDroppedTargets == 0 &&
        c.MetricNameValidationScheme == model.UnsetValidation &&
        c.MetricNameEscapingScheme == "" &&
        c.ExtraScrapeMetrics == nil
}

const DefaultGoGCPercentage = 75
@@ -796,6 +817,11 @@ type ScrapeConfig struct {
    // blank in config files but must have a value if a ScrapeConfig is created
    // programmatically.
    MetricNameEscapingScheme string `yaml:"metric_name_escaping_scheme,omitempty"`
    // Whether to enable additional scrape metrics.
    // When enabled, Prometheus stores samples for scrape_timeout_seconds,
    // scrape_sample_limit, and scrape_body_size_bytes.
    // If not set (nil), inherits the value from the global configuration.
    ExtraScrapeMetrics *bool `yaml:"extra_scrape_metrics,omitempty"`

    // We cannot do proper Go type embedding below as the parser will then parse
    // values arbitrarily into the overflow maps of further-down types.
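Because the per-scrape-config field is a `*bool` that falls back to the global value when nil (see the Validate change below), per-job overrides compose naturally; a configuration sketch with assumed job names and targets:

```yaml
# Sketch: global default stays off, a single job opts in.
global:
  extra_scrape_metrics: false
scrape_configs:
  - job_name: "api"
    extra_scrape_metrics: true   # overrides the global value for this job only
    static_configs:
      - targets: ["localhost:8080"]
```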
@@ -897,6 +923,9 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
    if c.ScrapeNativeHistograms == nil {
        c.ScrapeNativeHistograms = globalConfig.ScrapeNativeHistograms
    }
    if c.ExtraScrapeMetrics == nil {
        c.ExtraScrapeMetrics = globalConfig.ExtraScrapeMetrics
    }

    if c.ScrapeProtocols == nil {
        switch {
@@ -1022,7 +1051,7 @@ func ToEscapingScheme(s string, v model.ValidationScheme) (model.EscapingScheme,
    case model.LegacyValidation:
        return model.UnderscoreEscaping, nil
    case model.UnsetValidation:
        return model.NoEscaping, fmt.Errorf("v is unset: %s", v)
        return model.NoEscaping, fmt.Errorf("ValidationScheme is unset: %s", v)
    default:
        panic(fmt.Errorf("unhandled validation scheme: %s", v))
    }
@@ -1045,6 +1074,11 @@ func (c *ScrapeConfig) AlwaysScrapeClassicHistogramsEnabled() bool {
    return c.AlwaysScrapeClassicHistograms != nil && *c.AlwaysScrapeClassicHistograms
}

// ExtraScrapeMetricsEnabled returns whether to enable extra scrape metrics.
func (c *ScrapeConfig) ExtraScrapeMetricsEnabled() bool {
    return c.ExtraScrapeMetrics != nil && *c.ExtraScrapeMetrics
}

// StorageConfig configures runtime reloadable configuration options.
type StorageConfig struct {
    TSDBConfig *TSDBConfig `yaml:"tsdb,omitempty"`
@@ -1073,6 +1107,10 @@ type TSDBConfig struct {
    // This should not be used directly and must be converted into OutOfOrderTimeWindow.
    OutOfOrderTimeWindowFlag model.Duration `yaml:"out_of_order_time_window,omitempty"`

    // StaleSeriesCompactionThreshold is a number between 0.0-1.0 indicating the % of stale series in
    // the in-memory Head block. If the % of stale series crosses this threshold, stale series compaction is run immediately.
    StaleSeriesCompactionThreshold float64 `yaml:"stale_series_compaction_threshold,omitempty"`

    Retention *TSDBRetentionConfig `yaml:"retention,omitempty"`
}
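Since `TSDBConfig` lives under the runtime-reloadable `StorageConfig` (the `storage.tsdb` section of the config file), a minimal sketch of the new setting (the 0.5 value is an illustrative choice, not a default):

```yaml
# Sketch: trigger stale series compaction once more than half of the
# in-memory Head series are stale.
storage:
  tsdb:
    stale_series_compaction_threshold: 0.5
```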

@@ -1,4 +1,4 @@
// Copyright 2017 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2015 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@ -74,10 +74,6 @@ func mustParseURL(u string) *config.URL {
|
|||
return &config.URL{URL: parsed}
|
||||
}
|
||||
|
||||
func boolPtr(b bool) *bool {
|
||||
return &b
|
||||
}
|
||||
|
||||
const (
|
||||
globBodySizeLimit = 15 * units.MiB
|
||||
globSampleLimit = 1500
|
||||
|
|
@@ -109,6 +105,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: false,
ConvertClassicHistogramsToNHCB: false,
ExtraScrapeMetrics: boolPtr(false),
MetricNameValidationScheme: model.UTF8Validation,
},

@@ -236,6 +233,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -360,6 +358,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

HTTPClientConfig: config.HTTPClientConfig{
BasicAuth: &config.BasicAuth{

@@ -470,6 +469,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -532,6 +532,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: "/metrics",
Scheme: "http",

@@ -571,6 +572,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -616,6 +618,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -661,6 +664,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -696,6 +700,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -739,6 +744,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -779,6 +785,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -826,6 +833,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -863,6 +871,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -903,6 +912,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -936,6 +946,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -972,6 +983,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: "/federate",
Scheme: DefaultScrapeConfig.Scheme,

@@ -1008,6 +1020,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -1044,6 +1057,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -1077,6 +1091,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -1118,6 +1133,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -1158,6 +1174,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -1195,6 +1212,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -1231,6 +1249,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -1271,6 +1290,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -1314,6 +1334,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(true),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -1377,6 +1398,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -1410,6 +1432,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

HTTPClientConfig: config.DefaultHTTPClientConfig,
MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -1454,6 +1477,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

HTTPClientConfig: config.DefaultHTTPClientConfig,
MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -1504,6 +1528,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -1544,6 +1569,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -1585,6 +1611,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

HTTPClientConfig: config.DefaultHTTPClientConfig,
MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -1621,6 +1648,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -1659,6 +1687,7 @@ var expectedConf = &Config{
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -1704,8 +1733,9 @@ var expectedConf = &Config{
},
StorageConfig: StorageConfig{
TSDBConfig: &TSDBConfig{
OutOfOrderTimeWindow: 30 * time.Minute.Milliseconds(),
OutOfOrderTimeWindowFlag: model.Duration(30 * time.Minute),
OutOfOrderTimeWindow: 30 * time.Minute.Milliseconds(),
OutOfOrderTimeWindowFlag: model.Duration(30 * time.Minute),
StaleSeriesCompactionThreshold: 0.5,
Retention: &TSDBRetentionConfig{
Time: model.Duration(24 * time.Hour),
Size: 1 * units.GiB,
@@ -2663,12 +2693,87 @@ func TestAgentMode(t *testing.T) {
)
}

func TestEmptyGlobalBlock(t *testing.T) {
c, err := Load("global:\n", promslog.NewNopLogger())
require.NoError(t, err)
exp := DefaultConfig
exp.loaded = true
require.Equal(t, exp, *c)
func TestGlobalConfig(t *testing.T) {
t.Run("empty block restores defaults", func(t *testing.T) {
c, err := Load("global:\n", promslog.NewNopLogger())
require.NoError(t, err)
exp := DefaultConfig
exp.loaded = true
require.Equal(t, exp, *c)
})

// Verify that isZero() correctly identifies non-zero configurations for all
// fields in GlobalConfig. This is important because isZero() is used during
// YAML unmarshaling to detect empty global blocks that should be replaced
// with defaults.
t.Run("isZero", func(t *testing.T) {
for _, tc := range []struct {
name string
config GlobalConfig
expectZero bool
}{
{
name: "empty GlobalConfig",
config: GlobalConfig{},
expectZero: true,
},
{
name: "ScrapeInterval set",
config: GlobalConfig{ScrapeInterval: model.Duration(30 * time.Second)},
expectZero: false,
},
{
name: "BodySizeLimit set",
config: GlobalConfig{BodySizeLimit: 1 * units.MiB},
expectZero: false,
},
{
name: "SampleLimit set",
config: GlobalConfig{SampleLimit: 1000},
expectZero: false,
},
{
name: "TargetLimit set",
config: GlobalConfig{TargetLimit: 500},
expectZero: false,
},
{
name: "LabelLimit set",
config: GlobalConfig{LabelLimit: 100},
expectZero: false,
},
{
name: "LabelNameLengthLimit set",
config: GlobalConfig{LabelNameLengthLimit: 50},
expectZero: false,
},
{
name: "LabelValueLengthLimit set",
config: GlobalConfig{LabelValueLengthLimit: 200},
expectZero: false,
},
{
name: "KeepDroppedTargets set",
config: GlobalConfig{KeepDroppedTargets: 10},
expectZero: false,
},
{
name: "MetricNameValidationScheme set",
config: GlobalConfig{MetricNameValidationScheme: model.LegacyValidation},
expectZero: false,
},
{
name: "MetricNameEscapingScheme set",
config: GlobalConfig{MetricNameEscapingScheme: model.EscapeUnderscores},
expectZero: false,
},
} {
t.Run(tc.name, func(t *testing.T) {
result := tc.config.isZero()
require.Equal(t, tc.expectZero, result)
})
}
})
}
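The isZero subtest above leans on the fact that `isZero()` gates whether an empty `global:` block is swapped for the package defaults during unmarshaling. A rough sketch of that pattern (simplified and hypothetical in its details; the real `UnmarshalYAML` does more work around validation):

```go
func (c *GlobalConfig) UnmarshalYAML(unmarshal func(any) error) error {
	gc := &GlobalConfig{}
	type plain GlobalConfig // avoid recursing back into this method
	if err := unmarshal((*plain)(gc)); err != nil {
		return err
	}
	if gc.isZero() {
		// "global:" with nothing under it behaves like no global block.
		*c = DefaultGlobalConfig
	} else {
		*c = *gc
	}
	return nil
}
```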
// ScrapeConfigOptions contains options for creating a scrape config.

@@ -2680,6 +2785,7 @@ type ScrapeConfigOptions struct {
ScrapeNativeHistograms bool
AlwaysScrapeClassicHistograms bool
ConvertClassicHistToNHCB bool
ExtraScrapeMetrics bool
}

func TestGetScrapeConfigs(t *testing.T) {

@@ -2713,6 +2819,7 @@ func TestGetScrapeConfigs(t *testing.T) {
ScrapeNativeHistograms: boolPtr(opts.ScrapeNativeHistograms),
AlwaysScrapeClassicHistograms: boolPtr(opts.AlwaysScrapeClassicHistograms),
ConvertClassicHistogramsToNHCB: boolPtr(opts.ConvertClassicHistToNHCB),
ExtraScrapeMetrics: boolPtr(opts.ExtraScrapeMetrics),
}
if opts.ScrapeProtocols == nil {
sc.ScrapeProtocols = DefaultScrapeProtocols

@@ -2796,6 +2903,7 @@ func TestGetScrapeConfigs(t *testing.T) {
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,

@@ -2834,6 +2942,7 @@ func TestGetScrapeConfigs(t *testing.T) {
ScrapeNativeHistograms: boolPtr(false),
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),
ExtraScrapeMetrics: boolPtr(false),

HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{

@@ -2946,6 +3055,26 @@ func TestGetScrapeConfigs(t *testing.T) {
configFile: "testdata/global_scrape_protocols_and_local_disable_scrape_native_hist.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ScrapeNativeHistograms: false, ScrapeProtocols: []ScrapeProtocol{PrometheusText0_0_4}})},
},
{
name: "A global config that enables extra scrape metrics",
configFile: "testdata/global_enable_extra_scrape_metrics.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: true})},
},
{
name: "A global config that disables extra scrape metrics",
configFile: "testdata/global_disable_extra_scrape_metrics.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: false})},
},
{
name: "A global config that disables extra scrape metrics and scrape config that enables it",
configFile: "testdata/local_enable_extra_scrape_metrics.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: true})},
},
{
name: "A global config that enables extra scrape metrics and scrape config that disables it",
configFile: "testdata/local_disable_extra_scrape_metrics.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: false})},
},
}

for _, tc := range testCases {
@@ -2962,6 +3091,99 @@ func TestGetScrapeConfigs(t *testing.T) {
}
}

func TestExtraScrapeMetrics(t *testing.T) {
tests := []struct {
name string
config string
expectGlobal *bool
expectEnabled bool
}{
{
name: "default values (not set)",
config: `
scrape_configs:
- job_name: test
static_configs:
- targets: ['localhost:9090']
`,
expectGlobal: boolPtr(false), // inherits from DefaultGlobalConfig
expectEnabled: false,
},
{
name: "global enabled",
config: `
global:
extra_scrape_metrics: true
scrape_configs:
- job_name: test
static_configs:
- targets: ['localhost:9090']
`,
expectGlobal: boolPtr(true),
expectEnabled: true,
},
{
name: "global disabled",
config: `
global:
extra_scrape_metrics: false
scrape_configs:
- job_name: test
static_configs:
- targets: ['localhost:9090']
`,
expectGlobal: boolPtr(false),
expectEnabled: false,
},
{
name: "scrape override enabled",
config: `
global:
extra_scrape_metrics: false
scrape_configs:
- job_name: test
extra_scrape_metrics: true
static_configs:
- targets: ['localhost:9090']
`,
expectGlobal: boolPtr(false),
expectEnabled: true,
},
{
name: "scrape override disabled",
config: `
global:
extra_scrape_metrics: true
scrape_configs:
- job_name: test
extra_scrape_metrics: false
static_configs:
- targets: ['localhost:9090']
`,
expectGlobal: boolPtr(true),
expectEnabled: false,
},
}

for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
cfg, err := Load(tc.config, promslog.NewNopLogger())
require.NoError(t, err)

// Check global config
require.Equal(t, tc.expectGlobal, cfg.GlobalConfig.ExtraScrapeMetrics)

// Check scrape config
scfgs, err := cfg.GetScrapeConfigs()
require.NoError(t, err)
require.Len(t, scfgs, 1)

// Check the effective value via the helper method
require.Equal(t, tc.expectEnabled, scfgs[0].ExtraScrapeMetricsEnabled())
})
}
}

func kubernetesSDHostURL() config.URL {
tURL, _ := url.Parse("https://localhost:1234")
return config.URL{URL: tURL}
@@ -1,4 +1,4 @@
// Copyright 2017 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2024 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2024 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
1 config/testdata/conf.good.yml vendored

@@ -453,6 +453,7 @@ alerting:
storage:
tsdb:
out_of_order_time_window: 30m
stale_series_compaction_threshold: 0.5
retention:
time: 1d
size: 1GB
6 config/testdata/global_disable_extra_scrape_metrics.good.yml vendored Normal file

@@ -0,0 +1,6 @@
global:
extra_scrape_metrics: false
scrape_configs:
- job_name: prometheus
static_configs:
- targets: ['localhost:8080']

6 config/testdata/global_enable_extra_scrape_metrics.good.yml vendored Normal file

@@ -0,0 +1,6 @@
global:
extra_scrape_metrics: true
scrape_configs:
- job_name: prometheus
static_configs:
- targets: ['localhost:8080']

7 config/testdata/local_disable_extra_scrape_metrics.good.yml vendored Normal file

@@ -0,0 +1,7 @@
global:
extra_scrape_metrics: true
scrape_configs:
- job_name: prometheus
static_configs:
- targets: ['localhost:8080']
extra_scrape_metrics: false

7 config/testdata/local_enable_extra_scrape_metrics.good.yml vendored Normal file

@@ -0,0 +1,7 @@
global:
extra_scrape_metrics: false
scrape_configs:
- job_name: prometheus
static_configs:
- targets: ['localhost:8080']
extra_scrape_metrics: true
@@ -50,7 +50,7 @@ file for use with `file_sd`.
The general principle with SD is to extract all the potentially useful
information we can out of the SD, and let the user choose what they need of it
using
[relabelling](https://prometheus.io/docs/operating/configuration/#<relabel_config>).
[relabelling](https://prometheus.io/docs/operating/configuration/#relabel_config).
This information is generally termed metadata.

Metadata is exposed as a set of key/value pairs (labels) per target. The keys
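As a minimal sketch of that principle (using the long-standing EC2 SD and its `__meta_ec2_*` metadata labels, purely for illustration):

```yaml
scrape_configs:
  - job_name: ec2-nodes
    ec2_sd_configs:
      - region: us-west-2
        port: 9100
    relabel_configs:
      # Keep only instances carrying a specific tag, based on SD metadata.
      - source_labels: [__meta_ec2_tag_Environment]
        regex: production
        action: keep
      # Copy one piece of metadata onto the stored series.
      - source_labels: [__meta_ec2_availability_zone]
        target_label: az
```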
@@ -101,7 +101,8 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
switch c.Role {
case RoleEC2:
if c.EC2SDConfig == nil {
c.EC2SDConfig = &DefaultEC2SDConfig
ec2Config := DefaultEC2SDConfig
c.EC2SDConfig = &ec2Config
}
c.EC2SDConfig.HTTPClientConfig = c.HTTPClientConfig
if c.Region != "" {

@@ -133,7 +134,8 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
}
case RoleECS:
if c.ECSSDConfig == nil {
c.ECSSDConfig = &DefaultECSSDConfig
ecsConfig := DefaultECSSDConfig
c.ECSSDConfig = &ecsConfig
}
c.ECSSDConfig.HTTPClientConfig = c.HTTPClientConfig
if c.Region != "" {

@@ -165,7 +167,8 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
}
case RoleLightsail:
if c.LightsailSDConfig == nil {
c.LightsailSDConfig = &DefaultLightsailSDConfig
lightsailConfig := DefaultLightsailSDConfig
c.LightsailSDConfig = &lightsailConfig
}
c.LightsailSDConfig.HTTPClientConfig = c.HTTPClientConfig
if c.Region != "" {
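The pattern being removed (`c.EC2SDConfig = &DefaultEC2SDConfig`) handed every job a pointer to the same package-level default, so later jobs mutated earlier ones; the fix copies the default first. A self-contained illustration with simplified stand-in types, not the real SD structs:

```go
package main

import "fmt"

type sdConfig struct{ Port int }

var defaultSDConfig = sdConfig{Port: 80}

func main() {
	// Buggy pattern: both jobs alias the package-level default, so the
	// second assignment clobbers the first.
	job1 := &defaultSDConfig
	job2 := &defaultSDConfig
	job1.Port = 9100
	job2.Port = 9101
	fmt.Println(job1.Port, job2.Port) // 9101 9101

	// Fixed pattern, as in the hunks above: copy the default, then take
	// the address of the copy so each job owns its own struct.
	cfg1 := defaultSDConfig
	cfg2 := defaultSDConfig
	job3, job4 := &cfg1, &cfg2
	job3.Port = 9100
	job4.Port = 9101
	fmt.Println(job3.Port, job4.Port) // 9100 9101
}
```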
@@ -20,7 +20,7 @@ import (

"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
"go.yaml.in/yaml/v3"
)

func TestRoleUnmarshalYAML(t *testing.T) {
@@ -177,3 +177,109 @@ port: 9300`,
})
}
}

// TestMultipleSDConfigsDoNotShareState verifies that multiple AWS SD configs
// don't share the same underlying configuration object. This was a bug where
// all configs pointed to the same global default, causing port and other
// settings from one job to overwrite settings in another job.
func TestMultipleSDConfigsDoNotShareState(t *testing.T) {
tests := []struct {
name string
yaml string
validateFunc func(t *testing.T, cfg1, cfg2 *SDConfig)
}{
{
name: "EC2MultipleJobsDifferentPorts",
yaml: `
- role: ec2
region: us-west-2
port: 9100
filters:
- name: tag:Name
values: [host-1]
- role: ec2
region: us-west-2
port: 9101
filters:
- name: tag:Name
values: [host-2]`,
validateFunc: func(t *testing.T, cfg1, cfg2 *SDConfig) {
require.Equal(t, RoleEC2, cfg1.Role)
require.Equal(t, RoleEC2, cfg2.Role)
require.NotNil(t, cfg1.EC2SDConfig)
require.NotNil(t, cfg2.EC2SDConfig)

// Verify ports are different and not shared
require.Equal(t, 9100, cfg1.EC2SDConfig.Port)
require.Equal(t, 9101, cfg2.EC2SDConfig.Port)

// Verify filters are different and not shared
require.Len(t, cfg1.EC2SDConfig.Filters, 1)
require.Len(t, cfg2.EC2SDConfig.Filters, 1)
require.Equal(t, []string{"host-1"}, cfg1.EC2SDConfig.Filters[0].Values)
require.Equal(t, []string{"host-2"}, cfg2.EC2SDConfig.Filters[0].Values)

// Most importantly: verify they're not the same pointer
require.NotSame(t, cfg1.EC2SDConfig, cfg2.EC2SDConfig,
"EC2SDConfig objects should not share the same memory address")
},
},
{
name: "ECSMultipleJobsDifferentPorts",
yaml: `
- role: ecs
region: us-east-1
port: 8080
clusters: [cluster-a]
- role: ecs
region: us-east-1
port: 8081
clusters: [cluster-b]`,
validateFunc: func(t *testing.T, cfg1, cfg2 *SDConfig) {
require.Equal(t, RoleECS, cfg1.Role)
require.Equal(t, RoleECS, cfg2.Role)
require.NotNil(t, cfg1.ECSSDConfig)
require.NotNil(t, cfg2.ECSSDConfig)

require.Equal(t, 8080, cfg1.ECSSDConfig.Port)
require.Equal(t, 8081, cfg2.ECSSDConfig.Port)
require.Equal(t, []string{"cluster-a"}, cfg1.ECSSDConfig.Clusters)
require.Equal(t, []string{"cluster-b"}, cfg2.ECSSDConfig.Clusters)

require.NotSame(t, cfg1.ECSSDConfig, cfg2.ECSSDConfig,
"ECSSDConfig objects should not share the same memory address")
},
},
{
name: "LightsailMultipleJobsDifferentPorts",
yaml: `
- role: lightsail
region: eu-west-1
port: 7070
- role: lightsail
region: eu-west-1
port: 7071`,
validateFunc: func(t *testing.T, cfg1, cfg2 *SDConfig) {
require.Equal(t, RoleLightsail, cfg1.Role)
require.Equal(t, RoleLightsail, cfg2.Role)
require.NotNil(t, cfg1.LightsailSDConfig)
require.NotNil(t, cfg2.LightsailSDConfig)

require.Equal(t, 7070, cfg1.LightsailSDConfig.Port)
require.Equal(t, 7071, cfg2.LightsailSDConfig.Port)

require.NotSame(t, cfg1.LightsailSDConfig, cfg2.LightsailSDConfig,
"LightsailSDConfig objects should not share the same memory address")
},
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var configs []SDConfig
require.NoError(t, yaml.Unmarshal([]byte(tt.yaml), &configs))
require.Len(t, configs, 2)
tt.validateFunc(t, &configs[0], &configs[1])
})
}
}
@@ -1,4 +1,4 @@
// Copyright 2021 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

@@ -1,4 +1,4 @@
// Copyright 2024 The Prometheus Authors
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -28,6 +28,7 @@ import (
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/aws-sdk-go-v2/service/ec2"
"github.com/aws/aws-sdk-go-v2/service/ecs"
"github.com/aws/aws-sdk-go-v2/service/ecs/types"
"github.com/aws/aws-sdk-go-v2/service/sts"

@@ -44,31 +45,37 @@ import (
)

const (
ecsLabel = model.MetaLabelPrefix + "ecs_"
ecsLabelCluster = ecsLabel + "cluster"
ecsLabelClusterARN = ecsLabel + "cluster_arn"
ecsLabelService = ecsLabel + "service"
ecsLabelServiceARN = ecsLabel + "service_arn"
ecsLabelServiceStatus = ecsLabel + "service_status"
ecsLabelTaskGroup = ecsLabel + "task_group"
ecsLabelTaskARN = ecsLabel + "task_arn"
ecsLabelTaskDefinition = ecsLabel + "task_definition"
ecsLabelRegion = ecsLabel + "region"
ecsLabelAvailabilityZone = ecsLabel + "availability_zone"
ecsLabelAZID = ecsLabel + "availability_zone_id"
ecsLabelSubnetID = ecsLabel + "subnet_id"
ecsLabelIPAddress = ecsLabel + "ip_address"
ecsLabelLaunchType = ecsLabel + "launch_type"
ecsLabelDesiredStatus = ecsLabel + "desired_status"
ecsLabelLastStatus = ecsLabel + "last_status"
ecsLabelHealthStatus = ecsLabel + "health_status"
ecsLabelPlatformFamily = ecsLabel + "platform_family"
ecsLabelPlatformVersion = ecsLabel + "platform_version"
ecsLabelTag = ecsLabel + "tag_"
ecsLabelTagCluster = ecsLabelTag + "cluster_"
ecsLabelTagService = ecsLabelTag + "service_"
ecsLabelTagTask = ecsLabelTag + "task_"
ecsLabelSeparator = ","
ecsLabel = model.MetaLabelPrefix + "ecs_"
ecsLabelCluster = ecsLabel + "cluster"
ecsLabelClusterARN = ecsLabel + "cluster_arn"
ecsLabelService = ecsLabel + "service"
ecsLabelServiceARN = ecsLabel + "service_arn"
ecsLabelServiceStatus = ecsLabel + "service_status"
ecsLabelTaskGroup = ecsLabel + "task_group"
ecsLabelTaskARN = ecsLabel + "task_arn"
ecsLabelTaskDefinition = ecsLabel + "task_definition"
ecsLabelRegion = ecsLabel + "region"
ecsLabelAvailabilityZone = ecsLabel + "availability_zone"
ecsLabelSubnetID = ecsLabel + "subnet_id"
ecsLabelIPAddress = ecsLabel + "ip_address"
ecsLabelLaunchType = ecsLabel + "launch_type"
ecsLabelDesiredStatus = ecsLabel + "desired_status"
ecsLabelLastStatus = ecsLabel + "last_status"
ecsLabelHealthStatus = ecsLabel + "health_status"
ecsLabelPlatformFamily = ecsLabel + "platform_family"
ecsLabelPlatformVersion = ecsLabel + "platform_version"
ecsLabelTag = ecsLabel + "tag_"
ecsLabelTagCluster = ecsLabelTag + "cluster_"
ecsLabelTagService = ecsLabelTag + "service_"
ecsLabelTagTask = ecsLabelTag + "task_"
ecsLabelTagEC2 = ecsLabelTag + "ec2_"
ecsLabelNetworkMode = ecsLabel + "network_mode"
ecsLabelContainerInstanceARN = ecsLabel + "container_instance_arn"
ecsLabelEC2InstanceID = ecsLabel + "ec2_instance_id"
ecsLabelEC2InstanceType = ecsLabel + "ec2_instance_type"
ecsLabelEC2InstancePrivateIP = ecsLabel + "ec2_instance_private_ip"
ecsLabelEC2InstancePublicIP = ecsLabel + "ec2_instance_public_ip"
ecsLabelPublicIP = ecsLabel + "public_ip"
)

// DefaultECSSDConfig is the default ECS SD configuration.
@@ -153,6 +160,12 @@ type ecsClient interface {
DescribeServices(context.Context, *ecs.DescribeServicesInput, ...func(*ecs.Options)) (*ecs.DescribeServicesOutput, error)
ListTasks(context.Context, *ecs.ListTasksInput, ...func(*ecs.Options)) (*ecs.ListTasksOutput, error)
DescribeTasks(context.Context, *ecs.DescribeTasksInput, ...func(*ecs.Options)) (*ecs.DescribeTasksOutput, error)
DescribeContainerInstances(context.Context, *ecs.DescribeContainerInstancesInput, ...func(*ecs.Options)) (*ecs.DescribeContainerInstancesOutput, error)
}

type ecsEC2Client interface {
DescribeInstances(context.Context, *ec2.DescribeInstancesInput, ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error)
DescribeNetworkInterfaces(context.Context, *ec2.DescribeNetworkInterfacesInput, ...func(*ec2.Options)) (*ec2.DescribeNetworkInterfacesOutput, error)
}

// ECSDiscovery periodically performs ECS-SD requests. It implements

@@ -162,6 +175,7 @@ type ECSDiscovery struct {
logger *slog.Logger
cfg *ECSSDConfig
ecs ecsClient
ec2 ecsEC2Client
}

// NewECSDiscovery returns a new ECSDiscovery which periodically refreshes its targets.

@@ -191,7 +205,7 @@ func NewECSDiscovery(conf *ECSSDConfig, opts discovery.DiscovererOptions) (*ECSD
}

func (d *ECSDiscovery) initEcsClient(ctx context.Context) error {
if d.ecs != nil {
if d.ecs != nil && d.ec2 != nil {
return nil
}

@@ -240,6 +254,10 @@ func (d *ECSDiscovery) initEcsClient(ctx context.Context) error {
options.HTTPClient = client
})

d.ec2 = ec2.NewFromConfig(cfg, func(options *ec2.Options) {
options.HTTPClient = client
})

// Test credentials by making a simple API call
testCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
@@ -458,6 +476,113 @@ func (d *ECSDiscovery) describeTasks(ctx context.Context, clusterARN string, tas
return tasks, errg.Wait()
}

// describeContainerInstances returns a map of container instance ARN to EC2 instance ID
// Uses batching to respect AWS API limits (100 container instances per request).
func (d *ECSDiscovery) describeContainerInstances(ctx context.Context, clusterARN string, containerInstanceARNs []string) (map[string]string, error) {
if len(containerInstanceARNs) == 0 {
return make(map[string]string), nil
}

containerInstToEC2 := make(map[string]string)
batchSize := 100 // AWS API limit

for _, batch := range batchSlice(containerInstanceARNs, batchSize) {
resp, err := d.ecs.DescribeContainerInstances(ctx, &ecs.DescribeContainerInstancesInput{
Cluster: aws.String(clusterARN),
ContainerInstances: batch,
})
if err != nil {
return nil, fmt.Errorf("could not describe container instances: %w", err)
}

for _, ci := range resp.ContainerInstances {
if ci.ContainerInstanceArn != nil && ci.Ec2InstanceId != nil {
containerInstToEC2[*ci.ContainerInstanceArn] = *ci.Ec2InstanceId
}
}
}

return containerInstToEC2, nil
}

// ec2InstanceInfo holds information retrieved from EC2 DescribeInstances.
type ec2InstanceInfo struct {
privateIP string
publicIP string
subnetID string
instanceType string
tags map[string]string
}

// describeEC2Instances returns a map of EC2 instance ID to instance information.
func (d *ECSDiscovery) describeEC2Instances(ctx context.Context, instanceIDs []string) (map[string]ec2InstanceInfo, error) {
if len(instanceIDs) == 0 {
return make(map[string]ec2InstanceInfo), nil
}

instanceInfo := make(map[string]ec2InstanceInfo)

resp, err := d.ec2.DescribeInstances(ctx, &ec2.DescribeInstancesInput{
InstanceIds: instanceIDs,
})
if err != nil {
return nil, fmt.Errorf("could not describe EC2 instances: %w", err)
}

for _, reservation := range resp.Reservations {
for _, instance := range reservation.Instances {
if instance.InstanceId != nil && instance.PrivateIpAddress != nil {
info := ec2InstanceInfo{
privateIP: *instance.PrivateIpAddress,
tags: make(map[string]string),
}
if instance.PublicIpAddress != nil {
info.publicIP = *instance.PublicIpAddress
}
if instance.SubnetId != nil {
info.subnetID = *instance.SubnetId
}
if instance.InstanceType != "" {
info.instanceType = string(instance.InstanceType)
}
// Collect EC2 instance tags
for _, tag := range instance.Tags {
if tag.Key != nil && tag.Value != nil {
info.tags[*tag.Key] = *tag.Value
}
}
instanceInfo[*instance.InstanceId] = info
}
}
}

return instanceInfo, nil
}

// describeNetworkInterfaces returns a map of ENI ID to public IP address.
func (d *ECSDiscovery) describeNetworkInterfaces(ctx context.Context, eniIDs []string) (map[string]string, error) {
if len(eniIDs) == 0 {
return make(map[string]string), nil
}

eniToPublicIP := make(map[string]string)

resp, err := d.ec2.DescribeNetworkInterfaces(ctx, &ec2.DescribeNetworkInterfacesInput{
NetworkInterfaceIds: eniIDs,
})
if err != nil {
return nil, fmt.Errorf("could not describe network interfaces: %w", err)
}

for _, eni := range resp.NetworkInterfaces {
if eni.NetworkInterfaceId != nil && eni.Association != nil && eni.Association.PublicIp != nil {
eniToPublicIP[*eni.NetworkInterfaceId] = *eni.Association.PublicIp
}
}

return eniToPublicIP, nil
}

func batchSlice[T any](a []T, size int) [][]T {
batches := make([][]T, 0, len(a)/size+1)
for i := 0; i < len(a); i += size {
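The hunk is cut off inside `batchSlice`; a plausible completion of such a generic chunking helper (a sketch consistent with the visible signature and call sites, not necessarily the exact body in the commit):

```go
// batchSliceSketch splits a into chunks of at most size elements.
// Hypothetical reconstruction for illustration only.
func batchSliceSketch[T any](a []T, size int) [][]T {
	batches := make([][]T, 0, len(a)/size+1)
	for i := 0; i < len(a); i += size {
		end := i + size
		if end > len(a) {
			end = len(a)
		}
		batches = append(batches, a[i:end])
	}
	return batches
}
```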
@@ -554,8 +679,76 @@ func (d *ECSDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
if tasks, exists := serviceTaskMap[serviceArn]; exists {
var serviceTargets []model.LabelSet

// Collect container instance ARNs for all EC2 tasks to get instance type
var containerInstanceARNs []string
taskToContainerInstance := make(map[string]string)
// Collect ENI IDs for awsvpc tasks to get public IPs
var eniIDs []string
taskToENI := make(map[string]string)

for _, task := range tasks {
// Find the ENI attachment to get the private IP address
// Collect container instance ARN for any task running on EC2
if task.ContainerInstanceArn != nil {
containerInstanceARNs = append(containerInstanceARNs, *task.ContainerInstanceArn)
taskToContainerInstance[*task.TaskArn] = *task.ContainerInstanceArn
}

// Collect ENI IDs from awsvpc tasks
for _, attachment := range task.Attachments {
if attachment.Type != nil && *attachment.Type == "ElasticNetworkInterface" {
for _, detail := range attachment.Details {
if detail.Name != nil && *detail.Name == "networkInterfaceId" && detail.Value != nil {
eniIDs = append(eniIDs, *detail.Value)
taskToENI[*task.TaskArn] = *detail.Value
break
}
}
break
}
}
}

// Batch describe container instances and EC2 instances to get instance type and other metadata
var containerInstToEC2 map[string]string
var ec2InstInfo map[string]ec2InstanceInfo
if len(containerInstanceARNs) > 0 {
var err error
containerInstToEC2, err = d.describeContainerInstances(ctx, clusterArn, containerInstanceARNs)
if err != nil {
d.logger.Error("Failed to describe container instances", "cluster", clusterArn, "error", err)
// Continue processing tasks
} else {
// Collect unique EC2 instance IDs
ec2InstanceIDs := make([]string, 0, len(containerInstToEC2))
for _, ec2ID := range containerInstToEC2 {
ec2InstanceIDs = append(ec2InstanceIDs, ec2ID)
}

// Batch describe EC2 instances
ec2InstInfo, err = d.describeEC2Instances(ctx, ec2InstanceIDs)
if err != nil {
d.logger.Error("Failed to describe EC2 instances", "cluster", clusterArn, "error", err)
}
}
}

// Batch describe ENIs to get public IPs for awsvpc tasks
var eniToPublicIP map[string]string
if len(eniIDs) > 0 {
var err error
eniToPublicIP, err = d.describeNetworkInterfaces(ctx, eniIDs)
if err != nil {
d.logger.Error("Failed to describe network interfaces", "cluster", clusterArn, "error", err)
// Continue processing without ENI public IPs
}
}

for _, task := range tasks {
var ipAddress, subnetID, publicIP string
var networkMode string
var ec2InstanceID, ec2InstanceType, ec2InstancePrivateIP, ec2InstancePublicIP string

// Try to get IP from ENI attachment (awsvpc mode)
var eniAttachment *types.Attachment
for _, attachment := range task.Attachments {
if attachment.Type != nil && *attachment.Type == "ElasticNetworkInterface" {
@@ -563,19 +756,65 @@ func (d *ECSDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
break
}
}
if eniAttachment == nil {
continue
}

var ipAddress, subnetID string
for _, detail := range eniAttachment.Details {
switch *detail.Name {
case "privateIPv4Address":
ipAddress = *detail.Value
case "subnetId":
subnetID = *detail.Value
if eniAttachment != nil {
// awsvpc networking mode - get IP from ENI
networkMode = "awsvpc"
for _, detail := range eniAttachment.Details {
switch *detail.Name {
case "privateIPv4Address":
ipAddress = *detail.Value
case "subnetId":
subnetID = *detail.Value
}
}
// Get public IP from ENI if available
if eniID, ok := taskToENI[*task.TaskArn]; ok {
if eniPublicIP, ok := eniToPublicIP[eniID]; ok {
publicIP = eniPublicIP
}
}
} else if task.ContainerInstanceArn != nil {
// bridge/host networking mode - need to get EC2 instance IP and subnet
networkMode = "bridge"
containerInstARN, ok := taskToContainerInstance[*task.TaskArn]
if ok {
ec2InstanceID, ok = containerInstToEC2[containerInstARN]
if ok {
info, ok := ec2InstInfo[ec2InstanceID]
if ok {
ipAddress = info.privateIP
publicIP = info.publicIP
subnetID = info.subnetID
ec2InstanceType = info.instanceType
ec2InstancePrivateIP = info.privateIP
ec2InstancePublicIP = info.publicIP
} else {
d.logger.Debug("EC2 instance info not found", "instance", ec2InstanceID, "task", *task.TaskArn)
}
} else {
d.logger.Debug("Container instance not found in map", "arn", containerInstARN, "task", *task.TaskArn)
}
}
}

// Get EC2 instance metadata for awsvpc tasks running on EC2
// We want the instance type and the host IPs for advanced use cases
if networkMode == "awsvpc" && task.ContainerInstanceArn != nil {
containerInstARN, ok := taskToContainerInstance[*task.TaskArn]
if ok {
ec2InstanceID, ok = containerInstToEC2[containerInstARN]
if ok {
info, ok := ec2InstInfo[ec2InstanceID]
if ok {
ec2InstanceType = info.instanceType
ec2InstancePrivateIP = info.privateIP
ec2InstancePublicIP = info.publicIP
}
}
}
}

if ipAddress == "" {
continue
}

@@ -589,13 +828,38 @@ func (d *ECSDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
ecsLabelTaskARN: model.LabelValue(*task.TaskArn),
ecsLabelTaskDefinition: model.LabelValue(*task.TaskDefinitionArn),
ecsLabelIPAddress: model.LabelValue(ipAddress),
ecsLabelSubnetID: model.LabelValue(subnetID),
ecsLabelRegion: model.LabelValue(d.cfg.Region),
ecsLabelLaunchType: model.LabelValue(task.LaunchType),
ecsLabelAvailabilityZone: model.LabelValue(*task.AvailabilityZone),
ecsLabelDesiredStatus: model.LabelValue(*task.DesiredStatus),
ecsLabelLastStatus: model.LabelValue(*task.LastStatus),
ecsLabelHealthStatus: model.LabelValue(task.HealthStatus),
ecsLabelNetworkMode: model.LabelValue(networkMode),
}

// Add subnet ID when available (awsvpc mode from ENI, bridge/host from EC2 instance)
if subnetID != "" {
labels[ecsLabelSubnetID] = model.LabelValue(subnetID)
}

// Add container instance and EC2 instance info for EC2 launch type
if task.ContainerInstanceArn != nil {
labels[ecsLabelContainerInstanceARN] = model.LabelValue(*task.ContainerInstanceArn)
}
if ec2InstanceID != "" {
labels[ecsLabelEC2InstanceID] = model.LabelValue(ec2InstanceID)
}
if ec2InstanceType != "" {
labels[ecsLabelEC2InstanceType] = model.LabelValue(ec2InstanceType)
}
if ec2InstancePrivateIP != "" {
labels[ecsLabelEC2InstancePrivateIP] = model.LabelValue(ec2InstancePrivateIP)
}
if ec2InstancePublicIP != "" {
labels[ecsLabelEC2InstancePublicIP] = model.LabelValue(ec2InstancePublicIP)
}
if publicIP != "" {
labels[ecsLabelPublicIP] = model.LabelValue(publicIP)
}

if task.PlatformFamily != nil {

@@ -634,6 +898,15 @@ func (d *ECSDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
}
}

// Add EC2 instance tags (if running on EC2)
if ec2InstanceID != "" {
if info, ok := ec2InstInfo[ec2InstanceID]; ok {
for tagKey, tagValue := range info.tags {
labels[model.LabelName(ecsLabelTagEC2+strutil.SanitizeLabelName(tagKey))] = model.LabelValue(tagValue)
}
}
}

serviceTargets = append(serviceTargets, labels)
}
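With the new labels attached, standard relabelling can act on them; for example, a hedged sketch that points scraping at a task's public IP (assuming the new `__meta_ecs_public_ip` label is present, and with port 9100 as an assumed exporter port):

```yaml
relabel_configs:
  - source_labels: [__meta_ecs_public_ip]
    regex: (.+)
    target_label: __address__
    replacement: ${1}:9100
```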
@@ -17,6 +17,8 @@ import (
"context"
"testing"

"github.com/aws/aws-sdk-go-v2/service/ec2"
ec2Types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
"github.com/aws/aws-sdk-go-v2/service/ecs"
ecsTypes "github.com/aws/aws-sdk-go-v2/service/ecs/types"
"github.com/prometheus/common/model"
@@ -29,9 +31,12 @@ import (
type ecsDataStore struct {
region string

clusters []ecsTypes.Cluster
services []ecsTypes.Service
tasks []ecsTypes.Task
clusters []ecsTypes.Cluster
services []ecsTypes.Service
tasks []ecsTypes.Task
containerInstances []ecsTypes.ContainerInstance
ec2Instances map[string]ec2InstanceInfo // EC2 instance ID to instance info
eniPublicIPs map[string]string // ENI ID to public IP
}

func TestECSDiscoveryListClusterARNs(t *testing.T) {
@@ -716,6 +721,7 @@ func TestECSDiscoveryRefresh(t *testing.T) {
Details: []ecsTypes.KeyValuePair{
{Name: strptr("subnetId"), Value: strptr("subnet-12345")},
{Name: strptr("privateIPv4Address"), Value: strptr("10.0.1.100")},
{Name: strptr("networkInterfaceId"), Value: strptr("eni-fargate-123")},
},
},
},

@@ -724,6 +730,9 @@ func TestECSDiscoveryRefresh(t *testing.T) {
},
},
},
eniPublicIPs: map[string]string{
"eni-fargate-123": "52.1.2.3",
},
},
expected: []*targetgroup.Group{
{

@@ -749,6 +758,8 @@ func TestECSDiscoveryRefresh(t *testing.T) {
"__meta_ecs_health_status": model.LabelValue("HEALTHY"),
"__meta_ecs_platform_family": model.LabelValue("Linux"),
"__meta_ecs_platform_version": model.LabelValue("1.4.0"),
"__meta_ecs_network_mode": model.LabelValue("awsvpc"),
"__meta_ecs_public_ip": model.LabelValue("52.1.2.3"),
"__meta_ecs_tag_cluster_Environment": model.LabelValue("test"),
"__meta_ecs_tag_service_App": model.LabelValue("web"),
"__meta_ecs_tag_task_Version": model.LabelValue("v1.0"),
@@ -825,14 +836,345 @@ func TestECSDiscoveryRefresh(t *testing.T) {
},
},
},
{
name: "TaskWithBridgeNetworking",
ecsData: &ecsDataStore{
region: "us-west-2",
clusters: []ecsTypes.Cluster{
{
ClusterName: strptr("test-cluster"),
ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
Status: strptr("ACTIVE"),
},
},
services: []ecsTypes.Service{
{
ServiceName: strptr("bridge-service"),
ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/bridge-service"),
ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
Status: strptr("ACTIVE"),
},
},
tasks: []ecsTypes.Task{
{
TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-bridge"),
ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/bridge-task:1"),
Group: strptr("service:bridge-service"),
LaunchType: ecsTypes.LaunchTypeEc2,
LastStatus: strptr("RUNNING"),
DesiredStatus: strptr("RUNNING"),
HealthStatus: ecsTypes.HealthStatusHealthy,
AvailabilityZone: strptr("us-west-2a"),
ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"),
Attachments: []ecsTypes.Attachment{},
},
},
containerInstances: []ecsTypes.ContainerInstance{
{
ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"),
Ec2InstanceId: strptr("i-1234567890abcdef0"),
Status: strptr("ACTIVE"),
},
},
ec2Instances: map[string]ec2InstanceInfo{
"i-1234567890abcdef0": {
privateIP: "10.0.1.50",
publicIP: "54.1.2.3",
subnetID: "subnet-bridge-1",
instanceType: "t3.medium",
tags: map[string]string{
"Name": "ecs-host-1",
"Environment": "production",
},
},
},
},
expected: []*targetgroup.Group{
{
Source: "us-west-2",
Targets: []model.LabelSet{
{
model.AddressLabel: model.LabelValue("10.0.1.50:80"),
"__meta_ecs_cluster": model.LabelValue("test-cluster"),
"__meta_ecs_cluster_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:cluster/test-cluster"),
"__meta_ecs_service": model.LabelValue("bridge-service"),
"__meta_ecs_service_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:service/test-cluster/bridge-service"),
"__meta_ecs_service_status": model.LabelValue("ACTIVE"),
"__meta_ecs_task_group": model.LabelValue("service:bridge-service"),
"__meta_ecs_task_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task/test-cluster/task-bridge"),
"__meta_ecs_task_definition": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task-definition/bridge-task:1"),
"__meta_ecs_region": model.LabelValue("us-west-2"),
"__meta_ecs_availability_zone": model.LabelValue("us-west-2a"),
"__meta_ecs_ip_address": model.LabelValue("10.0.1.50"),
"__meta_ecs_subnet_id": model.LabelValue("subnet-bridge-1"),
"__meta_ecs_launch_type": model.LabelValue("EC2"),
"__meta_ecs_desired_status": model.LabelValue("RUNNING"),
"__meta_ecs_last_status": model.LabelValue("RUNNING"),
"__meta_ecs_health_status": model.LabelValue("HEALTHY"),
"__meta_ecs_network_mode": model.LabelValue("bridge"),
"__meta_ecs_container_instance_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:container-instance/test-cluster/abc123"),
"__meta_ecs_ec2_instance_id": model.LabelValue("i-1234567890abcdef0"),
"__meta_ecs_ec2_instance_type": model.LabelValue("t3.medium"),
"__meta_ecs_ec2_instance_private_ip": model.LabelValue("10.0.1.50"),
"__meta_ecs_ec2_instance_public_ip": model.LabelValue("54.1.2.3"),
"__meta_ecs_public_ip": model.LabelValue("54.1.2.3"),
"__meta_ecs_tag_ec2_Name": model.LabelValue("ecs-host-1"),
"__meta_ecs_tag_ec2_Environment": model.LabelValue("production"),
},
},
},
},
},
{
name: "MixedNetworkingModes",
ecsData: &ecsDataStore{
region: "us-west-2",
clusters: []ecsTypes.Cluster{
{
ClusterName: strptr("mixed-cluster"),
ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
Status: strptr("ACTIVE"),
},
},
services: []ecsTypes.Service{
{
ServiceName: strptr("mixed-service"),
ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/mixed-cluster/mixed-service"),
ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
Status: strptr("ACTIVE"),
},
},
tasks: []ecsTypes.Task{
{
TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/mixed-cluster/task-awsvpc"),
ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/awsvpc-task:1"),
Group: strptr("service:mixed-service"),
LaunchType: ecsTypes.LaunchTypeFargate,
LastStatus: strptr("RUNNING"),
DesiredStatus: strptr("RUNNING"),
HealthStatus: ecsTypes.HealthStatusHealthy,
AvailabilityZone: strptr("us-west-2a"),
Attachments: []ecsTypes.Attachment{
{
Type: strptr("ElasticNetworkInterface"),
Details: []ecsTypes.KeyValuePair{
{Name: strptr("subnetId"), Value: strptr("subnet-12345")},
{Name: strptr("privateIPv4Address"), Value: strptr("10.0.2.100")},
{Name: strptr("networkInterfaceId"), Value: strptr("eni-mixed-awsvpc")},
},
},
},
},
{
TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/mixed-cluster/task-bridge"),
ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/bridge-task:1"),
Group: strptr("service:mixed-service"),
LaunchType: ecsTypes.LaunchTypeEc2,
LastStatus: strptr("RUNNING"),
DesiredStatus: strptr("RUNNING"),
HealthStatus: ecsTypes.HealthStatusHealthy,
AvailabilityZone: strptr("us-west-2b"),
ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/mixed-cluster/xyz789"),
Attachments: []ecsTypes.Attachment{},
},
},
containerInstances: []ecsTypes.ContainerInstance{
{
ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/mixed-cluster/xyz789"),
Ec2InstanceId: strptr("i-0987654321fedcba0"),
Status: strptr("ACTIVE"),
},
},
ec2Instances: map[string]ec2InstanceInfo{
"i-0987654321fedcba0": {
privateIP: "10.0.1.75",
publicIP: "54.2.3.4",
subnetID: "subnet-bridge-2",
instanceType: "t3.large",
tags: map[string]string{
"Name": "mixed-host",
"Team": "platform",
},
},
},
eniPublicIPs: map[string]string{
"eni-mixed-awsvpc": "52.2.3.4",
},
},
expected: []*targetgroup.Group{
{
Source: "us-west-2",
Targets: []model.LabelSet{
{
model.AddressLabel: model.LabelValue("10.0.2.100:80"),
"__meta_ecs_cluster": model.LabelValue("mixed-cluster"),
"__meta_ecs_cluster_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
"__meta_ecs_service": model.LabelValue("mixed-service"),
"__meta_ecs_service_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:service/mixed-cluster/mixed-service"),
"__meta_ecs_service_status": model.LabelValue("ACTIVE"),
"__meta_ecs_task_group": model.LabelValue("service:mixed-service"),
"__meta_ecs_task_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task/mixed-cluster/task-awsvpc"),
"__meta_ecs_task_definition": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task-definition/awsvpc-task:1"),
"__meta_ecs_region": model.LabelValue("us-west-2"),
"__meta_ecs_availability_zone": model.LabelValue("us-west-2a"),
"__meta_ecs_ip_address": model.LabelValue("10.0.2.100"),
"__meta_ecs_subnet_id": model.LabelValue("subnet-12345"),
"__meta_ecs_launch_type": model.LabelValue("FARGATE"),
|
||||
"__meta_ecs_desired_status": model.LabelValue("RUNNING"),
|
||||
"__meta_ecs_last_status": model.LabelValue("RUNNING"),
|
||||
"__meta_ecs_health_status": model.LabelValue("HEALTHY"),
|
||||
"__meta_ecs_network_mode": model.LabelValue("awsvpc"),
|
||||
"__meta_ecs_public_ip": model.LabelValue("52.2.3.4"),
|
||||
},
|
||||
{
|
||||
model.AddressLabel: model.LabelValue("10.0.1.75:80"),
|
||||
"__meta_ecs_cluster": model.LabelValue("mixed-cluster"),
|
||||
"__meta_ecs_cluster_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:cluster/mixed-cluster"),
|
||||
"__meta_ecs_service": model.LabelValue("mixed-service"),
|
||||
"__meta_ecs_service_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:service/mixed-cluster/mixed-service"),
|
||||
"__meta_ecs_service_status": model.LabelValue("ACTIVE"),
|
||||
"__meta_ecs_task_group": model.LabelValue("service:mixed-service"),
|
||||
"__meta_ecs_task_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task/mixed-cluster/task-bridge"),
|
||||
"__meta_ecs_task_definition": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task-definition/bridge-task:1"),
|
||||
"__meta_ecs_region": model.LabelValue("us-west-2"),
|
||||
"__meta_ecs_availability_zone": model.LabelValue("us-west-2b"),
|
||||
"__meta_ecs_ip_address": model.LabelValue("10.0.1.75"),
|
||||
"__meta_ecs_subnet_id": model.LabelValue("subnet-bridge-2"),
|
||||
"__meta_ecs_launch_type": model.LabelValue("EC2"),
|
||||
"__meta_ecs_desired_status": model.LabelValue("RUNNING"),
|
||||
"__meta_ecs_last_status": model.LabelValue("RUNNING"),
|
||||
"__meta_ecs_health_status": model.LabelValue("HEALTHY"),
|
||||
"__meta_ecs_network_mode": model.LabelValue("bridge"),
|
||||
"__meta_ecs_container_instance_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:container-instance/mixed-cluster/xyz789"),
|
||||
"__meta_ecs_ec2_instance_id": model.LabelValue("i-0987654321fedcba0"),
|
||||
"__meta_ecs_ec2_instance_type": model.LabelValue("t3.large"),
|
||||
"__meta_ecs_ec2_instance_private_ip": model.LabelValue("10.0.1.75"),
|
||||
"__meta_ecs_ec2_instance_public_ip": model.LabelValue("54.2.3.4"),
|
||||
"__meta_ecs_public_ip": model.LabelValue("54.2.3.4"),
|
||||
"__meta_ecs_tag_ec2_Name": model.LabelValue("mixed-host"),
|
||||
"__meta_ecs_tag_ec2_Team": model.LabelValue("platform"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "EC2WithAwsvpcNetworking",
|
||||
ecsData: &ecsDataStore{
|
||||
region: "us-west-2",
|
||||
clusters: []ecsTypes.Cluster{
|
||||
{
|
||||
ClusterName: strptr("ec2-awsvpc-cluster"),
|
||||
ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/ec2-awsvpc-cluster"),
|
||||
Status: strptr("ACTIVE"),
|
||||
},
|
||||
},
|
||||
services: []ecsTypes.Service{
|
||||
{
|
||||
ServiceName: strptr("ec2-awsvpc-service"),
|
||||
ServiceArn: strptr("arn:aws:ecs:us-west-2:123456789012:service/ec2-awsvpc-cluster/ec2-awsvpc-service"),
|
||||
ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/ec2-awsvpc-cluster"),
|
||||
Status: strptr("ACTIVE"),
|
||||
},
|
||||
},
|
||||
tasks: []ecsTypes.Task{
|
||||
{
|
||||
TaskArn: strptr("arn:aws:ecs:us-west-2:123456789012:task/ec2-awsvpc-cluster/task-ec2-awsvpc"),
|
||||
ClusterArn: strptr("arn:aws:ecs:us-west-2:123456789012:cluster/ec2-awsvpc-cluster"),
|
||||
TaskDefinitionArn: strptr("arn:aws:ecs:us-west-2:123456789012:task-definition/ec2-awsvpc-task:1"),
|
||||
Group: strptr("service:ec2-awsvpc-service"),
|
||||
LaunchType: ecsTypes.LaunchTypeEc2,
|
||||
LastStatus: strptr("RUNNING"),
|
||||
DesiredStatus: strptr("RUNNING"),
|
||||
HealthStatus: ecsTypes.HealthStatusHealthy,
|
||||
AvailabilityZone: strptr("us-west-2c"),
|
||||
ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/ec2-awsvpc-cluster/def456"),
|
||||
// Has BOTH ENI attachment AND container instance ARN - should use ENI
|
||||
Attachments: []ecsTypes.Attachment{
|
||||
{
|
||||
Type: strptr("ElasticNetworkInterface"),
|
||||
Details: []ecsTypes.KeyValuePair{
|
||||
{Name: strptr("subnetId"), Value: strptr("subnet-99999")},
|
||||
{Name: strptr("privateIPv4Address"), Value: strptr("10.0.3.200")},
|
||||
{Name: strptr("networkInterfaceId"), Value: strptr("eni-ec2-awsvpc")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
eniPublicIPs: map[string]string{
|
||||
"eni-ec2-awsvpc": "52.3.4.5",
|
||||
},
|
||||
// Container instance data - IP should NOT be used, but instance type SHOULD be used
|
||||
containerInstances: []ecsTypes.ContainerInstance{
|
||||
{
|
||||
ContainerInstanceArn: strptr("arn:aws:ecs:us-west-2:123456789012:container-instance/ec2-awsvpc-cluster/def456"),
|
||||
Ec2InstanceId: strptr("i-ec2awsvpcinstance"),
|
||||
Status: strptr("ACTIVE"),
|
||||
},
|
||||
},
|
||||
ec2Instances: map[string]ec2InstanceInfo{
|
||||
"i-ec2awsvpcinstance": {
|
||||
privateIP: "10.0.9.99", // This IP should NOT be used (ENI IP is used instead)
|
||||
publicIP: "54.3.4.5", // This public IP SHOULD be exposed
|
||||
subnetID: "subnet-wrong", // This subnet should NOT be used (ENI subnet is used instead)
|
||||
instanceType: "c5.2xlarge", // This instance type SHOULD be used
|
||||
tags: map[string]string{
|
||||
"Name": "ec2-awsvpc-host",
|
||||
"Owner": "team-a",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []*targetgroup.Group{
|
||||
{
|
||||
Source: "us-west-2",
|
||||
Targets: []model.LabelSet{
|
||||
{
|
||||
model.AddressLabel: model.LabelValue("10.0.3.200:80"),
|
||||
"__meta_ecs_cluster": model.LabelValue("ec2-awsvpc-cluster"),
|
||||
"__meta_ecs_cluster_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:cluster/ec2-awsvpc-cluster"),
|
||||
"__meta_ecs_service": model.LabelValue("ec2-awsvpc-service"),
|
||||
"__meta_ecs_service_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:service/ec2-awsvpc-cluster/ec2-awsvpc-service"),
|
||||
"__meta_ecs_service_status": model.LabelValue("ACTIVE"),
|
||||
"__meta_ecs_task_group": model.LabelValue("service:ec2-awsvpc-service"),
|
||||
"__meta_ecs_task_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task/ec2-awsvpc-cluster/task-ec2-awsvpc"),
|
||||
"__meta_ecs_task_definition": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:task-definition/ec2-awsvpc-task:1"),
|
||||
"__meta_ecs_region": model.LabelValue("us-west-2"),
|
||||
"__meta_ecs_availability_zone": model.LabelValue("us-west-2c"),
|
||||
"__meta_ecs_ip_address": model.LabelValue("10.0.3.200"),
|
||||
"__meta_ecs_subnet_id": model.LabelValue("subnet-99999"),
|
||||
"__meta_ecs_launch_type": model.LabelValue("EC2"),
|
||||
"__meta_ecs_desired_status": model.LabelValue("RUNNING"),
|
||||
"__meta_ecs_last_status": model.LabelValue("RUNNING"),
|
||||
"__meta_ecs_health_status": model.LabelValue("HEALTHY"),
|
||||
"__meta_ecs_network_mode": model.LabelValue("awsvpc"),
|
||||
"__meta_ecs_container_instance_arn": model.LabelValue("arn:aws:ecs:us-west-2:123456789012:container-instance/ec2-awsvpc-cluster/def456"),
|
||||
"__meta_ecs_ec2_instance_id": model.LabelValue("i-ec2awsvpcinstance"),
|
||||
"__meta_ecs_ec2_instance_type": model.LabelValue("c5.2xlarge"),
|
||||
"__meta_ecs_ec2_instance_private_ip": model.LabelValue("10.0.9.99"),
|
||||
"__meta_ecs_ec2_instance_public_ip": model.LabelValue("54.3.4.5"),
|
||||
"__meta_ecs_public_ip": model.LabelValue("52.3.4.5"),
|
||||
"__meta_ecs_tag_ec2_Name": model.LabelValue("ec2-awsvpc-host"),
|
||||
"__meta_ecs_tag_ec2_Owner": model.LabelValue("team-a"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
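	// Sketch (editorial, not from the original change): the expectations above
	// assume an ENI-first rule for task addressing. When a task has an
	// ElasticNetworkInterface attachment (awsvpc mode), its private IP, subnet,
	// and __meta_ecs_public_ip come from the ENI even if the task also has a
	// container instance; the EC2 instance then only contributes the
	// instance-scoped labels (ID, type, its own IPs, and tags). A hypothetical
	// helper expressing that precedence:
	//
	//	func pickTaskNetwork(eniIP, eniSubnet, ec2IP, ec2Subnet string) (ip, subnet string) {
	//		if eniIP != "" { // awsvpc: the ENI attachment wins
	//			return eniIP, eniSubnet
	//		}
	//		return ec2IP, ec2Subnet // bridge/host: fall back to the host instance
	//	}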

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ecsClient := newMockECSClient(tt.ecsData)
			ec2Client := newMockECSEC2Client(tt.ecsData.ec2Instances, tt.ecsData.eniPublicIPs)

			d := &ECSDiscovery{
				ecs: ecsClient,
				ec2: ec2Client,
				cfg: &ECSSDConfig{
					Region: tt.ecsData.region,
					Port: 80,
@@ -951,3 +1293,91 @@ func (m *mockECSClient) DescribeTasks(_ context.Context, input *ecs.DescribeTask
		Tasks: tasks,
	}, nil
}

func (m *mockECSClient) DescribeContainerInstances(_ context.Context, input *ecs.DescribeContainerInstancesInput, _ ...func(*ecs.Options)) (*ecs.DescribeContainerInstancesOutput, error) {
	var containerInstances []ecsTypes.ContainerInstance
	for _, ciArn := range input.ContainerInstances {
		for _, ci := range m.ecsData.containerInstances {
			if *ci.ContainerInstanceArn == ciArn {
				containerInstances = append(containerInstances, ci)
				break
			}
		}
	}

	return &ecs.DescribeContainerInstancesOutput{
		ContainerInstances: containerInstances,
	}, nil
}

// Mock EC2 client wrapper for ECS tests.
type mockECSEC2Client struct {
	ec2Instances map[string]ec2InstanceInfo
	eniPublicIPs map[string]string
}

func newMockECSEC2Client(ec2Instances map[string]ec2InstanceInfo, eniPublicIPs map[string]string) *mockECSEC2Client {
	return &mockECSEC2Client{
		ec2Instances: ec2Instances,
		eniPublicIPs: eniPublicIPs,
	}
}

func (m *mockECSEC2Client) DescribeInstances(_ context.Context, input *ec2.DescribeInstancesInput, _ ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error) {
	var reservations []ec2Types.Reservation

	for _, instanceID := range input.InstanceIds {
		if info, ok := m.ec2Instances[instanceID]; ok {
			instance := ec2Types.Instance{
				InstanceId: &instanceID,
				PrivateIpAddress: &info.privateIP,
			}
			if info.publicIP != "" {
				instance.PublicIpAddress = &info.publicIP
			}
			if info.subnetID != "" {
				instance.SubnetId = &info.subnetID
			}
			if info.instanceType != "" {
				instance.InstanceType = ec2Types.InstanceType(info.instanceType)
			}
			// Add tags.
			for tagKey, tagValue := range info.tags {
				instance.Tags = append(instance.Tags, ec2Types.Tag{
					Key: &tagKey,
					Value: &tagValue,
				})
			}
			reservation := ec2Types.Reservation{
				Instances: []ec2Types.Instance{instance},
			}
			reservations = append(reservations, reservation)
		}
	}

	return &ec2.DescribeInstancesOutput{
		Reservations: reservations,
	}, nil
}
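// Editorial note: a real DescribeInstances response may batch several
// instances into one reservation; the mock above returns one reservation per
// instance, which is equivalent for callers that flatten every reservation's
// Instances slice.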

func (m *mockECSEC2Client) DescribeNetworkInterfaces(_ context.Context, input *ec2.DescribeNetworkInterfacesInput, _ ...func(*ec2.Options)) (*ec2.DescribeNetworkInterfacesOutput, error) {
	var networkInterfaces []ec2Types.NetworkInterface

	for _, eniID := range input.NetworkInterfaceIds {
		if publicIP, ok := m.eniPublicIPs[eniID]; ok {
			eni := ec2Types.NetworkInterface{
				NetworkInterfaceId: &eniID,
			}
			if publicIP != "" {
				eni.Association = &ec2Types.NetworkInterfaceAssociation{
					PublicIp: &publicIP,
				}
			}
			networkInterfaces = append(networkInterfaces, eni)
		}
	}

	return &ec2.DescribeNetworkInterfacesOutput{
		NetworkInterfaces: networkInterfaces,
	}, nil
}
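// Hypothetical wiring sketch (not part of the diff): the two mocks are meant
// to be paired, mirroring the runner above, where data is an *ecsDataStore
// populated like the test cases:
//
//	d := &ECSDiscovery{
//		ecs: newMockECSClient(data),
//		ec2: newMockECSEC2Client(data.ec2Instances, data.eniPublicIPs),
//		cfg: &ECSSDConfig{Region: data.region, Port: 80},
//	}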

@@ -1,4 +1,4 @@
-// Copyright 2021 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2024 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2016 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2015 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at

@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
Some files were not shown because too many files have changed in this diff.