From 1e317d00987888807ba8998f0d0c452ac5eda463 Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Tue, 6 Jan 2026 09:00:49 -0300 Subject: [PATCH] Add configuration option to control `extra-scrape-metrics` (#17606) --- cmd/prometheus/main.go | 6 +- cmd/prometheus/testdata/features.json | 2 +- config/config.go | 31 +++- config/config_test.go | 154 +++++++++++++++++- ...obal_disable_extra_scrape_metrics.good.yml | 6 + ...lobal_enable_extra_scrape_metrics.good.yml | 6 + ...ocal_disable_extra_scrape_metrics.good.yml | 7 + ...local_enable_extra_scrape_metrics.good.yml | 7 + docs/configuration/configuration.md | 12 ++ docs/feature_flags.md | 2 + scrape/manager.go | 4 +- scrape/scrape.go | 2 +- 12 files changed, 226 insertions(+), 13 deletions(-) create mode 100644 config/testdata/global_disable_extra_scrape_metrics.good.yml create mode 100644 config/testdata/global_enable_extra_scrape_metrics.good.yml create mode 100644 config/testdata/local_disable_extra_scrape_metrics.good.yml create mode 100644 config/testdata/local_enable_extra_scrape_metrics.good.yml diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index c330671b1e..ee60e58b2e 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -233,8 +233,10 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error { c.tsdb.EnableMemorySnapshotOnShutdown = true logger.Info("Experimental memory snapshot on shutdown enabled") case "extra-scrape-metrics": - c.scrape.ExtraMetrics = true - logger.Info("Experimental additional scrape metrics enabled") + t := true + config.DefaultConfig.GlobalConfig.ExtraScrapeMetrics = &t + config.DefaultGlobalConfig.ExtraScrapeMetrics = &t + logger.Warn("This option for --enable-feature is being phased out. It currently changes the default for the extra_scrape_metrics config setting to true, but will become a no-op in a future version. 
Stop using this option and set extra_scrape_metrics in the config instead.", "option", o) case "metadata-wal-records": c.scrape.AppendMetadata = true c.web.AppendMetadata = true diff --git a/cmd/prometheus/testdata/features.json b/cmd/prometheus/testdata/features.json index fbffd941fd..145bb04d77 100644 --- a/cmd/prometheus/testdata/features.json +++ b/cmd/prometheus/testdata/features.json @@ -166,7 +166,7 @@ "query_offset": true }, "scrape": { - "extra_scrape_metrics": false, + "extra_scrape_metrics": true, "start_timestamp_zero_ingestion": false, "type_and_unit_labels": false }, diff --git a/config/config.go b/config/config.go index cce8fc4168..0b9b059ab2 100644 --- a/config/config.go +++ b/config/config.go @@ -149,6 +149,10 @@ func LoadFile(filename string, agentMode bool, logger *slog.Logger) (*Config, er return cfg, nil } +func boolPtr(b bool) *bool { + return &b +} + // The defaults applied before parsing the respective config sections. var ( // DefaultConfig is the default top-level configuration. @@ -158,7 +162,6 @@ var ( OTLPConfig: DefaultOTLPConfig, } - f bool // DefaultGlobalConfig is the default global configuration. DefaultGlobalConfig = GlobalConfig{ ScrapeInterval: model.Duration(1 * time.Minute), @@ -173,9 +176,10 @@ var ( ScrapeProtocols: nil, // When the native histogram feature flag is enabled, // ScrapeNativeHistograms default changes to true. - ScrapeNativeHistograms: &f, + ScrapeNativeHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: false, AlwaysScrapeClassicHistograms: false, + ExtraScrapeMetrics: boolPtr(false), MetricNameValidationScheme: model.UTF8Validation, MetricNameEscapingScheme: model.AllowUTF8, } @@ -513,6 +517,10 @@ type GlobalConfig struct { ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"` // Whether to scrape a classic histogram, even if it is also exposed as a native histogram. 
AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"` + // Whether to enable additional scrape metrics. + // When enabled, Prometheus stores samples for scrape_timeout_seconds, + // scrape_sample_limit, and scrape_body_size_bytes. + ExtraScrapeMetrics *bool `yaml:"extra_scrape_metrics,omitempty"` } // ScrapeProtocol represents supported protocol for scraping metrics. @@ -652,6 +660,9 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(any) error) error { if gc.ScrapeNativeHistograms == nil { gc.ScrapeNativeHistograms = DefaultGlobalConfig.ScrapeNativeHistograms } + if gc.ExtraScrapeMetrics == nil { + gc.ExtraScrapeMetrics = DefaultGlobalConfig.ExtraScrapeMetrics + } if gc.ScrapeProtocols == nil { if DefaultGlobalConfig.ScrapeProtocols != nil { // This is the case where the defaults are set due to a feature flag. @@ -696,7 +707,8 @@ func (c *GlobalConfig) isZero() bool { c.LabelValueLengthLimit == 0 && c.KeepDroppedTargets == 0 && c.MetricNameValidationScheme == model.UnsetValidation && - c.MetricNameEscapingScheme == "" + c.MetricNameEscapingScheme == "" && + c.ExtraScrapeMetrics == nil } const DefaultGoGCPercentage = 75 @@ -805,6 +817,11 @@ type ScrapeConfig struct { // blank in config files but must have a value if a ScrapeConfig is created // programmatically. MetricNameEscapingScheme string `yaml:"metric_name_escaping_scheme,omitempty"` + // Whether to enable additional scrape metrics. + // When enabled, Prometheus stores samples for scrape_timeout_seconds, + // scrape_sample_limit, and scrape_body_size_bytes. + // If not set (nil), inherits the value from the global configuration. + ExtraScrapeMetrics *bool `yaml:"extra_scrape_metrics,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. 
@@ -906,6 +923,9 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { if c.ScrapeNativeHistograms == nil { c.ScrapeNativeHistograms = globalConfig.ScrapeNativeHistograms } + if c.ExtraScrapeMetrics == nil { + c.ExtraScrapeMetrics = globalConfig.ExtraScrapeMetrics + } if c.ScrapeProtocols == nil { switch { @@ -1054,6 +1074,11 @@ func (c *ScrapeConfig) AlwaysScrapeClassicHistogramsEnabled() bool { return c.AlwaysScrapeClassicHistograms != nil && *c.AlwaysScrapeClassicHistograms } +// ExtraScrapeMetricsEnabled returns whether to enable extra scrape metrics. +func (c *ScrapeConfig) ExtraScrapeMetricsEnabled() bool { + return c.ExtraScrapeMetrics != nil && *c.ExtraScrapeMetrics +} + // StorageConfig configures runtime reloadable configuration options. type StorageConfig struct { TSDBConfig *TSDBConfig `yaml:"tsdb,omitempty"` diff --git a/config/config_test.go b/config/config_test.go index aefdd5248c..08aa0b4f06 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -74,10 +74,6 @@ func mustParseURL(u string) *config.URL { return &config.URL{URL: parsed} } -func boolPtr(b bool) *bool { - return &b -} - const ( globBodySizeLimit = 15 * units.MiB globSampleLimit = 1500 @@ -109,6 +105,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: false, ConvertClassicHistogramsToNHCB: false, + ExtraScrapeMetrics: boolPtr(false), MetricNameValidationScheme: model.UTF8Validation, }, @@ -236,6 +233,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -360,6 +358,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), 
HTTPClientConfig: config.HTTPClientConfig{ BasicAuth: &config.BasicAuth{ @@ -470,6 +469,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -532,6 +532,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: "/metrics", Scheme: "http", @@ -571,6 +572,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -616,6 +618,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -661,6 +664,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -696,6 +700,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -739,6 +744,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), 
ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -779,6 +785,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -826,6 +833,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -863,6 +871,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -903,6 +912,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -936,6 +946,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -972,6 +983,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: "/federate", Scheme: DefaultScrapeConfig.Scheme, @@ -1008,6 +1020,7 @@ var 
expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1044,6 +1057,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1077,6 +1091,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1118,6 +1133,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1158,6 +1174,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1195,6 +1212,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1231,6 +1249,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + 
ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1271,6 +1290,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1314,6 +1334,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(true), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1377,6 +1398,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1410,6 +1432,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1454,6 +1477,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1504,6 +1528,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1544,6 +1569,7 @@ 
var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1585,6 +1611,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1621,6 +1648,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1659,6 +1687,7 @@ var expectedConf = &Config{ ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -2755,6 +2784,7 @@ type ScrapeConfigOptions struct { ScrapeNativeHistograms bool AlwaysScrapeClassicHistograms bool ConvertClassicHistToNHCB bool + ExtraScrapeMetrics bool } func TestGetScrapeConfigs(t *testing.T) { @@ -2788,6 +2818,7 @@ func TestGetScrapeConfigs(t *testing.T) { ScrapeNativeHistograms: boolPtr(opts.ScrapeNativeHistograms), AlwaysScrapeClassicHistograms: boolPtr(opts.AlwaysScrapeClassicHistograms), ConvertClassicHistogramsToNHCB: boolPtr(opts.ConvertClassicHistToNHCB), + ExtraScrapeMetrics: boolPtr(opts.ExtraScrapeMetrics), } if opts.ScrapeProtocols == nil { sc.ScrapeProtocols = DefaultScrapeProtocols @@ -2871,6 +2902,7 @@ func TestGetScrapeConfigs(t *testing.T) { ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), 
ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -2909,6 +2941,7 @@ func TestGetScrapeConfigs(t *testing.T) { ScrapeNativeHistograms: boolPtr(false), AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), + ExtraScrapeMetrics: boolPtr(false), HTTPClientConfig: config.HTTPClientConfig{ TLSConfig: config.TLSConfig{ @@ -3021,6 +3054,26 @@ func TestGetScrapeConfigs(t *testing.T) { configFile: "testdata/global_scrape_protocols_and_local_disable_scrape_native_hist.good.yml", expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ScrapeNativeHistograms: false, ScrapeProtocols: []ScrapeProtocol{PrometheusText0_0_4}})}, }, + { + name: "A global config that enables extra scrape metrics", + configFile: "testdata/global_enable_extra_scrape_metrics.good.yml", + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: true})}, + }, + { + name: "A global config that disables extra scrape metrics", + configFile: "testdata/global_disable_extra_scrape_metrics.good.yml", + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: false})}, + }, + { + name: "A global config that disables extra scrape metrics and scrape config that enables it", + configFile: "testdata/local_enable_extra_scrape_metrics.good.yml", + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: true})}, + }, + { + name: 
"A global config that enables extra scrape metrics and scrape config that disables it", + configFile: "testdata/local_disable_extra_scrape_metrics.good.yml", + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), ExtraScrapeMetrics: false})}, + }, } for _, tc := range testCases { @@ -3037,6 +3090,99 @@ func TestGetScrapeConfigs(t *testing.T) { } } +func TestExtraScrapeMetrics(t *testing.T) { + tests := []struct { + name string + config string + expectGlobal *bool + expectEnabled bool + }{ + { + name: "default values (not set)", + config: ` +scrape_configs: + - job_name: test + static_configs: + - targets: ['localhost:9090'] +`, + expectGlobal: boolPtr(false), // inherits from DefaultGlobalConfig + expectEnabled: false, + }, + { + name: "global enabled", + config: ` +global: + extra_scrape_metrics: true +scrape_configs: + - job_name: test + static_configs: + - targets: ['localhost:9090'] +`, + expectGlobal: boolPtr(true), + expectEnabled: true, + }, + { + name: "global disabled", + config: ` +global: + extra_scrape_metrics: false +scrape_configs: + - job_name: test + static_configs: + - targets: ['localhost:9090'] +`, + expectGlobal: boolPtr(false), + expectEnabled: false, + }, + { + name: "scrape override enabled", + config: ` +global: + extra_scrape_metrics: false +scrape_configs: + - job_name: test + extra_scrape_metrics: true + static_configs: + - targets: ['localhost:9090'] +`, + expectGlobal: boolPtr(false), + expectEnabled: true, + }, + { + name: "scrape override disabled", + config: ` +global: + extra_scrape_metrics: true +scrape_configs: + - job_name: test + extra_scrape_metrics: false + static_configs: + - targets: ['localhost:9090'] +`, + expectGlobal: boolPtr(true), + expectEnabled: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + cfg, err := Load(tc.config, promslog.NewNopLogger()) + 
require.NoError(t, err) + + // Check global config + require.Equal(t, tc.expectGlobal, cfg.GlobalConfig.ExtraScrapeMetrics) + + // Check scrape config + scfgs, err := cfg.GetScrapeConfigs() + require.NoError(t, err) + require.Len(t, scfgs, 1) + + // Check the effective value via the helper method + require.Equal(t, tc.expectEnabled, scfgs[0].ExtraScrapeMetricsEnabled()) + }) + } +} + func kubernetesSDHostURL() config.URL { tURL, _ := url.Parse("https://localhost:1234") return config.URL{URL: tURL} diff --git a/config/testdata/global_disable_extra_scrape_metrics.good.yml b/config/testdata/global_disable_extra_scrape_metrics.good.yml new file mode 100644 index 0000000000..26c6e4b8b5 --- /dev/null +++ b/config/testdata/global_disable_extra_scrape_metrics.good.yml @@ -0,0 +1,6 @@ +global: + extra_scrape_metrics: false +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/global_enable_extra_scrape_metrics.good.yml b/config/testdata/global_enable_extra_scrape_metrics.good.yml new file mode 100644 index 0000000000..1d7ea2db1c --- /dev/null +++ b/config/testdata/global_enable_extra_scrape_metrics.good.yml @@ -0,0 +1,6 @@ +global: + extra_scrape_metrics: true +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/local_disable_extra_scrape_metrics.good.yml b/config/testdata/local_disable_extra_scrape_metrics.good.yml new file mode 100644 index 0000000000..a1b7c646fa --- /dev/null +++ b/config/testdata/local_disable_extra_scrape_metrics.good.yml @@ -0,0 +1,7 @@ +global: + extra_scrape_metrics: true +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] + extra_scrape_metrics: false diff --git a/config/testdata/local_enable_extra_scrape_metrics.good.yml b/config/testdata/local_enable_extra_scrape_metrics.good.yml new file mode 100644 index 0000000000..a1c8b2808e --- /dev/null +++ 
b/config/testdata/local_enable_extra_scrape_metrics.good.yml @@ -0,0 +1,7 @@ +global: + extra_scrape_metrics: false +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] + extra_scrape_metrics: true diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 3b71f26fc2..4079daae02 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -159,6 +159,12 @@ global: # native histogram with custom buckets. [ always_scrape_classic_histograms: <boolean> | default = false ] + # When enabled, Prometheus stores additional time series for each scrape: + # scrape_timeout_seconds, scrape_sample_limit, and scrape_body_size_bytes. + # These metrics help monitor how close targets are to their configured limits. + # This option can be overridden per scrape config. + [ extra_scrape_metrics: <boolean> | default = false ] + # The following explains the various combinations of the last three options # in various exposition cases. # @@ -647,6 +653,12 @@ metric_relabel_configs: # native histogram with custom buckets. [ always_scrape_classic_histograms: <boolean> | default = <global.always_scrape_classic_histograms> ] +# When enabled, Prometheus stores additional time series for this scrape job: +# scrape_timeout_seconds, scrape_sample_limit, and scrape_body_size_bytes. +# These metrics help monitor how close targets are to their configured limits. +# If not set, inherits the value from the global configuration. +[ extra_scrape_metrics: <boolean> | default = <global.extra_scrape_metrics> ] + # See global configuration above for further explanations of how the last three # options combine their effects. diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 74daa11c13..af08eebb45 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -28,6 +28,8 @@ and m-mapped chunks, while a WAL replay from disk is only needed for the parts o `--enable-feature=extra-scrape-metrics` +> **Note:** This feature flag is deprecated. 
Please use the `extra_scrape_metrics` configuration option instead (available at both global and scrape-config level). The feature flag will be removed in a future major version. See the [configuration documentation](configuration/configuration.md) for more details. + When enabled, for each instance scrape, Prometheus stores a sample in the following additional time series: - `scrape_timeout_seconds`. The configured `scrape_timeout` for a target. This allows you to measure each target to find out how close they are to timing out with `scrape_duration_seconds / scrape_timeout_seconds`. diff --git a/scrape/manager.go b/scrape/manager.go index bd68c186c0..a2297aa824 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -70,7 +70,8 @@ func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(str // Register scrape features. if r := o.FeatureRegistry; r != nil { - r.Set(features.Scrape, "extra_scrape_metrics", o.ExtraMetrics) + // "Extra scrape metrics" is always enabled because it moved from feature flag to config file. + r.Enable(features.Scrape, "extra_scrape_metrics") r.Set(features.Scrape, "start_timestamp_zero_ingestion", o.EnableStartTimestampZeroIngestion) r.Set(features.Scrape, "type_and_unit_labels", o.EnableTypeAndUnitLabels) } @@ -80,7 +81,6 @@ func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(str // Options are the configuration parameters to the scrape manager. type Options struct { - ExtraMetrics bool // Option used by downstream scraper users like OpenTelemetry Collector // to help lookup metric metadata. Should be false for Prometheus. 
PassMetadataInContext bool diff --git a/scrape/scrape.go b/scrape/scrape.go index 33683b4caf..70ca8ad42a 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1212,12 +1212,12 @@ func newScrapeLoop(opts scrapeLoopOptions) *scrapeLoop { fallbackScrapeProtocol: opts.sp.config.ScrapeFallbackProtocol.HeaderMediaType(), enableCompression: opts.sp.config.EnableCompression, mrc: opts.sp.config.MetricRelabelConfigs, + reportExtraMetrics: opts.sp.config.ExtraScrapeMetricsEnabled(), validationScheme: opts.sp.config.MetricNameValidationScheme, // scrape.Options. enableSTZeroIngestion: opts.sp.options.EnableStartTimestampZeroIngestion, enableTypeAndUnitLabels: opts.sp.options.EnableTypeAndUnitLabels, - reportExtraMetrics: opts.sp.options.ExtraMetrics, appendMetadataToWAL: opts.sp.options.AppendMetadata, passMetadataInContext: opts.sp.options.PassMetadataInContext, skipOffsetting: opts.sp.options.skipOffsetting,