diff --git a/scrape/metrics.go b/scrape/metrics.go index 4662a9fd9e..34f1e28dba 100644 --- a/scrape/metrics.go +++ b/scrape/metrics.go @@ -56,6 +56,7 @@ type scrapeMetrics struct { targetScrapeExemplarOutOfOrder prometheus.Counter targetScrapePoolExceededLabelLimits prometheus.Counter targetScrapeNativeHistogramBucketLimit prometheus.Counter + targetScrapeDuration prometheus.Histogram } func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) { @@ -252,6 +253,15 @@ func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) { Help: "Total number of exemplar rejected due to not being out of the expected order.", }, ) + sm.targetScrapeDuration = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Name: "prometheus_target_scrape_duration_seconds", + Help: "Total duration of the scrape from start to commit completion in seconds.", + NativeHistogramBucketFactor: 1.1, + NativeHistogramMaxBucketNumber: 100, + NativeHistogramMinResetDuration: 1 * time.Hour, + }, + ) for _, collector := range []prometheus.Collector{ // Used by Manager. 
@@ -284,6 +294,7 @@ func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) { sm.targetScrapeExemplarOutOfOrder, sm.targetScrapePoolExceededLabelLimits, sm.targetScrapeNativeHistogramBucketLimit, + sm.targetScrapeDuration, } { err := reg.Register(collector) if err != nil { @@ -324,6 +335,7 @@ func (sm *scrapeMetrics) Unregister() { sm.reg.Unregister(sm.targetScrapeExemplarOutOfOrder) sm.reg.Unregister(sm.targetScrapePoolExceededLabelLimits) sm.reg.Unregister(sm.targetScrapeNativeHistogramBucketLimit) + sm.reg.Unregister(sm.targetScrapeDuration) } type TargetsGatherer interface { diff --git a/scrape/scrape.go b/scrape/scrape.go index 1a99155d09..58df858b3d 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1335,6 +1335,11 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er return } err = app.Commit() + if sl.reportExtraMetrics { + totalDuration := time.Since(start) + // Record the end-to-end scrape duration, from scrape start through commit completion (observed even if the commit below failed). + sl.metrics.targetScrapeDuration.Observe(totalDuration.Seconds()) + } if err != nil { sl.l.Error("Scrape commit failed", "err", err) }