Owen Williams 2026-02-03 23:10:38 +05:30 committed by GitHub
commit 44d7af0ae6
20 changed files with 3517 additions and 2807 deletions

@@ -200,7 +200,6 @@ func TestBasicContentNegotiation(t *testing.T) {
}
func TestSampleDelivery(t *testing.T) {
t.Parallel()
// Let's create an even number of send batches, so we don't run into the
// batch timeout case.
n := 3
@@ -409,7 +408,6 @@ func TestWALMetadataDelivery(t *testing.T) {
}
func TestSampleDeliveryTimeout(t *testing.T) {
t.Parallel()
for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} {
t.Run(fmt.Sprint(protoMsg), func(t *testing.T) {
// Let's send one less sample than batch size, and wait the timeout duration
@@ -2038,7 +2036,6 @@ func TestIsSampleOld(t *testing.T) {
// Simulates a scenario in which the remote write endpoint is down and a subset of samples is dropped due to the age limit while backing off.
func TestSendSamplesWithBackoffWithSampleAgeLimit(t *testing.T) {
t.Parallel()
for _, protoMsg := range []remoteapi.WriteMessageType{remoteapi.WriteV1MessageType, remoteapi.WriteV2MessageType} {
t.Run(fmt.Sprint(protoMsg), func(t *testing.T) {
maxSamplesPerSend := 10

@@ -490,7 +490,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
return
}
decoded <- series
case record.Samples:
case record.Samples, record.SamplesV2:
samples := db.walReplaySamplesPool.Get()[:0]
samples, err = dec.Samples(rec, samples)
if err != nil {
@@ -710,7 +710,7 @@ func (db *DB) truncate(mint int64) error {
db.metrics.checkpointCreationTotal.Inc()
if _, err = wlog.Checkpoint(db.logger, db.wal, first, last, db.keepSeriesInWALCheckpointFn(last), mint); err != nil {
if _, err = wlog.Checkpoint(db.logger, db.wal, first, last, db.keepSeriesInWALCheckpointFn(last), mint, db.opts.EnableSTStorage); err != nil {
db.metrics.checkpointCreationFail.Inc()
var cerr *wlog.CorruptionErr
if errors.As(err, &cerr) {
@@ -1156,7 +1156,7 @@ func (a *appenderBase) log() error {
a.mtx.RLock()
defer a.mtx.RUnlock()
var encoder record.Encoder
encoder := record.Encoder{EnableSTStorage: a.opts.EnableSTStorage}
buf := a.bufPool.Get().([]byte)
defer func() {
a.bufPool.Put(buf) //nolint:staticcheck
@@ -1280,7 +1280,7 @@ func (a *appenderBase) logSeries() error {
a.bufPool.Put(buf) //nolint:staticcheck
}()
var encoder record.Encoder
encoder := record.Encoder{EnableSTStorage: a.opts.EnableSTStorage}
buf = encoder.Series(a.pendingSeries, buf)
if err := a.wal.Log(buf); err != nil {
return err

@@ -18,6 +18,7 @@ import (
"fmt"
"math"
"path/filepath"
"strconv"
"testing"
"time"
@@ -89,278 +90,269 @@ func TestDB_InvalidSeries_AppendV2(t *testing.T) {
})
}
func TestCommit_AppendV2(t *testing.T) {
func TestCommitAppendV2(t *testing.T) {
const (
numDatapoints = 1000
numHistograms = 100
numSeries = 8
)
for _, enableStStorage := range []bool{false, true} {
t.Run("enableStStorage="+strconv.FormatBool(enableStStorage), func(t *testing.T) {
opts := DefaultOptions()
opts.EnableSTStorage = enableStStorage
s := createTestAgentDB(t, nil, opts)
s := createTestAgentDB(t, nil, DefaultOptions())
app := s.AppenderV2(context.TODO())
app := s.AppenderV2(context.TODO())
lbls := labelsForTest(t.Name(), numSeries)
for _, l := range lbls {
lset := labels.New(l...)
lbls := labelsForTest(t.Name(), numSeries)
for _, l := range lbls {
lset := labels.New(l...)
for i := range numDatapoints {
sample := chunks.GenerateSamples(0, 1)
_, err := app.Append(0, lset, 0, sample[0].T(), sample[0].F(), nil, nil, storage.AOptions{
Exemplars: []exemplar.Exemplar{{
Labels: lset,
Ts: sample[0].T() + int64(i),
Value: sample[0].F(),
HasTs: true,
}},
})
for i := range numDatapoints {
sample := chunks.GenerateSamples(0, 1)
_, err := app.Append(0, lset, int64(i), sample[0].T()+2000, sample[0].F(), nil, nil, storage.AOptions{
Exemplars: []exemplar.Exemplar{{
Labels: lset,
Ts: sample[0].T() + int64(i) + 2000,
Value: sample[0].F(),
HasTs: true,
}},
})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, int64(i), int64(i+2000), 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
customBucketHistograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, int64(i), int64(i+2000), 0, customBucketHistograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, int64(i), int64(i+2000), 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
customBucketFloatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, int64(i), int64(i+2000), 0, nil, customBucketFloatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
}
require.NoError(t, app.Commit())
require.NoError(t, s.Close())
sr, err := wlog.NewSegmentsReader(s.wal.Dir())
require.NoError(t, err)
}
defer func() {
require.NoError(t, sr.Close())
}()
// Read records from WAL and check for expected count of series, samples, and exemplars.
var (
r = wlog.NewReader(sr)
dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
walSeriesCount, walSamplesCount, walExemplarsCount, walHistogramCount, walFloatHistogramCount int
)
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Series:
var series []record.RefSeries
series, err = dec.Series(rec, series)
require.NoError(t, err)
walSeriesCount += len(series)
case record.Samples, record.SamplesV2:
var samples []record.RefSample
samples, err = dec.Samples(rec, samples)
require.NoError(t, err)
walSamplesCount += len(samples)
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
var histograms []record.RefHistogramSample
histograms, err = dec.HistogramSamples(rec, histograms)
require.NoError(t, err)
walHistogramCount += len(histograms)
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
var floatHistograms []record.RefFloatHistogramSample
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
require.NoError(t, err)
walFloatHistogramCount += len(floatHistograms)
case record.Exemplars:
var exemplars []record.RefExemplar
exemplars, err = dec.Exemplars(rec, exemplars)
require.NoError(t, err)
walExemplarsCount += len(exemplars)
default:
}
}
// Check that the WAL contained the same number of committed series/samples/exemplars.
require.Equal(t, numSeries*5, walSeriesCount, "unexpected number of series")
require.Equal(t, numSeries*numDatapoints, walSamplesCount, "unexpected number of samples")
require.Equal(t, numSeries*numDatapoints, walExemplarsCount, "unexpected number of exemplars")
require.Equal(t, numSeries*numHistograms*2, walHistogramCount, "unexpected number of histograms")
require.Equal(t, numSeries*numHistograms*2, walFloatHistogramCount, "unexpected number of float histograms")
// Check that we can still create both kinds of Appender - see https://github.com/prometheus/prometheus/issues/17800.
_ = s.Appender(context.TODO())
_ = s.AppenderV2(context.TODO())
})
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(i), 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
customBucketHistograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(i), 0, customBucketHistograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(i), 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
customBucketFloatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(i), 0, nil, customBucketFloatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
}
require.NoError(t, app.Commit())
require.NoError(t, s.Close())
sr, err := wlog.NewSegmentsReader(s.wal.Dir())
require.NoError(t, err)
defer func() {
require.NoError(t, sr.Close())
}()
// Read records from WAL and check for expected count of series, samples, and exemplars.
var (
r = wlog.NewReader(sr)
dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
walSeriesCount, walSamplesCount, walExemplarsCount, walHistogramCount, walFloatHistogramCount int
)
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Series:
var series []record.RefSeries
series, err = dec.Series(rec, series)
require.NoError(t, err)
walSeriesCount += len(series)
case record.Samples:
var samples []record.RefSample
samples, err = dec.Samples(rec, samples)
require.NoError(t, err)
walSamplesCount += len(samples)
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
var histograms []record.RefHistogramSample
histograms, err = dec.HistogramSamples(rec, histograms)
require.NoError(t, err)
walHistogramCount += len(histograms)
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
var floatHistograms []record.RefFloatHistogramSample
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
require.NoError(t, err)
walFloatHistogramCount += len(floatHistograms)
case record.Exemplars:
var exemplars []record.RefExemplar
exemplars, err = dec.Exemplars(rec, exemplars)
require.NoError(t, err)
walExemplarsCount += len(exemplars)
default:
}
}
// Check that the WAL contained the same number of committed series/samples/exemplars.
require.Equal(t, numSeries*5, walSeriesCount, "unexpected number of series")
require.Equal(t, numSeries*numDatapoints, walSamplesCount, "unexpected number of samples")
require.Equal(t, numSeries*numDatapoints, walExemplarsCount, "unexpected number of exemplars")
require.Equal(t, numSeries*numHistograms*2, walHistogramCount, "unexpected number of histograms")
require.Equal(t, numSeries*numHistograms*2, walFloatHistogramCount, "unexpected number of float histograms")
// Check that we can still create both kinds of Appender - see https://github.com/prometheus/prometheus/issues/17800.
_ = s.Appender(context.TODO())
_ = s.AppenderV2(context.TODO())
}
func TestRollback_AppendV2(t *testing.T) {
func TestRollbackAppendV2(t *testing.T) {
const (
numDatapoints = 1000
numHistograms = 100
numSeries = 8
)
s := createTestAgentDB(t, nil, DefaultOptions())
app := s.AppenderV2(context.TODO())
for _, enableStStorage := range []bool{false, true} {
opts := DefaultOptions()
opts.EnableSTStorage = enableStStorage
s := createTestAgentDB(t, nil, opts)
app := s.AppenderV2(context.TODO())
lbls := labelsForTest(t.Name(), numSeries)
for _, l := range lbls {
lset := labels.New(l...)
lbls := labelsForTest(t.Name(), numSeries)
for _, l := range lbls {
lset := labels.New(l...)
for range numDatapoints {
sample := chunks.GenerateSamples(0, 1)
_, err := app.Append(0, lset, 0, sample[0].T(), sample[0].F(), nil, nil, storage.AOptions{})
require.NoError(t, err)
for i := range numDatapoints {
sample := chunks.GenerateSamples(0, 1)
_, err := app.Append(0, lset, int64(i), sample[0].T()+2000, sample[0].F(), nil, nil, storage.AOptions{})
require.NoError(t, err)
}
}
}
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(i), 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
for i := range numHistograms {
_, err := app.Append(0, lset, int64(i), int64(i+2000), 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(i), 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
for i := range numHistograms {
_, err := app.Append(0, lset, int64(i), int64(i+2000), 0, histograms[i], nil, storage.AOptions{})
require.NoError(t, err)
}
}
}
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(i), 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
for i := range numHistograms {
_, err := app.Append(0, lset, int64(i), int64(i+2000), 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
}
}
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
for _, l := range lbls {
lset := labels.New(l...)
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
for i := range numHistograms {
_, err := app.Append(0, lset, 0, int64(i), 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
for i := range numHistograms {
_, err := app.Append(0, lset, int64(i), int64(i+2000), 0, nil, floatHistograms[i], storage.AOptions{})
require.NoError(t, err)
}
}
}
// Do a rollback, which should clear uncommitted data. A followup call to
// commit should persist nothing to the WAL.
require.NoError(t, app.Rollback())
require.NoError(t, app.Commit())
require.NoError(t, s.Close())
// Do a rollback, which should clear uncommitted data. A followup call to
// commit should persist nothing to the WAL.
require.NoError(t, app.Rollback())
require.NoError(t, app.Commit())
require.NoError(t, s.Close())
sr, err := wlog.NewSegmentsReader(s.wal.Dir())
require.NoError(t, err)
defer func() {
require.NoError(t, sr.Close())
}()
sr, err := wlog.NewSegmentsReader(s.wal.Dir())
require.NoError(t, err)
defer func() {
require.NoError(t, sr.Close())
}()
// Read records from WAL and check for expected count of series and samples.
var (
r = wlog.NewReader(sr)
dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
// Read records from WAL and check for expected count of series and samples.
var (
r = wlog.NewReader(sr)
dec = record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
walSeriesCount, walSamplesCount, walHistogramCount, walFloatHistogramCount, walExemplarsCount int
)
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Series:
var series []record.RefSeries
series, err = dec.Series(rec, series)
require.NoError(t, err)
walSeriesCount += len(series)
walSeriesCount int
)
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Series:
var series []record.RefSeries
series, err = dec.Series(rec, series)
require.NoError(t, err)
walSeriesCount += len(series)
case record.Samples:
var samples []record.RefSample
samples, err = dec.Samples(rec, samples)
require.NoError(t, err)
walSamplesCount += len(samples)
case record.Samples, record.SamplesV2:
t.Errorf("should not have found samples")
case record.Exemplars:
var exemplars []record.RefExemplar
exemplars, err = dec.Exemplars(rec, exemplars)
require.NoError(t, err)
walExemplarsCount += len(exemplars)
case record.Exemplars:
t.Errorf("should not have found exemplars")
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
var histograms []record.RefHistogramSample
histograms, err = dec.HistogramSamples(rec, histograms)
require.NoError(t, err)
walHistogramCount += len(histograms)
case record.HistogramSamples, record.CustomBucketsHistogramSamples, record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
t.Errorf("should not have found histograms")
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
var floatHistograms []record.RefFloatHistogramSample
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
require.NoError(t, err)
walFloatHistogramCount += len(floatHistograms)
default:
default:
}
}
}
// Check that only series get stored after calling Rollback.
require.Equal(t, numSeries*5, walSeriesCount, "series should have been written to WAL")
require.Equal(t, 0, walSamplesCount, "samples should not have been written to WAL")
require.Equal(t, 0, walExemplarsCount, "exemplars should not have been written to WAL")
require.Equal(t, 0, walHistogramCount, "histograms should not have been written to WAL")
require.Equal(t, 0, walFloatHistogramCount, "float histograms should not have been written to WAL")
// Check that only series get stored after calling Rollback.
require.Equal(t, numSeries*5, walSeriesCount, "series should have been written to WAL")
}
}
func TestFullTruncateWAL_AppendV2(t *testing.T) {

@@ -225,7 +225,7 @@ func TestCommit(t *testing.T) {
require.NoError(t, err)
walSeriesCount += len(series)
case record.Samples:
case record.Samples, record.SamplesV2:
var samples []record.RefSample
samples, err = dec.Samples(rec, samples)
require.NoError(t, err)
@@ -361,7 +361,7 @@ func TestRollback(t *testing.T) {
require.NoError(t, err)
walSeriesCount += len(series)
case record.Samples:
case record.Samples, record.SamplesV2:
var samples []record.RefSample
samples, err = dec.Samples(rec, samples)
require.NoError(t, err)
@@ -1344,7 +1344,7 @@ func readWALSamples(t *testing.T, walDir string) []walSample {
series, err := dec.Series(rec, nil)
require.NoError(t, err)
lastSeries = series[0]
case record.Samples:
case record.Samples, record.SamplesV2:
samples, err = dec.Samples(rec, samples[:0])
require.NoError(t, err)
for _, s := range samples {

@@ -0,0 +1,130 @@
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compression
import (
"errors"
"fmt"
"github.com/golang/snappy"
"github.com/klauspost/compress/zstd"
)
// Type represents the compression type used for encoding and decoding data.
type Type string
const (
// None represents the no-compression case.
// It is the default when Type is empty.
None Type = "none"
// Snappy represents snappy block format.
Snappy Type = "snappy"
// Zstd represents zstd compression.
Zstd Type = "zstd"
)
// Encoder provides compression encoding functionality for supported compression
// types. It is agnostic to the content being compressed, operating on byte
// slices of serialized data streams. The encoder maintains internal state for
// Zstd compression and can handle multiple compression types including None,
// Snappy, and Zstd.
type Encoder struct {
w *zstd.Encoder
}
// NewEncoder creates a new Encoder. Returns an error if the zstd encoder cannot
// be initialized.
func NewEncoder() (*Encoder, error) {
e := &Encoder{}
w, err := zstd.NewWriter(nil)
if err != nil {
return nil, err
}
e.w = w
return e, nil
}
// Encode returns the encoded form of src for the given compression type. It also
// reports whether compression was performed. Encode may skip compression for the
// None type, but also when src is too large, e.g. for the Snappy block format.
//
// buf is used as a buffer for the returned encoding; it must not overlap with src.
// It is valid to pass a nil buf.
func (e *Encoder) Encode(t Type, src, buf []byte) (_ []byte, compressed bool, err error) {
switch {
case len(src) == 0, t == "", t == None:
return src, false, nil
case t == Snappy:
// If MaxEncodedLen is less than 0 the record is too large to be compressed.
if snappy.MaxEncodedLen(len(src)) < 0 {
return src, false, nil
}
// The snappy library uses `len` to decide whether a new buffer is needed.
// To allocate as few buffers as possible, make the length equal to the capacity.
buf = buf[:cap(buf)]
return snappy.Encode(buf, src), true, nil
case t == Zstd:
if e == nil {
return nil, false, errors.New("zstd requested but encoder was not initialized with NewEncoder()")
}
return e.w.EncodeAll(src, buf[:0]), true, nil
default:
return nil, false, fmt.Errorf("unsupported compression type: %s", t)
}
}
// Decoder provides decompression functionality for supported compression types.
// It is agnostic to the content being decompressed, operating on byte slices of
// serialized data streams. The decoder maintains internal state for Zstd
// decompression and can handle multiple compression types including None,
// Snappy, and Zstd.
type Decoder struct {
r *zstd.Decoder
}
// NewDecoder creates a new Decoder.
func NewDecoder() *Decoder {
d := &Decoder{}
// Calling zstd.NewReader with a nil io.Reader and no options cannot return an error.
r, _ := zstd.NewReader(nil)
d.r = r
return d
}
// Decode returns the decoded form of src, or an error, given the expected compression type.
//
// buf is used as a buffer for the returned decoded entry; it must not overlap
// with src. It is valid to pass a nil buf.
func (d *Decoder) Decode(t Type, src, buf []byte) (_ []byte, err error) {
switch {
case len(src) == 0, t == "", t == None:
return src, nil
case t == Snappy:
// The snappy library uses `len` to decide whether a new buffer is needed.
// To allocate as few buffers as possible, make the length equal to the capacity.
buf = buf[:cap(buf)]
return snappy.Decode(buf, src)
case t == Zstd:
if d == nil {
return nil, errors.New("zstd requested but Decoder was not initialized with NewDecoder()")
}
return d.r.DecodeAll(src, buf[:0])
default:
return nil, fmt.Errorf("unsupported compression type: %s", t)
}
}
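For orientation only, and not part of this commit: a minimal round-trip sketch of the Encoder/Decoder API shown above. The import path and the standalone main scaffolding are assumptions and may not match the repository layout.
package main
import (
	"fmt"
	// Assumed import path for the compression package added in this commit.
	"github.com/prometheus/prometheus/util/compression"
)
func main() {
	enc, err := compression.NewEncoder() // sets up the internal zstd writer
	if err != nil {
		panic(err)
	}
	dec := compression.NewDecoder()
	src := []byte("example WAL record payload")
	// Encode with Snappy; compressed reports whether compression was actually
	// performed (it can be skipped, e.g. for empty input or oversized records).
	encoded, compressed, err := enc.Encode(compression.Snappy, src, nil)
	if err != nil {
		panic(err)
	}
	// Decode expects the same compression type that produced the bytes.
	decoded, err := dec.Decode(compression.Snappy, encoded, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(compressed, string(decoded))
}
Passing a reusable buf instead of nil lets callers amortize allocations across many records.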

@@ -193,7 +193,7 @@ func TestDataNotAvailableAfterRollback_AppendV2(t *testing.T) {
require.NoError(t, err)
walSeriesCount += len(series)
case record.Samples:
case record.Samples, record.SamplesV2:
var samples []record.RefSample
samples, err = dec.Samples(rec, samples)
require.NoError(t, err)
@@ -968,16 +968,18 @@ func TestWALReplayRaceOnSamplesLoggedBeforeSeries_AppendV2(t *testing.T) {
// We test both with few and many samples appended after series creation. If samples are < 120 then there's no
// mmap-ed chunk, otherwise there's at least 1 mmap-ed chunk when replaying the WAL.
for _, numSamplesAfterSeriesCreation := range []int{1, 1000} {
for run := 1; run <= numRuns; run++ {
t.Run(fmt.Sprintf("samples after series creation = %d, run = %d", numSamplesAfterSeriesCreation, run), func(t *testing.T) {
testWALReplayRaceOnSamplesLoggedBeforeSeriesAppendV2(t, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation)
})
for _, enableStStorage := range []bool{false, true} {
for _, numSamplesAfterSeriesCreation := range []int{1, 1000} {
for run := 1; run <= numRuns; run++ {
t.Run(fmt.Sprintf("samples after series creation = %d, run = %d, stStorage = %v", numSamplesAfterSeriesCreation, run, enableStStorage), func(t *testing.T) {
testWALReplayRaceOnSamplesLoggedBeforeSeriesAppendV2(t, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation, enableStStorage)
})
}
}
}
}
func testWALReplayRaceOnSamplesLoggedBeforeSeriesAppendV2(t *testing.T, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation int) {
func testWALReplayRaceOnSamplesLoggedBeforeSeriesAppendV2(t *testing.T, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation int, enableStStorage bool) {
const numSeries = 1000
db := newTestDB(t)
@@ -985,7 +987,7 @@ func testWALReplayRaceOnSamplesLoggedBeforeSeriesAppendV2(t *testing.T, numSampl
for seriesRef := 1; seriesRef <= numSeries; seriesRef++ {
// Log samples before the series is logged to the WAL.
var enc record.Encoder
enc := record.Encoder{EnableSTStorage: enableStStorage}
var samples []record.RefSample
for ts := range numSamplesBeforeSeriesCreation {
@@ -1176,139 +1178,143 @@ func TestTombstoneCleanResultEmptyBlock_AppendV2(t *testing.T) {
func TestSizeRetention_AppendV2(t *testing.T) {
t.Parallel()
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 100
db := newTestDB(t, withOpts(opts), withRngs(100))
for _, enableStStorage := range []bool{false, true} {
t.Run("enableStStorage="+strconv.FormatBool(enableStStorage), func(t *testing.T) {
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 100
db := newTestDB(t, withOpts(opts), withRngs(100))
blocks := []*BlockMeta{
{MinTime: 100, MaxTime: 200}, // Oldest block
{MinTime: 200, MaxTime: 300},
{MinTime: 300, MaxTime: 400},
{MinTime: 400, MaxTime: 500},
{MinTime: 500, MaxTime: 600}, // Newest Block
}
blocks := []*BlockMeta{
{MinTime: 100, MaxTime: 200}, // Oldest block
{MinTime: 200, MaxTime: 300},
{MinTime: 300, MaxTime: 400},
{MinTime: 400, MaxTime: 500},
{MinTime: 500, MaxTime: 600}, // Newest Block
}
for _, m := range blocks {
createBlock(t, db.Dir(), genSeries(100, 10, m.MinTime, m.MaxTime))
}
for _, m := range blocks {
createBlock(t, db.Dir(), genSeries(100, 10, m.MinTime, m.MaxTime))
}
headBlocks := []*BlockMeta{
{MinTime: 700, MaxTime: 800},
}
headBlocks := []*BlockMeta{
{MinTime: 700, MaxTime: 800},
}
// Add some data to the WAL.
headApp := db.Head().AppenderV2(context.Background())
var aSeries labels.Labels
var it chunkenc.Iterator
for _, m := range headBlocks {
series := genSeries(100, 10, m.MinTime, m.MaxTime+1)
for _, s := range series {
aSeries = s.Labels()
it = s.Iterator(it)
for it.Next() == chunkenc.ValFloat {
tim, v := it.At()
_, err := headApp.Append(0, s.Labels(), 0, tim, v, nil, nil, storage.AOptions{})
// Add some data to the WAL.
headApp := db.Head().AppenderV2(context.Background())
var aSeries labels.Labels
var it chunkenc.Iterator
for _, m := range headBlocks {
series := genSeries(100, 10, m.MinTime, m.MaxTime+1)
for _, s := range series {
aSeries = s.Labels()
it = s.Iterator(it)
for it.Next() == chunkenc.ValFloat {
tim, v := it.At()
_, err := headApp.Append(0, s.Labels(), 0, tim, v, nil, nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, it.Err())
}
}
require.NoError(t, headApp.Commit())
db.Head().mmapHeadChunks()
require.Eventually(t, func() bool {
return db.Head().chunkDiskMapper.IsQueueEmpty()
}, 2*time.Second, 100*time.Millisecond)
// Test that registered size matches the actual disk size.
require.NoError(t, db.reloadBlocks()) // Reload the db to register the new db size.
require.Len(t, db.Blocks(), len(blocks)) // Ensure all blocks are registered.
blockSize := int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics.
walSize, err := db.Head().wal.Size()
require.NoError(t, err)
cdmSize, err := db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
require.NotZero(t, cdmSize)
// Expected size should take into account block size + WAL size + Head
// chunks size
expSize := blockSize + walSize + cdmSize
actSize, err := fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
// Create a WAL checkpoint, and compare sizes.
first, last, err := wlog.Segments(db.Head().wal.Dir())
require.NoError(t, err)
_, err = wlog.Checkpoint(promslog.NewNopLogger(), db.Head().wal, first, last-1, func(chunks.HeadSeriesRef) bool { return false }, 0, enableStStorage)
require.NoError(t, err)
blockSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics.
walSize, err = db.Head().wal.Size()
require.NoError(t, err)
cdmSize, err = db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
require.NotZero(t, cdmSize)
expSize = blockSize + walSize + cdmSize
actSize, err = fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
// Truncate Chunk Disk Mapper and compare sizes.
require.NoError(t, db.Head().chunkDiskMapper.Truncate(900))
cdmSize, err = db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
require.NotZero(t, cdmSize)
expSize = blockSize + walSize + cdmSize
actSize, err = fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
// Add some out of order samples to check the size of WBL.
headApp = db.Head().AppenderV2(context.Background())
for ts := int64(750); ts < 800; ts++ {
_, err := headApp.Append(0, aSeries, 0, ts, float64(ts), nil, nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, it.Err())
}
require.NoError(t, headApp.Commit())
walSize, err = db.Head().wal.Size()
require.NoError(t, err)
wblSize, err := db.Head().wbl.Size()
require.NoError(t, err)
require.NotZero(t, wblSize)
cdmSize, err = db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
expSize = blockSize + walSize + wblSize + cdmSize
actSize, err = fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
// Decrease the max bytes limit so that a delete is triggered.
// Check total size, total count and check that the oldest block was deleted.
firstBlockSize := db.Blocks()[0].Size()
sizeLimit := actSize - firstBlockSize
db.opts.MaxBytes = sizeLimit // Set the new db size limit one block smaller than the actual size.
require.NoError(t, db.reloadBlocks()) // Reload the db to register the new db size.
expBlocks := blocks[1:]
actBlocks := db.Blocks()
blockSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes))
walSize, err = db.Head().wal.Size()
require.NoError(t, err)
cdmSize, err = db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
require.NotZero(t, cdmSize)
// Expected size should take into account block size + WAL size + WBL size
expSize = blockSize + walSize + wblSize + cdmSize
actRetentionCount := int(prom_testutil.ToFloat64(db.metrics.sizeRetentionCount))
actSize, err = fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, 1, actRetentionCount, "metric retention count mismatch")
require.Equal(t, expSize, actSize, "metric db size doesn't match actual disk size")
require.LessOrEqual(t, expSize, sizeLimit, "actual size (%v) is expected to be less than or equal to limit (%v)", expSize, sizeLimit)
require.Len(t, actBlocks, len(blocks)-1, "new block count should be decreased from:%v to:%v", len(blocks), len(blocks)-1)
require.Equal(t, expBlocks[0].MaxTime, actBlocks[0].meta.MaxTime, "maxT mismatch of the first block")
require.Equal(t, expBlocks[len(expBlocks)-1].MaxTime, actBlocks[len(actBlocks)-1].meta.MaxTime, "maxT mismatch of the last block")
})
}
require.NoError(t, headApp.Commit())
db.Head().mmapHeadChunks()
require.Eventually(t, func() bool {
return db.Head().chunkDiskMapper.IsQueueEmpty()
}, 2*time.Second, 100*time.Millisecond)
// Test that registered size matches the actual disk size.
require.NoError(t, db.reloadBlocks()) // Reload the db to register the new db size.
require.Len(t, db.Blocks(), len(blocks)) // Ensure all blocks are registered.
blockSize := int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics.
walSize, err := db.Head().wal.Size()
require.NoError(t, err)
cdmSize, err := db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
require.NotZero(t, cdmSize)
// Expected size should take into account block size + WAL size + Head
// chunks size
expSize := blockSize + walSize + cdmSize
actSize, err := fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
// Create a WAL checkpoint, and compare sizes.
first, last, err := wlog.Segments(db.Head().wal.Dir())
require.NoError(t, err)
_, err = wlog.Checkpoint(promslog.NewNopLogger(), db.Head().wal, first, last-1, func(chunks.HeadSeriesRef) bool { return false }, 0)
require.NoError(t, err)
blockSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics.
walSize, err = db.Head().wal.Size()
require.NoError(t, err)
cdmSize, err = db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
require.NotZero(t, cdmSize)
expSize = blockSize + walSize + cdmSize
actSize, err = fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
// Truncate Chunk Disk Mapper and compare sizes.
require.NoError(t, db.Head().chunkDiskMapper.Truncate(900))
cdmSize, err = db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
require.NotZero(t, cdmSize)
expSize = blockSize + walSize + cdmSize
actSize, err = fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
// Add some out of order samples to check the size of WBL.
headApp = db.Head().AppenderV2(context.Background())
for ts := int64(750); ts < 800; ts++ {
_, err := headApp.Append(0, aSeries, 0, ts, float64(ts), nil, nil, storage.AOptions{})
require.NoError(t, err)
}
require.NoError(t, headApp.Commit())
walSize, err = db.Head().wal.Size()
require.NoError(t, err)
wblSize, err := db.Head().wbl.Size()
require.NoError(t, err)
require.NotZero(t, wblSize)
cdmSize, err = db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
expSize = blockSize + walSize + wblSize + cdmSize
actSize, err = fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
// Decrease the max bytes limit so that a delete is triggered.
// Check total size, total count and check that the oldest block was deleted.
firstBlockSize := db.Blocks()[0].Size()
sizeLimit := actSize - firstBlockSize
db.opts.MaxBytes = sizeLimit // Set the new db size limit one block smaller than the actual size.
require.NoError(t, db.reloadBlocks()) // Reload the db to register the new db size.
expBlocks := blocks[1:]
actBlocks := db.Blocks()
blockSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes))
walSize, err = db.Head().wal.Size()
require.NoError(t, err)
cdmSize, err = db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
require.NotZero(t, cdmSize)
// Expected size should take into account block size + WAL size + WBL size
expSize = blockSize + walSize + wblSize + cdmSize
actRetentionCount := int(prom_testutil.ToFloat64(db.metrics.sizeRetentionCount))
actSize, err = fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, 1, actRetentionCount, "metric retention count mismatch")
require.Equal(t, expSize, actSize, "metric db size doesn't match actual disk size")
require.LessOrEqual(t, expSize, sizeLimit, "actual size (%v) is expected to be less than or equal to limit (%v)", expSize, sizeLimit)
require.Len(t, actBlocks, len(blocks)-1, "new block count should be decreased from:%v to:%v", len(blocks), len(blocks)-1)
require.Equal(t, expBlocks[0].MaxTime, actBlocks[0].meta.MaxTime, "maxT mismatch of the first block")
require.Equal(t, expBlocks[len(expBlocks)-1].MaxTime, actBlocks[len(actBlocks)-1].meta.MaxTime, "maxT mismatch of the last block")
}
func TestNotMatcherSelectsLabelsUnsetSeries_AppendV2(t *testing.T) {
@@ -1499,33 +1505,36 @@ func TestInitializeHeadTimestamp_AppendV2(t *testing.T) {
require.Equal(t, int64(1000), db.head.MaxTime())
require.True(t, db.head.initialized())
})
t.Run("wal-only", func(t *testing.T) {
dir := t.TempDir()
require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777))
w, err := wlog.New(nil, nil, path.Join(dir, "wal"), compression.None)
require.NoError(t, err)
for _, enableStStorage := range []bool{false, true} {
t.Run("wal-only-st-"+strconv.FormatBool(enableStStorage), func(t *testing.T) {
dir := t.TempDir()
var enc record.Encoder
err = w.Log(
enc.Series([]record.RefSeries{
{Ref: 123, Labels: labels.FromStrings("a", "1")},
{Ref: 124, Labels: labels.FromStrings("a", "2")},
}, nil),
enc.Samples([]record.RefSample{
{Ref: 123, T: 5000, V: 1},
{Ref: 124, T: 15000, V: 1},
}, nil),
)
require.NoError(t, err)
require.NoError(t, w.Close())
require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777))
w, err := wlog.New(nil, nil, path.Join(dir, "wal"), compression.None)
require.NoError(t, err)
db := newTestDB(t, withDir(dir))
enc := record.Encoder{EnableSTStorage: enableStStorage}
err = w.Log(
enc.Series([]record.RefSeries{
{Ref: 123, Labels: labels.FromStrings("a", "1")},
{Ref: 124, Labels: labels.FromStrings("a", "2")},
}, nil),
enc.Samples([]record.RefSample{
{Ref: 123, T: 5000, V: 1},
{Ref: 124, T: 15000, V: 1},
}, nil),
)
require.NoError(t, err)
require.NoError(t, w.Close())
require.Equal(t, int64(5000), db.head.MinTime())
require.Equal(t, int64(15000), db.head.MaxTime())
require.True(t, db.head.initialized())
})
db := newTestDB(t, withDir(dir))
require.Equal(t, int64(5000), db.head.MinTime())
require.Equal(t, int64(15000), db.head.MaxTime())
require.True(t, db.head.initialized())
})
}
t.Run("existing-block", func(t *testing.T) {
dir := t.TempDir()
@@ -1537,37 +1546,39 @@ func TestInitializeHeadTimestamp_AppendV2(t *testing.T) {
require.Equal(t, int64(2000), db.head.MaxTime())
require.True(t, db.head.initialized())
})
t.Run("existing-block-and-wal", func(t *testing.T) {
dir := t.TempDir()
for _, enableStStorage := range []bool{false, true} {
t.Run("existing-block-and-wal-st-"+strconv.FormatBool(enableStStorage), func(t *testing.T) {
dir := t.TempDir()
createBlock(t, dir, genSeries(1, 1, 1000, 6000))
createBlock(t, dir, genSeries(1, 1, 1000, 6000))
require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777))
w, err := wlog.New(nil, nil, path.Join(dir, "wal"), compression.None)
require.NoError(t, err)
require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777))
w, err := wlog.New(nil, nil, path.Join(dir, "wal"), compression.None)
require.NoError(t, err)
var enc record.Encoder
err = w.Log(
enc.Series([]record.RefSeries{
{Ref: 123, Labels: labels.FromStrings("a", "1")},
{Ref: 124, Labels: labels.FromStrings("a", "2")},
}, nil),
enc.Samples([]record.RefSample{
{Ref: 123, T: 5000, V: 1},
{Ref: 124, T: 15000, V: 1},
}, nil),
)
require.NoError(t, err)
require.NoError(t, w.Close())
enc := record.Encoder{EnableSTStorage: enableStStorage}
err = w.Log(
enc.Series([]record.RefSeries{
{Ref: 123, Labels: labels.FromStrings("a", "1")},
{Ref: 124, Labels: labels.FromStrings("a", "2")},
}, nil),
enc.Samples([]record.RefSample{
{Ref: 123, T: 5000, V: 1},
{Ref: 124, T: 15000, V: 1},
}, nil),
)
require.NoError(t, err)
require.NoError(t, w.Close())
db := newTestDB(t, withDir(dir))
db := newTestDB(t, withDir(dir))
require.Equal(t, int64(6000), db.head.MinTime())
require.Equal(t, int64(15000), db.head.MaxTime())
require.True(t, db.head.initialized())
// Check that old series has been GCed.
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.series))
})
require.Equal(t, int64(6000), db.head.MinTime())
require.Equal(t, int64(15000), db.head.MaxTime())
require.True(t, db.head.initialized())
// Check that old series has been GCed.
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.series))
})
}
}
func TestNoEmptyBlocks_AppendV2(t *testing.T) {
@@ -3273,7 +3284,7 @@ func testOOOWALWriteAppendV2(t *testing.T,
series, err := dec.Series(rec, nil)
require.NoError(t, err)
records = append(records, series)
case record.Samples:
case record.Samples, record.SamplesV2:
samples, err := dec.Samples(rec, nil)
require.NoError(t, err)
records = append(records, samples)
@@ -3430,112 +3441,116 @@ func TestMetadataInWAL_AppenderV2(t *testing.T) {
}
func TestMetadataCheckpointingOnlyKeepsLatestEntry_AppendV2(t *testing.T) {
ctx := context.Background()
numSamples := 10000
hb, w := newTestHead(t, int64(numSamples)*10, compression.None, false)
hb.opts.EnableMetadataWALRecords = true
for _, enableStStorage := range []bool{false, true} {
t.Run("enableStStorage="+strconv.FormatBool(enableStStorage), func(t *testing.T) {
ctx := context.Background()
numSamples := 10000
hb, w := newTestHead(t, int64(numSamples)*10, compression.None, false)
hb.opts.EnableMetadataWALRecords = true
// Add some series so we can append metadata to them.
s1 := labels.FromStrings("a", "b")
s2 := labels.FromStrings("c", "d")
s3 := labels.FromStrings("e", "f")
s4 := labels.FromStrings("g", "h")
// Add some series so we can append metadata to them.
s1 := labels.FromStrings("a", "b")
s2 := labels.FromStrings("c", "d")
s3 := labels.FromStrings("e", "f")
s4 := labels.FromStrings("g", "h")
m1 := metadata.Metadata{Type: "gauge", Unit: "unit_1", Help: "help_1"}
m2 := metadata.Metadata{Type: "gauge", Unit: "unit_2", Help: "help_2"}
m3 := metadata.Metadata{Type: "gauge", Unit: "unit_3", Help: "help_3"}
m4 := metadata.Metadata{Type: "gauge", Unit: "unit_4", Help: "help_4"}
m1 := metadata.Metadata{Type: "gauge", Unit: "unit_1", Help: "help_1"}
m2 := metadata.Metadata{Type: "gauge", Unit: "unit_2", Help: "help_2"}
m3 := metadata.Metadata{Type: "gauge", Unit: "unit_3", Help: "help_3"}
m4 := metadata.Metadata{Type: "gauge", Unit: "unit_4", Help: "help_4"}
app := hb.AppenderV2(ctx)
ts := int64(0)
_, err := app.Append(0, s1, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m1})
require.NoError(t, err)
_, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m2})
require.NoError(t, err)
_, err = app.Append(0, s3, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m3})
require.NoError(t, err)
_, err = app.Append(0, s4, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m4})
require.NoError(t, err)
require.NoError(t, app.Commit())
app := hb.AppenderV2(ctx)
ts := int64(0)
_, err := app.Append(0, s1, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m1})
require.NoError(t, err)
_, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m2})
require.NoError(t, err)
_, err = app.Append(0, s3, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m3})
require.NoError(t, err)
_, err = app.Append(0, s4, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m4})
require.NoError(t, err)
require.NoError(t, app.Commit())
// Update metadata for first series.
m5 := metadata.Metadata{Type: "counter", Unit: "unit_5", Help: "help_5"}
app = hb.AppenderV2(ctx)
ts++
_, err = app.Append(0, s1, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m5})
require.NoError(t, err)
require.NoError(t, app.Commit())
// Update metadata for first series.
m5 := metadata.Metadata{Type: "counter", Unit: "unit_5", Help: "help_5"}
app = hb.AppenderV2(ctx)
ts++
_, err = app.Append(0, s1, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m5})
require.NoError(t, err)
require.NoError(t, app.Commit())
// Switch back-and-forth metadata for second series.
// Since it ended on a new metadata record, we expect a single new entry.
m6 := metadata.Metadata{Type: "counter", Unit: "unit_6", Help: "help_6"}
// Switch back-and-forth metadata for second series.
// Since it ended on a new metadata record, we expect a single new entry.
m6 := metadata.Metadata{Type: "counter", Unit: "unit_6", Help: "help_6"}
app = hb.AppenderV2(ctx)
ts++
_, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m6})
require.NoError(t, err)
require.NoError(t, app.Commit())
app = hb.AppenderV2(ctx)
ts++
_, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m6})
require.NoError(t, err)
require.NoError(t, app.Commit())
app = hb.AppenderV2(ctx)
ts++
_, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m2})
require.NoError(t, err)
require.NoError(t, app.Commit())
app = hb.AppenderV2(ctx)
ts++
_, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m2})
require.NoError(t, err)
require.NoError(t, app.Commit())
app = hb.AppenderV2(ctx)
ts++
_, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m6})
require.NoError(t, err)
require.NoError(t, app.Commit())
app = hb.AppenderV2(ctx)
ts++
_, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m6})
require.NoError(t, err)
require.NoError(t, app.Commit())
app = hb.AppenderV2(ctx)
ts++
_, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m2})
require.NoError(t, err)
require.NoError(t, app.Commit())
app = hb.AppenderV2(ctx)
ts++
_, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m2})
require.NoError(t, err)
require.NoError(t, app.Commit())
app = hb.AppenderV2(ctx)
ts++
_, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m6})
require.NoError(t, err)
require.NoError(t, app.Commit())
app = hb.AppenderV2(ctx)
ts++
_, err = app.Append(0, s2, 0, ts, 0, nil, nil, storage.AOptions{Metadata: m6})
require.NoError(t, err)
require.NoError(t, app.Commit())
// Let's create a checkpoint.
first, last, err := wlog.Segments(w.Dir())
require.NoError(t, err)
keep := func(id chunks.HeadSeriesRef) bool {
return id != 3
// Let's create a checkpoint.
first, last, err := wlog.Segments(w.Dir())
require.NoError(t, err)
keep := func(id chunks.HeadSeriesRef) bool {
return id != 3
}
_, err = wlog.Checkpoint(promslog.NewNopLogger(), w, first, last-1, keep, 0, enableStStorage)
require.NoError(t, err)
// Confirm there's been a checkpoint.
cdir, _, err := wlog.LastCheckpoint(w.Dir())
require.NoError(t, err)
// Read in checkpoint and WAL.
recs := readTestWAL(t, cdir)
var gotMetadataBlocks [][]record.RefMetadata
for _, rec := range recs {
if mr, ok := rec.([]record.RefMetadata); ok {
gotMetadataBlocks = append(gotMetadataBlocks, mr)
}
}
// There should only be 1 metadata block present, with only the latest
// metadata kept around.
wantMetadata := []record.RefMetadata{
{Ref: 1, Type: record.GetMetricType(m5.Type), Unit: m5.Unit, Help: m5.Help},
{Ref: 2, Type: record.GetMetricType(m6.Type), Unit: m6.Unit, Help: m6.Help},
{Ref: 4, Type: record.GetMetricType(m4.Type), Unit: m4.Unit, Help: m4.Help},
}
require.Len(t, gotMetadataBlocks, 1)
require.Len(t, gotMetadataBlocks[0], 3)
gotMetadataBlock := gotMetadataBlocks[0]
sort.Slice(gotMetadataBlock, func(i, j int) bool { return gotMetadataBlock[i].Ref < gotMetadataBlock[j].Ref })
require.Equal(t, wantMetadata, gotMetadataBlock)
require.NoError(t, hb.Close())
})
}
_, err = wlog.Checkpoint(promslog.NewNopLogger(), w, first, last-1, keep, 0)
require.NoError(t, err)
// Confirm there's been a checkpoint.
cdir, _, err := wlog.LastCheckpoint(w.Dir())
require.NoError(t, err)
// Read in checkpoint and WAL.
recs := readTestWAL(t, cdir)
var gotMetadataBlocks [][]record.RefMetadata
for _, rec := range recs {
if mr, ok := rec.([]record.RefMetadata); ok {
gotMetadataBlocks = append(gotMetadataBlocks, mr)
}
}
// There should only be 1 metadata block present, with only the latest
// metadata kept around.
wantMetadata := []record.RefMetadata{
{Ref: 1, Type: record.GetMetricType(m5.Type), Unit: m5.Unit, Help: m5.Help},
{Ref: 2, Type: record.GetMetricType(m6.Type), Unit: m6.Unit, Help: m6.Help},
{Ref: 4, Type: record.GetMetricType(m4.Type), Unit: m4.Unit, Help: m4.Help},
}
require.Len(t, gotMetadataBlocks, 1)
require.Len(t, gotMetadataBlocks[0], 3)
gotMetadataBlock := gotMetadataBlocks[0]
sort.Slice(gotMetadataBlock, func(i, j int) bool { return gotMetadataBlock[i].Ref < gotMetadataBlock[j].Ref })
require.Equal(t, wantMetadata, gotMetadataBlock)
require.NoError(t, hb.Close())
}
func TestMetadataAssertInMemoryData_AppendV2(t *testing.T) {

@@ -394,7 +394,7 @@ func TestDataNotAvailableAfterRollback(t *testing.T) {
require.NoError(t, err)
walSeriesCount += len(series)
case record.Samples:
case record.Samples, record.SamplesV2:
var samples []record.RefSample
samples, err = dec.Samples(rec, samples)
require.NoError(t, err)
@@ -1169,24 +1169,25 @@ func TestWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T) {
// We test both with few and many samples appended after series creation. If samples are < 120 then there's no
// mmap-ed chunk, otherwise there's at least 1 mmap-ed chunk when replaying the WAL.
for _, numSamplesAfterSeriesCreation := range []int{1, 1000} {
for run := 1; run <= numRuns; run++ {
t.Run(fmt.Sprintf("samples after series creation = %d, run = %d", numSamplesAfterSeriesCreation, run), func(t *testing.T) {
testWALReplayRaceOnSamplesLoggedBeforeSeries(t, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation)
})
for _, enableStStorage := range []bool{false, true} {
for _, numSamplesAfterSeriesCreation := range []int{1, 1000} {
for run := 1; run <= numRuns; run++ {
t.Run(fmt.Sprintf("samples after series creation = %d, run = %d, stStorage=%v", numSamplesAfterSeriesCreation, run, enableStStorage), func(t *testing.T) {
testWALReplayRaceOnSamplesLoggedBeforeSeries(t, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation, enableStStorage)
})
}
}
}
}
func testWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation int) {
func testWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T, numSamplesBeforeSeriesCreation, numSamplesAfterSeriesCreation int, enableStStorage bool) {
const numSeries = 1000
db := newTestDB(t)
db.DisableCompactions()
for seriesRef := 1; seriesRef <= numSeries; seriesRef++ {
// Log samples before the series is logged to the WAL.
var enc record.Encoder
enc := record.Encoder{EnableSTStorage: enableStStorage}
var samples []record.RefSample
for ts := range numSamplesBeforeSeriesCreation {
@@ -1550,139 +1551,143 @@ func TestRetentionDurationMetric(t *testing.T) {
func TestSizeRetention(t *testing.T) {
t.Parallel()
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 100
db := newTestDB(t, withOpts(opts), withRngs(100))
for _, enableStStorage := range []bool{false, true} {
t.Run("enableStStorage="+strconv.FormatBool(enableStStorage), func(t *testing.T) {
opts := DefaultOptions()
opts.OutOfOrderTimeWindow = 100
db := newTestDB(t, withOpts(opts), withRngs(100))
blocks := []*BlockMeta{
{MinTime: 100, MaxTime: 200}, // Oldest block
{MinTime: 200, MaxTime: 300},
{MinTime: 300, MaxTime: 400},
{MinTime: 400, MaxTime: 500},
{MinTime: 500, MaxTime: 600}, // Newest Block
}
blocks := []*BlockMeta{
{MinTime: 100, MaxTime: 200}, // Oldest block
{MinTime: 200, MaxTime: 300},
{MinTime: 300, MaxTime: 400},
{MinTime: 400, MaxTime: 500},
{MinTime: 500, MaxTime: 600}, // Newest Block
}
for _, m := range blocks {
createBlock(t, db.Dir(), genSeries(100, 10, m.MinTime, m.MaxTime))
}
for _, m := range blocks {
createBlock(t, db.Dir(), genSeries(100, 10, m.MinTime, m.MaxTime))
}
headBlocks := []*BlockMeta{
{MinTime: 700, MaxTime: 800},
}
headBlocks := []*BlockMeta{
{MinTime: 700, MaxTime: 800},
}
// Add some data to the WAL.
headApp := db.Head().Appender(context.Background())
var aSeries labels.Labels
var it chunkenc.Iterator
for _, m := range headBlocks {
series := genSeries(100, 10, m.MinTime, m.MaxTime+1)
for _, s := range series {
aSeries = s.Labels()
it = s.Iterator(it)
for it.Next() == chunkenc.ValFloat {
tim, v := it.At()
_, err := headApp.Append(0, s.Labels(), tim, v)
// Add some data to the WAL.
headApp := db.Head().Appender(context.Background())
var aSeries labels.Labels
var it chunkenc.Iterator
for _, m := range headBlocks {
series := genSeries(100, 10, m.MinTime, m.MaxTime+1)
for _, s := range series {
aSeries = s.Labels()
it = s.Iterator(it)
for it.Next() == chunkenc.ValFloat {
tim, v := it.At()
_, err := headApp.Append(0, s.Labels(), tim, v)
require.NoError(t, err)
}
require.NoError(t, it.Err())
}
}
require.NoError(t, headApp.Commit())
db.Head().mmapHeadChunks()
require.Eventually(t, func() bool {
return db.Head().chunkDiskMapper.IsQueueEmpty()
}, 2*time.Second, 100*time.Millisecond)
// Test that registered size matches the actual disk size.
require.NoError(t, db.reloadBlocks()) // Reload the db to register the new db size.
require.Len(t, db.Blocks(), len(blocks)) // Ensure all blocks are registered.
blockSize := int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics.
walSize, err := db.Head().wal.Size()
require.NoError(t, err)
cdmSize, err := db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
require.NotZero(t, cdmSize)
// Expected size should take into account block size + WAL size + Head
// chunks size
expSize := blockSize + walSize + cdmSize
actSize, err := fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
// Create a WAL checkpoint, and compare sizes.
first, last, err := wlog.Segments(db.Head().wal.Dir())
require.NoError(t, err)
_, err = wlog.Checkpoint(promslog.NewNopLogger(), db.Head().wal, first, last-1, func(chunks.HeadSeriesRef) bool { return false }, 0, enableStStorage)
require.NoError(t, err)
blockSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics.
walSize, err = db.Head().wal.Size()
require.NoError(t, err)
cdmSize, err = db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
require.NotZero(t, cdmSize)
expSize = blockSize + walSize + cdmSize
actSize, err = fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
// Truncate Chunk Disk Mapper and compare sizes.
require.NoError(t, db.Head().chunkDiskMapper.Truncate(900))
cdmSize, err = db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
require.NotZero(t, cdmSize)
expSize = blockSize + walSize + cdmSize
actSize, err = fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
// Add some out of order samples to check the size of WBL.
headApp = db.Head().Appender(context.Background())
for ts := int64(750); ts < 800; ts++ {
_, err := headApp.Append(0, aSeries, ts, float64(ts))
require.NoError(t, err)
}
require.NoError(t, it.Err())
}
require.NoError(t, headApp.Commit())
walSize, err = db.Head().wal.Size()
require.NoError(t, err)
wblSize, err := db.Head().wbl.Size()
require.NoError(t, err)
require.NotZero(t, wblSize)
cdmSize, err = db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
expSize = blockSize + walSize + wblSize + cdmSize
actSize, err = fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
// Decrease the max bytes limit so that a delete is triggered.
// Check total size, total count and check that the oldest block was deleted.
firstBlockSize := db.Blocks()[0].Size()
sizeLimit := actSize - firstBlockSize
db.opts.MaxBytes = sizeLimit // Set the new db size limit one block smaller than the actual size.
require.NoError(t, db.reloadBlocks()) // Reload the db to register the new db size.
expBlocks := blocks[1:]
actBlocks := db.Blocks()
blockSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes))
walSize, err = db.Head().wal.Size()
require.NoError(t, err)
cdmSize, err = db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
require.NotZero(t, cdmSize)
// Expected size should take into account block size + WAL size + WBL size
expSize = blockSize + walSize + wblSize + cdmSize
actRetentionCount := int(prom_testutil.ToFloat64(db.metrics.sizeRetentionCount))
actSize, err = fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, 1, actRetentionCount, "metric retention count mismatch")
require.Equal(t, expSize, actSize, "metric db size doesn't match actual disk size")
require.LessOrEqual(t, expSize, sizeLimit, "actual size (%v) is expected to be less than or equal to limit (%v)", expSize, sizeLimit)
require.Len(t, actBlocks, len(blocks)-1, "new block count should be decreased from:%v to:%v", len(blocks), len(blocks)-1)
require.Equal(t, expBlocks[0].MaxTime, actBlocks[0].meta.MaxTime, "maxT mismatch of the first block")
require.Equal(t, expBlocks[len(expBlocks)-1].MaxTime, actBlocks[len(actBlocks)-1].meta.MaxTime, "maxT mismatch of the last block")
})
}
require.NoError(t, headApp.Commit())
db.Head().mmapHeadChunks()
require.Eventually(t, func() bool {
return db.Head().chunkDiskMapper.IsQueueEmpty()
}, 2*time.Second, 100*time.Millisecond)
// Test that registered size matches the actual disk size.
require.NoError(t, db.reloadBlocks()) // Reload the db to register the new db size.
require.Len(t, db.Blocks(), len(blocks)) // Ensure all blocks are registered.
blockSize := int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics.
walSize, err := db.Head().wal.Size()
require.NoError(t, err)
cdmSize, err := db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
require.NotZero(t, cdmSize)
// Expected size should take into account block size + WAL size + Head
// chunks size
expSize := blockSize + walSize + cdmSize
actSize, err := fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
// Create a WAL checkpoint, and compare sizes.
first, last, err := wlog.Segments(db.Head().wal.Dir())
require.NoError(t, err)
_, err = wlog.Checkpoint(promslog.NewNopLogger(), db.Head().wal, first, last-1, func(chunks.HeadSeriesRef) bool { return false }, 0)
require.NoError(t, err)
blockSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics.
walSize, err = db.Head().wal.Size()
require.NoError(t, err)
cdmSize, err = db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
require.NotZero(t, cdmSize)
expSize = blockSize + walSize + cdmSize
actSize, err = fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
// Truncate Chunk Disk Mapper and compare sizes.
require.NoError(t, db.Head().chunkDiskMapper.Truncate(900))
cdmSize, err = db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
require.NotZero(t, cdmSize)
expSize = blockSize + walSize + cdmSize
actSize, err = fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
// Add some out of order samples to check the size of WBL.
headApp = db.Head().Appender(context.Background())
for ts := int64(750); ts < 800; ts++ {
_, err := headApp.Append(0, aSeries, ts, float64(ts))
require.NoError(t, err)
}
require.NoError(t, headApp.Commit())
walSize, err = db.Head().wal.Size()
require.NoError(t, err)
wblSize, err := db.Head().wbl.Size()
require.NoError(t, err)
require.NotZero(t, wblSize)
cdmSize, err = db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
expSize = blockSize + walSize + wblSize + cdmSize
actSize, err = fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, expSize, actSize, "registered size doesn't match actual disk size")
// Decrease the max bytes limit so that a delete is triggered.
// Check total size, total count and check that the oldest block was deleted.
firstBlockSize := db.Blocks()[0].Size()
sizeLimit := actSize - firstBlockSize
db.opts.MaxBytes = sizeLimit // Set the new db size limit one block smaller than the actual size.
require.NoError(t, db.reloadBlocks()) // Reload the db to register the new db size.
expBlocks := blocks[1:]
actBlocks := db.Blocks()
blockSize = int64(prom_testutil.ToFloat64(db.metrics.blocksBytes))
walSize, err = db.Head().wal.Size()
require.NoError(t, err)
cdmSize, err = db.Head().chunkDiskMapper.Size()
require.NoError(t, err)
require.NotZero(t, cdmSize)
// Expected size should take into account block size + WAL size + WBL size
expSize = blockSize + walSize + wblSize + cdmSize
actRetentionCount := int(prom_testutil.ToFloat64(db.metrics.sizeRetentionCount))
actSize, err = fileutil.DirSize(db.Dir())
require.NoError(t, err)
require.Equal(t, 1, actRetentionCount, "metric retention count mismatch")
require.Equal(t, expSize, actSize, "metric db size doesn't match actual disk size")
require.LessOrEqual(t, expSize, sizeLimit, "actual size (%v) is expected to be less than or equal to limit (%v)", expSize, sizeLimit)
require.Len(t, actBlocks, len(blocks)-1, "new block count should be decreased from:%v to:%v", len(blocks), len(blocks)-1)
require.Equal(t, expBlocks[0].MaxTime, actBlocks[0].meta.MaxTime, "maxT mismatch of the first block")
require.Equal(t, expBlocks[len(expBlocks)-1].MaxTime, actBlocks[len(actBlocks)-1].meta.MaxTime, "maxT mismatch of the last block")
}
func TestSizeRetentionMetric(t *testing.T) {
@@ -2071,33 +2076,36 @@ func TestInitializeHeadTimestamp(t *testing.T) {
require.Equal(t, int64(1000), db.head.MaxTime())
require.True(t, db.head.initialized())
})
t.Run("wal-only", func(t *testing.T) {
dir := t.TempDir()
require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777))
w, err := wlog.New(nil, nil, path.Join(dir, "wal"), compression.None)
require.NoError(t, err)
for _, enableStStorage := range []bool{false, true} {
t.Run("wal-only-st-"+strconv.FormatBool(enableStStorage), func(t *testing.T) {
dir := t.TempDir()
var enc record.Encoder
err = w.Log(
enc.Series([]record.RefSeries{
{Ref: 123, Labels: labels.FromStrings("a", "1")},
{Ref: 124, Labels: labels.FromStrings("a", "2")},
}, nil),
enc.Samples([]record.RefSample{
{Ref: 123, T: 5000, V: 1},
{Ref: 124, T: 15000, V: 1},
}, nil),
)
require.NoError(t, err)
require.NoError(t, w.Close())
require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777))
w, err := wlog.New(nil, nil, path.Join(dir, "wal"), compression.None)
require.NoError(t, err)
db := newTestDB(t, withDir(dir))
enc := record.Encoder{EnableSTStorage: enableStStorage}
err = w.Log(
enc.Series([]record.RefSeries{
{Ref: 123, Labels: labels.FromStrings("a", "1")},
{Ref: 124, Labels: labels.FromStrings("a", "2")},
}, nil),
enc.Samples([]record.RefSample{
{Ref: 123, T: 5000, V: 1},
{Ref: 124, T: 15000, V: 1},
}, nil),
)
require.NoError(t, err)
require.NoError(t, w.Close())
require.Equal(t, int64(5000), db.head.MinTime())
require.Equal(t, int64(15000), db.head.MaxTime())
require.True(t, db.head.initialized())
})
db := newTestDB(t, withDir(dir))
require.Equal(t, int64(5000), db.head.MinTime())
require.Equal(t, int64(15000), db.head.MaxTime())
require.True(t, db.head.initialized())
})
}
t.Run("existing-block", func(t *testing.T) {
dir := t.TempDir()
@@ -2109,37 +2117,40 @@ func TestInitializeHeadTimestamp(t *testing.T) {
require.Equal(t, int64(2000), db.head.MaxTime())
require.True(t, db.head.initialized())
})
t.Run("existing-block-and-wal", func(t *testing.T) {
dir := t.TempDir()
createBlock(t, dir, genSeries(1, 1, 1000, 6000))
for _, enableStStorage := range []bool{false, true} {
t.Run("existing-block-and-wal-"+strconv.FormatBool(enableStStorage), func(t *testing.T) {
dir := t.TempDir()
require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777))
w, err := wlog.New(nil, nil, path.Join(dir, "wal"), compression.None)
require.NoError(t, err)
createBlock(t, dir, genSeries(1, 1, 1000, 6000))
var enc record.Encoder
err = w.Log(
enc.Series([]record.RefSeries{
{Ref: 123, Labels: labels.FromStrings("a", "1")},
{Ref: 124, Labels: labels.FromStrings("a", "2")},
}, nil),
enc.Samples([]record.RefSample{
{Ref: 123, T: 5000, V: 1},
{Ref: 124, T: 15000, V: 1},
}, nil),
)
require.NoError(t, err)
require.NoError(t, w.Close())
require.NoError(t, os.MkdirAll(path.Join(dir, "wal"), 0o777))
w, err := wlog.New(nil, nil, path.Join(dir, "wal"), compression.None)
require.NoError(t, err)
db := newTestDB(t, withDir(dir))
enc := record.Encoder{EnableSTStorage: enableStStorage}
err = w.Log(
enc.Series([]record.RefSeries{
{Ref: 123, Labels: labels.FromStrings("a", "1")},
{Ref: 124, Labels: labels.FromStrings("a", "2")},
}, nil),
enc.Samples([]record.RefSample{
{Ref: 123, T: 5000, V: 1},
{Ref: 124, T: 15000, V: 1},
}, nil),
)
require.NoError(t, err)
require.NoError(t, w.Close())
require.Equal(t, int64(6000), db.head.MinTime())
require.Equal(t, int64(15000), db.head.MaxTime())
require.True(t, db.head.initialized())
// Check that old series has been GCed.
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.series))
})
db := newTestDB(t, withDir(dir))
require.Equal(t, int64(6000), db.head.MinTime())
require.Equal(t, int64(15000), db.head.MaxTime())
require.True(t, db.head.initialized())
// Check that old series has been GCed.
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.series))
})
}
}
func TestNoEmptyBlocks(t *testing.T) {
@@ -4530,7 +4541,7 @@ func testOOOWALWrite(t *testing.T,
series, err := dec.Series(rec, nil)
require.NoError(t, err)
records = append(records, series)
case record.Samples:
case record.Samples, record.SamplesV2:
samples, err := dec.Samples(rec, nil)
require.NoError(t, err)
records = append(records, samples)
@@ -4691,102 +4702,106 @@ func TestMetadataCheckpointingOnlyKeepsLatestEntry(t *testing.T) {
require.NoError(t, err)
}
ctx := context.Background()
numSamples := 10000
hb, w := newTestHead(t, int64(numSamples)*10, compression.None, false)
for _, enableStStorage := range []bool{false, true} {
t.Run("enableStStorage="+strconv.FormatBool(enableStStorage), func(t *testing.T) {
ctx := context.Background()
numSamples := 10000
hb, w := newTestHead(t, int64(numSamples)*10, compression.None, false)
// Add some series so we can append metadata to them.
app := hb.Appender(ctx)
s1 := labels.FromStrings("a", "b")
s2 := labels.FromStrings("c", "d")
s3 := labels.FromStrings("e", "f")
s4 := labels.FromStrings("g", "h")
// Add some series so we can append metadata to them.
app := hb.Appender(ctx)
s1 := labels.FromStrings("a", "b")
s2 := labels.FromStrings("c", "d")
s3 := labels.FromStrings("e", "f")
s4 := labels.FromStrings("g", "h")
for _, s := range []labels.Labels{s1, s2, s3, s4} {
_, err := app.Append(0, s, 0, 0)
require.NoError(t, err)
for _, s := range []labels.Labels{s1, s2, s3, s4} {
_, err := app.Append(0, s, 0, 0)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
// Add a first round of metadata to the first three series.
// Re-take the Appender, as the previous Commit will have it closed.
m1 := metadata.Metadata{Type: "gauge", Unit: "unit_1", Help: "help_1"}
m2 := metadata.Metadata{Type: "gauge", Unit: "unit_2", Help: "help_2"}
m3 := metadata.Metadata{Type: "gauge", Unit: "unit_3", Help: "help_3"}
m4 := metadata.Metadata{Type: "gauge", Unit: "unit_4", Help: "help_4"}
app = hb.Appender(ctx)
updateMetadata(t, app, s1, m1)
updateMetadata(t, app, s2, m2)
updateMetadata(t, app, s3, m3)
updateMetadata(t, app, s4, m4)
require.NoError(t, app.Commit())
// Update metadata for first series.
m5 := metadata.Metadata{Type: "counter", Unit: "unit_5", Help: "help_5"}
app = hb.Appender(ctx)
updateMetadata(t, app, s1, m5)
require.NoError(t, app.Commit())
// Switch back-and-forth metadata for second series.
// Since it ended on a new metadata record, we expect a single new entry.
m6 := metadata.Metadata{Type: "counter", Unit: "unit_6", Help: "help_6"}
app = hb.Appender(ctx)
updateMetadata(t, app, s2, m6)
require.NoError(t, app.Commit())
app = hb.Appender(ctx)
updateMetadata(t, app, s2, m2)
require.NoError(t, app.Commit())
app = hb.Appender(ctx)
updateMetadata(t, app, s2, m6)
require.NoError(t, app.Commit())
app = hb.Appender(ctx)
updateMetadata(t, app, s2, m2)
require.NoError(t, app.Commit())
app = hb.Appender(ctx)
updateMetadata(t, app, s2, m6)
require.NoError(t, app.Commit())
// Let's create a checkpoint.
first, last, err := wlog.Segments(w.Dir())
require.NoError(t, err)
keep := func(id chunks.HeadSeriesRef) bool {
return id != 3
}
_, err = wlog.Checkpoint(promslog.NewNopLogger(), w, first, last-1, keep, 0, enableStStorage)
require.NoError(t, err)
// Confirm there's been a checkpoint.
cdir, _, err := wlog.LastCheckpoint(w.Dir())
require.NoError(t, err)
// Read in checkpoint and WAL.
recs := readTestWAL(t, cdir)
var gotMetadataBlocks [][]record.RefMetadata
for _, rec := range recs {
if mr, ok := rec.([]record.RefMetadata); ok {
gotMetadataBlocks = append(gotMetadataBlocks, mr)
}
}
// There should only be 1 metadata block present, with only the latest
// metadata kept around.
wantMetadata := []record.RefMetadata{
{Ref: 1, Type: record.GetMetricType(m5.Type), Unit: m5.Unit, Help: m5.Help},
{Ref: 2, Type: record.GetMetricType(m6.Type), Unit: m6.Unit, Help: m6.Help},
{Ref: 4, Type: record.GetMetricType(m4.Type), Unit: m4.Unit, Help: m4.Help},
}
require.Len(t, gotMetadataBlocks, 1)
require.Len(t, gotMetadataBlocks[0], 3)
gotMetadataBlock := gotMetadataBlocks[0]
sort.Slice(gotMetadataBlock, func(i, j int) bool { return gotMetadataBlock[i].Ref < gotMetadataBlock[j].Ref })
require.Equal(t, wantMetadata, gotMetadataBlock)
require.NoError(t, hb.Close())
})
}
require.NoError(t, app.Commit())
// Add a first round of metadata to the first three series.
// Re-take the Appender, as the previous Commit will have it closed.
m1 := metadata.Metadata{Type: "gauge", Unit: "unit_1", Help: "help_1"}
m2 := metadata.Metadata{Type: "gauge", Unit: "unit_2", Help: "help_2"}
m3 := metadata.Metadata{Type: "gauge", Unit: "unit_3", Help: "help_3"}
m4 := metadata.Metadata{Type: "gauge", Unit: "unit_4", Help: "help_4"}
app = hb.Appender(ctx)
updateMetadata(t, app, s1, m1)
updateMetadata(t, app, s2, m2)
updateMetadata(t, app, s3, m3)
updateMetadata(t, app, s4, m4)
require.NoError(t, app.Commit())
// Update metadata for first series.
m5 := metadata.Metadata{Type: "counter", Unit: "unit_5", Help: "help_5"}
app = hb.Appender(ctx)
updateMetadata(t, app, s1, m5)
require.NoError(t, app.Commit())
// Switch back-and-forth metadata for second series.
// Since it ended on a new metadata record, we expect a single new entry.
m6 := metadata.Metadata{Type: "counter", Unit: "unit_6", Help: "help_6"}
app = hb.Appender(ctx)
updateMetadata(t, app, s2, m6)
require.NoError(t, app.Commit())
app = hb.Appender(ctx)
updateMetadata(t, app, s2, m2)
require.NoError(t, app.Commit())
app = hb.Appender(ctx)
updateMetadata(t, app, s2, m6)
require.NoError(t, app.Commit())
app = hb.Appender(ctx)
updateMetadata(t, app, s2, m2)
require.NoError(t, app.Commit())
app = hb.Appender(ctx)
updateMetadata(t, app, s2, m6)
require.NoError(t, app.Commit())
// Let's create a checkpoint.
first, last, err := wlog.Segments(w.Dir())
require.NoError(t, err)
keep := func(id chunks.HeadSeriesRef) bool {
return id != 3
}
_, err = wlog.Checkpoint(promslog.NewNopLogger(), w, first, last-1, keep, 0)
require.NoError(t, err)
// Confirm there's been a checkpoint.
cdir, _, err := wlog.LastCheckpoint(w.Dir())
require.NoError(t, err)
// Read in checkpoint and WAL.
recs := readTestWAL(t, cdir)
var gotMetadataBlocks [][]record.RefMetadata
for _, rec := range recs {
if mr, ok := rec.([]record.RefMetadata); ok {
gotMetadataBlocks = append(gotMetadataBlocks, mr)
}
}
// There should only be 1 metadata block present, with only the latest
// metadata kept around.
wantMetadata := []record.RefMetadata{
{Ref: 1, Type: record.GetMetricType(m5.Type), Unit: m5.Unit, Help: m5.Help},
{Ref: 2, Type: record.GetMetricType(m6.Type), Unit: m6.Unit, Help: m6.Help},
{Ref: 4, Type: record.GetMetricType(m4.Type), Unit: m4.Unit, Help: m4.Help},
}
require.Len(t, gotMetadataBlocks, 1)
require.Len(t, gotMetadataBlocks[0], 3)
gotMetadataBlock := gotMetadataBlocks[0]
sort.Slice(gotMetadataBlock, func(i, j int) bool { return gotMetadataBlock[i].Ref < gotMetadataBlock[j].Ref })
require.Equal(t, wantMetadata, gotMetadataBlock)
require.NoError(t, hb.Close())
}
func TestMetadataAssertInMemoryData(t *testing.T) {

View file

@@ -201,6 +201,11 @@ type HeadOptions struct {
// NOTE(bwplotka): This feature might be deprecated and removed once PROM-60
// is implemented.
EnableMetadataWALRecords bool
// EnableSTStorage determines whether databases (WAL/WBL, tsdb,
// agent) should set a Start Time value per sample. Currently not
// user-settable and only set in tests.
EnableSTStorage bool
}
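// For illustration, a minimal sketch of toggling the flag in a test. DefaultHeadOptions,
// ChunkDirRoot and the NewHead call mirror test code elsewhere in this change; the helper
// name and the wal argument are assumptions, not part of the diff.
func newTestHeadWithSTStorage(t *testing.T, wal *wlog.WL) *Head {
	opts := DefaultHeadOptions()
	opts.ChunkDirRoot = t.TempDir()
	opts.EnableSTStorage = true // test-only for now, as noted above
	h, err := NewHead(nil, nil, wal, nil, opts, nil)
	require.NoError(t, err)
	return h
}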
const (
@@ -1382,7 +1387,7 @@ func (h *Head) truncateWAL(mint int64) error {
}
h.metrics.checkpointCreationTotal.Inc()
if _, err = wlog.Checkpoint(h.logger, h.wal, first, last, h.keepSeriesInWALCheckpointFn(mint), mint); err != nil {
if _, err = wlog.Checkpoint(h.logger, h.wal, first, last, h.keepSeriesInWALCheckpointFn(mint), mint, h.opts.EnableSTStorage); err != nil {
h.metrics.checkpointCreationFail.Inc()
var cerr *chunks.CorruptionErr
if errors.As(err, &cerr) {
@@ -1676,7 +1681,7 @@ func (h *Head) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Match
}
if h.wal != nil {
var enc record.Encoder
enc := record.Encoder{EnableSTStorage: h.opts.EnableSTStorage}
if err := h.wal.Log(enc.Tombstones(stones, nil)); err != nil {
return err
}

View file

@@ -1059,7 +1059,7 @@ func (a *headAppenderBase) log() error {
defer func() { a.head.putBytesBuffer(buf) }()
var rec []byte
var enc record.Encoder
enc := record.Encoder{EnableSTStorage: a.head.opts.EnableSTStorage}
if len(a.seriesRefs) > 0 {
rec = enc.Series(a.seriesRefs, buf)
@@ -1742,6 +1742,9 @@ func (a *headAppenderBase) Commit() (err error) {
chunkRange: h.chunkRange.Load(),
samplesPerChunk: h.opts.SamplesPerChunk,
},
enc: record.Encoder{
EnableSTStorage: h.opts.EnableSTStorage,
},
}
for _, b := range a.batches {

View file

@@ -1867,296 +1867,300 @@ func TestHistogramInWALAndMmapChunk_AppenderV2(t *testing.T) {
}
func TestChunkSnapshot_AppenderV2(t *testing.T) {
head, _ := newTestHead(t, 120*4, compression.None, false)
defer func() {
head.opts.EnableMemorySnapshotOnShutdown = false
require.NoError(t, head.Close())
}()
for _, enableStStorage := range []bool{false, true} {
t.Run("enableStStorage="+strconv.FormatBool(enableStStorage), func(t *testing.T) {
head, _ := newTestHead(t, 120*4, compression.None, false)
defer func() {
head.opts.EnableMemorySnapshotOnShutdown = false
require.NoError(t, head.Close())
}()
type ex struct {
seriesLabels labels.Labels
e exemplar.Exemplar
}
numSeries := 10
expSeries := make(map[string][]chunks.Sample)
expHist := make(map[string][]chunks.Sample)
expFloatHist := make(map[string][]chunks.Sample)
expTombstones := make(map[storage.SeriesRef]tombstones.Intervals)
expExemplars := make([]ex, 0)
histograms := tsdbutil.GenerateTestGaugeHistograms(481)
floatHistogram := tsdbutil.GenerateTestGaugeFloatHistograms(481)
newExemplar := func(lbls labels.Labels, ts int64) exemplar.Exemplar {
e := ex{
seriesLabels: lbls,
e: exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
Value: rand.Float64(),
Ts: ts,
},
}
expExemplars = append(expExemplars, e)
return e.e
}
checkSamples := func() {
q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64)
require.NoError(t, err)
series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
require.Equal(t, expSeries, series)
}
checkHistograms := func() {
q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64)
require.NoError(t, err)
series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "hist", "baz.*"))
require.Equal(t, expHist, series)
}
checkFloatHistograms := func() {
q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64)
require.NoError(t, err)
series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "floathist", "bat.*"))
require.Equal(t, expFloatHist, series)
}
checkTombstones := func() {
tr, err := head.Tombstones()
require.NoError(t, err)
actTombstones := make(map[storage.SeriesRef]tombstones.Intervals)
require.NoError(t, tr.Iter(func(ref storage.SeriesRef, itvs tombstones.Intervals) error {
for _, itv := range itvs {
actTombstones[ref].Add(itv)
type ex struct {
seriesLabels labels.Labels
e exemplar.Exemplar
}
numSeries := 10
expSeries := make(map[string][]chunks.Sample)
expHist := make(map[string][]chunks.Sample)
expFloatHist := make(map[string][]chunks.Sample)
expTombstones := make(map[storage.SeriesRef]tombstones.Intervals)
expExemplars := make([]ex, 0)
histograms := tsdbutil.GenerateTestGaugeHistograms(481)
floatHistogram := tsdbutil.GenerateTestGaugeFloatHistograms(481)
newExemplar := func(lbls labels.Labels, ts int64) exemplar.Exemplar {
e := ex{
seriesLabels: lbls,
e: exemplar.Exemplar{
Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())),
Value: rand.Float64(),
Ts: ts,
},
}
expExemplars = append(expExemplars, e)
return e.e
}
checkSamples := func() {
q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64)
require.NoError(t, err)
series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
require.Equal(t, expSeries, series)
}
checkHistograms := func() {
q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64)
require.NoError(t, err)
series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "hist", "baz.*"))
require.Equal(t, expHist, series)
}
checkFloatHistograms := func() {
q, err := NewBlockQuerier(head, math.MinInt64, math.MaxInt64)
require.NoError(t, err)
series := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "floathist", "bat.*"))
require.Equal(t, expFloatHist, series)
}
checkTombstones := func() {
tr, err := head.Tombstones()
require.NoError(t, err)
actTombstones := make(map[storage.SeriesRef]tombstones.Intervals)
require.NoError(t, tr.Iter(func(ref storage.SeriesRef, itvs tombstones.Intervals) error {
for _, itv := range itvs {
actTombstones[ref].Add(itv)
}
return nil
}))
require.Equal(t, expTombstones, actTombstones)
}
checkExemplars := func() {
actExemplars := make([]ex, 0, len(expExemplars))
err := head.exemplars.IterateExemplars(func(seriesLabels labels.Labels, e exemplar.Exemplar) error {
actExemplars = append(actExemplars, ex{
seriesLabels: seriesLabels,
e: e,
})
return nil
})
require.NoError(t, err)
// Verifies both existence of right exemplars and order of exemplars in the buffer.
testutil.RequireEqualWithOptions(t, expExemplars, actExemplars, []cmp.Option{cmp.AllowUnexported(ex{})})
}
var (
wlast, woffset int
err error
)
closeHeadAndCheckSnapshot := func() {
require.NoError(t, head.Close())
_, sidx, soffset, err := LastChunkSnapshot(head.opts.ChunkDirRoot)
require.NoError(t, err)
require.Equal(t, wlast, sidx)
require.Equal(t, woffset, soffset)
}
openHeadAndCheckReplay := func() {
w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, compression.None)
require.NoError(t, err)
head, err = NewHead(nil, nil, w, nil, head.opts, nil)
require.NoError(t, err)
require.NoError(t, head.Init(math.MinInt64))
checkSamples()
checkHistograms()
checkFloatHistograms()
checkTombstones()
checkExemplars()
}
{ // Initial data that goes into snapshot.
// Add some initial samples with >=1 m-map chunk.
app := head.AppenderV2(context.Background())
for i := 1; i <= numSeries; i++ {
lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", i))
lblStr := lbls.String()
lblsHist := labels.FromStrings("hist", fmt.Sprintf("baz%d", i))
lblsHistStr := lblsHist.String()
lblsFloatHist := labels.FromStrings("floathist", fmt.Sprintf("bat%d", i))
lblsFloatHistStr := lblsFloatHist.String()
// 240 samples should m-map at least 1 chunk.
for ts := int64(1); ts <= 240; ts++ {
// Add an exemplar, but only to float sample.
aOpts := storage.AOptions{}
if ts%10 == 0 {
aOpts.Exemplars = []exemplar.Exemplar{newExemplar(lbls, ts)}
}
val := rand.Float64()
expSeries[lblStr] = append(expSeries[lblStr], sample{0, ts, val, nil, nil})
_, err := app.Append(0, lbls, 0, ts, val, nil, nil, aOpts)
require.NoError(t, err)
hist := histograms[int(ts)]
expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{0, ts, 0, hist, nil})
_, err = app.Append(0, lblsHist, 0, ts, 0, hist, nil, storage.AOptions{})
require.NoError(t, err)
floatHist := floatHistogram[int(ts)]
expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{0, ts, 0, nil, floatHist})
_, err = app.Append(0, lblsFloatHist, 0, ts, 0, nil, floatHist, storage.AOptions{})
require.NoError(t, err)
// Create multiple WAL records (commit).
if ts%10 == 0 {
require.NoError(t, app.Commit())
app = head.AppenderV2(context.Background())
}
}
}
require.NoError(t, app.Commit())
// Add some tombstones.
enc := record.Encoder{EnableSTStorage: enableStStorage}
for i := 1; i <= numSeries; i++ {
ref := storage.SeriesRef(i)
itvs := tombstones.Intervals{
{Mint: 1234, Maxt: 2345},
{Mint: 3456, Maxt: 4567},
}
for _, itv := range itvs {
expTombstones[ref].Add(itv)
}
head.tombstones.AddInterval(ref, itvs...)
err := head.wal.Log(enc.Tombstones([]tombstones.Stone{
{Ref: ref, Intervals: itvs},
}, nil))
require.NoError(t, err)
}
}
// These references should be the ones used for the snapshot.
wlast, woffset, err = head.wal.LastSegmentAndOffset()
require.NoError(t, err)
if woffset != 0 && woffset < 32*1024 {
// The page is always filled before taking the snapshot.
woffset = 32 * 1024
}
{
// Creating snapshot and verifying it.
head.opts.EnableMemorySnapshotOnShutdown = true
closeHeadAndCheckSnapshot() // This will create a snapshot.
// Test the replay of snapshot.
openHeadAndCheckReplay()
}
{ // Additional data to only include in WAL and m-mapped chunks and not snapshot. This mimics having an old snapshot on disk.
// Add more samples.
app := head.AppenderV2(context.Background())
for i := 1; i <= numSeries; i++ {
lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", i))
lblStr := lbls.String()
lblsHist := labels.FromStrings("hist", fmt.Sprintf("baz%d", i))
lblsHistStr := lblsHist.String()
lblsFloatHist := labels.FromStrings("floathist", fmt.Sprintf("bat%d", i))
lblsFloatHistStr := lblsFloatHist.String()
// 240 samples should m-map at least 1 chunk.
for ts := int64(241); ts <= 480; ts++ {
// Add an exemplar, but only to float sample.
aOpts := storage.AOptions{}
if ts%10 == 0 {
aOpts.Exemplars = []exemplar.Exemplar{newExemplar(lbls, ts)}
}
val := rand.Float64()
expSeries[lblStr] = append(expSeries[lblStr], sample{0, ts, val, nil, nil})
_, err := app.Append(0, lbls, 0, ts, val, nil, nil, aOpts)
require.NoError(t, err)
hist := histograms[int(ts)]
expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{0, ts, 0, hist, nil})
_, err = app.Append(0, lblsHist, 0, ts, 0, hist, nil, storage.AOptions{})
require.NoError(t, err)
floatHist := floatHistogram[int(ts)]
expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{0, ts, 0, nil, floatHist})
_, err = app.Append(0, lblsFloatHist, 0, ts, 0, nil, floatHist, storage.AOptions{})
require.NoError(t, err)
// Create multiple WAL records (commit).
if ts%10 == 0 {
require.NoError(t, app.Commit())
app = head.AppenderV2(context.Background())
}
}
}
require.NoError(t, app.Commit())
// Add more tombstones.
enc := record.Encoder{EnableSTStorage: enableStStorage}
for i := 1; i <= numSeries; i++ {
ref := storage.SeriesRef(i)
itvs := tombstones.Intervals{
{Mint: 12345, Maxt: 23456},
{Mint: 34567, Maxt: 45678},
}
for _, itv := range itvs {
expTombstones[ref].Add(itv)
}
head.tombstones.AddInterval(ref, itvs...)
err := head.wal.Log(enc.Tombstones([]tombstones.Stone{
{Ref: ref, Intervals: itvs},
}, nil))
require.NoError(t, err)
}
}
{
// Close Head and verify that new snapshot was not created.
head.opts.EnableMemorySnapshotOnShutdown = false
closeHeadAndCheckSnapshot() // This should not create a snapshot.
// Test the replay of snapshot, m-map chunks, and WAL.
head.opts.EnableMemorySnapshotOnShutdown = true // Enabled to read from snapshot.
openHeadAndCheckReplay()
}
// Creating another snapshot should delete the older snapshot and replay still works fine.
wlast, woffset, err = head.wal.LastSegmentAndOffset()
require.NoError(t, err)
if woffset != 0 && woffset < 32*1024 {
// The page is always filled before taking the snapshot.
woffset = 32 * 1024
}
{
// Close Head and verify that new snapshot was created.
closeHeadAndCheckSnapshot()
// Verify that there is only 1 snapshot.
files, err := os.ReadDir(head.opts.ChunkDirRoot)
require.NoError(t, err)
snapshots := 0
for i := len(files) - 1; i >= 0; i-- {
fi := files[i]
if strings.HasPrefix(fi.Name(), chunkSnapshotPrefix) {
snapshots++
require.Equal(t, chunkSnapshotDir(wlast, woffset), fi.Name())
}
}
require.Equal(t, 1, snapshots)
// Test the replay of snapshot.
head.opts.EnableMemorySnapshotOnShutdown = true // Enabled to read from snapshot.
// Disabling exemplars to check that it does not hard fail replay
// https://github.com/prometheus/prometheus/issues/9437#issuecomment-933285870.
head.opts.EnableExemplarStorage = false
head.opts.MaxExemplars.Store(0)
expExemplars = expExemplars[:0]
openHeadAndCheckReplay()
require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal))
}
return nil
}))
require.Equal(t, expTombstones, actTombstones)
}
checkExemplars := func() {
actExemplars := make([]ex, 0, len(expExemplars))
err := head.exemplars.IterateExemplars(func(seriesLabels labels.Labels, e exemplar.Exemplar) error {
actExemplars = append(actExemplars, ex{
seriesLabels: seriesLabels,
e: e,
})
return nil
})
require.NoError(t, err)
// Verifies both existence of right exemplars and order of exemplars in the buffer.
testutil.RequireEqualWithOptions(t, expExemplars, actExemplars, []cmp.Option{cmp.AllowUnexported(ex{})})
}
var (
wlast, woffset int
err error
)
closeHeadAndCheckSnapshot := func() {
require.NoError(t, head.Close())
_, sidx, soffset, err := LastChunkSnapshot(head.opts.ChunkDirRoot)
require.NoError(t, err)
require.Equal(t, wlast, sidx)
require.Equal(t, woffset, soffset)
}
openHeadAndCheckReplay := func() {
w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, compression.None)
require.NoError(t, err)
head, err = NewHead(nil, nil, w, nil, head.opts, nil)
require.NoError(t, err)
require.NoError(t, head.Init(math.MinInt64))
checkSamples()
checkHistograms()
checkFloatHistograms()
checkTombstones()
checkExemplars()
}
{ // Initial data that goes into snapshot.
// Add some initial samples with >=1 m-map chunk.
app := head.AppenderV2(context.Background())
for i := 1; i <= numSeries; i++ {
lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", i))
lblStr := lbls.String()
lblsHist := labels.FromStrings("hist", fmt.Sprintf("baz%d", i))
lblsHistStr := lblsHist.String()
lblsFloatHist := labels.FromStrings("floathist", fmt.Sprintf("bat%d", i))
lblsFloatHistStr := lblsFloatHist.String()
// 240 samples should m-map at least 1 chunk.
for ts := int64(1); ts <= 240; ts++ {
// Add an exemplar, but only to float sample.
aOpts := storage.AOptions{}
if ts%10 == 0 {
aOpts.Exemplars = []exemplar.Exemplar{newExemplar(lbls, ts)}
}
val := rand.Float64()
expSeries[lblStr] = append(expSeries[lblStr], sample{0, ts, val, nil, nil})
_, err := app.Append(0, lbls, 0, ts, val, nil, nil, aOpts)
require.NoError(t, err)
hist := histograms[int(ts)]
expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{0, ts, 0, hist, nil})
_, err = app.Append(0, lblsHist, 0, ts, 0, hist, nil, storage.AOptions{})
require.NoError(t, err)
floatHist := floatHistogram[int(ts)]
expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{0, ts, 0, nil, floatHist})
_, err = app.Append(0, lblsFloatHist, 0, ts, 0, nil, floatHist, storage.AOptions{})
require.NoError(t, err)
// Create multiple WAL records (commit).
if ts%10 == 0 {
require.NoError(t, app.Commit())
app = head.AppenderV2(context.Background())
}
}
}
require.NoError(t, app.Commit())
// Add some tombstones.
var enc record.Encoder
for i := 1; i <= numSeries; i++ {
ref := storage.SeriesRef(i)
itvs := tombstones.Intervals{
{Mint: 1234, Maxt: 2345},
{Mint: 3456, Maxt: 4567},
}
for _, itv := range itvs {
expTombstones[ref].Add(itv)
}
head.tombstones.AddInterval(ref, itvs...)
err := head.wal.Log(enc.Tombstones([]tombstones.Stone{
{Ref: ref, Intervals: itvs},
}, nil))
require.NoError(t, err)
}
}
// These references should be the ones used for the snapshot.
wlast, woffset, err = head.wal.LastSegmentAndOffset()
require.NoError(t, err)
if woffset != 0 && woffset < 32*1024 {
// The page is always filled before taking the snapshot.
woffset = 32 * 1024
}
{
// Creating snapshot and verifying it.
head.opts.EnableMemorySnapshotOnShutdown = true
closeHeadAndCheckSnapshot() // This will create a snapshot.
// Test the replay of snapshot.
openHeadAndCheckReplay()
}
{ // Additional data to only include in WAL and m-mapped chunks and not snapshot. This mimics having an old snapshot on disk.
// Add more samples.
app := head.AppenderV2(context.Background())
for i := 1; i <= numSeries; i++ {
lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", i))
lblStr := lbls.String()
lblsHist := labels.FromStrings("hist", fmt.Sprintf("baz%d", i))
lblsHistStr := lblsHist.String()
lblsFloatHist := labels.FromStrings("floathist", fmt.Sprintf("bat%d", i))
lblsFloatHistStr := lblsFloatHist.String()
// 240 samples should m-map at least 1 chunk.
for ts := int64(241); ts <= 480; ts++ {
// Add an exemplar, but only to float sample.
aOpts := storage.AOptions{}
if ts%10 == 0 {
aOpts.Exemplars = []exemplar.Exemplar{newExemplar(lbls, ts)}
}
val := rand.Float64()
expSeries[lblStr] = append(expSeries[lblStr], sample{0, ts, val, nil, nil})
_, err := app.Append(0, lbls, 0, ts, val, nil, nil, aOpts)
require.NoError(t, err)
hist := histograms[int(ts)]
expHist[lblsHistStr] = append(expHist[lblsHistStr], sample{0, ts, 0, hist, nil})
_, err = app.Append(0, lblsHist, 0, ts, 0, hist, nil, storage.AOptions{})
require.NoError(t, err)
floatHist := floatHistogram[int(ts)]
expFloatHist[lblsFloatHistStr] = append(expFloatHist[lblsFloatHistStr], sample{0, ts, 0, nil, floatHist})
_, err = app.Append(0, lblsFloatHist, 0, ts, 0, nil, floatHist, storage.AOptions{})
require.NoError(t, err)
// Create multiple WAL records (commit).
if ts%10 == 0 {
require.NoError(t, app.Commit())
app = head.AppenderV2(context.Background())
}
}
}
require.NoError(t, app.Commit())
// Add more tombstones.
var enc record.Encoder
for i := 1; i <= numSeries; i++ {
ref := storage.SeriesRef(i)
itvs := tombstones.Intervals{
{Mint: 12345, Maxt: 23456},
{Mint: 34567, Maxt: 45678},
}
for _, itv := range itvs {
expTombstones[ref].Add(itv)
}
head.tombstones.AddInterval(ref, itvs...)
err := head.wal.Log(enc.Tombstones([]tombstones.Stone{
{Ref: ref, Intervals: itvs},
}, nil))
require.NoError(t, err)
}
}
{
// Close Head and verify that new snapshot was not created.
head.opts.EnableMemorySnapshotOnShutdown = false
closeHeadAndCheckSnapshot() // This should not create a snapshot.
// Test the replay of snapshot, m-map chunks, and WAL.
head.opts.EnableMemorySnapshotOnShutdown = true // Enabled to read from snapshot.
openHeadAndCheckReplay()
}
// Creating another snapshot should delete the older snapshot and replay still works fine.
wlast, woffset, err = head.wal.LastSegmentAndOffset()
require.NoError(t, err)
if woffset != 0 && woffset < 32*1024 {
// The page is always filled before taking the snapshot.
woffset = 32 * 1024
}
{
// Close Head and verify that new snapshot was created.
closeHeadAndCheckSnapshot()
// Verify that there is only 1 snapshot.
files, err := os.ReadDir(head.opts.ChunkDirRoot)
require.NoError(t, err)
snapshots := 0
for i := len(files) - 1; i >= 0; i-- {
fi := files[i]
if strings.HasPrefix(fi.Name(), chunkSnapshotPrefix) {
snapshots++
require.Equal(t, chunkSnapshotDir(wlast, woffset), fi.Name())
}
}
require.Equal(t, 1, snapshots)
// Test the replay of snapshot.
head.opts.EnableMemorySnapshotOnShutdown = true // Enabled to read from snapshot.
// Disabling exemplars to check that it does not hard fail replay
// https://github.com/prometheus/prometheus/issues/9437#issuecomment-933285870.
head.opts.EnableExemplarStorage = false
head.opts.MaxExemplars.Store(0)
expExemplars = expExemplars[:0]
openHeadAndCheckReplay()
require.Equal(t, 0.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal))
}
}

File diff suppressed because it is too large.

View file

@@ -170,7 +170,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
return
}
decoded <- series
case record.Samples:
case record.Samples, record.SamplesV2:
samples := h.wlReplaySamplesPool.Get()[:0]
samples, err = dec.Samples(r.Record(), samples)
if err != nil {
@@ -799,7 +799,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
var err error
rec := r.Record()
switch dec.Type(rec) {
case record.Samples:
case record.Samples, record.SamplesV2:
samples := h.wlReplaySamplesPool.Get()[:0]
samples, err = dec.Samples(rec, samples)
if err != nil {
@@ -1401,7 +1401,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) {
// Assuming 100 bytes (overestimate) per exemplar, that's ~1MB.
maxExemplarsPerRecord := 10000
batch := make([]record.RefExemplar, 0, maxExemplarsPerRecord)
enc := record.Encoder{}
enc := record.Encoder{EnableSTStorage: h.opts.EnableSTStorage}
flushExemplars := func() error {
if len(batch) == 0 {
return nil

tsdb/record/bench_test.go (new file, 207 lines)
View file

@@ -0,0 +1,207 @@
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package record_test
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/tsdb/compression"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/util/testrecord"
)
func zeroOutSTs(samples []record.RefSample) []record.RefSample {
out := make([]record.RefSample, len(samples))
for i := range samples {
out[i] = samples[i]
out[i].ST = 0
}
return out
}
func TestEncodeDecode(t *testing.T) {
for _, enableStStorage := range []bool{false, true} {
for _, tcase := range []testrecord.RefSamplesCase{
testrecord.Realistic1000Samples,
testrecord.Realistic1000WithVariableSTSamples,
testrecord.Realistic1000WithConstSTSamples,
testrecord.WorstCase1000,
testrecord.WorstCase1000WithSTSamples,
} {
var (
dec record.Decoder
buf []byte
enc = record.Encoder{EnableSTStorage: enableStStorage}
)
s := testrecord.GenTestRefSamplesCase(t, tcase)
{
got, err := dec.Samples(enc.Samples(s, nil), nil)
require.NoError(t, err)
// if ST is off, we expect all STs to be zero
expected := s
if !enableStStorage {
expected = zeroOutSTs(s)
}
require.Equal(t, expected, got)
}
// With byte buffer (append!)
{
buf = make([]byte, 10, 1e5)
got, err := dec.Samples(enc.Samples(s, buf)[10:], nil)
require.NoError(t, err)
expected := s
if !enableStStorage {
expected = zeroOutSTs(s)
}
require.Equal(t, expected, got)
}
// With sample slice
{
samples := make([]record.RefSample, 0, len(s)+1)
got, err := dec.Samples(enc.Samples(s, nil), samples)
require.NoError(t, err)
expected := s
if !enableStStorage {
expected = zeroOutSTs(s)
}
require.Equal(t, expected, got)
}
// With compression.
{
buf := enc.Samples(s, nil)
cEnc, err := compression.NewEncoder()
require.NoError(t, err)
buf, _, err = cEnc.Encode(compression.Zstd, buf, nil)
require.NoError(t, err)
buf, err = compression.NewDecoder().Decode(compression.Zstd, buf, nil)
require.NoError(t, err)
got, err := dec.Samples(buf, nil)
require.NoError(t, err)
expected := s
if !enableStStorage {
expected = zeroOutSTs(s)
}
require.Equal(t, expected, got)
}
}
}
}
var (
compressions = []compression.Type{compression.None, compression.Snappy, compression.Zstd}
dataCases = []testrecord.RefSamplesCase{
testrecord.Realistic1000Samples,
testrecord.Realistic1000WithVariableSTSamples,
testrecord.Realistic1000WithConstSTSamples,
testrecord.WorstCase1000,
testrecord.WorstCase1000WithSTSamples,
}
UseV2 = true
)
/*
export bench=encode-v2 && go test ./tsdb/record/... \
-run '^$' -bench '^BenchmarkEncode_Samples' \
-benchtime 5s -count 6 -cpu 2 -timeout 999m \
| tee ${bench}.txt
*/
func BenchmarkEncode_Samples(b *testing.B) {
for _, compr := range compressions {
for _, data := range dataCases {
b.Run(fmt.Sprintf("compr=%v/data=%v", compr, data), func(b *testing.B) {
var (
samples = testrecord.GenTestRefSamplesCase(b, data)
enc = record.Encoder{EnableSTStorage: UseV2}
buf []byte
cBuf []byte
)
cEnc, err := compression.NewEncoder()
require.NoError(b, err)
// Warm up.
buf = enc.Samples(samples, buf[:0])
cBuf, _, err = cEnc.Encode(compr, buf, cBuf[:0])
require.NoError(b, err)
b.ReportAllocs()
b.ResetTimer()
for b.Loop() {
buf = enc.Samples(samples, buf[:0])
b.ReportMetric(float64(len(buf)), "B/rec")
cBuf, _, _ = cEnc.Encode(compr, buf, cBuf[:0])
b.ReportMetric(float64(len(cBuf)), "B/compressed-rec")
}
})
}
}
}
/*
export bench=decode-v2 && go test ./tsdb/record/... \
-run '^$' -bench '^BenchmarkDecode_Samples' \
-benchtime 5s -count 6 -cpu 2 -timeout 999m \
| tee ${bench}.txt
*/
func BenchmarkDecode_Samples(b *testing.B) {
for _, compr := range compressions {
for _, data := range dataCases {
b.Run(fmt.Sprintf("compr=%v/data=%v", compr, data), func(b *testing.B) {
var (
samples = testrecord.GenTestRefSamplesCase(b, data)
enc = record.Encoder{EnableSTStorage: UseV2}
dec record.Decoder
cDec = compression.NewDecoder()
cBuf []byte
samplesBuf []record.RefSample
)
buf := enc.Samples(samples, nil)
cEnc, err := compression.NewEncoder()
require.NoError(b, err)
buf, _, err = cEnc.Encode(compr, buf, nil)
require.NoError(b, err)
// Warm up.
cBuf, err = cDec.Decode(compr, buf, cBuf[:0])
require.NoError(b, err)
samplesBuf, err = dec.Samples(cBuf, samplesBuf[:0])
require.NoError(b, err)
b.ReportAllocs()
b.ResetTimer()
for b.Loop() {
cBuf, _ = cDec.Decode(compr, buf, cBuf[:0])
samplesBuf, _ = dec.Samples(cBuf, samplesBuf[:0])
}
})
}
}
}
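/*
Assuming a matching baseline was captured the same way (for example export bench=encode-v1
with UseV2 set to false above), the two runs are typically compared with benchstat:
	benchstat encode-v1.txt encode-v2.txt
*/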

View file

@@ -58,6 +58,8 @@ const (
CustomBucketsHistogramSamples Type = 9
// CustomBucketsFloatHistogramSamples is used to match WAL records of type Float Histogram with custom buckets.
CustomBucketsFloatHistogramSamples Type = 10
// SamplesV2 is an enhanced sample record with an encoding scheme that allows storing float samples with timestamp and an optional ST per sample.
SamplesV2 Type = 11
)
func (rt Type) String() string {
@@ -66,6 +68,8 @@ func (rt Type) String() string {
return "series"
case Samples:
return "samples"
case SamplesV2:
return "samples-v2"
case Tombstones:
return "tombstones"
case Exemplars:
@@ -157,12 +161,12 @@ type RefSeries struct {
Labels labels.Labels
}
// RefSample is a timestamp/value pair associated with a reference to a series.
// RefSample is a timestamp/st/value struct associated with a reference to a series.
// TODO(beorn7): Perhaps make this "polymorphic", including histogram and float-histogram pointers? Then get rid of RefHistogramSample.
type RefSample struct {
Ref chunks.HeadSeriesRef
T int64
V float64
Ref chunks.HeadSeriesRef
ST, T int64
V float64
}
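// For illustration (values mirror the record tests further down), a sample with and
// without a start timestamp; leaving ST at its zero value means "no start time":
var (
	exampleWithST    = RefSample{Ref: 0, ST: 14, T: 12423423, V: 1.2345}
	exampleWithoutST = RefSample{Ref: 123, T: -1231, V: -123}
)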
// RefMetadata is the metadata associated with a series ID.
@@ -182,6 +186,7 @@ type RefExemplar struct {
}
// RefHistogramSample is a histogram.
// TODO(owilliams): Add support for ST.
type RefHistogramSample struct {
Ref chunks.HeadSeriesRef
T int64
@@ -189,6 +194,7 @@ type RefHistogramSample struct {
}
// RefFloatHistogramSample is a float histogram.
// TODO(owilliams): Add support for ST.
type RefFloatHistogramSample struct {
Ref chunks.HeadSeriesRef
T int64
@@ -220,7 +226,7 @@ func (*Decoder) Type(rec []byte) Type {
return Unknown
}
switch t := Type(rec[0]); t {
case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples, CustomBucketsHistogramSamples, CustomBucketsFloatHistogramSamples:
case Series, Samples, SamplesV2, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples, CustomBucketsHistogramSamples, CustomBucketsFloatHistogramSamples:
return t
}
return Unknown
@@ -311,12 +317,20 @@ func (d *Decoder) DecodeLabels(dec *encoding.Decbuf) labels.Labels {
}
// Samples appends samples in rec to the given slice.
func (*Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) {
func (d *Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) {
dec := encoding.Decbuf{B: rec}
if Type(dec.Byte()) != Samples {
return nil, errors.New("invalid record type")
switch typ := dec.Byte(); Type(typ) {
case Samples:
return d.samplesV1(&dec, samples)
case SamplesV2:
return d.samplesV2(&dec, samples)
default:
return nil, fmt.Errorf("invalid record type %v, expected Samples(2) or SamplesV2(11)", typ)
}
}
// samplesV1 appends samples in rec to the given slice; V1 records carry no ST information, so ST is left at zero.
func (*Decoder) samplesV1(dec *encoding.Decbuf, samples []RefSample) ([]RefSample, error) {
if dec.Len() == 0 {
return samples, nil
}
@@ -349,6 +363,60 @@ func (*Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) {
return samples, nil
}
// samplesV2 appends samples in rec to the given slice using the V2 algorithm,
// which is more efficient and supports ST (See Encoder.samplesV2 definition).
func (*Decoder) samplesV2(dec *encoding.Decbuf, samples []RefSample) ([]RefSample, error) {
if dec.Len() == 0 {
return samples, nil
}
// Allow 1 byte for each varint and 8 for the value; the output slice must be at least that big.
if minSize := dec.Len() / (1 + 1 + 8); cap(samples) < minSize {
samples = make([]RefSample, 0, minSize)
}
var firstT, firstST int64
for len(dec.B) > 0 && dec.Err() == nil {
var prev RefSample
var ref, t, ST int64
var val uint64
if len(samples) == 0 {
ref = dec.Varint64()
firstT = dec.Varint64()
t = firstT
ST = dec.Varint64()
firstST = ST
} else {
prev = samples[len(samples)-1]
ref = int64(prev.Ref) + dec.Varint64()
t = firstT + dec.Varint64()
stMarker := dec.Byte()
switch stMarker {
case noST:
case sameST:
ST = prev.ST
default:
ST = firstST + dec.Varint64()
}
}
val = dec.Be64()
samples = append(samples, RefSample{
Ref: chunks.HeadSeriesRef(ref),
ST: ST,
T: t,
V: math.Float64frombits(val),
})
}
if dec.Err() != nil {
return nil, fmt.Errorf("decode error after %d samples: %w", len(samples), dec.Err())
}
if len(dec.B) > 0 {
return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B))
}
return samples, nil
}
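// For illustration, replay code does not need to know up front which encoding was used:
// Decoder.Type distinguishes the records and Decoder.Samples accepts both. A sketch
// (replaySamples and its callback are hypothetical, not part of this change):
func replaySamples(dec *Decoder, rec []byte, handle func([]RefSample)) error {
	switch dec.Type(rec) {
	case Samples, SamplesV2:
		samples, err := dec.Samples(rec, nil)
		if err != nil {
			return err
		}
		handle(samples)
	}
	return nil
}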
// Tombstones appends tombstones in rec to the given slice.
func (*Decoder) Tombstones(rec []byte, tstones []tombstones.Stone) ([]tombstones.Stone, error) {
dec := encoding.Decbuf{B: rec}
@@ -656,7 +724,11 @@ func DecodeFloatHistogram(buf *encoding.Decbuf, fh *histogram.FloatHistogram) {
// Encoder encodes series, sample, and tombstones records.
// The zero value is ready to use.
type Encoder struct{}
type Encoder struct {
// EnableSTStorage enables the SamplesV2 encoding, which is more efficient
// than V1 and supports start time per sample.
EnableSTStorage bool
}
// Series appends the encoded series to b and returns the resulting slice.
func (*Encoder) Series(series []RefSeries, b []byte) []byte {
@@ -702,7 +774,16 @@ func EncodeLabels(buf *encoding.Encbuf, lbls labels.Labels) {
}
// Samples appends the encoded samples to b and returns the resulting slice.
func (*Encoder) Samples(samples []RefSample, b []byte) []byte {
// Depending on whether ST storage is enabled, it writes either a Samples or a SamplesV2 record.
func (e *Encoder) Samples(samples []RefSample, b []byte) []byte {
if e.EnableSTStorage {
return e.samplesV2(samples, b)
}
return e.samplesV1(samples, b)
}
// samplesV1 appends the samples encoded with the original (V1) scheme to b and returns the resulting slice.
func (*Encoder) samplesV1(samples []RefSample, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(Samples))
@@ -725,6 +806,56 @@ func (*Encoder) Samples(samples []RefSample, b []byte) []byte {
return buf.Get()
}
const (
// Start timestamp marker values for indicating trivial cases.
noST byte = iota // Sample has no start time.
sameST // Start timestamp exists and is the same as the previous sample's start timestamp.
explicitST // Explicit start timestamp value, delta to first start time.
)
// samplesV2 appends the encoded samples to b and returns the resulting slice,
// using a more efficient per-sample delta encoding that also allows storing an
// optional start timestamp (ST) per sample.
func (*Encoder) samplesV2(samples []RefSample, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(SamplesV2))
if len(samples) == 0 {
return buf.Get()
}
// Store first ref, timestamp, ST, and value.
first := samples[0]
buf.PutVarint64(int64(first.Ref))
buf.PutVarint64(first.T)
buf.PutVarint64(first.ST)
buf.PutBE64(math.Float64bits(first.V))
// Subsequent refs are stored as deltas to the immediately preceding ref, while
// timestamps and explicit STs are stored as deltas to the first sample's T and ST.
// A marker byte captures the trivial ST cases (no ST, or same ST as the previous sample).
for i := 1; i < len(samples); i++ {
s := samples[i]
prev := samples[i-1]
buf.PutVarint64(int64(s.Ref) - int64(prev.Ref))
buf.PutVarint64(s.T - first.T)
switch s.ST {
case 0:
buf.PutByte(noST)
case prev.ST:
buf.PutByte(sameST)
default:
buf.PutByte(explicitST)
buf.PutVarint64(s.ST - first.ST)
}
buf.PutBE64(math.Float64bits(s.V))
}
return buf.Get()
}
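// For illustration, a round-trip sketch that mirrors the record tests further down.
// EnableSTStorage selects the record type; start timestamps survive only the V2 encoding,
// and the marker bytes fall out of the sample values as commented:
func exampleSamplesRoundTrip() ([]RefSample, error) { // hypothetical helper, not part of this change
	enc := Encoder{EnableSTStorage: true}
	var dec Decoder
	samples := []RefSample{
		{Ref: 5, T: 100, ST: 90, V: 1},  // first sample: ref, T, ST and value written in full
		{Ref: 5, T: 200, ST: 90, V: 2},  // sameST: ST equals the previous sample's ST
		{Ref: 5, T: 300, ST: 250, V: 3}, // explicitST: ST written as a delta to the first ST
		{Ref: 5, T: 400, V: 4},          // noST: zero ST, i.e. no start time
	}
	rec := enc.Samples(samples, nil) // a SamplesV2 record; with EnableSTStorage=false it would be Samples
	return dec.Samples(rec, nil)     // decodes back to the same samples
}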
// Tombstones appends the encoded tombstones to b and returns the resulting slice.
func (*Encoder) Tombstones(tstones []tombstones.Stone, b []byte) []byte {
buf := encoding.Encbuf{B: b}

View file

@@ -76,15 +76,63 @@ func TestRecord_EncodeDecode(t *testing.T) {
require.NoError(t, err)
require.Equal(t, metadata, decMetadata)
// Without ST.
samples := []RefSample{
{Ref: 0, T: 12423423, V: 1.2345},
{Ref: 123, T: -1231, V: -123},
{Ref: 2, T: 0, V: 99999},
}
decSamples, err := dec.Samples(enc.Samples(samples, nil), nil)
encoded := enc.Samples(samples, nil)
require.Equal(t, Samples, dec.Type(encoded))
decSamples, err := dec.Samples(encoded, nil)
require.NoError(t, err)
require.Equal(t, samples, decSamples)
enc = Encoder{EnableSTStorage: true}
// Without ST again, but with an encoder that has ST storage enabled and therefore emits SamplesV2.
samples = []RefSample{
{Ref: 0, T: 12423423, V: 1.2345},
{Ref: 123, T: -1231, V: -123},
{Ref: 2, T: 0, V: 99999},
}
encoded = enc.Samples(samples, nil)
require.Equal(t, SamplesV2, dec.Type(encoded))
decSamples, err = dec.Samples(encoded, nil)
require.NoError(t, err)
require.Equal(t, samples, decSamples)
// With ST.
samplesWithST := []RefSample{
{Ref: 0, T: 12423423, ST: 14, V: 1.2345},
{Ref: 123, T: -1231, ST: 14, V: -123},
{Ref: 2, T: 0, ST: 14, V: 99999},
}
encoded = enc.Samples(samplesWithST, nil)
require.Equal(t, SamplesV2, dec.Type(encoded))
decSamples, err = dec.Samples(encoded, nil)
require.NoError(t, err)
require.Equal(t, samplesWithST, decSamples)
// With ST (ST[i] == T[i-1])
samplesWithSTDelta := []RefSample{
{Ref: 0, T: 12423400, ST: 12423300, V: 1.2345},
{Ref: 123, T: 12423500, ST: 12423400, V: -123},
{Ref: 2, T: 12423600, ST: 12423500, V: 99999},
}
decSamples, err = dec.Samples(enc.Samples(samplesWithSTDelta, nil), nil)
require.NoError(t, err)
require.Equal(t, samplesWithSTDelta, decSamples)
// With ST (ST[i] == ST[i-1])
samplesWithConstST := []RefSample{
{Ref: 0, T: 12423400, ST: 12423300, V: 1.2345},
{Ref: 123, T: 12423500, ST: 12423300, V: -123},
{Ref: 2, T: 12423600, ST: 12423300, V: 99999},
}
decSamples, err = dec.Samples(enc.Samples(samplesWithConstST, nil), nil)
require.NoError(t, err)
require.Equal(t, samplesWithConstST, decSamples)
// Intervals get split up into single entries. So we don't get back exactly
// what we put in.
tstones := []tombstones.Stone{
@@ -227,252 +275,262 @@ func TestRecord_EncodeDecode(t *testing.T) {
}
func TestRecord_DecodeInvalidHistogramSchema(t *testing.T) {
for _, schema := range []int32{-100, 100} {
t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
var enc Encoder
for _, enableStStorage := range []bool{false, true} {
for _, schema := range []int32{-100, 100} {
t.Run(fmt.Sprintf("schema=%d,stStorage=%v", schema, enableStStorage), func(t *testing.T) {
enc := Encoder{EnableSTStorage: enableStStorage}
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefHistogramSample{
{
Ref: 56,
T: 1234,
H: &histogram.Histogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefHistogramSample{
{
Ref: 56,
T: 1234,
H: &histogram.Histogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
},
PositiveBuckets: []int64{1, 1, -1, 0},
},
},
}
histSamples, _ := enc.HistogramSamples(histograms, nil)
decHistograms, err := dec.HistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Empty(t, decHistograms)
require.Contains(t, output.String(), "skipping histogram with unknown schema in WAL record")
})
}
histSamples, _ := enc.HistogramSamples(histograms, nil)
decHistograms, err := dec.HistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Empty(t, decHistograms)
require.Contains(t, output.String(), "skipping histogram with unknown schema in WAL record")
})
}
}
}
func TestRecord_DecodeInvalidFloatHistogramSchema(t *testing.T) {
for _, schema := range []int32{-100, 100} {
t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
var enc Encoder
for _, enableStStorage := range []bool{false, true} {
for _, schema := range []int32{-100, 100} {
t.Run(fmt.Sprintf("schema=%d,stStorage=%v", schema, enableStStorage), func(t *testing.T) {
enc := Encoder{EnableSTStorage: enableStStorage}
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefFloatHistogramSample{
{
Ref: 56,
T: 1234,
FH: &histogram.FloatHistogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefFloatHistogramSample{
{
Ref: 56,
T: 1234,
FH: &histogram.FloatHistogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{1, 1, -1, 0},
},
PositiveBuckets: []float64{1, 1, -1, 0},
},
},
}
histSamples, _ := enc.FloatHistogramSamples(histograms, nil)
decHistograms, err := dec.FloatHistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Empty(t, decHistograms)
require.Contains(t, output.String(), "skipping histogram with unknown schema in WAL record")
})
}
histSamples, _ := enc.FloatHistogramSamples(histograms, nil)
decHistograms, err := dec.FloatHistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Empty(t, decHistograms)
require.Contains(t, output.String(), "skipping histogram with unknown schema in WAL record")
})
}
}
}
func TestRecord_DecodeTooHighResolutionHistogramSchema(t *testing.T) {
for _, schema := range []int32{9, 52} {
t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
var enc Encoder
for _, enableStStorage := range []bool{false, true} {
for _, schema := range []int32{9, 52} {
t.Run(fmt.Sprintf("schema=%d,stStorage=%v", schema, enableStStorage), func(t *testing.T) {
enc := Encoder{EnableSTStorage: enableStStorage}
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefHistogramSample{
{
Ref: 56,
T: 1234,
H: &histogram.Histogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefHistogramSample{
{
Ref: 56,
T: 1234,
H: &histogram.Histogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
},
PositiveBuckets: []int64{1, 1, -1, 0},
},
},
}
histSamples, _ := enc.HistogramSamples(histograms, nil)
decHistograms, err := dec.HistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Len(t, decHistograms, 1)
require.Equal(t, histogram.ExponentialSchemaMax, decHistograms[0].H.Schema)
})
}
histSamples, _ := enc.HistogramSamples(histograms, nil)
decHistograms, err := dec.HistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Len(t, decHistograms, 1)
require.Equal(t, histogram.ExponentialSchemaMax, decHistograms[0].H.Schema)
})
}
}
}
func TestRecord_DecodeTooHighResolutionFloatHistogramSchema(t *testing.T) {
for _, schema := range []int32{9, 52} {
t.Run(fmt.Sprintf("schema=%d", schema), func(t *testing.T) {
var enc Encoder
for _, enableStStorage := range []bool{false, true} {
for _, schema := range []int32{9, 52} {
t.Run(fmt.Sprintf("schema=%d,stStorage=%v", schema, enableStStorage), func(t *testing.T) {
enc := Encoder{EnableSTStorage: enableStStorage}
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefFloatHistogramSample{
{
Ref: 56,
T: 1234,
FH: &histogram.FloatHistogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
var output bytes.Buffer
logger := promslog.New(&promslog.Config{Writer: &output})
dec := NewDecoder(labels.NewSymbolTable(), logger)
histograms := []RefFloatHistogramSample{
{
Ref: 56,
T: 1234,
FH: &histogram.FloatHistogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: schema,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{1, 1, -1, 0},
},
PositiveBuckets: []float64{1, 1, -1, 0},
},
},
}
histSamples, _ := enc.FloatHistogramSamples(histograms, nil)
decHistograms, err := dec.FloatHistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Len(t, decHistograms, 1)
require.Equal(t, histogram.ExponentialSchemaMax, decHistograms[0].FH.Schema)
})
}
histSamples, _ := enc.FloatHistogramSamples(histograms, nil)
decHistograms, err := dec.FloatHistogramSamples(histSamples, nil)
require.NoError(t, err)
require.Len(t, decHistograms, 1)
require.Equal(t, histogram.ExponentialSchemaMax, decHistograms[0].FH.Schema)
})
}
}
}
// TestRecord_Corrupted ensures that corrupted records return the correct error.
// Bugfix check for pull/521 and pull/523.
func TestRecord_Corrupted(t *testing.T) {
var enc Encoder
dec := NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
for _, enableStStorage := range []bool{false, true} {
enc := Encoder{EnableSTStorage: enableStStorage}
dec := NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
t.Run("Test corrupted series record", func(t *testing.T) {
series := []RefSeries{
{
Ref: 100,
Labels: labels.FromStrings("abc", "def", "123", "456"),
},
}
corrupted := enc.Series(series, nil)[:8]
_, err := dec.Series(corrupted, nil)
require.Equal(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted sample record", func(t *testing.T) {
samples := []RefSample{
{Ref: 0, T: 12423423, V: 1.2345},
}
corrupted := enc.Samples(samples, nil)[:8]
_, err := dec.Samples(corrupted, nil)
require.ErrorIs(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted tombstone record", func(t *testing.T) {
tstones := []tombstones.Stone{
{Ref: 123, Intervals: tombstones.Intervals{
{Mint: -1000, Maxt: 1231231},
{Mint: 5000, Maxt: 0},
}},
}
corrupted := enc.Tombstones(tstones, nil)[:8]
_, err := dec.Tombstones(corrupted, nil)
require.Equal(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted exemplar record", func(t *testing.T) {
exemplars := []RefExemplar{
{Ref: 0, T: 12423423, V: 1.2345, Labels: labels.FromStrings("trace_id", "asdf")},
}
corrupted := enc.Exemplars(exemplars, nil)[:8]
_, err := dec.Exemplars(corrupted, nil)
require.ErrorIs(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted metadata record", func(t *testing.T) {
meta := []RefMetadata{
{Ref: 147, Type: uint8(Counter), Unit: "unit", Help: "help"},
}
corrupted := enc.Metadata(meta, nil)[:8]
_, err := dec.Metadata(corrupted, nil)
require.ErrorIs(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted histogram record", func(t *testing.T) {
histograms := []RefHistogramSample{
{
Ref: 56,
T: 1234,
H: &histogram.Histogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
t.Run("Test corrupted series record", func(t *testing.T) {
series := []RefSeries{
{
Ref: 100,
Labels: labels.FromStrings("abc", "def", "123", "456"),
},
},
{
Ref: 67,
T: 5678,
H: &histogram.Histogram{
Count: 8,
ZeroThreshold: 0.001,
Sum: 35.5,
Schema: -53,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 2},
},
PositiveBuckets: []int64{2, -1, 2, 0},
CustomValues: []float64{0, 2, 4, 6, 8},
},
},
}
}
corruptedHists, customBucketsHists := enc.HistogramSamples(histograms, nil)
corruptedHists = corruptedHists[:8]
corruptedCustomBucketsHists := enc.CustomBucketsHistogramSamples(customBucketsHists, nil)
corruptedCustomBucketsHists = corruptedCustomBucketsHists[:8]
_, err := dec.HistogramSamples(corruptedHists, nil)
require.ErrorIs(t, err, encoding.ErrInvalidSize)
_, err = dec.HistogramSamples(corruptedCustomBucketsHists, nil)
require.ErrorIs(t, err, encoding.ErrInvalidSize)
})
corrupted := enc.Series(series, nil)[:8]
_, err := dec.Series(corrupted, nil)
require.Equal(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted sample record", func(t *testing.T) {
samples := []RefSample{
{Ref: 0, T: 12423423, V: 1.2345},
}
corrupted := enc.Samples(samples, nil)[:8]
_, err := dec.Samples(corrupted, nil)
require.ErrorIs(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted tombstone record", func(t *testing.T) {
tstones := []tombstones.Stone{
{Ref: 123, Intervals: tombstones.Intervals{
{Mint: -1000, Maxt: 1231231},
{Mint: 5000, Maxt: 0},
}},
}
corrupted := enc.Tombstones(tstones, nil)[:8]
_, err := dec.Tombstones(corrupted, nil)
require.Equal(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted exemplar record", func(t *testing.T) {
exemplars := []RefExemplar{
{Ref: 0, T: 12423423, V: 1.2345, Labels: labels.FromStrings("trace_id", "asdf")},
}
corrupted := enc.Exemplars(exemplars, nil)[:8]
_, err := dec.Exemplars(corrupted, nil)
require.ErrorIs(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted metadata record", func(t *testing.T) {
meta := []RefMetadata{
{Ref: 147, Type: uint8(Counter), Unit: "unit", Help: "help"},
}
corrupted := enc.Metadata(meta, nil)[:8]
_, err := dec.Metadata(corrupted, nil)
require.ErrorIs(t, err, encoding.ErrInvalidSize)
})
t.Run("Test corrupted histogram record", func(t *testing.T) {
histograms := []RefHistogramSample{
{
Ref: 56,
T: 1234,
H: &histogram.Histogram{
Count: 5,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 18.4 * rand.Float64(),
Schema: 1,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
},
},
{
Ref: 67,
T: 5678,
H: &histogram.Histogram{
Count: 8,
ZeroThreshold: 0.001,
Sum: 35.5,
Schema: -53,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 2, Length: 2},
},
PositiveBuckets: []int64{2, -1, 2, 0},
CustomValues: []float64{0, 2, 4, 6, 8},
},
},
}
corruptedHists, customBucketsHists := enc.HistogramSamples(histograms, nil)
corruptedHists = corruptedHists[:8]
corruptedCustomBucketsHists := enc.CustomBucketsHistogramSamples(customBucketsHists, nil)
corruptedCustomBucketsHists = corruptedCustomBucketsHists[:8]
_, err := dec.HistogramSamples(corruptedHists, nil)
require.ErrorIs(t, err, encoding.ErrInvalidSize)
_, err = dec.HistogramSamples(corruptedCustomBucketsHists, nil)
require.ErrorIs(t, err, encoding.ErrInvalidSize)
})
}
}
func TestRecord_Type(t *testing.T) {
@ -487,6 +545,16 @@ func TestRecord_Type(t *testing.T) {
recordType = dec.Type(enc.Samples(samples, nil))
require.Equal(t, Samples, recordType)
// With EnableSTStorage set, all Samples are V2
enc = Encoder{EnableSTStorage: true}
samples = []RefSample{{Ref: 123, T: 12345, V: 1.2345}}
recordType = dec.Type(enc.Samples(samples, nil))
require.Equal(t, SamplesV2, recordType)
samplesST := []RefSample{{Ref: 123, ST: 1, T: 12345, V: 1.2345}}
recordType = dec.Type(enc.Samples(samplesST, nil))
require.Equal(t, SamplesV2, recordType)
tstones := []tombstones.Stone{{Ref: 1, Intervals: tombstones.Intervals{{Mint: 1, Maxt: 2}}}}
recordType = dec.Type(enc.Tombstones(tstones, nil))
require.Equal(t, Tombstones, recordType)
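As the checkpoint, agent, and watcher changes in this commit illustrate, SamplesV2 only changes how a record is identified, not how it is decoded: readers widen their type switch and keep calling the same Samples decode path. A minimal consumer sketch along those lines (the package and function names are illustrative, not part of this diff; a real reader would build one decoder and reuse it across records):

package walconsumer // hypothetical package, for illustration only

import (
	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb/record"
)

// handleSampleRecord decodes float samples from a WAL record, accepting both
// the v1 Samples and the new SamplesV2 encodings through the same decoder call.
func handleSampleRecord(rec []byte) ([]record.RefSample, error) {
	dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
	switch dec.Type(rec) {
	case record.Samples, record.SamplesV2:
		return dec.Samples(rec, nil)
	default:
		// Series, tombstones, histograms, etc. are out of scope for this sketch.
		return nil, nil
	}
}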
@ -716,24 +784,26 @@ func BenchmarkWAL_HistogramEncoding(b *testing.B) {
make: initNHCBRefs,
},
} {
for _, labelCount := range []int{0, 10, 50} {
for _, histograms := range []int{10, 100, 1000} {
for _, buckets := range []int{0, 1, 10, 100} {
b.Run(fmt.Sprintf("type=%s/labels=%d/histograms=%d/buckets=%d", maker.name, labelCount, histograms, buckets), func(b *testing.B) {
series, samples, nhcbs := maker.make(labelCount, histograms, buckets)
enc := Encoder{}
for b.Loop() {
var buf []byte
enc.Series(series, buf)
enc.Samples(samples, buf)
var leftOver []RefHistogramSample
_, leftOver = enc.HistogramSamples(nhcbs, buf)
if len(leftOver) > 0 {
enc.CustomBucketsHistogramSamples(leftOver, buf)
for _, enableStStorage := range []bool{false, true} {
for _, labelCount := range []int{0, 10, 50} {
for _, histograms := range []int{10, 100, 1000} {
for _, buckets := range []int{0, 1, 10, 100} {
b.Run(fmt.Sprintf("type=%s/labels=%d/histograms=%d/buckets=%d", maker.name, labelCount, histograms, buckets), func(b *testing.B) {
series, samples, nhcbs := maker.make(labelCount, histograms, buckets)
enc := Encoder{EnableSTStorage: enableStStorage}
for b.Loop() {
var buf []byte
enc.Series(series, buf)
enc.Samples(samples, buf)
var leftOver []RefHistogramSample
_, leftOver = enc.HistogramSamples(nhcbs, buf)
if len(leftOver) > 0 {
enc.CustomBucketsHistogramSamples(leftOver, buf)
}
b.ReportMetric(float64(len(buf)), "recordBytes/ops")
}
b.ReportMetric(float64(len(buf)), "recordBytes/ops")
}
})
})
}
}
}
}

View file

@ -92,7 +92,7 @@ const CheckpointPrefix = "checkpoint."
// segmented format as the original WAL itself.
// This makes it easy to read it through the WAL package and concatenate
// it with the original WAL.
func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) {
func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64, enableStStorage bool) (*CheckpointStats, error) {
stats := &CheckpointStats{}
var sgmReader io.ReadCloser
@ -156,7 +156,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He
metadata []record.RefMetadata
st = labels.NewSymbolTable() // Needed for decoding; labels do not outlive this function.
dec = record.NewDecoder(st, logger)
enc record.Encoder
enc = record.Encoder{EnableSTStorage: enableStStorage}
buf []byte
recs [][]byte
@ -190,7 +190,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He
stats.TotalSeries += len(series)
stats.DroppedSeries += len(series) - len(repl)
case record.Samples:
case record.Samples, record.SamplesV2:
samples, err = dec.Samples(rec, samples)
if err != nil {
return nil, fmt.Errorf("decode samples: %w", err)
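Callers now thread the flag through so that the checkpoint re-encodes kept samples with the same ST setting as the live WAL. A minimal caller sketch under that assumption (not part of this diff; the package name, keep predicate, and cutoff are placeholders):

package walexample // hypothetical package, for illustration only

import (
	"fmt"
	"log/slog"

	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/wlog"
)

// checkpointSegments checkpoints WAL segments first..last, dropping samples
// older than mint and re-encoding kept samples with the given ST setting.
func checkpointSegments(logger *slog.Logger, w *wlog.WL, first, last int, mint int64, enableStStorage bool) error {
	keep := func(chunks.HeadSeriesRef) bool { return true } // placeholder: keep every series
	stats, err := wlog.Checkpoint(logger, w, first, last, keep, mint, enableStStorage)
	if err != nil {
		return fmt.Errorf("create checkpoint: %w", err)
	}
	// stats.TotalSamples and stats.DroppedSamples report how many samples were
	// scanned and how many fell before the mint cutoff.
	_ = stats
	return nil
}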

View file

@ -171,249 +171,255 @@ func TestCheckpoint(t *testing.T) {
}
}
for _, compress := range compression.Types() {
t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
dir := t.TempDir()
for _, enableStStorage := range []bool{false, true} {
for _, compress := range compression.Types() {
t.Run(fmt.Sprintf("compress=%s,stStorage=%v", compress, enableStStorage), func(t *testing.T) {
dir := t.TempDir()
var enc record.Encoder
// Create a dummy segment to bump the initial number.
seg, err := CreateSegment(dir, 100)
require.NoError(t, err)
require.NoError(t, seg.Close())
// Manually create checkpoint for 99 and earlier.
w, err := New(nil, nil, filepath.Join(dir, "checkpoint.0099"), compress)
require.NoError(t, err)
// Add some data we expect to be around later.
err = w.Log(enc.Series([]record.RefSeries{
{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "0")},
{Ref: 1, Labels: labels.FromStrings("a", "b", "c", "1")},
}, nil))
require.NoError(t, err)
// Log an unknown record, that might have come from a future Prometheus version.
require.NoError(t, w.Log([]byte{255}))
require.NoError(t, w.Close())
// Start a WAL and write records to it as usual.
w, err = NewSize(nil, nil, dir, 128*1024, compress)
require.NoError(t, err)
samplesInWAL, histogramsInWAL, floatHistogramsInWAL := 0, 0, 0
var last int64
for i := 0; ; i++ {
_, n, err := Segments(w.Dir())
enc := record.Encoder{EnableSTStorage: enableStStorage}
// Create a dummy segment to bump the initial number.
seg, err := CreateSegment(dir, 100)
require.NoError(t, err)
if n >= 106 {
break
}
// Write some series initially.
if i == 0 {
b := enc.Series([]record.RefSeries{
{Ref: 2, Labels: labels.FromStrings("a", "b", "c", "2")},
{Ref: 3, Labels: labels.FromStrings("a", "b", "c", "3")},
{Ref: 4, Labels: labels.FromStrings("a", "b", "c", "4")},
{Ref: 5, Labels: labels.FromStrings("a", "b", "c", "5")},
require.NoError(t, seg.Close())
// Manually create checkpoint for 99 and earlier.
w, err := New(nil, nil, filepath.Join(dir, "checkpoint.0099"), compress)
require.NoError(t, err)
// Add some data we expect to be around later.
err = w.Log(enc.Series([]record.RefSeries{
{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "0")},
{Ref: 1, Labels: labels.FromStrings("a", "b", "c", "1")},
}, nil))
require.NoError(t, err)
// Log an unknown record, that might have come from a future Prometheus version.
require.NoError(t, w.Log([]byte{255}))
require.NoError(t, w.Close())
// Start a WAL and write records to it as usual.
w, err = NewSize(nil, nil, dir, 128*1024, compress)
require.NoError(t, err)
samplesInWAL, histogramsInWAL, floatHistogramsInWAL := 0, 0, 0
var last int64
for i := 0; ; i++ {
_, n, err := Segments(w.Dir())
require.NoError(t, err)
if n >= 106 {
break
}
// Write some series initially.
if i == 0 {
b := enc.Series([]record.RefSeries{
{Ref: 2, Labels: labels.FromStrings("a", "b", "c", "2")},
{Ref: 3, Labels: labels.FromStrings("a", "b", "c", "3")},
{Ref: 4, Labels: labels.FromStrings("a", "b", "c", "4")},
{Ref: 5, Labels: labels.FromStrings("a", "b", "c", "5")},
}, nil)
require.NoError(t, w.Log(b))
b = enc.Metadata([]record.RefMetadata{
{Ref: 2, Unit: "unit", Help: "help"},
{Ref: 3, Unit: "unit", Help: "help"},
{Ref: 4, Unit: "unit", Help: "help"},
{Ref: 5, Unit: "unit", Help: "help"},
}, nil)
require.NoError(t, w.Log(b))
}
// Write samples until the WAL has enough segments.
// Make them have drifting timestamps within a record to see that they
// get filtered properly.
b := enc.Samples([]record.RefSample{
{Ref: 0, T: last, V: float64(i)},
{Ref: 1, T: last + 10000, V: float64(i)},
{Ref: 2, T: last + 20000, V: float64(i)},
{Ref: 3, T: last + 30000, V: float64(i)},
}, nil)
require.NoError(t, w.Log(b))
samplesInWAL += 4
h := makeHistogram(i)
b, _ = enc.HistogramSamples([]record.RefHistogramSample{
{Ref: 0, T: last, H: h},
{Ref: 1, T: last + 10000, H: h},
{Ref: 2, T: last + 20000, H: h},
{Ref: 3, T: last + 30000, H: h},
}, nil)
require.NoError(t, w.Log(b))
histogramsInWAL += 4
cbh := makeCustomBucketHistogram(i)
b = enc.CustomBucketsHistogramSamples([]record.RefHistogramSample{
{Ref: 0, T: last, H: cbh},
{Ref: 1, T: last + 10000, H: cbh},
{Ref: 2, T: last + 20000, H: cbh},
{Ref: 3, T: last + 30000, H: cbh},
}, nil)
require.NoError(t, w.Log(b))
histogramsInWAL += 4
fh := makeFloatHistogram(i)
b, _ = enc.FloatHistogramSamples([]record.RefFloatHistogramSample{
{Ref: 0, T: last, FH: fh},
{Ref: 1, T: last + 10000, FH: fh},
{Ref: 2, T: last + 20000, FH: fh},
{Ref: 3, T: last + 30000, FH: fh},
}, nil)
require.NoError(t, w.Log(b))
floatHistogramsInWAL += 4
cbfh := makeCustomBucketFloatHistogram(i)
b = enc.CustomBucketsFloatHistogramSamples([]record.RefFloatHistogramSample{
{Ref: 0, T: last, FH: cbfh},
{Ref: 1, T: last + 10000, FH: cbfh},
{Ref: 2, T: last + 20000, FH: cbfh},
{Ref: 3, T: last + 30000, FH: cbfh},
}, nil)
require.NoError(t, w.Log(b))
floatHistogramsInWAL += 4
b = enc.Exemplars([]record.RefExemplar{
{Ref: 1, T: last, V: float64(i), Labels: labels.FromStrings("trace_id", fmt.Sprintf("trace-%d", i))},
}, nil)
require.NoError(t, w.Log(b))
// Write changing metadata for each series. In the end, only the latest
// version should end up in the checkpoint.
b = enc.Metadata([]record.RefMetadata{
{Ref: 2, Unit: "unit", Help: "help"},
{Ref: 3, Unit: "unit", Help: "help"},
{Ref: 4, Unit: "unit", Help: "help"},
{Ref: 5, Unit: "unit", Help: "help"},
{Ref: 0, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
{Ref: 1, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
{Ref: 2, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
{Ref: 3, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
}, nil)
require.NoError(t, w.Log(b))
last += 100
}
// Write samples until the WAL has enough segments.
// Make them have drifting timestamps within a record to see that they
// get filtered properly.
b := enc.Samples([]record.RefSample{
{Ref: 0, T: last, V: float64(i)},
{Ref: 1, T: last + 10000, V: float64(i)},
{Ref: 2, T: last + 20000, V: float64(i)},
{Ref: 3, T: last + 30000, V: float64(i)},
}, nil)
require.NoError(t, w.Log(b))
samplesInWAL += 4
h := makeHistogram(i)
b, _ = enc.HistogramSamples([]record.RefHistogramSample{
{Ref: 0, T: last, H: h},
{Ref: 1, T: last + 10000, H: h},
{Ref: 2, T: last + 20000, H: h},
{Ref: 3, T: last + 30000, H: h},
}, nil)
require.NoError(t, w.Log(b))
histogramsInWAL += 4
cbh := makeCustomBucketHistogram(i)
b = enc.CustomBucketsHistogramSamples([]record.RefHistogramSample{
{Ref: 0, T: last, H: cbh},
{Ref: 1, T: last + 10000, H: cbh},
{Ref: 2, T: last + 20000, H: cbh},
{Ref: 3, T: last + 30000, H: cbh},
}, nil)
require.NoError(t, w.Log(b))
histogramsInWAL += 4
fh := makeFloatHistogram(i)
b, _ = enc.FloatHistogramSamples([]record.RefFloatHistogramSample{
{Ref: 0, T: last, FH: fh},
{Ref: 1, T: last + 10000, FH: fh},
{Ref: 2, T: last + 20000, FH: fh},
{Ref: 3, T: last + 30000, FH: fh},
}, nil)
require.NoError(t, w.Log(b))
floatHistogramsInWAL += 4
cbfh := makeCustomBucketFloatHistogram(i)
b = enc.CustomBucketsFloatHistogramSamples([]record.RefFloatHistogramSample{
{Ref: 0, T: last, FH: cbfh},
{Ref: 1, T: last + 10000, FH: cbfh},
{Ref: 2, T: last + 20000, FH: cbfh},
{Ref: 3, T: last + 30000, FH: cbfh},
}, nil)
require.NoError(t, w.Log(b))
floatHistogramsInWAL += 4
require.NoError(t, w.Close())
b = enc.Exemplars([]record.RefExemplar{
{Ref: 1, T: last, V: float64(i), Labels: labels.FromStrings("trace_id", fmt.Sprintf("trace-%d", i))},
}, nil)
require.NoError(t, w.Log(b))
stats, err := Checkpoint(promslog.NewNopLogger(), w, 100, 106, func(x chunks.HeadSeriesRef) bool {
return x%2 == 0
}, last/2, enableStStorage)
require.NoError(t, err)
require.NoError(t, w.Truncate(107))
require.NoError(t, DeleteCheckpoints(w.Dir(), 106))
require.Equal(t, histogramsInWAL+floatHistogramsInWAL+samplesInWAL, stats.TotalSamples)
require.Positive(t, stats.DroppedSamples)
// Write changing metadata for each series. In the end, only the latest
// version should end up in the checkpoint.
b = enc.Metadata([]record.RefMetadata{
{Ref: 0, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
{Ref: 1, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
{Ref: 2, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
{Ref: 3, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
}, nil)
require.NoError(t, w.Log(b))
// Only the new checkpoint should be left.
files, err := os.ReadDir(dir)
require.NoError(t, err)
require.Len(t, files, 1)
require.Equal(t, "checkpoint.00000106", files[0].Name())
last += 100
}
require.NoError(t, w.Close())
sr, err := NewSegmentsReader(filepath.Join(dir, "checkpoint.00000106"))
require.NoError(t, err)
defer sr.Close()
stats, err := Checkpoint(promslog.NewNopLogger(), w, 100, 106, func(x chunks.HeadSeriesRef) bool {
return x%2 == 0
}, last/2)
require.NoError(t, err)
require.NoError(t, w.Truncate(107))
require.NoError(t, DeleteCheckpoints(w.Dir(), 106))
require.Equal(t, histogramsInWAL+floatHistogramsInWAL+samplesInWAL, stats.TotalSamples)
require.Positive(t, stats.DroppedSamples)
dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
var series []record.RefSeries
var metadata []record.RefMetadata
r := NewReader(sr)
// Only the new checkpoint should be left.
files, err := os.ReadDir(dir)
require.NoError(t, err)
require.Len(t, files, 1)
require.Equal(t, "checkpoint.00000106", files[0].Name())
samplesInCheckpoint, histogramsInCheckpoint, floatHistogramsInCheckpoint := 0, 0, 0
for r.Next() {
rec := r.Record()
sr, err := NewSegmentsReader(filepath.Join(dir, "checkpoint.00000106"))
require.NoError(t, err)
defer sr.Close()
dec := record.NewDecoder(labels.NewSymbolTable(), promslog.NewNopLogger())
var series []record.RefSeries
var metadata []record.RefMetadata
r := NewReader(sr)
samplesInCheckpoint, histogramsInCheckpoint, floatHistogramsInCheckpoint := 0, 0, 0
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Series:
series, err = dec.Series(rec, series)
require.NoError(t, err)
case record.Samples:
samples, err := dec.Samples(rec, nil)
require.NoError(t, err)
for _, s := range samples {
require.GreaterOrEqual(t, s.T, last/2, "sample with wrong timestamp")
switch dec.Type(rec) {
case record.Series:
series, err = dec.Series(rec, series)
require.NoError(t, err)
case record.Samples, record.SamplesV2:
samples, err := dec.Samples(rec, nil)
require.NoError(t, err)
for _, s := range samples {
require.GreaterOrEqual(t, s.T, last/2, "sample with wrong timestamp")
}
samplesInCheckpoint += len(samples)
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
histograms, err := dec.HistogramSamples(rec, nil)
require.NoError(t, err)
for _, h := range histograms {
require.GreaterOrEqual(t, h.T, last/2, "histogram with wrong timestamp")
}
histogramsInCheckpoint += len(histograms)
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
floatHistograms, err := dec.FloatHistogramSamples(rec, nil)
require.NoError(t, err)
for _, h := range floatHistograms {
require.GreaterOrEqual(t, h.T, last/2, "float histogram with wrong timestamp")
}
floatHistogramsInCheckpoint += len(floatHistograms)
case record.Exemplars:
exemplars, err := dec.Exemplars(rec, nil)
require.NoError(t, err)
for _, e := range exemplars {
require.GreaterOrEqual(t, e.T, last/2, "exemplar with wrong timestamp")
}
case record.Metadata:
metadata, err = dec.Metadata(rec, metadata)
require.NoError(t, err)
}
samplesInCheckpoint += len(samples)
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
histograms, err := dec.HistogramSamples(rec, nil)
require.NoError(t, err)
for _, h := range histograms {
require.GreaterOrEqual(t, h.T, last/2, "histogram with wrong timestamp")
}
histogramsInCheckpoint += len(histograms)
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
floatHistograms, err := dec.FloatHistogramSamples(rec, nil)
require.NoError(t, err)
for _, h := range floatHistograms {
require.GreaterOrEqual(t, h.T, last/2, "float histogram with wrong timestamp")
}
floatHistogramsInCheckpoint += len(floatHistograms)
case record.Exemplars:
exemplars, err := dec.Exemplars(rec, nil)
require.NoError(t, err)
for _, e := range exemplars {
require.GreaterOrEqual(t, e.T, last/2, "exemplar with wrong timestamp")
}
case record.Metadata:
metadata, err = dec.Metadata(rec, metadata)
require.NoError(t, err)
}
}
require.NoError(t, r.Err())
// Making sure we replayed some samples. We expect >50% samples to be still present.
require.Greater(t, float64(samplesInCheckpoint)/float64(samplesInWAL), 0.5)
require.Less(t, float64(samplesInCheckpoint)/float64(samplesInWAL), 0.8)
require.Greater(t, float64(histogramsInCheckpoint)/float64(histogramsInWAL), 0.5)
require.Less(t, float64(histogramsInCheckpoint)/float64(histogramsInWAL), 0.8)
require.Greater(t, float64(floatHistogramsInCheckpoint)/float64(floatHistogramsInWAL), 0.5)
require.Less(t, float64(floatHistogramsInCheckpoint)/float64(floatHistogramsInWAL), 0.8)
require.NoError(t, r.Err())
// Making sure we replayed some samples. We expect >50% samples to be still present.
require.Greater(t, float64(samplesInCheckpoint)/float64(samplesInWAL), 0.5)
require.Less(t, float64(samplesInCheckpoint)/float64(samplesInWAL), 0.8)
require.Greater(t, float64(histogramsInCheckpoint)/float64(histogramsInWAL), 0.5)
require.Less(t, float64(histogramsInCheckpoint)/float64(histogramsInWAL), 0.8)
require.Greater(t, float64(floatHistogramsInCheckpoint)/float64(floatHistogramsInWAL), 0.5)
require.Less(t, float64(floatHistogramsInCheckpoint)/float64(floatHistogramsInWAL), 0.8)
expectedRefSeries := []record.RefSeries{
{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "0")},
{Ref: 2, Labels: labels.FromStrings("a", "b", "c", "2")},
{Ref: 4, Labels: labels.FromStrings("a", "b", "c", "4")},
}
testutil.RequireEqual(t, expectedRefSeries, series)
expectedRefSeries := []record.RefSeries{
{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "0")},
{Ref: 2, Labels: labels.FromStrings("a", "b", "c", "2")},
{Ref: 4, Labels: labels.FromStrings("a", "b", "c", "4")},
}
testutil.RequireEqual(t, expectedRefSeries, series)
expectedRefMetadata := []record.RefMetadata{
{Ref: 0, Unit: strconv.FormatInt(last-100, 10), Help: strconv.FormatInt(last-100, 10)},
{Ref: 2, Unit: strconv.FormatInt(last-100, 10), Help: strconv.FormatInt(last-100, 10)},
{Ref: 4, Unit: "unit", Help: "help"},
}
sort.Slice(metadata, func(i, j int) bool { return metadata[i].Ref < metadata[j].Ref })
require.Equal(t, expectedRefMetadata, metadata)
})
expectedRefMetadata := []record.RefMetadata{
{Ref: 0, Unit: strconv.FormatInt(last-100, 10), Help: strconv.FormatInt(last-100, 10)},
{Ref: 2, Unit: strconv.FormatInt(last-100, 10), Help: strconv.FormatInt(last-100, 10)},
{Ref: 4, Unit: "unit", Help: "help"},
}
sort.Slice(metadata, func(i, j int) bool { return metadata[i].Ref < metadata[j].Ref })
require.Equal(t, expectedRefMetadata, metadata)
})
}
}
}
func TestCheckpointNoTmpFolderAfterError(t *testing.T) {
// Create a new wlog with invalid data.
dir := t.TempDir()
w, err := NewSize(nil, nil, dir, 64*1024, compression.None)
require.NoError(t, err)
var enc record.Encoder
require.NoError(t, w.Log(enc.Series([]record.RefSeries{
{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "2")},
}, nil)))
require.NoError(t, w.Close())
for _, enableStStorage := range []bool{false, true} {
t.Run("enableStStorage="+strconv.FormatBool(enableStStorage), func(t *testing.T) {
// Create a new wlog with invalid data.
dir := t.TempDir()
w, err := NewSize(nil, nil, dir, 64*1024, compression.None)
require.NoError(t, err)
enc := record.Encoder{EnableSTStorage: enableStStorage}
require.NoError(t, w.Log(enc.Series([]record.RefSeries{
{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "2")},
}, nil)))
require.NoError(t, w.Close())
// Corrupt data.
f, err := os.OpenFile(filepath.Join(w.Dir(), "00000000"), os.O_WRONLY, 0o666)
require.NoError(t, err)
_, err = f.WriteAt([]byte{42}, 1)
require.NoError(t, err)
require.NoError(t, f.Close())
// Corrupt data.
f, err := os.OpenFile(filepath.Join(w.Dir(), "00000000"), os.O_WRONLY, 0o666)
require.NoError(t, err)
_, err = f.WriteAt([]byte{42}, 1)
require.NoError(t, err)
require.NoError(t, f.Close())
// Run the checkpoint and since the wlog contains corrupt data this should return an error.
_, err = Checkpoint(promslog.NewNopLogger(), w, 0, 1, nil, 0)
require.Error(t, err)
// Run the checkpoint and since the wlog contains corrupt data this should return an error.
_, err = Checkpoint(promslog.NewNopLogger(), w, 0, 1, nil, 0, enableStStorage)
require.Error(t, err)
// Walk the wlog dir to make sure there are no tmp folder left behind after the error.
err = filepath.Walk(w.Dir(), func(path string, info os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("access err %q: %w", path, err)
}
if info.IsDir() && strings.HasSuffix(info.Name(), ".tmp") {
return fmt.Errorf("wlog dir contains temporary folder:%s", info.Name())
}
return nil
})
require.NoError(t, err)
// Walk the wlog dir to make sure there are no tmp folder left behind after the error.
err = filepath.Walk(w.Dir(), func(path string, info os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("access err %q: %w", path, err)
}
if info.IsDir() && strings.HasSuffix(info.Name(), ".tmp") {
return fmt.Errorf("wlog dir contains temporary folder:%s", info.Name())
}
return nil
})
require.NoError(t, err)
})
}
}

View file

@ -519,7 +519,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
}
w.writer.StoreSeries(series, segmentNum)
case record.Samples:
case record.Samples, record.SamplesV2:
// If we're not tailing a segment we can ignore any samples records we see.
// This speeds up replay of the WAL by > 10x.
if !tail {

File diff suppressed because it is too large

util/testrecord/record.go Normal file
View file

@ -0,0 +1,96 @@
// Copyright 2025 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
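
// Package testrecord provides generators for record.RefSample test data with
// different timestamp and start-time (ST) distributions, intended for encoder
// tests and benchmarks.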
package testrecord
import (
"math"
"testing"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/record"
)
type RefSamplesCase string
const (
Realistic1000Samples RefSamplesCase = "real1000"
Realistic1000WithVariableSTSamples RefSamplesCase = "real1000-vst"
Realistic1000WithConstSTSamples RefSamplesCase = "real1000-cst"
WorstCase1000 RefSamplesCase = "worst1000"
WorstCase1000WithSTSamples RefSamplesCase = "worst1000-st"
)
func GenTestRefSamplesCase(t testing.TB, c RefSamplesCase) []record.RefSample {
t.Helper()
ret := make([]record.RefSample, 1e3)
switch c {
// Samples are spread across series, so they likely all share the same timestamp.
case Realistic1000Samples:
for i := range ret {
ret[i].Ref = chunks.HeadSeriesRef(i)
ret[i].T = int64(12423423)
ret[i].V = highVarianceFloat(i)
}
// Start times are likely all the same, with only a small delta to the timestamps.
case Realistic1000WithConstSTSamples:
for i := range ret {
ret[i].Ref = chunks.HeadSeriesRef(i)
ret[i].ST = int64(12423423)
ret[i].T = int64(12423423 + 15)
ret[i].V = highVarianceFloat(i)
}
// Series may, however, have different start times.
case Realistic1000WithVariableSTSamples:
for i := range ret {
ret[i].Ref = chunks.HeadSeriesRef(i)
ret[i].ST = int64((12423423 / 9) * (i % 10))
ret[i].T = int64(12423423)
ret[i].V = highVarianceFloat(i)
}
case WorstCase1000:
for i := range ret {
ret[i].Ref = chunks.HeadSeriesRef(i)
ret[i].T = highVarianceInt(i)
ret[i].V = highVarianceFloat(i)
}
case WorstCase1000WithSTSamples:
for i := range ret {
ret[i].Ref = chunks.HeadSeriesRef(i)
// The worst case is when consecutive values differ significantly
// from one another, which breaks delta encoding.
ret[i].ST = highVarianceInt(i+1) / 1024 // Keep ST on a different scale than T.
ret[i].T = highVarianceInt(i)
ret[i].V = highVarianceFloat(i)
}
default:
t.Fatal("unknown case", c)
}
return ret
}
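// highVarianceInt alternates between the extreme int32 values so that
// consecutive timestamps produce very large deltas.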
func highVarianceInt(i int) int64 {
if i%2 == 0 {
return math.MinInt32
}
return math.MaxInt32
}
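// highVarianceFloat alternates between the smallest and largest float32
// values so that neighbouring sample values never repeat.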
func highVarianceFloat(i int) float64 {
if i%2 == 0 {
return math.SmallestNonzeroFloat32
}
return math.MaxFloat32
}
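One way these generators might be exercised, sketched here for illustration (the benchmark name and sub-test labels are not part of this commit): encode each case with and without ST storage and report the resulting record size, mirroring the b.Loop and ReportMetric pattern of the record benchmarks above.

package testrecord_test // hypothetical external test package

import (
	"fmt"
	"testing"

	"github.com/prometheus/prometheus/tsdb/record"
	"github.com/prometheus/prometheus/util/testrecord"
)

// BenchmarkRefSamplesEncoding encodes each generated sample case with and
// without ST storage and reports the encoded record size per operation.
func BenchmarkRefSamplesEncoding(b *testing.B) {
	for _, c := range []testrecord.RefSamplesCase{
		testrecord.Realistic1000Samples,
		testrecord.Realistic1000WithVariableSTSamples,
		testrecord.Realistic1000WithConstSTSamples,
		testrecord.WorstCase1000,
		testrecord.WorstCase1000WithSTSamples,
	} {
		samples := testrecord.GenTestRefSamplesCase(b, c)
		for _, enableStStorage := range []bool{false, true} {
			b.Run(fmt.Sprintf("case=%s/stStorage=%v", c, enableStStorage), func(b *testing.B) {
				enc := record.Encoder{EnableSTStorage: enableStStorage}
				for b.Loop() {
					buf := enc.Samples(samples, nil)
					b.ReportMetric(float64(len(buf)), "recordBytes/op")
				}
			})
		}
	}
}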