tsdb: add auto-cleanup to newTestHead and remove redundant cleanup calls (#17890)

Add automatic cleanup to newTestHeadWithOptions so that heads created
with newTestHead are automatically closed when the test ends. This
simplifies test code by removing the need for manual cleanup in most
cases.

Changes:
- Add t.Cleanup in newTestHeadWithOptions immediately after creating
  the head, using _ = h.Close() to handle double-close gracefully
- Remove redundant t.Cleanup, defer, and explicit Close calls from
  tests that use newTestHead
- Add cleanup for heads created with NewHead directly in restart
  patterns (e.g., restartHeadAndVerifySeriesCounts, startHead)

Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
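
For illustration, here is a minimal, self-contained sketch (not Prometheus code) of the pattern the commit message describes: the test helper registers a tolerant t.Cleanup with `_ = Close()` right after creating the resource, so tests that close it explicitly (e.g. to simulate a restart) don't fail on the second close. The `resource`/`newTestResource` names are hypothetical stand-ins for the head and newTestHeadWithOptions.

```go
package example

import (
	"os"
	"testing"
)

// resource stands in for the TSDB head; closing its file twice returns an
// error, which is exactly the double-close case the helper must tolerate.
type resource struct {
	f *os.File
}

func (r *resource) Close() error { return r.f.Close() }

// newTestResource mirrors the newTestHeadWithOptions change: the cleanup is
// registered immediately after creation, and its error is deliberately
// ignored because a second Close after an explicit one is expected to fail.
func newTestResource(t testing.TB) *resource {
	f, err := os.CreateTemp(t.TempDir(), "res")
	if err != nil {
		t.Fatal(err)
	}
	r := &resource{f: f}
	t.Cleanup(func() {
		// Tolerate double-close: some tests close explicitly to "restart".
		_ = r.Close()
	})
	return r
}

func TestRestartPattern(t *testing.T) {
	r := newTestResource(t)
	// Test logic that wants a restart closes the resource itself...
	if err := r.Close(); err != nil {
		t.Fatal(err)
	}
	// ...and creates a fresh one; the helper's cleanup covers the new instance
	// as well, so no manual defer/Cleanup is needed at the call site.
	r = newTestResource(t)
	_ = r
}
```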
Author: Arve Knudsen, 2026-01-19 12:57:05 +01:00 (committed by GitHub)
Parent: dd85d7ca97
Commit: 572f247b4d
5 changed files with 21 additions and 146 deletions


@ -1452,9 +1452,6 @@ func TestHeadCompactionWithHistograms(t *testing.T) {
t.Run(fmt.Sprintf("float=%t", floatTest), func(t *testing.T) {
head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
require.NoError(t, head.Init(0))
t.Cleanup(func() {
require.NoError(t, head.Close())
})
minute := func(m int) int64 { return int64(m) * time.Minute.Milliseconds() }
ctx := context.Background()
@ -1631,13 +1628,7 @@ func TestSparseHistogramSpaceSavings(t *testing.T) {
),
func(t *testing.T) {
oldHead, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
t.Cleanup(func() {
require.NoError(t, oldHead.Close())
})
sparseHead, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
t.Cleanup(func() {
require.NoError(t, sparseHead.Close())
})
var allSparseSeries []struct {
baseLabels labels.Labels


@ -352,7 +352,6 @@ func TestHeadAppenderV2_ActiveAppenders(t *testing.T) {
func TestHeadAppenderV2_RaceBetweenSeriesCreationAndGC(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
t.Cleanup(func() { _ = head.Close() })
require.NoError(t, head.Init(0))
const totalSeries = 100_000
@ -395,7 +394,6 @@ func TestHeadAppenderV2_CanGCSeriesCreatedWithoutSamples(t *testing.T) {
t.Run(op, func(t *testing.T) {
chunkRange := time.Hour.Milliseconds()
head, _ := newTestHead(t, chunkRange, compression.None, true)
t.Cleanup(func() { _ = head.Close() })
require.NoError(t, head.Init(0))
@ -1864,7 +1862,8 @@ func TestHeadAppenderV2_Append_Histogram(t *testing.T) {
func TestHistogramInWALAndMmapChunk_AppenderV2(t *testing.T) {
head, _ := newTestHead(t, 3000, compression.None, false)
t.Cleanup(func() {
require.NoError(t, head.Close())
// Captures head by reference, so it closes the final head after restarts.
_ = head.Close()
})
require.NoError(t, head.Init(0))
@ -2011,9 +2010,10 @@ func TestHistogramInWALAndMmapChunk_AppenderV2(t *testing.T) {
}
// Restart head.
walDir := head.wal.Dir()
require.NoError(t, head.Close())
startHead := func() {
w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, compression.None)
w, err := wlog.NewSize(nil, nil, walDir, 32768, compression.None)
require.NoError(t, err)
head, err = NewHead(nil, nil, w, nil, head.opts, nil)
require.NoError(t, err)
@ -4081,7 +4081,6 @@ func TestWALSampleAndExemplarOrder_AppenderV2(t *testing.T) {
func TestHeadAppenderV2_Append_FloatWithSameTimestampAsPreviousHistogram(t *testing.T) {
head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
t.Cleanup(func() { head.Close() })
ls := labels.FromStrings(labels.MetricName, "test")
@ -4489,7 +4488,8 @@ func testHeadAppenderV2AppendHistogramAndCommitConcurrency(t *testing.T, appendF
func TestHeadAppenderV2_NumStaleSeries(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
t.Cleanup(func() {
require.NoError(t, head.Close())
// Captures head by reference, so it closes the final head after restarts.
_ = head.Close()
})
require.NoError(t, head.Init(0))


@ -230,7 +230,6 @@ func BenchmarkHeadAppender_AppendCommit(b *testing.B) {
opts := newTestHeadDefaultOptions(10000, false)
opts.EnableExemplarStorage = true // We benchmark with exemplars, benchmark with them.
h, _ := newTestHeadWithOptions(b, compression.None, opts)
b.Cleanup(func() { require.NoError(b, h.Close()) })
ts := int64(1000)


@ -84,6 +84,12 @@ func newTestHeadWithOptions(t testing.TB, compressWAL compression.Type, opts *He
h, err := NewHead(nil, nil, wal, nil, opts, nil)
require.NoError(t, err)
t.Cleanup(func() {
// Use _ = h.Close() instead of require.NoError because some tests
// explicitly close the head as part of their test logic (e.g., to
// restart/reopen the head), and we don't want to fail on double-close.
_ = h.Close()
})
require.NoError(t, h.chunkDiskMapper.IterateAllChunks(func(chunks.HeadSeriesRef, chunks.ChunkDiskMapperRef, int64, int64, uint16, chunkenc.Encoding, bool) error {
return nil
@ -95,9 +101,6 @@ func newTestHeadWithOptions(t testing.TB, compressWAL compression.Type, opts *He
func BenchmarkCreateSeries(b *testing.B) {
series := genSeries(b.N, 10, 0, 0)
h, _ := newTestHead(b, 10000, compression.None, false)
b.Cleanup(func() {
require.NoError(b, h.Close())
})
b.ReportAllocs()
b.ResetTimer()
@ -473,9 +476,6 @@ func BenchmarkLoadRealWLs(b *testing.B) {
// returned results are correct.
func TestHead_HighConcurrencyReadAndWrite(t *testing.T) {
head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
defer func() {
require.NoError(t, head.Close())
}()
seriesCnt := 1000
readConcurrency := 2
@ -703,9 +703,6 @@ func TestHead_ReadWAL(t *testing.T) {
}
head, w := newTestHead(t, 1000, compress, false)
defer func() {
require.NoError(t, head.Close())
}()
populateTestWL(t, w, entries, nil)
@ -1056,9 +1053,6 @@ func TestHead_WALCheckpointMultiRef(t *testing.T) {
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
h, w := newTestHead(t, 1000, compression.None, false)
t.Cleanup(func() {
require.NoError(t, h.Close())
})
populateTestWL(t, w, tc.walEntries, nil)
first, _, err := wlog.Segments(w.Dir())
@ -1134,9 +1128,6 @@ func TestHead_KeepSeriesInWALCheckpoint(t *testing.T) {
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
t.Cleanup(func() {
require.NoError(t, h.Close())
})
if tc.prepare != nil {
tc.prepare(t, h)
@ -1152,7 +1143,6 @@ func TestHead_KeepSeriesInWALCheckpoint(t *testing.T) {
func TestHead_ActiveAppenders(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
defer head.Close()
require.NoError(t, head.Init(0))
@ -1185,7 +1175,6 @@ func TestHead_ActiveAppenders(t *testing.T) {
func TestHead_RaceBetweenSeriesCreationAndGC(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
t.Cleanup(func() { _ = head.Close() })
require.NoError(t, head.Init(0))
const totalSeries = 100_000
@ -1228,7 +1217,6 @@ func TestHead_CanGarbagecollectSeriesCreatedWithoutSamples(t *testing.T) {
t.Run(op, func(t *testing.T) {
chunkRange := time.Hour.Milliseconds()
head, _ := newTestHead(t, chunkRange, compression.None, true)
t.Cleanup(func() { _ = head.Close() })
require.NoError(t, head.Init(0))
@ -1267,7 +1255,6 @@ func TestHead_UnknownWALRecord(t *testing.T) {
head, w := newTestHead(t, 1000, compression.None, false)
w.Log([]byte{255, 42})
require.NoError(t, head.Init(0))
require.NoError(t, head.Close())
}
// BenchmarkHead_Truncate is quite heavy, so consider running it with
@ -1277,9 +1264,6 @@ func BenchmarkHead_Truncate(b *testing.B) {
prepare := func(b *testing.B, churn int) *Head {
h, _ := newTestHead(b, 1000, compression.None, false)
b.Cleanup(func() {
require.NoError(b, h.Close())
})
h.initTime(0)
@ -1346,9 +1330,6 @@ func BenchmarkHead_Truncate(b *testing.B) {
func TestHead_Truncate(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
h.initTime(0)
@ -1671,9 +1652,6 @@ func TestHeadDeleteSeriesWithoutSamples(t *testing.T) {
},
}
head, w := newTestHead(t, 1000, compress, false)
defer func() {
require.NoError(t, head.Close())
}()
populateTestWL(t, w, entries, nil)
@ -1818,9 +1796,6 @@ func TestHeadDeleteSimple(t *testing.T) {
func TestDeleteUntilCurMax(t *testing.T) {
hb, _ := newTestHead(t, 1000000, compression.None, false)
defer func() {
require.NoError(t, hb.Close())
}()
numSamples := int64(10)
app := hb.Appender(context.Background())
@ -1963,9 +1938,6 @@ func TestDelete_e2e(t *testing.T) {
}
hb, _ := newTestHead(t, 100000, compression.None, false)
defer func() {
require.NoError(t, hb.Close())
}()
app := hb.Appender(context.Background())
for _, l := range lbls {
@ -2331,9 +2303,6 @@ func TestGCChunkAccess(t *testing.T) {
// Put a chunk, select it. GC it and then access it.
const chunkRange = 1000
h, _ := newTestHead(t, chunkRange, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
cOpts := chunkOpts{
chunkDiskMapper: h.chunkDiskMapper,
@ -2390,9 +2359,6 @@ func TestGCSeriesAccess(t *testing.T) {
// Put a series, select it. GC it and then access it.
const chunkRange = 1000
h, _ := newTestHead(t, chunkRange, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
cOpts := chunkOpts{
chunkDiskMapper: h.chunkDiskMapper,
@ -2449,9 +2415,6 @@ func TestGCSeriesAccess(t *testing.T) {
func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
h.initTime(0)
@ -2479,9 +2442,6 @@ func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) {
func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
h.initTime(0)
@ -2512,9 +2472,6 @@ func TestHead_LogRollback(t *testing.T) {
for _, compress := range []compression.Type{compression.None, compression.Snappy, compression.Zstd} {
t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
h, w := newTestHead(t, 1000, compress, false)
defer func() {
require.NoError(t, h.Close())
}()
app := h.Appender(context.Background())
_, err := app.Append(0, labels.FromStrings("a", "b"), 1, 2)
@ -2534,9 +2491,6 @@ func TestHead_LogRollback(t *testing.T) {
func TestHead_ReturnsSortedLabelValues(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
h.initTime(0)
@ -2807,9 +2761,6 @@ func TestHeadReadWriterRepair(t *testing.T) {
func TestNewWalSegmentOnTruncate(t *testing.T) {
h, wal := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
add := func(ts int64) {
app := h.Appender(context.Background())
_, err := app.Append(0, labels.FromStrings("a", "b"), ts, 0)
@ -2837,9 +2788,6 @@ func TestNewWalSegmentOnTruncate(t *testing.T) {
func TestAddDuplicateLabelName(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
add := func(labels labels.Labels, labelName string) {
app := h.Appender(context.Background())
@ -3035,9 +2983,6 @@ func TestIsolationRollback(t *testing.T) {
// Rollback after a failed append and test if the low watermark has progressed anyway.
hb, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, hb.Close())
}()
app := hb.Appender(context.Background())
_, err := app.Append(0, labels.FromStrings("foo", "bar"), 0, 0)
@ -3066,9 +3011,6 @@ func TestIsolationLowWatermarkMonotonous(t *testing.T) {
}
hb, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, hb.Close())
}()
app1 := hb.Appender(context.Background())
_, err := app1.Append(0, labels.FromStrings("foo", "bar"), 0, 0)
@ -3103,9 +3045,6 @@ func TestIsolationAppendIDZeroIsNoop(t *testing.T) {
}
h, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
h.initTime(0)
@ -3135,9 +3074,6 @@ func TestIsolationWithoutAdd(t *testing.T) {
}
hb, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, hb.Close())
}()
app := hb.Appender(context.Background())
require.NoError(t, app.Commit())
@ -3257,9 +3193,6 @@ func testOutOfOrderSamplesMetric(t *testing.T, scenario sampleTypeScenario, opti
func testHeadSeriesChunkRace(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
require.NoError(t, h.Init(0))
app := h.Appender(context.Background())
@ -3292,9 +3225,6 @@ func testHeadSeriesChunkRace(t *testing.T) {
func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, head.Close())
}()
const (
firstSeriesTimestamp int64 = 100
@ -3353,7 +3283,6 @@ func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) {
func TestHeadLabelValuesWithMatchers(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
t.Cleanup(func() { require.NoError(t, head.Close()) })
ctx := context.Background()
@ -3429,9 +3358,6 @@ func TestHeadLabelValuesWithMatchers(t *testing.T) {
func TestHeadLabelNamesWithMatchers(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, head.Close())
}()
app := head.Appender(context.Background())
for i := range 100 {
@ -3499,9 +3425,6 @@ func TestHeadShardedPostings(t *testing.T) {
headOpts := newTestHeadDefaultOptions(1000, false)
headOpts.EnableSharding = true
head, _ := newTestHeadWithOptions(t, compression.None, headOpts)
defer func() {
require.NoError(t, head.Close())
}()
ctx := context.Background()
@ -3562,9 +3485,6 @@ func TestHeadShardedPostings(t *testing.T) {
func TestErrReuseAppender(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, head.Close())
}()
app := head.Appender(context.Background())
_, err := app.Append(0, labels.FromStrings("test", "test"), 0, 0)
@ -3625,8 +3545,6 @@ func TestHeadMintAfterTruncation(t *testing.T) {
require.NoError(t, head.Truncate(7500))
require.Equal(t, int64(7500), head.MinTime())
require.Equal(t, int64(7500), head.minValidTime.Load())
require.NoError(t, head.Close())
}
func TestHeadExemplars(t *testing.T) {
@ -3648,13 +3566,11 @@ func TestHeadExemplars(t *testing.T) {
})
require.NoError(t, err)
require.NoError(t, app.Commit())
require.NoError(t, head.Close())
}
func BenchmarkHeadLabelValuesWithMatchers(b *testing.B) {
chunkRange := int64(2000)
head, _ := newTestHead(b, chunkRange, compression.None, false)
b.Cleanup(func() { require.NoError(b, head.Close()) })
ctx := context.Background()
@ -4100,9 +4016,6 @@ func TestAppendHistogram(t *testing.T) {
for _, numHistograms := range []int{1, 10, 150, 200, 250, 300} {
t.Run(strconv.Itoa(numHistograms), func(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
t.Cleanup(func() {
require.NoError(t, head.Close())
})
require.NoError(t, head.Init(0))
ingestTs := int64(0)
@ -4205,7 +4118,8 @@ func TestAppendHistogram(t *testing.T) {
func TestHistogramInWALAndMmapChunk(t *testing.T) {
head, _ := newTestHead(t, 3000, compression.None, false)
t.Cleanup(func() {
require.NoError(t, head.Close())
// Captures head by reference, so it closes the final head after restarts.
_ = head.Close()
})
require.NoError(t, head.Init(0))
@ -4352,9 +4266,10 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
}
// Restart head.
walDir := head.wal.Dir()
require.NoError(t, head.Close())
startHead := func() {
w, err := wlog.NewSize(nil, nil, head.wal.Dir(), 32768, compression.None)
w, err := wlog.NewSize(nil, nil, walDir, 32768, compression.None)
require.NoError(t, err)
head, err = NewHead(nil, nil, w, nil, head.opts, nil)
require.NoError(t, err)
@ -5680,9 +5595,6 @@ func testOOOMmapReplay(t *testing.T, scenario sampleTypeScenario) {
func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
require.NoError(t, h.Init(0))
@ -5727,6 +5639,9 @@ func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) {
require.NoError(t, err)
h, err = NewHead(nil, nil, wal, nil, h.opts, nil)
require.NoError(t, err)
t.Cleanup(func() {
_ = h.Close()
})
require.NoError(t, h.Init(0))
series, created, err = h.getOrCreate(seriesLabels.Hash(), seriesLabels, false)
@ -6367,9 +6282,6 @@ func TestCuttingNewHeadChunks(t *testing.T) {
for testName, tc := range testCases {
t.Run(testName, func(t *testing.T) {
h, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
a := h.Appender(context.Background())
@ -6435,9 +6347,6 @@ func TestHeadDetectsDuplicateSampleAtSizeLimit(t *testing.T) {
baseTS := int64(1695209650)
h, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
a := h.Appender(context.Background())
var err error
@ -6502,9 +6411,6 @@ func TestWALSampleAndExemplarOrder(t *testing.T) {
for testName, tc := range testcases {
t.Run(testName, func(t *testing.T) {
h, w := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
app := h.Appender(context.Background())
ref, err := tc.appendF(app, 10)
@ -6552,7 +6458,6 @@ func TestHeadCompactionWhileAppendAndCommitExemplar(t *testing.T) {
require.NoError(t, err)
h.Truncate(10)
app.Commit()
h.Close()
}
func labelsWithHashCollision() (labels.Labels, labels.Labels) {
@ -6648,7 +6553,6 @@ func TestPostingsCardinalityStats(t *testing.T) {
func TestHeadAppender_AppendFloatWithSameTimestampAsPreviousHistogram(t *testing.T) {
head, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
t.Cleanup(func() { head.Close() })
ls := labels.FromStrings(labels.MetricName, "test")
@ -6872,9 +6776,6 @@ func TestHeadAppender_AppendST(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
h, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
a := h.Appender(context.Background())
lbls := labels.FromStrings("foo", "bar")
for _, sample := range tc.appendableSamples {
@ -6950,10 +6851,6 @@ func TestHeadAppender_AppendHistogramSTZeroSample(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
h, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
defer func() {
require.NoError(t, h.Close())
}()
lbls := labels.FromStrings("foo", "bar")
var ref storage.SeriesRef
@ -6979,9 +6876,6 @@ func TestHeadCompactableDoesNotCompactEmptyHead(t *testing.T) {
// would return true which is incorrect. This test verifies that we short-circuit
// the check when the head has not yet had any samples added.
head, _ := newTestHead(t, 1, compression.None, false)
defer func() {
require.NoError(t, head.Close())
}()
require.False(t, head.compactable())
}
@ -7021,9 +6915,6 @@ func TestHeadAppendHistogramAndCommitConcurrency(t *testing.T) {
func testHeadAppendHistogramAndCommitConcurrency(t *testing.T, appendFn func(storage.Appender, int) error) {
head, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, head.Close())
}()
wg := sync.WaitGroup{}
wg.Add(2)
@ -7057,7 +6948,8 @@ func testHeadAppendHistogramAndCommitConcurrency(t *testing.T, appendFn func(sto
func TestHead_NumStaleSeries(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
t.Cleanup(func() {
require.NoError(t, head.Close())
// Captures head by reference, so it closes the final head after restarts.
_ = head.Close()
})
require.NoError(t, head.Init(0))
@ -7228,9 +7120,6 @@ func TestHistogramStalenessConversionMetrics(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
head, _ := newTestHead(t, 1000, compression.None, false)
defer func() {
require.NoError(t, head.Close())
}()
lbls := labels.FromStrings("name", tc.name)


@ -301,9 +301,6 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
for _, headChunk := range []bool{false, true} {
t.Run(fmt.Sprintf("name=%s, permutation=%d, headChunk=%t", tc.name, perm, headChunk), func(t *testing.T) {
h, _ := newTestHead(t, 1000, compression.None, true)
defer func() {
require.NoError(t, h.Close())
}()
require.NoError(t, h.Init(0))
s1, _, _ := h.getOrCreate(s1ID, s1Lset, false)
@ -389,7 +386,6 @@ func TestOOOHeadChunkReader_LabelValues(t *testing.T) {
func testOOOHeadChunkReader_LabelValues(t *testing.T, scenario sampleTypeScenario) {
chunkRange := int64(2000)
head, _ := newTestHead(t, chunkRange, compression.None, true)
t.Cleanup(func() { require.NoError(t, head.Close()) })
ctx := context.Background()