Move commonly used context into PruneState and simplify helpers

heap_page_prune_and_freeze() and many of its helpers use the heap
buffer, block number, and page. Other helpers took the heap page and
didn't use it. Initializing these values once during
prune_freeze_setup() simplifies the helpers' interfaces and avoids
repeated calls to BufferGetBlockNumber() and BufferGetPage().

While updating PruneState, also reorganize its fields to make the layout
and member documentation more consistent.

Author: Melanie Plageman <melanieplageman@gmail.com>
Reviewed-by: Chao Li <li.evan.chao@gmail.com>
Discussion: https://postgr.es/m/BD8B69E7-26D8-4706-9164-597C6AE57812%40gmail.com
This commit is contained in:
Melanie Plageman 2026-03-05 16:10:29 -05:00
parent ac0accafd6
commit 59663e4207

View file

@@ -45,6 +45,16 @@ typedef struct
/* whether to attempt freezing tuples */
bool attempt_freeze;
struct VacuumCutoffs *cutoffs;
Relation relation;
/*
* Keep the buffer, block, and page handy so that helpers needing to
* access them don't need to make repeated calls to BufferGetBlockNumber()
* and BufferGetPage().
*/
BlockNumber block;
Buffer buffer;
Page page;
/*-------------------------------------------------------
* Fields describing what to do to the page
@@ -98,11 +108,19 @@ typedef struct
*/
int8 htsv[MaxHeapTuplesPerPage + 1];
/*
* Freezing-related state.
/*-------------------------------------------------------
* Working state for freezing
*-------------------------------------------------------
*/
HeapPageFreeze pagefrz;
/*
* The snapshot conflict horizon used when freezing tuples. The final
* snapshot conflict horizon for the record may be newer if pruning
* removes newer transaction IDs.
*/
TransactionId frz_conflict_horizon;
/*-------------------------------------------------------
* Information about what was done
*
@@ -129,13 +147,6 @@ typedef struct
int lpdead_items; /* number of items in the array */
OffsetNumber *deadoffsets; /* points directly to presult->deadoffsets */
/*
* The snapshot conflict horizon used when freezing tuples. The final
* snapshot conflict horizon for the record may be newer if pruning
* removes newer transaction IDs.
*/
TransactionId frz_conflict_horizon;
/*
* all_visible and all_frozen indicate if the all-visible and all-frozen
* bits in the visibility map can be set for this page after pruning.
@@ -162,14 +173,12 @@ static void prune_freeze_setup(PruneFreezeParams *params,
MultiXactId *new_relmin_mxid,
PruneFreezeResult *presult,
PruneState *prstate);
static void prune_freeze_plan(Oid reloid, Buffer buffer,
PruneState *prstate,
static void prune_freeze_plan(PruneState *prstate,
OffsetNumber *off_loc);
static HTSV_Result heap_prune_satisfies_vacuum(PruneState *prstate,
HeapTuple tup,
Buffer buffer);
HeapTuple tup);
static inline HTSV_Result htsv_get_valid_status(int status);
static void heap_prune_chain(Page page, BlockNumber blockno, OffsetNumber maxoff,
static void heap_prune_chain(OffsetNumber maxoff,
OffsetNumber rootoffnum, PruneState *prstate);
static void heap_prune_record_prunable(PruneState *prstate, TransactionId xid);
static void heap_prune_record_redirect(PruneState *prstate,
@@ -181,15 +190,14 @@ static void heap_prune_record_dead_or_unused(PruneState *prstate, OffsetNumber o
bool was_normal);
static void heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum, bool was_normal);
static void heap_prune_record_unchanged_lp_unused(Page page, PruneState *prstate, OffsetNumber offnum);
static void heap_prune_record_unchanged_lp_normal(Page page, PruneState *prstate, OffsetNumber offnum);
static void heap_prune_record_unchanged_lp_dead(Page page, PruneState *prstate, OffsetNumber offnum);
static void heap_prune_record_unchanged_lp_unused(PruneState *prstate, OffsetNumber offnum);
static void heap_prune_record_unchanged_lp_normal(PruneState *prstate, OffsetNumber offnum);
static void heap_prune_record_unchanged_lp_dead(PruneState *prstate, OffsetNumber offnum);
static void heap_prune_record_unchanged_lp_redirect(PruneState *prstate, OffsetNumber offnum);
static void page_verify_redirects(Page page);
static bool heap_page_will_freeze(Relation relation, Buffer buffer,
bool did_tuple_hint_fpi, bool do_prune, bool do_hint_prune,
static bool heap_page_will_freeze(bool did_tuple_hint_fpi, bool do_prune, bool do_hint_prune,
PruneState *prstate);
@@ -342,6 +350,10 @@ prune_freeze_setup(PruneFreezeParams *params,
Assert(!(params->options & HEAP_PAGE_PRUNE_FREEZE) || params->cutoffs);
prstate->attempt_freeze = (params->options & HEAP_PAGE_PRUNE_FREEZE) != 0;
prstate->cutoffs = params->cutoffs;
prstate->relation = params->relation;
prstate->block = BufferGetBlockNumber(params->buffer);
prstate->buffer = params->buffer;
prstate->page = BufferGetPage(params->buffer);
/*
* Our strategy is to scan the page and make lists of items to change,
@@ -455,16 +467,15 @@ prune_freeze_setup(PruneFreezeParams *params,
* *off_loc is used for error callback and cleared before returning.
*/
static void
prune_freeze_plan(Oid reloid, Buffer buffer, PruneState *prstate,
OffsetNumber *off_loc)
prune_freeze_plan(PruneState *prstate, OffsetNumber *off_loc)
{
Page page = BufferGetPage(buffer);
BlockNumber blockno = BufferGetBlockNumber(buffer);
OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
Page page = prstate->page;
BlockNumber blockno = prstate->block;
OffsetNumber maxoff = PageGetMaxOffsetNumber(prstate->page);
OffsetNumber offnum;
HeapTupleData tup;
tup.t_tableOid = reloid;
tup.t_tableOid = RelationGetRelid(prstate->relation);
/*
* Determine HTSV for all tuples, and queue them up for processing as HOT
@@ -505,7 +516,7 @@ prune_freeze_plan(Oid reloid, Buffer buffer, PruneState *prstate,
/* Nothing to do if slot doesn't contain a tuple */
if (!ItemIdIsUsed(itemid))
{
heap_prune_record_unchanged_lp_unused(page, prstate, offnum);
heap_prune_record_unchanged_lp_unused(prstate, offnum);
continue;
}
@@ -518,7 +529,7 @@ prune_freeze_plan(Oid reloid, Buffer buffer, PruneState *prstate,
if (unlikely(prstate->mark_unused_now))
heap_prune_record_unused(prstate, offnum, false);
else
heap_prune_record_unchanged_lp_dead(page, prstate, offnum);
heap_prune_record_unchanged_lp_dead(prstate, offnum);
continue;
}
@@ -539,8 +550,7 @@ prune_freeze_plan(Oid reloid, Buffer buffer, PruneState *prstate,
tup.t_len = ItemIdGetLength(itemid);
ItemPointerSet(&tup.t_self, blockno, offnum);
prstate->htsv[offnum] = heap_prune_satisfies_vacuum(prstate, &tup,
buffer);
prstate->htsv[offnum] = heap_prune_satisfies_vacuum(prstate, &tup);
if (!HeapTupleHeaderIsHeapOnly(htup))
prstate->root_items[prstate->nroot_items++] = offnum;
@@ -571,7 +581,7 @@ prune_freeze_plan(Oid reloid, Buffer buffer, PruneState *prstate,
*off_loc = offnum;
/* Process this item or chain of items */
heap_prune_chain(page, blockno, maxoff, offnum, prstate);
heap_prune_chain(maxoff, offnum, prstate);
}
/*
@@ -627,7 +637,7 @@ prune_freeze_plan(Oid reloid, Buffer buffer, PruneState *prstate,
}
}
else
heap_prune_record_unchanged_lp_normal(page, prstate, offnum);
heap_prune_record_unchanged_lp_normal(prstate, offnum);
}
/* We should now have processed every tuple exactly once */
@@ -648,7 +658,7 @@ prune_freeze_plan(Oid reloid, Buffer buffer, PruneState *prstate,
/*
* Decide whether to proceed with freezing according to the freeze plans
* prepared for the given heap buffer. If freezing is chosen, this function
* prepared for the current heap buffer. If freezing is chosen, this function
* performs several pre-freeze checks.
*
* The values of do_prune, do_hint_prune, and did_tuple_hint_fpi must be
@@ -660,8 +670,7 @@ prune_freeze_plan(Oid reloid, Buffer buffer, PruneState *prstate,
* page, and false otherwise.
*/
static bool
heap_page_will_freeze(Relation relation, Buffer buffer,
bool did_tuple_hint_fpi,
heap_page_will_freeze(bool did_tuple_hint_fpi,
bool do_prune,
bool do_hint_prune,
PruneState *prstate)
@@ -709,18 +718,19 @@ heap_page_will_freeze(Relation relation, Buffer buffer,
* Freezing would make the page all-frozen. Have already emitted
* an FPI or will do so anyway?
*/
if (RelationNeedsWAL(relation))
if (RelationNeedsWAL(prstate->relation))
{
if (did_tuple_hint_fpi)
do_freeze = true;
else if (do_prune)
{
if (XLogCheckBufferNeedsBackup(buffer))
if (XLogCheckBufferNeedsBackup(prstate->buffer))
do_freeze = true;
}
else if (do_hint_prune)
{
if (XLogHintBitIsNeeded() && XLogCheckBufferNeedsBackup(buffer))
if (XLogHintBitIsNeeded() &&
XLogCheckBufferNeedsBackup(prstate->buffer))
do_freeze = true;
}
}
@@ -733,7 +743,7 @@ heap_page_will_freeze(Relation relation, Buffer buffer,
* Validate the tuples we will be freezing before entering the
* critical section.
*/
heap_pre_freeze_checks(buffer, prstate->frozen, prstate->nfrozen);
heap_pre_freeze_checks(prstate->buffer, prstate->frozen, prstate->nfrozen);
/*
* Calculate what the snapshot conflict horizon should be for a record
@@ -822,8 +832,6 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
TransactionId *new_relfrozen_xid,
MultiXactId *new_relmin_mxid)
{
Buffer buffer = params->buffer;
Page page = BufferGetPage(buffer);
PruneState prstate;
bool do_freeze;
bool do_prune;
@@ -842,8 +850,7 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
* Prepare queue of state changes to later be executed in a critical
* section.
*/
prune_freeze_plan(RelationGetRelid(params->relation),
buffer, &prstate, off_loc);
prune_freeze_plan(&prstate, off_loc);
/*
* If checksums are enabled, calling heap_prune_satisfies_vacuum() while
@@ -861,15 +868,14 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
* pd_prune_xid field or the page was marked full, we will update the hint
* bit.
*/
do_hint_prune = ((PageHeader) page)->pd_prune_xid != prstate.new_prune_xid ||
PageIsFull(page);
do_hint_prune = ((PageHeader) prstate.page)->pd_prune_xid != prstate.new_prune_xid ||
PageIsFull(prstate.page);
/*
* Decide if we want to go ahead with freezing according to the freeze
* plans we prepared, or not.
*/
do_freeze = heap_page_will_freeze(params->relation, buffer,
did_tuple_hint_fpi,
do_freeze = heap_page_will_freeze(did_tuple_hint_fpi,
do_prune,
do_hint_prune,
&prstate);
@@ -901,14 +907,14 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
* Update the page's pd_prune_xid field to either zero, or the lowest
* XID of any soon-prunable tuple.
*/
((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
((PageHeader) prstate.page)->pd_prune_xid = prstate.new_prune_xid;
/*
* Also clear the "page is full" flag, since there's no point in
* repeating the prune/defrag process until something else happens to
* the page.
*/
PageClearFull(page);
PageClearFull(prstate.page);
/*
* If that's all we had to do to the page, this is a non-WAL-logged
@ -916,7 +922,7 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
* the buffer dirty below.
*/
if (!do_freeze && !do_prune)
MarkBufferDirtyHint(buffer, true);
MarkBufferDirtyHint(prstate.buffer, true);
}
if (do_prune || do_freeze)
@@ -924,21 +930,21 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
/* Apply the planned item changes and repair page fragmentation. */
if (do_prune)
{
heap_page_prune_execute(buffer, false,
heap_page_prune_execute(prstate.buffer, false,
prstate.redirected, prstate.nredirected,
prstate.nowdead, prstate.ndead,
prstate.nowunused, prstate.nunused);
}
if (do_freeze)
heap_freeze_prepared_tuples(buffer, prstate.frozen, prstate.nfrozen);
heap_freeze_prepared_tuples(prstate.buffer, prstate.frozen, prstate.nfrozen);
MarkBufferDirty(buffer);
MarkBufferDirty(prstate.buffer);
/*
* Emit a WAL XLOG_HEAP2_PRUNE* record showing what we did
*/
if (RelationNeedsWAL(params->relation))
if (RelationNeedsWAL(prstate.relation))
{
/*
* The snapshotConflictHorizon for the whole record should be the
@@ -958,7 +964,7 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
else
conflict_xid = prstate.latest_xid_removed;
log_heap_prune_and_freeze(params->relation, buffer,
log_heap_prune_and_freeze(prstate.relation, prstate.buffer,
InvalidBuffer, /* vmbuffer */
0, /* vmflags */
conflict_xid,
@@ -1018,12 +1024,12 @@ heap_page_prune_and_freeze(PruneFreezeParams *params,
* Perform visibility checks for heap pruning.
*/
static HTSV_Result
heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer)
heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup)
{
HTSV_Result res;
TransactionId dead_after;
res = HeapTupleSatisfiesVacuumHorizon(tup, buffer, &dead_after);
res = HeapTupleSatisfiesVacuumHorizon(tup, prstate->buffer, &dead_after);
if (res != HEAPTUPLE_RECENTLY_DEAD)
return res;
@@ -1100,13 +1106,14 @@ htsv_get_valid_status(int status)
* based on that outcome.
*/
static void
heap_prune_chain(Page page, BlockNumber blockno, OffsetNumber maxoff,
OffsetNumber rootoffnum, PruneState *prstate)
heap_prune_chain(OffsetNumber maxoff, OffsetNumber rootoffnum,
PruneState *prstate)
{
TransactionId priorXmax = InvalidTransactionId;
ItemId rootlp;
OffsetNumber offnum;
OffsetNumber chainitems[MaxHeapTuplesPerPage];
Page page = prstate->page;
/*
* After traversing the HOT chain, ndeadchain is the index in chainitems
@@ -1235,7 +1242,7 @@ heap_prune_chain(Page page, BlockNumber blockno, OffsetNumber maxoff,
/*
* Advance to next chain member.
*/
Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blockno);
Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == prstate->block);
offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
priorXmax = HeapTupleHeaderGetUpdateXid(htup);
}
@@ -1270,7 +1277,7 @@ process_chain:
i++;
}
for (; i < nchain; i++)
heap_prune_record_unchanged_lp_normal(page, prstate, chainitems[i]);
heap_prune_record_unchanged_lp_normal(prstate, chainitems[i]);
}
else if (ndeadchain == nchain)
{
@@ -1296,7 +1303,7 @@ process_chain:
/* the rest of tuples in the chain are normal, unchanged tuples */
for (int i = ndeadchain; i < nchain; i++)
heap_prune_record_unchanged_lp_normal(page, prstate, chainitems[i]);
heap_prune_record_unchanged_lp_normal(prstate, chainitems[i]);
}
}
@@ -1421,7 +1428,7 @@ heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum, bool was_norm
* Record an unused line pointer that is left unchanged.
*/
static void
heap_prune_record_unchanged_lp_unused(Page page, PruneState *prstate, OffsetNumber offnum)
heap_prune_record_unchanged_lp_unused(PruneState *prstate, OffsetNumber offnum)
{
Assert(!prstate->processed[offnum]);
prstate->processed[offnum] = true;
@@ -1432,9 +1439,10 @@ heap_prune_record_unchanged_lp_unused(Page page, PruneState *prstate, OffsetNumb
* update bookkeeping of tuple counts and page visibility.
*/
static void
heap_prune_record_unchanged_lp_normal(Page page, PruneState *prstate, OffsetNumber offnum)
heap_prune_record_unchanged_lp_normal(PruneState *prstate, OffsetNumber offnum)
{
HeapTupleHeader htup;
Page page = prstate->page;
Assert(!prstate->processed[offnum]);
prstate->processed[offnum] = true;
@@ -1615,7 +1623,7 @@ heap_prune_record_unchanged_lp_normal(Page page, PruneState *prstate, OffsetNumb
* Record line pointer that was already LP_DEAD and is left unchanged.
*/
static void
heap_prune_record_unchanged_lp_dead(Page page, PruneState *prstate, OffsetNumber offnum)
heap_prune_record_unchanged_lp_dead(PruneState *prstate, OffsetNumber offnum)
{
Assert(!prstate->processed[offnum]);
prstate->processed[offnum] = true;