Use UnlockReleaseBuffer() in more places

An upcoming commit will make UnlockReleaseBuffer() considerably faster and
more scalable than calling LockBuffer(BUFFER_LOCK_UNLOCK) and ReleaseBuffer()
separately. Even without that commit, it provides a small performance benefit.

Most of the callsites changed in this patch are not performance sensitive;
however, some, like the nbtree ones, are in critical paths.

This patch changes all the easily convertible places over to
UnlockReleaseBuffer() mainly because I needed to check all of them anyway, and
reducing cases where the operations are done separately makes the checking
easier.

Reviewed-by: Melanie Plageman <melanieplageman@gmail.com>
Discussion: https://postgr.es/m/5ubipyssiju5twkb7zgqwdr7q2vhpkpmuelxfpanetlk6ofnop@hvxb4g2amb2d
This commit is contained in:
Andres Freund 2026-03-27 15:27:04 -04:00
parent 41d3d64e87
commit 8df3c48e46
8 changed files with 42 additions and 28 deletions

View file

@ -368,8 +368,7 @@ gin_check_posting_tree_parent_keys_consistency(Relation rel, BlockNumber posting
stack->next = ptr;
}
}
LockBuffer(buffer, GIN_UNLOCK);
ReleaseBuffer(buffer);
UnlockReleaseBuffer(buffer);
/* Step to next item in the queue */
stack_next = stack->next;
@ -642,8 +641,7 @@ gin_check_parent_keys_consistency(Relation rel,
prev_attnum = current_attnum;
}
LockBuffer(buffer, GIN_UNLOCK);
ReleaseBuffer(buffer);
UnlockReleaseBuffer(buffer);
/* Step to next item in the queue */
stack_next = stack->next;

View file

@ -193,8 +193,7 @@ get_raw_page_internal(text *relname, ForkNumber forknum, BlockNumber blkno)
memcpy(raw_page_data, BufferGetPage(buf), BLCKSZ);
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buf);
UnlockReleaseBuffer(buf);
relation_close(rel, AccessShareLock);

View file

@ -1697,8 +1697,7 @@ heap_fetch(Relation relation,
offnum = ItemPointerGetOffsetNumber(tid);
if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
UnlockReleaseBuffer(buffer);
*userbuf = InvalidBuffer;
tuple->t_data = NULL;
return false;
@ -1714,8 +1713,7 @@ heap_fetch(Relation relation,
*/
if (!ItemIdIsNormal(lp))
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
UnlockReleaseBuffer(buffer);
*userbuf = InvalidBuffer;
tuple->t_data = NULL;
return false;

View file

@ -711,14 +711,15 @@ loop:
* unlock the two buffers in, so this can be slightly simpler than the
* code above.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
if (otherBuffer == InvalidBuffer)
ReleaseBuffer(buffer);
UnlockReleaseBuffer(buffer);
else if (otherBlock != targetBlock)
{
UnlockReleaseBuffer(buffer);
LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
}
else
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
/* Is there an ongoing bulk extension? */
if (bistate && bistate->next_free != InvalidBlockNumber)

View file

@ -1011,24 +1011,47 @@ _bt_relandgetbuf(Relation rel, Buffer obuf, BlockNumber blkno, int access)
Assert(BlockNumberIsValid(blkno));
if (BufferIsValid(obuf))
_bt_unlockbuf(rel, obuf);
buf = ReleaseAndReadBuffer(obuf, rel, blkno);
_bt_lockbuf(rel, buf, access);
{
if (BufferGetBlockNumber(obuf) == blkno)
{
/* trade in old lock mode for new lock */
_bt_unlockbuf(rel, obuf);
buf = obuf;
}
else
{
/* release lock and pin at once, that's a bit more efficient */
_bt_relbuf(rel, obuf);
buf = ReadBuffer(rel, blkno);
}
}
else
buf = ReadBuffer(rel, blkno);
_bt_lockbuf(rel, buf, access);
_bt_checkpage(rel, buf);
return buf;
}
/*
* _bt_relbuf() -- release a locked buffer.
*
* Lock and pin (refcount) are both dropped.
* Lock and pin (refcount) are both dropped. This is a bit more efficient than
* doing the two operations separately.
*/
void
_bt_relbuf(Relation rel, Buffer buf)
{
_bt_unlockbuf(rel, buf);
ReleaseBuffer(buf);
/*
* Buffer is pinned and locked, which means that it is expected to be
* defined and addressable. Check that proactively.
*/
VALGRIND_CHECK_MEM_IS_DEFINED(BufferGetPage(buf), BLCKSZ);
if (!RelationUsesLocalBuffers(rel))
VALGRIND_MAKE_MEM_NOACCESS(BufferGetPage(buf), BLCKSZ);
UnlockReleaseBuffer(buf);
}
/*

View file

@ -2561,8 +2561,7 @@ again:
XLogNeedsFlush(BufferGetLSN(buf_hdr)) &&
StrategyRejectBuffer(strategy, buf_hdr, from_ring))
{
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
UnpinBuffer(buf_hdr);
UnlockReleaseBuffer(buf);
goto again;
}

View file

@ -915,9 +915,8 @@ fsm_vacuum_page(Relation rel, FSMAddress addr,
((FSMPage) PageGetContents(page))->fp_next_slot = 0;
BufferFinishSetHintBits(buf, false, false);
}
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buf);
UnlockReleaseBuffer(buf);
return max_avail;
}

View file

@ -221,9 +221,7 @@ modify_rel_block(PG_FUNCTION_ARGS)
*/
memcpy(page, BufferGetPage(buf), BLCKSZ);
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buf);
UnlockReleaseBuffer(buf);
/*
* Don't want to have a buffer in-memory that's marked valid where the
@ -496,8 +494,7 @@ invalidate_rel_block(PG_FUNCTION_ARGS)
else
FlushOneBuffer(buf);
}
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buf);
UnlockReleaseBuffer(buf);
if (BufferIsLocal(buf))
InvalidateLocalBuffer(GetLocalBufferDescriptor(-buf - 1), true);