postgresql/src/backend/executor/nodeRecursiveunion.c

/*-------------------------------------------------------------------------
 *
 * nodeRecursiveunion.c
 *	  routines to handle RecursiveUnion nodes.
 *
 * To implement UNION (without ALL), we need a hashtable that stores tuples
 * already seen.  The hash key is computed from the grouping columns.
 *
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeRecursiveunion.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "executor/executor.h"
#include "executor/nodeRecursiveunion.h"
#include "miscadmin.h"
#include "utils/memutils.h"


/*
 * Initialize the hash table to empty.
 */
static void
build_hash_table(RecursiveUnionState *rustate)
{
    RecursiveUnion *node = (RecursiveUnion *) rustate->ps.plan;
    TupleDesc   desc = ExecGetResultType(outerPlanState(rustate));

    Assert(node->numCols > 0);

    /*
     * If both child plans deliver the same fixed tuple slot type, we can tell
     * BuildTupleHashTable to expect that slot type as input.  Otherwise,
     * we'll pass NULL denoting that any slot type is possible.
     */
    rustate->hashtable = BuildTupleHashTable(&rustate->ps,
                                             desc,
                                             ExecGetCommonChildSlotOps(&rustate->ps),
                                             node->numCols,
                                             node->dupColIdx,
                                             rustate->eqfuncoids,
                                             rustate->hashfunctions,
                                             node->dupCollations,
                                             node->numGroups,
                                             0,
                                             rustate->ps.state->es_query_cxt,
                                             rustate->tuplesContext,
                                             rustate->tempContext,
                                             false);
}

/* ----------------------------------------------------------------
 *		ExecRecursiveUnion(node)
 *
 *		Scans the recursive query sequentially and returns the next
 *		qualifying tuple.
 *
 * 1. evaluate the non-recursive term and assign the result to RT
 *
 * 2. execute the recursive term
 *
 * 2.1 WT := RT
 * 2.2 while WT is not empty, repeat 2.3 to 2.6; when WT is empty, RT is the result
 * 2.3 replace the name of the recursive term with WT
 * 2.4 evaluate the recursive term and store the result into WT
 * 2.5 append WT to RT
 * 2.6 go back to 2.2
 * ----------------------------------------------------------------
 */
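/*
 * An illustrative example (a hypothetical, simplified query): a statement
 * such as
 *
 *		WITH RECURSIVE t(n) AS (
 *			SELECT 1							-- non-recursive term
 *		  UNION									-- without ALL: duplicates are dropped
 *			SELECT n + 1 FROM t WHERE n < 100	-- recursive term
 *		)
 *		SELECT * FROM t;
 *
 * is typically executed by a RecursiveUnion node: the rows returned so far
 * play the role of RT, while the working table WT is what the recursive
 * term's WorkTableScan reads on each iteration.
 */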
static TupleTableSlot *
ExecRecursiveUnion(PlanState *pstate)
{
    RecursiveUnionState *node = castNode(RecursiveUnionState, pstate);
    PlanState  *outerPlan = outerPlanState(node);
    PlanState  *innerPlan = innerPlanState(node);
    RecursiveUnion *plan = (RecursiveUnion *) node->ps.plan;
    TupleTableSlot *slot;
    bool        isnew;

    CHECK_FOR_INTERRUPTS();

    /* 1. Evaluate non-recursive term */
    if (!node->recursing)
    {
        for (;;)
        {
            slot = ExecProcNode(outerPlan);
            if (TupIsNull(slot))
                break;
            if (plan->numCols > 0)
            {
                /* Find or build hashtable entry for this tuple's group */
                LookupTupleHashEntry(node->hashtable, slot, &isnew, NULL);
                /* Must reset temp context after each hashtable lookup */
                MemoryContextReset(node->tempContext);
                /* Ignore tuple if already seen */
                if (!isnew)
                    continue;
            }
            /* Each non-duplicate tuple goes to the working table ... */
            tuplestore_puttupleslot(node->working_table, slot);
            /* ... and to the caller */
            return slot;
        }
        node->recursing = true;
    }

    /* 2. Execute recursive term */
    for (;;)
    {
        slot = ExecProcNode(innerPlan);
        if (TupIsNull(slot))
        {
            Tuplestorestate *swaptemp;

            /* Done if there's nothing in the intermediate table */
            if (node->intermediate_empty)
                break;

            /*
             * Now we let the intermediate table become the work table.  We
             * need a fresh intermediate table, so delete the tuples from the
             * current working table and use that as the new intermediate
             * table.  This saves a round of free/malloc from creating a new
             * tuple store.
             */
            tuplestore_clear(node->working_table);
            swaptemp = node->working_table;
            node->working_table = node->intermediate_table;
            node->intermediate_table = swaptemp;

            /* mark the intermediate table as empty */
            node->intermediate_empty = true;

            /* reset the recursive term */
            innerPlan->chgParam = bms_add_member(innerPlan->chgParam,
                                                 plan->wtParam);

            /* and continue fetching from recursive term */
            continue;
        }

        if (plan->numCols > 0)
        {
            /* Find or build hashtable entry for this tuple's group */
            LookupTupleHashEntry(node->hashtable, slot, &isnew, NULL);
            /* Must reset temp context after each hashtable lookup */
            MemoryContextReset(node->tempContext);
            /* Ignore tuple if already seen */
            if (!isnew)
                continue;
        }

        /* Else, tuple is good; stash it in intermediate table ... */
        node->intermediate_empty = false;
        tuplestore_puttupleslot(node->intermediate_table, slot);
        /* ... and return it */
        return slot;
    }

    return NULL;
}

/* ----------------------------------------------------------------
 *		ExecInitRecursiveUnion
 * ----------------------------------------------------------------
 */
RecursiveUnionState *
ExecInitRecursiveUnion(RecursiveUnion *node, EState *estate, int eflags)
{
    RecursiveUnionState *rustate;
    ParamExecData *prmdata;

    /* check for unsupported flags */
    Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

    /*
     * create state structure
     */
    rustate = makeNode(RecursiveUnionState);
    rustate->ps.plan = (Plan *) node;
    rustate->ps.state = estate;
    rustate->ps.ExecProcNode = ExecRecursiveUnion;

    rustate->eqfuncoids = NULL;
    rustate->hashfunctions = NULL;
    rustate->hashtable = NULL;
    rustate->tempContext = NULL;
    rustate->tuplesContext = NULL;

    /* initialize processing state */
    rustate->recursing = false;
    rustate->intermediate_empty = true;
    rustate->working_table = tuplestore_begin_heap(false, false, work_mem);
    rustate->intermediate_table = tuplestore_begin_heap(false, false, work_mem);

    /*
     * If hashing, we need a per-tuple memory context for comparisons, and a
     * longer-lived context to store the hash table.  The table can't just be
     * kept in the per-query context because we want to be able to throw it
     * away when rescanning.  We can use a BumpContext to save storage,
     * because we will have no need to delete individual table entries.
     */
    if (node->numCols > 0)
    {
        rustate->tempContext =
            AllocSetContextCreate(CurrentMemoryContext,
                                  "RecursiveUnion",
                                  ALLOCSET_DEFAULT_SIZES);
        rustate->tuplesContext =
            BumpContextCreate(CurrentMemoryContext,
                              "RecursiveUnion hashed tuples",
                              ALLOCSET_DEFAULT_SIZES);
    }

    /*
     * Make the state structure available to descendant WorkTableScan nodes
     * via the Param slot reserved for it.
     */
    prmdata = &(estate->es_param_exec_vals[node->wtParam]);
    Assert(prmdata->execPlan == NULL);
    prmdata->value = PointerGetDatum(rustate);
    prmdata->isnull = false;
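
    /*
     * On the consumer side, nodeWorktablescan.c retrieves this pointer again
     * on its first call, roughly along these lines (a simplified sketch, not
     * a verbatim quote of that file):
     *
     *		ParamExecData *param = &(estate->es_param_exec_vals[plan->wtParam]);
     *		RecursiveUnionState *rustate =
     *			castNode(RecursiveUnionState, DatumGetPointer(param->value));
     *
     * and then scans rustate->working_table.
     */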

    /*
     * Miscellaneous initialization
     *
     * RecursiveUnion plans don't have expression contexts because they never
     * call ExecQual or ExecProject.
     */
    Assert(node->plan.qual == NIL);

    /*
     * RecursiveUnion nodes still have Result slots, which hold pointers to
     * tuples, so we have to initialize them.
     */
    ExecInitResultTypeTL(&rustate->ps);

    /*
     * Initialize result tuple type.  (Note: we have to set up the result type
     * before initializing child nodes, because nodeWorktablescan.c expects it
     * to be valid.)
     */
    rustate->ps.ps_ProjInfo = NULL;

    /*
     * initialize child nodes
     */
    outerPlanState(rustate) = ExecInitNode(outerPlan(node), estate, eflags);
    innerPlanState(rustate) = ExecInitNode(innerPlan(node), estate, eflags);

    /*
     * If hashing, precompute fmgr lookup data for inner loop, and create the
     * hash table.
     */
    if (node->numCols > 0)
    {
        execTuplesHashPrepare(node->numCols,
                              node->dupOperators,
                              &rustate->eqfuncoids,
                              &rustate->hashfunctions);
        build_hash_table(rustate);
    }

    return rustate;
}

/* ----------------------------------------------------------------
 *		ExecEndRecursiveUnion
 *
 *		frees any storage allocated through C routines.
 * ----------------------------------------------------------------
 */
void
ExecEndRecursiveUnion(RecursiveUnionState *node)
{
    /* Release tuplestores */
    tuplestore_end(node->working_table);
    tuplestore_end(node->intermediate_table);

    /* free subsidiary stuff including hashtable data */
    if (node->tempContext)
        MemoryContextDelete(node->tempContext);
    if (node->tuplesContext)
        MemoryContextDelete(node->tuplesContext);

    /*
     * close down subplans
     */
    ExecEndNode(outerPlanState(node));
    ExecEndNode(innerPlanState(node));
}

/* ----------------------------------------------------------------
 *		ExecReScanRecursiveUnion
 *
 *		Rescans the relation.
 * ----------------------------------------------------------------
 */
void
ExecReScanRecursiveUnion(RecursiveUnionState *node)
{
    PlanState  *outerPlan = outerPlanState(node);
    PlanState  *innerPlan = innerPlanState(node);
    RecursiveUnion *plan = (RecursiveUnion *) node->ps.plan;

    /*
     * Set recursive term's chgParam to tell it that we'll modify the working
     * table and therefore it has to rescan.
     */
    innerPlan->chgParam = bms_add_member(innerPlan->chgParam, plan->wtParam);

    /*
     * if chgParam of subnode is not null then plan will be re-scanned by
     * first ExecProcNode.  Because of above, we only have to do this to the
     * non-recursive term.
     */
    if (outerPlan->chgParam == NULL)
        ExecReScan(outerPlan);

    /* Empty hashtable if needed */
    if (plan->numCols > 0)
        ResetTupleHashTable(node->hashtable);

    /* reset processing state */
    node->recursing = false;
    node->intermediate_empty = true;
    tuplestore_clear(node->working_table);
    tuplestore_clear(node->intermediate_table);
}