mirror of
https://github.com/postgres/postgres.git
synced 2026-02-03 20:40:14 -05:00
generate their output tuple descriptors from their target lists (ie, using ExecAssignResultTypeFromTL()). We long ago fixed things so that all node types have minimally valid tlists, so there's no longer any good reason to have two different ways of doing it. This change is needed to fix bug reported by Hayden James: the fix of 2005-11-03 to emit the correct column names after optimizing away a SubqueryScan node didn't work if the new top-level plan node used ExecAssignResultTypeFromOuterPlan to generate its tupdesc, since the next plan node down won't have the correct column labels.
311 lines
8 KiB
C
311 lines
8 KiB
C
/*-------------------------------------------------------------------------
 *
 * nodeMaterial.c
 *	  Routines to handle materialization nodes.
 *
 * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.51 2005/11/23 20:27:57 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
|
|
/*
 * INTERFACE ROUTINES
 *		ExecMaterial			- materialize the result of a subplan
 *		ExecInitMaterial		- initialize node and subnodes
 *		ExecEndMaterial			- shutdown node and subnodes
 *
 */
|
|
#include "postgres.h"

#include "access/heapam.h"
#include "executor/executor.h"
#include "executor/nodeMaterial.h"
#include "miscadmin.h"
#include "utils/tuplestore.h"
|
|
|
|
/* ----------------------------------------------------------------
|
|
* ExecMaterial
|
|
*
|
|
* As long as we are at the end of the data collected in the tuplestore,
|
|
* we collect one new row from the subplan on each call, and stash it
|
|
* aside in the tuplestore before returning it. The tuplestore is
|
|
* only read if we are asked to scan backwards, rescan, or mark/restore.
|
|
*
|
|
* ----------------------------------------------------------------
|
|
*/
|
|
TupleTableSlot * /* result tuple from subplan */
|
|
ExecMaterial(MaterialState *node)
|
|
{
|
|
EState *estate;
|
|
ScanDirection dir;
|
|
bool forward;
|
|
Tuplestorestate *tuplestorestate;
|
|
HeapTuple heapTuple = NULL;
|
|
bool should_free = false;
|
|
bool eof_tuplestore;
|
|
TupleTableSlot *slot;
|
|
|
|
/*
|
|
* get state info from node
|
|
*/
|
|
estate = node->ss.ps.state;
|
|
dir = estate->es_direction;
|
|
forward = ScanDirectionIsForward(dir);
|
|
tuplestorestate = (Tuplestorestate *) node->tuplestorestate;
|
|
|
|
/*
|
|
* If first time through, initialize the tuplestore.
|
|
*/
|
|
if (tuplestorestate == NULL)
|
|
{
|
|
tuplestorestate = tuplestore_begin_heap(true, false, work_mem);
|
|
|
|
node->tuplestorestate = (void *) tuplestorestate;
|
|
}
|
|
|
|
/*
|
|
* If we are not at the end of the tuplestore, or are going backwards, try
|
|
* to fetch a tuple from tuplestore.
|
|
*/
|
|
eof_tuplestore = tuplestore_ateof(tuplestorestate);
|
|
|
|
if (!forward && eof_tuplestore)
|
|
{
|
|
if (!node->eof_underlying)
|
|
{
|
|
/*
|
|
* When reversing direction at tuplestore EOF, the first
|
|
* getheaptuple call will fetch the last-added tuple; but we want
|
|
* to return the one before that, if possible. So do an extra
|
|
* fetch.
|
|
*/
|
|
heapTuple = tuplestore_getheaptuple(tuplestorestate,
|
|
forward,
|
|
&should_free);
|
|
if (heapTuple == NULL)
|
|
return NULL; /* the tuplestore must be empty */
|
|
if (should_free)
|
|
heap_freetuple(heapTuple);
|
|
}
|
|
eof_tuplestore = false;
|
|
}
|
|
|
|
if (!eof_tuplestore)
|
|
{
|
|
heapTuple = tuplestore_getheaptuple(tuplestorestate,
|
|
forward,
|
|
&should_free);
|
|
if (heapTuple == NULL && forward)
|
|
eof_tuplestore = true;
|
|
}
|
|
|
|
/*
|
|
* If necessary, try to fetch another row from the subplan.
|
|
*
|
|
* Note: the eof_underlying state variable exists to short-circuit further
|
|
* subplan calls. It's not optional, unfortunately, because some plan
|
|
* node types are not robust about being called again when they've already
|
|
* returned NULL.
|
|
*/
|
|
if (eof_tuplestore && !node->eof_underlying)
|
|
{
|
|
PlanState *outerNode;
|
|
TupleTableSlot *outerslot;
|
|
|
|
/*
|
|
* We can only get here with forward==true, so no need to worry about
|
|
* which direction the subplan will go.
|
|
*/
|
|
outerNode = outerPlanState(node);
|
|
outerslot = ExecProcNode(outerNode);
|
|
if (TupIsNull(outerslot))
|
|
{
|
|
node->eof_underlying = true;
|
|
return NULL;
|
|
}
|
|
heapTuple = ExecFetchSlotTuple(outerslot);
|
|
should_free = false;
|
|
|
|
/*
|
|
* Append returned tuple to tuplestore, too. NOTE: because the
|
|
* tuplestore is certainly in EOF state, its read position will move
|
|
* forward over the added tuple. This is what we want.
|
|
*/
|
|
tuplestore_puttuple(tuplestorestate, (void *) heapTuple);
|
|
}
|
|
|
|
/*
|
|
* Return the obtained tuple, if any.
|
|
*/
|
|
slot = (TupleTableSlot *) node->ss.ps.ps_ResultTupleSlot;
|
|
if (heapTuple)
|
|
return ExecStoreTuple(heapTuple, slot, InvalidBuffer, should_free);
|
|
else
|
|
return ExecClearTuple(slot);
|
|
}
|
|
|
|
/* ----------------------------------------------------------------
|
|
* ExecInitMaterial
|
|
* ----------------------------------------------------------------
|
|
*/
|
|
MaterialState *
|
|
ExecInitMaterial(Material *node, EState *estate)
|
|
{
|
|
MaterialState *matstate;
|
|
Plan *outerPlan;
|
|
|
|
/*
|
|
* create state structure
|
|
*/
|
|
matstate = makeNode(MaterialState);
|
|
matstate->ss.ps.plan = (Plan *) node;
|
|
matstate->ss.ps.state = estate;
|
|
|
|
matstate->tuplestorestate = NULL;
|
|
matstate->eof_underlying = false;
|
|
|
|
/*
|
|
* Miscellaneous initialization
|
|
*
|
|
* Materialization nodes don't need ExprContexts because they never call
|
|
* ExecQual or ExecProject.
|
|
*/
|
|
|
|
#define MATERIAL_NSLOTS 2
|
|
|
|
/*
|
|
* tuple table initialization
|
|
*
|
|
* material nodes only return tuples from their materialized relation.
|
|
*/
|
|
ExecInitResultTupleSlot(estate, &matstate->ss.ps);
|
|
ExecInitScanTupleSlot(estate, &matstate->ss);
|
|
|
|
/*
|
|
* initializes child nodes
|
|
*/
|
|
outerPlan = outerPlan(node);
|
|
outerPlanState(matstate) = ExecInitNode(outerPlan, estate);
|
|
|
|
/*
|
|
* initialize tuple type. no need to initialize projection info because
|
|
* this node doesn't do projections.
|
|
*/
|
|
ExecAssignResultTypeFromTL(&matstate->ss.ps);
|
|
ExecAssignScanTypeFromOuterPlan(&matstate->ss);
|
|
matstate->ss.ps.ps_ProjInfo = NULL;
|
|
|
|
return matstate;
|
|
}
|
|
|
|
int
|
|
ExecCountSlotsMaterial(Material *node)
|
|
{
|
|
return ExecCountSlotsNode(outerPlan((Plan *) node)) +
|
|
ExecCountSlotsNode(innerPlan((Plan *) node)) +
|
|
MATERIAL_NSLOTS;
|
|
}
|
|
|
|
/* ----------------------------------------------------------------
|
|
* ExecEndMaterial
|
|
* ----------------------------------------------------------------
|
|
*/
|
|
void
|
|
ExecEndMaterial(MaterialState *node)
|
|
{
|
|
/*
|
|
* clean out the tuple table
|
|
*/
|
|
ExecClearTuple(node->ss.ss_ScanTupleSlot);
|
|
|
|
/*
|
|
* Release tuplestore resources
|
|
*/
|
|
if (node->tuplestorestate != NULL)
|
|
tuplestore_end((Tuplestorestate *) node->tuplestorestate);
|
|
node->tuplestorestate = NULL;
|
|
|
|
/*
|
|
* shut down the subplan
|
|
*/
|
|
ExecEndNode(outerPlanState(node));
|
|
}
|
|
|
|
/* ----------------------------------------------------------------
|
|
* ExecMaterialMarkPos
|
|
*
|
|
* Calls tuplestore to save the current position in the stored file.
|
|
* ----------------------------------------------------------------
|
|
*/
|
|
void
|
|
ExecMaterialMarkPos(MaterialState *node)
|
|
{
|
|
/*
|
|
* if we haven't materialized yet, just return.
|
|
*/
|
|
if (!node->tuplestorestate)
|
|
return;
|
|
|
|
tuplestore_markpos((Tuplestorestate *) node->tuplestorestate);
|
|
}
|
|
|
|
/* ----------------------------------------------------------------
|
|
* ExecMaterialRestrPos
|
|
*
|
|
* Calls tuplestore to restore the last saved file position.
|
|
* ----------------------------------------------------------------
|
|
*/
|
|
void
|
|
ExecMaterialRestrPos(MaterialState *node)
|
|
{
|
|
/*
|
|
* if we haven't materialized yet, just return.
|
|
*/
|
|
if (!node->tuplestorestate)
|
|
return;
|
|
|
|
/*
|
|
* restore the scan to the previously marked position
|
|
*/
|
|
tuplestore_restorepos((Tuplestorestate *) node->tuplestorestate);
|
|
}
|
|
|
|
/* ----------------------------------------------------------------
|
|
* ExecMaterialReScan
|
|
*
|
|
* Rescans the materialized relation.
|
|
* ----------------------------------------------------------------
|
|
*/
|
|
void
|
|
ExecMaterialReScan(MaterialState *node, ExprContext *exprCtxt)
|
|
{
|
|
/*
|
|
* If we haven't materialized yet, just return. If outerplan' chgParam is
|
|
* not NULL then it will be re-scanned by ExecProcNode, else - no reason
|
|
* to re-scan it at all.
|
|
*/
|
|
if (!node->tuplestorestate)
|
|
return;
|
|
|
|
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
|
|
|
|
/*
|
|
* If subnode is to be rescanned then we forget previous stored results;
|
|
* we have to re-read the subplan and re-store.
|
|
*
|
|
* Otherwise we can just rewind and rescan the stored output. The state of
|
|
* the subnode does not change.
|
|
*/
|
|
if (((PlanState *) node)->lefttree->chgParam != NULL)
|
|
{
|
|
tuplestore_end((Tuplestorestate *) node->tuplestorestate);
|
|
node->tuplestorestate = NULL;
|
|
node->eof_underlying = false;
|
|
}
|
|
else
|
|
tuplestore_rescan((Tuplestorestate *) node->tuplestorestate);
|
|
}
|