postgresql/src/backend/executor/nodeMaterial.c

/*-------------------------------------------------------------------------
*
* nodeMaterial.c
* Routines to handle materialization nodes.
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.48 2004/12/31 21:59:45 pgsql Exp $
*
*-------------------------------------------------------------------------
*/

/*
* INTERFACE ROUTINES
* ExecMaterial - materialize the result of a subplan
* ExecInitMaterial - initialize node and subnodes
* ExecEndMaterial - shutdown node and subnodes
*
*/
#include "postgres.h"
#include "access/heapam.h"
#include "executor/executor.h"
#include "executor/nodeMaterial.h"
#include "miscadmin.h"
#include "utils/tuplestore.h"
/* ----------------------------------------------------------------
* ExecMaterial
*
* As long as we are at the end of the data collected in the tuplestore,
* we collect one new row from the subplan on each call, and stash it
* aside in the tuplestore before returning it. The tuplestore is
* only read if we are asked to scan backwards, rescan, or mark/restore.
*
* ----------------------------------------------------------------
*/
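/*
 * A Material node is useful where the parent node may need to rescan its
 * input or use mark/restore; for example, the planner can place one above
 * the inner input of a merge join so that already-read rows are re-fetched
 * from the tuplestore rather than by re-executing the subplan.  A sketch of
 * such a plan shape (table and index names are hypothetical):
 *
 *		Merge Join
 *		  ->  Sort
 *				->  Seq Scan on a
 *		  ->  Materialize
 *				->  Index Scan using b_pkey on b
 */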
TupleTableSlot * /* result tuple from subplan */
ExecMaterial(MaterialState *node)
{
EState *estate;
ScanDirection dir;
bool forward;
Tuplestorestate *tuplestorestate;
HeapTuple heapTuple = NULL;
bool should_free = false;
bool eof_tuplestore;
TupleTableSlot *slot;
/*
* get state info from node
*/
estate = node->ss.ps.state;
dir = estate->es_direction;
forward = ScanDirectionIsForward(dir);
tuplestorestate = (Tuplestorestate *) node->tuplestorestate;
/*
* If first time through, initialize the tuplestore.
*/
if (tuplestorestate == NULL)
{
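/*
 * Request a tuplestore that allows random access, since it may later be
 * read backwards or via mark/restore; it need not survive the current
 * transaction, and it may use up to work_mem kilobytes of memory before
 * spilling to disk.
 */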
tuplestorestate = tuplestore_begin_heap(true, false, work_mem);
node->tuplestorestate = (void *) tuplestorestate;
}
/*
* If we are not at the end of the tuplestore, or are going backwards,
* try to fetch a tuple from tuplestore.
*/
eof_tuplestore = tuplestore_ateof(tuplestorestate);
if (!forward && eof_tuplestore)
{
if (!node->eof_underlying)
{
/*
* When reversing direction at tuplestore EOF, the first
* getheaptuple call will fetch the last-added tuple; but we
* want to return the one before that, if possible. So do an
* extra fetch.
*/
heapTuple = tuplestore_getheaptuple(tuplestorestate,
forward,
&should_free);
if (heapTuple == NULL)
return NULL; /* the tuplestore must be empty */
if (should_free)
heap_freetuple(heapTuple);
}
eof_tuplestore = false;
}
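
/*
 * Try to fetch a tuple from the tuplestore's current read position.
 * Coming up empty while scanning forward means we have reached the end
 * of what has been materialized so far.
 */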
if (!eof_tuplestore)
{
heapTuple = tuplestore_getheaptuple(tuplestorestate,
forward,
&should_free);
if (heapTuple == NULL && forward)
eof_tuplestore = true;
}
/*
* If necessary, try to fetch another row from the subplan.
*
* Note: the eof_underlying state variable exists to short-circuit
* further subplan calls. It's not optional, unfortunately, because
* some plan node types are not robust about being called again when
* they've already returned NULL.
*/
if (eof_tuplestore && !node->eof_underlying)
{
PlanState *outerNode;
TupleTableSlot *outerslot;
/*
* We can only get here with forward==true, so no need to worry
* about which direction the subplan will go.
*/
outerNode = outerPlanState(node);
outerslot = ExecProcNode(outerNode);
if (TupIsNull(outerslot))
{
node->eof_underlying = true;
return NULL;
}
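
/*
 * The tuple still belongs to the subplan's result slot, so we must not
 * free it ourselves.
 */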
heapTuple = outerslot->val;
should_free = false;
/*
* Append returned tuple to tuplestore, too. NOTE: because the
* tuplestore is certainly in EOF state, its read position will
* move forward over the added tuple. This is what we want.
*/
tuplestore_puttuple(tuplestorestate, (void *) heapTuple);
}
/*
* Return the obtained tuple.
*/
slot = (TupleTableSlot *) node->ss.ps.ps_ResultTupleSlot;
return ExecStoreTuple(heapTuple, slot, InvalidBuffer, should_free);
}

/* ----------------------------------------------------------------
* ExecInitMaterial
* ----------------------------------------------------------------
*/
MaterialState *
ExecInitMaterial(Material *node, EState *estate)
{
MaterialState *matstate;
Plan *outerPlan;
/*
* create state structure
*/
matstate = makeNode(MaterialState);
matstate->ss.ps.plan = (Plan *) node;
matstate->ss.ps.state = estate;
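
/* the tuplestore itself is created lazily, on the first ExecMaterial call */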
matstate->tuplestorestate = NULL;
matstate->eof_underlying = false;
/*
* Miscellaneous initialization
*
* Materialization nodes don't need ExprContexts because they never call
* ExecQual or ExecProject.
*/
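/* this node type uses two tuple table slots: the result slot and the scan slot */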
#define MATERIAL_NSLOTS 2
/*
* tuple table initialization
*
* material nodes only return tuples from their materialized relation.
*/
ExecInitResultTupleSlot(estate, &matstate->ss.ps);
ExecInitScanTupleSlot(estate, &matstate->ss);
/*
* initialize child nodes
*/
outerPlan = outerPlan(node);
outerPlanState(matstate) = ExecInitNode(outerPlan, estate);
/*
* initialize tuple type. no need to initialize projection info
* because this node doesn't do projections.
*/
ExecAssignResultTypeFromOuterPlan(&matstate->ss.ps);
ExecAssignScanTypeFromOuterPlan(&matstate->ss);
matstate->ss.ps.ps_ProjInfo = NULL;
return matstate;
}
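
/* ----------------------------------------------------------------
 * ExecCountSlotsMaterial
 * ----------------------------------------------------------------
 */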
int
ExecCountSlotsMaterial(Material *node)
{
return ExecCountSlotsNode(outerPlan((Plan *) node)) +
ExecCountSlotsNode(innerPlan((Plan *) node)) +
MATERIAL_NSLOTS;
}

/* ----------------------------------------------------------------
* ExecEndMaterial
* ----------------------------------------------------------------
*/
void
ExecEndMaterial(MaterialState *node)
{
/*
* clean out the tuple table
*/
ExecClearTuple(node->ss.ss_ScanTupleSlot);
/*
* Release tuplestore resources
*/
if (node->tuplestorestate != NULL)
tuplestore_end((Tuplestorestate *) node->tuplestorestate);
node->tuplestorestate = NULL;
/*
* shut down the subplan
*/
ExecEndNode(outerPlanState(node));
}

/* ----------------------------------------------------------------
* ExecMaterialMarkPos
*
* Calls tuplestore to save the current position in the stored file.
* ----------------------------------------------------------------
*/
void
ExecMaterialMarkPos(MaterialState *node)
{
/*
* if we haven't materialized yet, just return.
*/
if (!node->tuplestorestate)
return;
tuplestore_markpos((Tuplestorestate *) node->tuplestorestate);
}

/* ----------------------------------------------------------------
* ExecMaterialRestrPos
*
* Calls tuplestore to restore the last saved file position.
* ----------------------------------------------------------------
*/
void
ExecMaterialRestrPos(MaterialState *node)
{
/*
* if we haven't materialized yet, just return.
*/
if (!node->tuplestorestate)
return;
/*
* restore the scan to the previously marked position
*/
tuplestore_restorepos((Tuplestorestate *) node->tuplestorestate);
}

/* ----------------------------------------------------------------
* ExecMaterialReScan
*
* Rescans the materialized relation.
* ----------------------------------------------------------------
*/
void
ExecMaterialReScan(MaterialState *node, ExprContext *exprCtxt)
{
/*
* If we haven't materialized yet, just return.  If the outer plan's
* chgParam is not NULL it will be re-scanned by ExecProcNode anyway;
* otherwise there is no reason to re-scan it at all.
*/
if (!node->tuplestorestate)
return;
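
/* clear any tuple left over in the result slot from the previous scan */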
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
/*
* If the subnode is to be rescanned, we forget the previously stored
* results; we will have to re-read the subplan and re-store its output.
*
* Otherwise we can just rewind and rescan the stored output. The state
* of the subnode does not change.
*/
if (((PlanState *) node)->lefttree->chgParam != NULL)
{
tuplestore_end((Tuplestorestate *) node->tuplestorestate);
node->tuplestorestate = NULL;
node->eof_underlying = false;
}
else
tuplestore_rescan((Tuplestorestate *) node->tuplestorestate);
}