/*-------------------------------------------------------------------------
 *
 * nodeValuesscan.c
 *	  Support routines for scanning Values lists
 *	  ("VALUES (...), (...), ..." in rangetable).
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeValuesscan.c
 *
 *-------------------------------------------------------------------------
 */
/*
 * INTERFACE ROUTINES
 *		ExecValuesScan			scans a values list.
 *		ExecValuesNext			retrieve next tuple in sequential order.
 *		ExecInitValuesScan		creates and initializes a valuesscan node.
 *		ExecEndValuesScan		releases any storage allocated.
 *		ExecReScanValuesScan	rescans the values list
 */
|
#include "postgres.h"
|
|
|
|
|
|
|
|
|
|
#include "executor/executor.h"
|
|
|
|
|
#include "executor/nodeValuesscan.h"
|
2016-06-03 18:07:14 -04:00
|
|
|
#include "utils/expandeddatum.h"
|
2006-08-01 21:59:48 -04:00
|
|
|
|
|
|
|
|
|
|
|
|
|
static TupleTableSlot *ValuesNext(ValuesScanState *node);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
|
|
|
|
* Scan Support
|
|
|
|
|
* ----------------------------------------------------------------
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
|
|
|
|
* ValuesNext
|
|
|
|
|
*
|
|
|
|
|
* This is a workhorse for ExecValuesScan
|
|
|
|
|
* ----------------------------------------------------------------
|
|
|
|
|
*/
|
|
|
|
|
static TupleTableSlot *
|
|
|
|
|
ValuesNext(ValuesScanState *node)
|
|
|
|
|
{
|
|
|
|
|
TupleTableSlot *slot;
|
2006-10-03 20:30:14 -04:00
|
|
|
EState *estate;
|
|
|
|
|
ExprContext *econtext;
|
|
|
|
|
ScanDirection direction;
|
|
|
|
|
List *exprlist;
|
2006-08-01 21:59:48 -04:00
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* get information from the estate and scan state
|
|
|
|
|
*/
|
|
|
|
|
estate = node->ss.ps.state;
|
|
|
|
|
direction = estate->es_direction;
|
|
|
|
|
slot = node->ss.ss_ScanTupleSlot;
|
2006-08-02 14:58:21 -04:00
|
|
|
econtext = node->rowcontext;
|
2006-08-01 21:59:48 -04:00
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Get the next tuple. Return NULL if no more tuples.
|
|
|
|
|
*/
|
|
|
|
|
if (ScanDirectionIsForward(direction))
|
|
|
|
|
{
|
|
|
|
|
if (node->curr_idx < node->array_len)
|
|
|
|
|
node->curr_idx++;
|
|
|
|
|
if (node->curr_idx < node->array_len)
|
|
|
|
|
exprlist = node->exprlists[node->curr_idx];
|
|
|
|
|
else
|
|
|
|
|
exprlist = NIL;
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
if (node->curr_idx >= 0)
|
|
|
|
|
node->curr_idx--;
|
|
|
|
|
if (node->curr_idx >= 0)
|
|
|
|
|
exprlist = node->exprlists[node->curr_idx];
|
|
|
|
|
else
|
|
|
|
|
exprlist = NIL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
2006-10-03 20:30:14 -04:00
|
|
|
* Always clear the result slot; this is appropriate if we are at the end
|
|
|
|
|
* of the data, and if we're not, we still need it as the first step of
|
|
|
|
|
* the store-virtual-tuple protocol. It seems wise to clear the slot
|
2006-08-02 14:58:21 -04:00
|
|
|
* before we reset the context it might have pointers into.
|
2006-08-01 21:59:48 -04:00
|
|
|
*/
|
|
|
|
|
ExecClearTuple(slot);
|
|
|
|
|
|
2006-08-02 14:58:21 -04:00
|
|
|
if (exprlist)
|
2006-08-01 21:59:48 -04:00
|
|
|
{
|
2006-08-02 14:58:21 -04:00
|
|
|
MemoryContext oldContext;
|
|
|
|
|
List *exprstatelist;
|
|
|
|
|
Datum *values;
|
|
|
|
|
bool *isnull;
|
2016-06-03 18:07:14 -04:00
|
|
|
Form_pg_attribute *att;
|
2006-08-02 14:58:21 -04:00
|
|
|
ListCell *lc;
|
|
|
|
|
int resind;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Get rid of any prior cycle's leftovers. We use ReScanExprContext
|
|
|
|
|
* not just ResetExprContext because we want any registered shutdown
|
|
|
|
|
* callbacks to be called.
|
|
|
|
|
*/
|
|
|
|
|
ReScanExprContext(econtext);
|
|
|
|
|
|
|
|
|
|
/*
|
2006-10-03 20:30:14 -04:00
|
|
|
* Build the expression eval state in the econtext's per-tuple memory.
|
|
|
|
|
* This is a tad unusual, but we want to delete the eval state again
|
|
|
|
|
* when we move to the next row, to avoid growth of memory
|
|
|
|
|
* requirements over a long values list.
|
2006-08-02 14:58:21 -04:00
|
|
|
*/
|
|
|
|
|
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
|
|
|
|
|
|
|
|
|
|
/*
|
2006-10-03 20:30:14 -04:00
|
|
|
* Pass NULL, not my plan node, because we don't want anything in this
|
|
|
|
|
* transient state linking into permanent state. The only possibility
|
|
|
|
|
* is a SubPlan, and there shouldn't be any (any subselects in the
|
|
|
|
|
* VALUES list should be InitPlans).
|
2006-08-02 14:58:21 -04:00
|
|
|
*/
|
|
|
|
|
exprstatelist = (List *) ExecInitExpr((Expr *) exprlist, NULL);
|
|
|
|
|
|
|
|
|
|
/* parser should have checked all sublists are the same length */
|
|
|
|
|
Assert(list_length(exprstatelist) == slot->tts_tupleDescriptor->natts);
|
|
|
|
|
|
|
|
|
|
/*
|
2006-10-03 20:30:14 -04:00
|
|
|
* Compute the expressions and build a virtual result tuple. We
|
|
|
|
|
* already did ExecClearTuple(slot).
|
2006-08-02 14:58:21 -04:00
|
|
|
*/
|
|
|
|
|
values = slot->tts_values;
|
|
|
|
|
isnull = slot->tts_isnull;
|
2016-06-03 18:07:14 -04:00
|
|
|
att = slot->tts_tupleDescriptor->attrs;
|
2006-08-02 14:58:21 -04:00
|
|
|
|
|
|
|
|
resind = 0;
|
|
|
|
|
foreach(lc, exprstatelist)
|
|
|
|
|
{
|
2006-10-03 20:30:14 -04:00
|
|
|
ExprState *estate = (ExprState *) lfirst(lc);
|
2006-08-02 14:58:21 -04:00
|
|
|
|
|
|
|
|
values[resind] = ExecEvalExpr(estate,
|
|
|
|
|
econtext,
|
2017-01-19 17:12:38 -05:00
|
|
|
&isnull[resind]);
|
2016-06-03 18:07:14 -04:00
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* We must force any R/W expanded datums to read-only state, in
|
|
|
|
|
* case they are multiply referenced in the plan node's output
|
|
|
|
|
* expressions, or in case we skip the output projection and the
|
|
|
|
|
* output column is multiply referenced in higher plan nodes.
|
|
|
|
|
*/
|
|
|
|
|
values[resind] = MakeExpandedObjectReadOnly(values[resind],
|
|
|
|
|
isnull[resind],
|
|
|
|
|
att[resind]->attlen);
|
|
|
|
|
|
2006-08-02 14:58:21 -04:00
|
|
|
resind++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
MemoryContextSwitchTo(oldContext);
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* And return the virtual tuple.
|
|
|
|
|
*/
|
|
|
|
|
ExecStoreVirtualTuple(slot);
|
2006-08-01 21:59:48 -04:00
|
|
|
}
|
|
|
|
|
|
2006-08-02 14:58:21 -04:00
|
|
|
return slot;
|
2006-08-01 21:59:48 -04:00
|
|
|
}
|
|
|
|
|
|
Re-implement EvalPlanQual processing to improve its performance and eliminate
a lot of strange behaviors that occurred in join cases. We now identify the
"current" row for every joined relation in UPDATE, DELETE, and SELECT FOR
UPDATE/SHARE queries. If an EvalPlanQual recheck is necessary, we jam the
appropriate row into each scan node in the rechecking plan, forcing it to emit
only that one row. The former behavior could rescan the whole of each joined
relation for each recheck, which was terrible for performance, and what's much
worse could result in duplicated output tuples.
Also, the original implementation of EvalPlanQual could not re-use the recheck
execution tree --- it had to go through a full executor init and shutdown for
every row to be tested. To avoid this overhead, I've associated a special
runtime Param with each LockRows or ModifyTable plan node, and arranged to
make every scan node below such a node depend on that Param. Thus, by
signaling a change in that Param, the EPQ machinery can just rescan the
already-built test plan.
This patch also adds a prohibition on set-returning functions in the
targetlist of SELECT FOR UPDATE/SHARE. This is needed to avoid the
duplicate-output-tuple problem. It seems fairly reasonable since the
other restrictions on SELECT FOR UPDATE are meant to ensure that there
is a unique correspondence between source tuples and result tuples,
which an output SRF destroys as much as anything else does.
2009-10-25 22:26:45 -04:00
|
|
|
/*
|
|
|
|
|
* ValuesRecheck -- access method routine to recheck a tuple in EvalPlanQual
|
|
|
|
|
*/
|
|
|
|
|
static bool
|
|
|
|
|
ValuesRecheck(ValuesScanState *node, TupleTableSlot *slot)
|
|
|
|
|
{
|
|
|
|
|
/* nothing to check */
|
|
|
|
|
return true;
|
|
|
|
|
}
|
2006-08-01 21:59:48 -04:00
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
|
|
|
|
* ExecValuesScan(node)
|
|
|
|
|
*
|
|
|
|
|
* Scans the values lists sequentially and returns the next qualifying
|
|
|
|
|
* tuple.
|
Re-implement EvalPlanQual processing to improve its performance and eliminate
a lot of strange behaviors that occurred in join cases. We now identify the
"current" row for every joined relation in UPDATE, DELETE, and SELECT FOR
UPDATE/SHARE queries. If an EvalPlanQual recheck is necessary, we jam the
appropriate row into each scan node in the rechecking plan, forcing it to emit
only that one row. The former behavior could rescan the whole of each joined
relation for each recheck, which was terrible for performance, and what's much
worse could result in duplicated output tuples.
Also, the original implementation of EvalPlanQual could not re-use the recheck
execution tree --- it had to go through a full executor init and shutdown for
every row to be tested. To avoid this overhead, I've associated a special
runtime Param with each LockRows or ModifyTable plan node, and arranged to
make every scan node below such a node depend on that Param. Thus, by
signaling a change in that Param, the EPQ machinery can just rescan the
already-built test plan.
This patch also adds a prohibition on set-returning functions in the
targetlist of SELECT FOR UPDATE/SHARE. This is needed to avoid the
duplicate-output-tuple problem. It seems fairly reasonable since the
other restrictions on SELECT FOR UPDATE are meant to ensure that there
is a unique correspondence between source tuples and result tuples,
which an output SRF destroys as much as anything else does.
2009-10-25 22:26:45 -04:00
|
|
|
* We call the ExecScan() routine and pass it the appropriate
|
|
|
|
|
* access method functions.
|
2006-08-01 21:59:48 -04:00
|
|
|
* ----------------------------------------------------------------
|
|
|
|
|
*/
|
|
|
|
|
TupleTableSlot *
|
|
|
|
|
ExecValuesScan(ValuesScanState *node)
|
|
|
|
|
{
|
Re-implement EvalPlanQual processing to improve its performance and eliminate
a lot of strange behaviors that occurred in join cases. We now identify the
"current" row for every joined relation in UPDATE, DELETE, and SELECT FOR
UPDATE/SHARE queries. If an EvalPlanQual recheck is necessary, we jam the
appropriate row into each scan node in the rechecking plan, forcing it to emit
only that one row. The former behavior could rescan the whole of each joined
relation for each recheck, which was terrible for performance, and what's much
worse could result in duplicated output tuples.
Also, the original implementation of EvalPlanQual could not re-use the recheck
execution tree --- it had to go through a full executor init and shutdown for
every row to be tested. To avoid this overhead, I've associated a special
runtime Param with each LockRows or ModifyTable plan node, and arranged to
make every scan node below such a node depend on that Param. Thus, by
signaling a change in that Param, the EPQ machinery can just rescan the
already-built test plan.
This patch also adds a prohibition on set-returning functions in the
targetlist of SELECT FOR UPDATE/SHARE. This is needed to avoid the
duplicate-output-tuple problem. It seems fairly reasonable since the
other restrictions on SELECT FOR UPDATE are meant to ensure that there
is a unique correspondence between source tuples and result tuples,
which an output SRF destroys as much as anything else does.
2009-10-25 22:26:45 -04:00
|
|
|
return ExecScan(&node->ss,
|
|
|
|
|
(ExecScanAccessMtd) ValuesNext,
|
|
|
|
|
(ExecScanRecheckMtd) ValuesRecheck);
|
2006-08-01 21:59:48 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
|
|
|
|
* ExecInitValuesScan
|
|
|
|
|
* ----------------------------------------------------------------
|
|
|
|
|
*/
|
|
|
|
|
ValuesScanState *
|
|
|
|
|
ExecInitValuesScan(ValuesScan *node, EState *estate, int eflags)
|
|
|
|
|
{
|
2006-10-03 20:30:14 -04:00
|
|
|
ValuesScanState *scanstate;
|
|
|
|
|
TupleDesc tupdesc;
|
|
|
|
|
ListCell *vtl;
|
|
|
|
|
int i;
|
|
|
|
|
PlanState *planstate;
|
2006-08-01 21:59:48 -04:00
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* ValuesScan should not have any children.
|
|
|
|
|
*/
|
|
|
|
|
Assert(outerPlan(node) == NULL);
|
|
|
|
|
Assert(innerPlan(node) == NULL);
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* create new ScanState for node
|
|
|
|
|
*/
|
|
|
|
|
scanstate = makeNode(ValuesScanState);
|
|
|
|
|
scanstate->ss.ps.plan = (Plan *) node;
|
|
|
|
|
scanstate->ss.ps.state = estate;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Miscellaneous initialization
|
|
|
|
|
*/
|
|
|
|
|
planstate = &scanstate->ss.ps;
|
2006-08-02 14:58:21 -04:00
|
|
|
|
|
|
|
|
/*
|
2014-05-06 12:12:18 -04:00
|
|
|
* Create expression contexts. We need two, one for per-sublist
|
2006-10-03 20:30:14 -04:00
|
|
|
* processing and one for execScan.c to use for quals and projections. We
|
|
|
|
|
* cheat a little by using ExecAssignExprContext() to build both.
|
2006-08-02 14:58:21 -04:00
|
|
|
*/
|
|
|
|
|
ExecAssignExprContext(estate, planstate);
|
|
|
|
|
scanstate->rowcontext = planstate->ps_ExprContext;
|
2006-08-01 21:59:48 -04:00
|
|
|
ExecAssignExprContext(estate, planstate);
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* tuple table initialization
|
|
|
|
|
*/
|
|
|
|
|
ExecInitResultTupleSlot(estate, &scanstate->ss.ps);
|
|
|
|
|
ExecInitScanTupleSlot(estate, &scanstate->ss);
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* initialize child expressions
|
|
|
|
|
*/
|
|
|
|
|
scanstate->ss.ps.targetlist = (List *)
|
|
|
|
|
ExecInitExpr((Expr *) node->scan.plan.targetlist,
|
|
|
|
|
(PlanState *) scanstate);
|
|
|
|
|
scanstate->ss.ps.qual = (List *)
|
|
|
|
|
ExecInitExpr((Expr *) node->scan.plan.qual,
|
|
|
|
|
(PlanState *) scanstate);
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* get info about values list
|
|
|
|
|
*/
|
Ensure that RowExprs and whole-row Vars produce the expected column names.
At one time it wasn't terribly important what column names were associated
with the fields of a composite Datum, but since the introduction of
operations like row_to_json(), it's important that looking up the rowtype
ID embedded in the Datum returns the column names that users would expect.
That did not work terribly well before this patch: you could get the column
names of the underlying table, or column aliases from any level of the
query, depending on minor details of the plan tree. You could even get
totally empty field names, which is disastrous for cases like row_to_json().
To fix this for whole-row Vars, look to the RTE referenced by the Var, and
make sure its column aliases are applied to the rowtype associated with
the result Datums. This is a tad scary because we might have to return
a transient RECORD type even though the Var is declared as having some
named rowtype. In principle it should be all right because the record
type will still be physically compatible with the named rowtype; but
I had to weaken one Assert in ExecEvalConvertRowtype, and there might be
third-party code containing similar assumptions.
Similarly, RowExprs have to be willing to override the column names coming
from a named composite result type and produce a RECORD when the column
aliases visible at the site of the RowExpr differ from the underlying
table's column names.
In passing, revert the decision made in commit 398f70ec070fe601 to add
an alias-list argument to ExecTypeFromExprList: better to provide that
functionality in a separate function. This also reverts most of the code
changes in d68581483564ec0f, which we don't need because we're no longer
depending on the tupdesc found in the child plan node's result slot to be
blessed.
Back-patch to 9.4, but not earlier, since this solution changes the results
in some cases that users might not have realized were buggy. We'll apply a
more restricted form of this patch in older branches.
2014-11-10 15:21:09 -05:00
|
|
|
tupdesc = ExecTypeFromExprList((List *) linitial(node->values_lists));
|
2006-08-01 21:59:48 -04:00
|
|
|
|
|
|
|
|
ExecAssignScanType(&scanstate->ss, tupdesc);
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Other node-specific setup
|
|
|
|
|
*/
|
|
|
|
|
scanstate->curr_idx = -1;
|
2007-02-18 21:23:12 -05:00
|
|
|
scanstate->array_len = list_length(node->values_lists);
|
2006-08-01 21:59:48 -04:00
|
|
|
|
|
|
|
|
/* convert list of sublists into array of sublists for easy addressing */
|
|
|
|
|
scanstate->exprlists = (List **)
|
|
|
|
|
palloc(scanstate->array_len * sizeof(List *));
|
|
|
|
|
i = 0;
|
2007-02-18 21:23:12 -05:00
|
|
|
foreach(vtl, node->values_lists)
|
2006-08-01 21:59:48 -04:00
|
|
|
{
|
|
|
|
|
scanstate->exprlists[i++] = (List *) lfirst(vtl);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Initialize result tuple type and projection info.
|
|
|
|
|
*/
|
|
|
|
|
ExecAssignResultTypeFromTL(&scanstate->ss.ps);
|
|
|
|
|
ExecAssignScanProjectionInfo(&scanstate->ss);
|
|
|
|
|
|
|
|
|
|
return scanstate;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
|
|
|
|
* ExecEndValuesScan
|
|
|
|
|
*
|
|
|
|
|
* frees any storage allocated through C routines.
|
|
|
|
|
* ----------------------------------------------------------------
|
|
|
|
|
*/
|
|
|
|
|
void
|
|
|
|
|
ExecEndValuesScan(ValuesScanState *node)
|
|
|
|
|
{
|
|
|
|
|
/*
|
2006-08-02 14:58:21 -04:00
|
|
|
* Free both exprcontexts
|
2006-08-01 21:59:48 -04:00
|
|
|
*/
|
|
|
|
|
ExecFreeExprContext(&node->ss.ps);
|
2006-08-02 14:58:21 -04:00
|
|
|
node->ss.ps.ps_ExprContext = node->rowcontext;
|
|
|
|
|
ExecFreeExprContext(&node->ss.ps);
|
2006-08-01 21:59:48 -04:00
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* clean out the tuple table
|
|
|
|
|
*/
|
|
|
|
|
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
|
|
|
|
|
ExecClearTuple(node->ss.ss_ScanTupleSlot);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
2010-07-12 13:01:06 -04:00
|
|
|
* ExecReScanValuesScan
|
2006-08-01 21:59:48 -04:00
|
|
|
*
|
|
|
|
|
* Rescans the relation.
|
|
|
|
|
* ----------------------------------------------------------------
|
|
|
|
|
*/
|
|
|
|
|
void
|
2010-07-12 13:01:06 -04:00
|
|
|
ExecReScanValuesScan(ValuesScanState *node)
|
2006-08-01 21:59:48 -04:00
|
|
|
{
|
|
|
|
|
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
|
Re-implement EvalPlanQual processing to improve its performance and eliminate
a lot of strange behaviors that occurred in join cases. We now identify the
"current" row for every joined relation in UPDATE, DELETE, and SELECT FOR
UPDATE/SHARE queries. If an EvalPlanQual recheck is necessary, we jam the
appropriate row into each scan node in the rechecking plan, forcing it to emit
only that one row. The former behavior could rescan the whole of each joined
relation for each recheck, which was terrible for performance, and what's much
worse could result in duplicated output tuples.
Also, the original implementation of EvalPlanQual could not re-use the recheck
execution tree --- it had to go through a full executor init and shutdown for
every row to be tested. To avoid this overhead, I've associated a special
runtime Param with each LockRows or ModifyTable plan node, and arranged to
make every scan node below such a node depend on that Param. Thus, by
signaling a change in that Param, the EPQ machinery can just rescan the
already-built test plan.
This patch also adds a prohibition on set-returning functions in the
targetlist of SELECT FOR UPDATE/SHARE. This is needed to avoid the
duplicate-output-tuple problem. It seems fairly reasonable since the
other restrictions on SELECT FOR UPDATE are meant to ensure that there
is a unique correspondence between source tuples and result tuples,
which an output SRF destroys as much as anything else does.
2009-10-25 22:26:45 -04:00
|
|
|
|
|
|
|
|
ExecScanReScan(&node->ss);
|
2006-08-01 21:59:48 -04:00
|
|
|
|
|
|
|
|
node->curr_idx = -1;
|
|
|
|
|
}
|