2017-03-09 07:40:36 -05:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
|
*
|
|
|
|
|
* nodeGatherMerge.c
|
|
|
|
|
* Scan a plan in multiple workers, and do order-preserving merge.
|
|
|
|
|
*
|
|
|
|
|
* Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
|
|
|
|
|
* Portions Copyright (c) 1994, Regents of the University of California
|
|
|
|
|
*
|
|
|
|
|
* IDENTIFICATION
|
|
|
|
|
* src/backend/executor/nodeGatherMerge.c
|
|
|
|
|
*
|
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
#include "postgres.h"
|
|
|
|
|
|
|
|
|
|
#include "access/relscan.h"
|
|
|
|
|
#include "access/xact.h"
|
|
|
|
|
#include "executor/execdebug.h"
|
|
|
|
|
#include "executor/execParallel.h"
|
|
|
|
|
#include "executor/nodeGatherMerge.h"
|
|
|
|
|
#include "executor/nodeSubplan.h"
|
|
|
|
|
#include "executor/tqueue.h"
|
|
|
|
|
#include "lib/binaryheap.h"
|
|
|
|
|
#include "miscadmin.h"
|
|
|
|
|
#include "utils/memutils.h"
|
|
|
|
|
#include "utils/rel.h"
|
|
|
|
|
|
|
|
|
|
/*
 * Tuple array for each worker
 *
 * One of these per tuple-queue reader: a small prefetch buffer so that we
 * can pull several tuples from a worker in one go instead of context-
 * switching for every tuple (see MAX_TUPLE_STORE).
 */
typedef struct GMReaderTupleBuffer
{
	HeapTuple  *tuple;			/* buffered tuples (array of MAX_TUPLE_STORE) */
	int			readCounter;	/* index of next buffered tuple to consume */
	int			nTuples;		/* number of tuples currently buffered */
	bool		done;			/* reader has reached end-of-stream */
} GMReaderTupleBuffer;
|
2017-03-09 07:40:36 -05:00
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* When we read tuples from workers, it's a good idea to read several at once
|
|
|
|
|
* for efficiency when possible: this minimizes context-switching overhead.
|
|
|
|
|
* But reading too many at a time wastes memory without improving performance.
|
|
|
|
|
*/
|
|
|
|
|
#define MAX_TUPLE_STORE 10
|
|
|
|
|
|
2017-07-17 03:33:49 -04:00
|
|
|
static TupleTableSlot *ExecGatherMerge(PlanState *pstate);
|
2017-03-09 07:40:36 -05:00
|
|
|
static int32 heap_compare_slots(Datum a, Datum b, void *arg);
|
|
|
|
|
static TupleTableSlot *gather_merge_getnext(GatherMergeState *gm_state);
|
|
|
|
|
static HeapTuple gm_readnext_tuple(GatherMergeState *gm_state, int nreader,
|
|
|
|
|
bool nowait, bool *done);
|
|
|
|
|
static void gather_merge_init(GatherMergeState *gm_state);
|
|
|
|
|
static void ExecShutdownGatherMergeWorkers(GatherMergeState *node);
|
|
|
|
|
static bool gather_merge_readnext(GatherMergeState *gm_state, int reader,
|
|
|
|
|
bool nowait);
|
|
|
|
|
static void form_tuple_array(GatherMergeState *gm_state, int reader);
|
|
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
 *		ExecInitGatherMerge
 *
 *		Create and initialize the run-time state (GatherMergeState)
 *		for a GatherMerge plan node.  Workers are NOT launched here;
 *		that is deferred until the node is first executed.
 * ----------------------------------------------------------------
 */
GatherMergeState *
ExecInitGatherMerge(GatherMerge *node, EState *estate, int eflags)
{
	GatherMergeState *gm_state;
	Plan	   *outerNode;
	bool		hasoid;
	TupleDesc	tupDesc;

	/* Gather merge node doesn't have innerPlan node. */
	Assert(innerPlan(node) == NULL);

	/*
	 * create state structure
	 */
	gm_state = makeNode(GatherMergeState);
	gm_state->ps.plan = (Plan *) node;
	gm_state->ps.state = estate;
	gm_state->ps.ExecProcNode = ExecGatherMerge;

	/*
	 * Miscellaneous initialization
	 *
	 * create expression context for node
	 */
	ExecAssignExprContext(estate, &gm_state->ps);

	/*
	 * initialize child expressions
	 */
	gm_state->ps.qual =
		ExecInitQual(node->plan.qual, &gm_state->ps);

	/*
	 * tuple table initialization
	 */
	ExecInitResultTupleSlot(estate, &gm_state->ps);

	/*
	 * now initialize outer plan
	 */
	outerNode = outerPlan(node);
	outerPlanState(gm_state) = ExecInitNode(outerNode, estate, eflags);

	/*
	 * Initialize result tuple type and projection info.
	 */
	ExecAssignResultTypeFromTL(&gm_state->ps);
	ExecAssignProjectionInfo(&gm_state->ps, NULL);

	/* per-reader merge state is built lazily by gather_merge_init() */
	gm_state->gm_initialized = false;

	/*
	 * initialize sort-key information
	 */
	if (node->numCols)
	{
		int			i;

		gm_state->gm_nkeys = node->numCols;
		gm_state->gm_sortkeys =
			palloc0(sizeof(SortSupportData) * node->numCols);

		for (i = 0; i < node->numCols; i++)
		{
			SortSupport sortKey = gm_state->gm_sortkeys + i;

			sortKey->ssup_cxt = CurrentMemoryContext;
			sortKey->ssup_collation = node->collations[i];
			sortKey->ssup_nulls_first = node->nullsFirst[i];
			sortKey->ssup_attno = node->sortColIdx[i];

			/*
			 * We don't perform abbreviated key conversion here, for the same
			 * reasons that it isn't used in MergeAppend
			 */
			sortKey->abbreviate = false;

			PrepareSortSupportFromOrderingOp(node->sortOperators[i], sortKey);
		}
	}

	/*
	 * store the tuple descriptor into gather merge state, so we can use it
	 * later while initializing the gather merge slots.
	 */
	if (!ExecContextForcesOids(&gm_state->ps, &hasoid))
		hasoid = false;
	tupDesc = ExecTypeFromTL(outerNode->targetlist, hasoid);
	gm_state->tupDesc = tupDesc;

	return gm_state;
}
|
|
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
 *		ExecGatherMerge(node)
 *
 *		Scans the relation via multiple workers and returns
 *		the next qualifying tuple.
 *
 *		On first call, launches the parallel workers (if parallel
 *		mode is active) and creates a tuple-queue reader for each
 *		one; the leader also participates in the scan itself.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecGatherMerge(PlanState *pstate)
{
	GatherMergeState *node = castNode(GatherMergeState, pstate);
	TupleTableSlot *slot;
	ExprContext *econtext;
	int			i;

	CHECK_FOR_INTERRUPTS();

	/*
	 * As with Gather, we don't launch workers until this node is actually
	 * executed.
	 */
	if (!node->initialized)
	{
		EState	   *estate = node->ps.state;
		GatherMerge *gm = (GatherMerge *) node->ps.plan;

		/*
		 * Sometimes we might have to run without parallelism; but if parallel
		 * mode is active then we can try to fire up some workers.
		 */
		if (gm->num_workers > 0 && IsInParallelMode())
		{
			ParallelContext *pcxt;

			/* Initialize, or re-initialize, shared state needed by workers. */
			if (!node->pei)
				node->pei = ExecInitParallelPlan(node->ps.lefttree,
												 estate,
												 gm->num_workers);
			else
				ExecParallelReinitialize(node->ps.lefttree,
										 node->pei);

			/* Try to launch workers. */
			pcxt = node->pei->pcxt;
			LaunchParallelWorkers(pcxt);
			/* We save # workers launched for the benefit of EXPLAIN */
			node->nworkers_launched = pcxt->nworkers_launched;

			/* Set up tuple queue readers to read the results. */
			if (pcxt->nworkers_launched > 0)
			{
				node->nreaders = 0;
				node->reader = palloc(pcxt->nworkers_launched *
									  sizeof(TupleQueueReader *));

				/* GatherMerge is pointless without sort keys */
				Assert(gm->numCols);

				for (i = 0; i < pcxt->nworkers_launched; ++i)
				{
					shm_mq_set_handle(node->pei->tqueue[i],
									  pcxt->worker[i].bgwhandle);
					node->reader[node->nreaders++] =
						CreateTupleQueueReader(node->pei->tqueue[i],
											   node->tupDesc);
				}
			}
			else
			{
				/* No workers?	Then never mind. */
				ExecShutdownGatherMergeWorkers(node);
			}
		}

		/* always allow leader to participate */
		node->need_to_scan_locally = true;
		node->initialized = true;
	}

	/*
	 * Reset per-tuple memory context to free any expression evaluation
	 * storage allocated in the previous tuple cycle.
	 */
	econtext = node->ps.ps_ExprContext;
	ResetExprContext(econtext);

	/*
	 * Get next tuple, either from one of our workers, or by running the plan
	 * ourselves.
	 */
	slot = gather_merge_getnext(node);
	if (TupIsNull(slot))
		return NULL;

	/*
	 * form the result tuple using ExecProject(), and return it --- unless the
	 * projection produces an empty set, in which case we must loop back
	 * around for another tuple
	 */
	econtext->ecxt_outertuple = slot;
	return ExecProject(node->ps.ps_ProjInfo);
}
|
|
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
 *		ExecEndGatherMerge
 *
 *		frees any storage allocated through C routines.
 *
 *		Children are shut down first, then any remaining parallel
 *		workers and the parallel context, then local expression and
 *		slot state.
 * ----------------------------------------------------------------
 */
void
ExecEndGatherMerge(GatherMergeState *node)
{
	ExecEndNode(outerPlanState(node));	/* let children clean up first */
	ExecShutdownGatherMerge(node);		/* stop workers, destroy parallel ctx */
	ExecFreeExprContext(&node->ps);
	ExecClearTuple(node->ps.ps_ResultTupleSlot);
}
|
|
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
|
|
|
|
* ExecShutdownGatherMerge
|
|
|
|
|
*
|
|
|
|
|
* Destroy the setup for parallel workers including parallel context.
|
|
|
|
|
* Collect all the stats after workers are stopped, else some work
|
|
|
|
|
* done by workers won't be accounted.
|
|
|
|
|
* ----------------------------------------------------------------
|
|
|
|
|
*/
|
|
|
|
|
void
|
|
|
|
|
ExecShutdownGatherMerge(GatherMergeState *node)
|
|
|
|
|
{
|
|
|
|
|
ExecShutdownGatherMergeWorkers(node);
|
|
|
|
|
|
|
|
|
|
/* Now destroy the parallel context. */
|
|
|
|
|
if (node->pei != NULL)
|
|
|
|
|
{
|
|
|
|
|
ExecParallelCleanup(node->pei);
|
|
|
|
|
node->pei = NULL;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
|
|
|
|
* ExecShutdownGatherMergeWorkers
|
|
|
|
|
*
|
|
|
|
|
* Destroy the parallel workers. Collect all the stats after
|
|
|
|
|
* workers are stopped, else some work done by workers won't be
|
|
|
|
|
* accounted.
|
|
|
|
|
* ----------------------------------------------------------------
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
ExecShutdownGatherMergeWorkers(GatherMergeState *node)
|
|
|
|
|
{
|
|
|
|
|
/* Shut down tuple queue readers before shutting down workers. */
|
|
|
|
|
if (node->reader != NULL)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < node->nreaders; ++i)
|
|
|
|
|
if (node->reader[i])
|
|
|
|
|
DestroyTupleQueueReader(node->reader[i]);
|
|
|
|
|
|
|
|
|
|
pfree(node->reader);
|
|
|
|
|
node->reader = NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Now shut down the workers. */
|
|
|
|
|
if (node->pei != NULL)
|
|
|
|
|
ExecParallelFinish(node->pei);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
 *		ExecReScanGatherMerge
 *
 *		Prepare to re-scan the result of a GatherMerge.
 * ----------------------------------------------------------------
 */
void
ExecReScanGatherMerge(GatherMergeState *node)
{
	GatherMerge *gm = (GatherMerge *) node->ps.plan;
	PlanState  *outerPlan = outerPlanState(node);

	/* Make sure any existing workers are gracefully shut down */
	ExecShutdownGatherMergeWorkers(node);

	/* Mark node so that shared state will be rebuilt at next call */
	node->initialized = false;
	node->gm_initialized = false;

	/*
	 * Set child node's chgParam to tell it that the next scan might deliver a
	 * different set of rows within the leader process.  (The overall rowset
	 * shouldn't change, but the leader process's subset might; hence nodes
	 * between here and the parallel table scan node mustn't optimize on the
	 * assumption of an unchanging rowset.)
	 */
	if (gm->rescan_param >= 0)
		outerPlan->chgParam = bms_add_member(outerPlan->chgParam,
											 gm->rescan_param);

	/*
	 * If chgParam of subnode is not null then plan will be re-scanned by
	 * first ExecProcNode.  Note: because this does nothing if we have a
	 * rescan_param, it's currently guaranteed that parallel-aware child nodes
	 * will not see a ReScan call until after they get a ReInitializeDSM call.
	 * That ordering might not be something to rely on, though.  A good rule
	 * of thumb is that ReInitializeDSM should reset only shared state, ReScan
	 * should reset only local state, and anything that depends on both of
	 * those steps being finished must wait until the first ExecProcNode call.
	 */
	if (outerPlan->chgParam == NULL)
		ExecReScan(outerPlan);
}
|
|
|
|
|
|
|
|
|
|
/*
 * Initialize the Gather merge tuple read.
 *
 * Pull at least a single tuple from each worker + leader and set up the heap.
 */
static void
gather_merge_init(GatherMergeState *gm_state)
{
	int			nreaders = gm_state->nreaders;
	bool		initialize = true;	/* first pass reads in nowait mode */
	int			i;

	/*
	 * Allocate gm_slots for the number of worker + one more slot for leader.
	 * Last slot is always for leader. Leader always calls ExecProcNode() to
	 * read the tuple which will return the TupleTableSlot. Later it will
	 * directly get assigned to gm_slot. So just initialize leader gm_slot
	 * with NULL. For other slots below code will call
	 * ExecInitExtraTupleSlot() which will do the initialization of worker
	 * slots.
	 */
	gm_state->gm_slots =
		palloc((gm_state->nreaders + 1) * sizeof(TupleTableSlot *));
	gm_state->gm_slots[gm_state->nreaders] = NULL;

	/* Initialize the tuple slot and tuple array for each worker */
	gm_state->gm_tuple_buffers =
		(GMReaderTupleBuffer *) palloc0(sizeof(GMReaderTupleBuffer) *
										(gm_state->nreaders + 1));
	for (i = 0; i < gm_state->nreaders; i++)
	{
		/* Allocate the tuple array with MAX_TUPLE_STORE size */
		gm_state->gm_tuple_buffers[i].tuple =
			(HeapTuple *) palloc0(sizeof(HeapTuple) * MAX_TUPLE_STORE);

		/* Initialize slot for worker */
		gm_state->gm_slots[i] = ExecInitExtraTupleSlot(gm_state->ps.state);
		ExecSetSlotDescriptor(gm_state->gm_slots[i],
							  gm_state->tupDesc);
	}

	/* Allocate the resources for the merge */
	gm_state->gm_heap = binaryheap_allocate(gm_state->nreaders + 1,
											heap_compare_slots,
											gm_state);

	/*
	 * First, try to read a tuple from each worker (including leader) in
	 * nowait mode, so that we initialize read from each worker as well as
	 * leader. After this, if all active workers are unable to produce a
	 * tuple, then re-read and this time use wait mode. For workers that were
	 * able to produce a tuple in the earlier loop and are still active, just
	 * try to fill the tuple array if more tuples are available.
	 */
reread:
	for (i = 0; i < nreaders + 1; i++)
	{
		CHECK_FOR_INTERRUPTS();

		/* Participants with an empty slot still need a first tuple. */
		if (!gm_state->gm_tuple_buffers[i].done &&
			(TupIsNull(gm_state->gm_slots[i]) ||
			 gm_state->gm_slots[i]->tts_isempty))
		{
			if (gather_merge_readnext(gm_state, i, initialize))
			{
				binaryheap_add_unordered(gm_state->gm_heap,
										 Int32GetDatum(i));
			}
		}
		else
			form_tuple_array(gm_state, i);
	}
	/* Any further reread passes use wait mode (nowait = false). */
	initialize = false;

	/*
	 * Retry if some still-active worker has neither produced a tuple nor
	 * reported completion.  (The leader, at index nreaders, is deliberately
	 * excluded from this recheck.)
	 */
	for (i = 0; i < nreaders; i++)
		if (!gm_state->gm_tuple_buffers[i].done &&
			(TupIsNull(gm_state->gm_slots[i]) ||
			 gm_state->gm_slots[i]->tts_isempty))
			goto reread;

	binaryheap_build(gm_state->gm_heap);
	gm_state->gm_initialized = true;
}
|
|
|
|
|
|
|
|
|
|
/*
|
2017-03-31 21:15:05 -04:00
|
|
|
* Clear out the tuple table slots for each gather merge input.
|
2017-03-09 07:40:36 -05:00
|
|
|
*/
|
2017-03-31 21:15:05 -04:00
|
|
|
static void
|
2017-03-09 07:40:36 -05:00
|
|
|
gather_merge_clear_slots(GatherMergeState *gm_state)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < gm_state->nreaders; i++)
|
|
|
|
|
{
|
|
|
|
|
pfree(gm_state->gm_tuple_buffers[i].tuple);
|
|
|
|
|
gm_state->gm_slots[i] = ExecClearTuple(gm_state->gm_slots[i]);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Free tuple array as we don't need it any more */
|
|
|
|
|
pfree(gm_state->gm_tuple_buffers);
|
|
|
|
|
/* Free the binaryheap, which was created for sort */
|
|
|
|
|
binaryheap_free(gm_state->gm_heap);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Read the next tuple for gather merge.
|
|
|
|
|
*
|
|
|
|
|
* Fetch the sorted tuple out of the heap.
|
|
|
|
|
*/
|
|
|
|
|
static TupleTableSlot *
|
|
|
|
|
gather_merge_getnext(GatherMergeState *gm_state)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
2017-03-12 15:52:50 -04:00
|
|
|
if (!gm_state->gm_initialized)
|
|
|
|
|
{
|
|
|
|
|
/*
|
|
|
|
|
* First time through: pull the first tuple from each participant, and
|
|
|
|
|
* set up the heap.
|
|
|
|
|
*/
|
2017-03-09 07:40:36 -05:00
|
|
|
gather_merge_init(gm_state);
|
2017-03-12 15:52:50 -04:00
|
|
|
}
|
2017-03-09 07:40:36 -05:00
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
/*
|
|
|
|
|
* Otherwise, pull the next tuple from whichever participant we
|
2017-03-12 15:52:50 -04:00
|
|
|
* returned from last time, and reinsert that participant's index into
|
|
|
|
|
* the heap, because it might now compare differently against the
|
|
|
|
|
* other elements of the heap.
|
2017-03-09 07:40:36 -05:00
|
|
|
*/
|
|
|
|
|
i = DatumGetInt32(binaryheap_first(gm_state->gm_heap));
|
|
|
|
|
|
|
|
|
|
if (gather_merge_readnext(gm_state, i, false))
|
|
|
|
|
binaryheap_replace_first(gm_state->gm_heap, Int32GetDatum(i));
|
|
|
|
|
else
|
|
|
|
|
(void) binaryheap_remove_first(gm_state->gm_heap);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (binaryheap_empty(gm_state->gm_heap))
|
|
|
|
|
{
|
|
|
|
|
/* All the queues are exhausted, and so is the heap */
|
2017-03-31 21:15:05 -04:00
|
|
|
gather_merge_clear_slots(gm_state);
|
|
|
|
|
return NULL;
|
2017-03-09 07:40:36 -05:00
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
2017-03-12 15:52:50 -04:00
|
|
|
/* Return next tuple from whichever participant has the leading one */
|
2017-03-09 07:40:36 -05:00
|
|
|
i = DatumGetInt32(binaryheap_first(gm_state->gm_heap));
|
|
|
|
|
return gm_state->gm_slots[i];
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Read the tuple for given reader in nowait mode, and form the tuple array.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
form_tuple_array(GatherMergeState *gm_state, int reader)
|
|
|
|
|
{
|
|
|
|
|
GMReaderTupleBuffer *tuple_buffer = &gm_state->gm_tuple_buffers[reader];
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
/* Last slot is for leader and we don't build tuple array for leader */
|
|
|
|
|
if (reader == gm_state->nreaders)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* We here because we already read all the tuples from the tuple array, so
|
|
|
|
|
* initialize the counter to zero.
|
|
|
|
|
*/
|
|
|
|
|
if (tuple_buffer->nTuples == tuple_buffer->readCounter)
|
|
|
|
|
tuple_buffer->nTuples = tuple_buffer->readCounter = 0;
|
|
|
|
|
|
|
|
|
|
/* Tuple array is already full? */
|
|
|
|
|
if (tuple_buffer->nTuples == MAX_TUPLE_STORE)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
for (i = tuple_buffer->nTuples; i < MAX_TUPLE_STORE; i++)
|
|
|
|
|
{
|
|
|
|
|
tuple_buffer->tuple[i] = heap_copytuple(gm_readnext_tuple(gm_state,
|
|
|
|
|
reader,
|
|
|
|
|
false,
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 15:35:54 -04:00
|
|
|
&tuple_buffer->done));
|
2017-03-09 07:40:36 -05:00
|
|
|
if (!HeapTupleIsValid(tuple_buffer->tuple[i]))
|
|
|
|
|
break;
|
|
|
|
|
tuple_buffer->nTuples++;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Store the next tuple for a given reader into the appropriate slot.
|
|
|
|
|
*
|
|
|
|
|
* Returns false if the reader is exhausted, and true otherwise.
|
|
|
|
|
*/
|
|
|
|
|
static bool
|
|
|
|
|
gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
|
|
|
|
|
{
|
|
|
|
|
GMReaderTupleBuffer *tuple_buffer;
|
|
|
|
|
HeapTuple tup = NULL;
|
|
|
|
|
|
|
|
|
|
/*
|
2017-05-17 16:31:56 -04:00
|
|
|
* If we're being asked to generate a tuple from the leader, then we just
|
|
|
|
|
* call ExecProcNode as normal to produce one.
|
2017-03-09 07:40:36 -05:00
|
|
|
*/
|
|
|
|
|
if (gm_state->nreaders == reader)
|
|
|
|
|
{
|
|
|
|
|
if (gm_state->need_to_scan_locally)
|
|
|
|
|
{
|
|
|
|
|
PlanState *outerPlan = outerPlanState(gm_state);
|
|
|
|
|
TupleTableSlot *outerTupleSlot;
|
|
|
|
|
|
|
|
|
|
outerTupleSlot = ExecProcNode(outerPlan);
|
|
|
|
|
|
|
|
|
|
if (!TupIsNull(outerTupleSlot))
|
|
|
|
|
{
|
|
|
|
|
gm_state->gm_slots[reader] = outerTupleSlot;
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
gm_state->gm_tuple_buffers[reader].done = true;
|
|
|
|
|
gm_state->need_to_scan_locally = false;
|
|
|
|
|
}
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Otherwise, check the state of the relevant tuple buffer. */
|
|
|
|
|
tuple_buffer = &gm_state->gm_tuple_buffers[reader];
|
|
|
|
|
|
|
|
|
|
if (tuple_buffer->nTuples > tuple_buffer->readCounter)
|
|
|
|
|
{
|
|
|
|
|
/* Return any tuple previously read that is still buffered. */
|
|
|
|
|
tuple_buffer = &gm_state->gm_tuple_buffers[reader];
|
|
|
|
|
tup = tuple_buffer->tuple[tuple_buffer->readCounter++];
|
|
|
|
|
}
|
|
|
|
|
else if (tuple_buffer->done)
|
|
|
|
|
{
|
|
|
|
|
/* Reader is known to be exhausted. */
|
|
|
|
|
DestroyTupleQueueReader(gm_state->reader[reader]);
|
|
|
|
|
gm_state->reader[reader] = NULL;
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
/* Read and buffer next tuple. */
|
|
|
|
|
tup = heap_copytuple(gm_readnext_tuple(gm_state,
|
|
|
|
|
reader,
|
|
|
|
|
nowait,
|
|
|
|
|
&tuple_buffer->done));
|
|
|
|
|
|
|
|
|
|
/*
|
2017-05-17 16:31:56 -04:00
|
|
|
* Attempt to read more tuples in nowait mode and store them in the
|
|
|
|
|
* tuple array.
|
2017-03-09 07:40:36 -05:00
|
|
|
*/
|
|
|
|
|
if (HeapTupleIsValid(tup))
|
|
|
|
|
form_tuple_array(gm_state, reader);
|
|
|
|
|
else
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Assert(HeapTupleIsValid(tup));
|
|
|
|
|
|
|
|
|
|
/* Build the TupleTableSlot for the given tuple */
|
|
|
|
|
ExecStoreTuple(tup, /* tuple to store */
|
|
|
|
|
gm_state->gm_slots[reader], /* slot in which to store the
|
|
|
|
|
* tuple */
|
Phase 2 of pgindent updates.
Change pg_bsd_indent to follow upstream rules for placement of comments
to the right of code, and remove pgindent hack that caused comments
following #endif to not obey the general rule.
Commit e3860ffa4dd0dad0dd9eea4be9cc1412373a8c89 wasn't actually using
the published version of pg_bsd_indent, but a hacked-up version that
tried to minimize the amount of movement of comments to the right of
code. The situation of interest is where such a comment has to be
moved to the right of its default placement at column 33 because there's
code there. BSD indent has always moved right in units of tab stops
in such cases --- but in the previous incarnation, indent was working
in 8-space tab stops, while now it knows we use 4-space tabs. So the
net result is that in about half the cases, such comments are placed
one tab stop left of before. This is better all around: it leaves
more room on the line for comment text, and it means that in such
cases the comment uniformly starts at the next 4-space tab stop after
the code, rather than sometimes one and sometimes two tabs after.
Also, ensure that comments following #endif are indented the same
as comments following other preprocessor commands such as #else.
That inconsistency turns out to have been self-inflicted damage
from a poorly-thought-through post-indent "fixup" in pgindent.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 15:18:54 -04:00
|
|
|
InvalidBuffer, /* buffer associated with this tuple */
|
2017-03-09 07:40:36 -05:00
|
|
|
true); /* pfree this pointer if not from heap */
|
|
|
|
|
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Attempt to read a tuple from given reader.
|
|
|
|
|
*/
|
|
|
|
|
static HeapTuple
|
|
|
|
|
gm_readnext_tuple(GatherMergeState *gm_state, int nreader, bool nowait,
|
|
|
|
|
bool *done)
|
|
|
|
|
{
|
|
|
|
|
TupleQueueReader *reader;
|
|
|
|
|
HeapTuple tup = NULL;
|
|
|
|
|
MemoryContext oldContext;
|
|
|
|
|
MemoryContext tupleContext;
|
|
|
|
|
|
|
|
|
|
tupleContext = gm_state->ps.ps_ExprContext->ecxt_per_tuple_memory;
|
|
|
|
|
|
|
|
|
|
if (done != NULL)
|
|
|
|
|
*done = false;
|
|
|
|
|
|
|
|
|
|
/* Check for async events, particularly messages from workers. */
|
|
|
|
|
CHECK_FOR_INTERRUPTS();
|
|
|
|
|
|
|
|
|
|
/* Attempt to read a tuple. */
|
|
|
|
|
reader = gm_state->reader[nreader];
|
|
|
|
|
|
|
|
|
|
/* Run TupleQueueReaders in per-tuple context */
|
|
|
|
|
oldContext = MemoryContextSwitchTo(tupleContext);
|
|
|
|
|
tup = TupleQueueReaderNext(reader, nowait, done);
|
|
|
|
|
MemoryContextSwitchTo(oldContext);
|
|
|
|
|
|
|
|
|
|
return tup;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * We have one slot for each item in the heap array.  We use SlotNumber
 * to store slot indexes.  This doesn't actually provide any formal
 * type-safety, but it makes the code more self-documenting.
 */
typedef int32 SlotNumber;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Compare the tuples in the two given slots.
|
|
|
|
|
*/
|
|
|
|
|
static int32
|
|
|
|
|
heap_compare_slots(Datum a, Datum b, void *arg)
|
|
|
|
|
{
|
|
|
|
|
GatherMergeState *node = (GatherMergeState *) arg;
|
|
|
|
|
SlotNumber slot1 = DatumGetInt32(a);
|
|
|
|
|
SlotNumber slot2 = DatumGetInt32(b);
|
|
|
|
|
|
|
|
|
|
TupleTableSlot *s1 = node->gm_slots[slot1];
|
|
|
|
|
TupleTableSlot *s2 = node->gm_slots[slot2];
|
|
|
|
|
int nkey;
|
|
|
|
|
|
|
|
|
|
Assert(!TupIsNull(s1));
|
|
|
|
|
Assert(!TupIsNull(s2));
|
|
|
|
|
|
|
|
|
|
for (nkey = 0; nkey < node->gm_nkeys; nkey++)
|
|
|
|
|
{
|
|
|
|
|
SortSupport sortKey = node->gm_sortkeys + nkey;
|
|
|
|
|
AttrNumber attno = sortKey->ssup_attno;
|
|
|
|
|
Datum datum1,
|
|
|
|
|
datum2;
|
|
|
|
|
bool isNull1,
|
|
|
|
|
isNull2;
|
|
|
|
|
int compare;
|
|
|
|
|
|
|
|
|
|
datum1 = slot_getattr(s1, attno, &isNull1);
|
|
|
|
|
datum2 = slot_getattr(s2, attno, &isNull2);
|
|
|
|
|
|
|
|
|
|
compare = ApplySortComparator(datum1, isNull1,
|
|
|
|
|
datum2, isNull2,
|
|
|
|
|
sortKey);
|
|
|
|
|
if (compare != 0)
|
|
|
|
|
return -compare;
|
|
|
|
|
}
|
|
|
|
|
return 0;
|
|
|
|
|
}
|