Use palloc_object() and palloc_array() in backend code

The idea is to encourage wider use of these new routines across the
tree, as they offer stronger type-safety guarantees than palloc().
This batch of changes includes most of the trivial changes suggested by
the author for src/backend/.

A total of 334 files are updated here.  Among these files, 48 see
slight changes in their builds; the differences are caused by line
number changes, as the new allocation calls are simpler, shaving around
100 lines of code in total.

Similar work has been done in 0c3c5c3b06 and 31d3847a37.

Author: David Geier <geidav.pg@gmail.com>
Discussion: https://postgr.es/m/ad0748d4-3080-436e-b0bc-ac8f86a3466a@gmail.com
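
For context on what these calls expand to: palloc_object(), palloc0_object(),
palloc_array(), palloc0_array() and repalloc_array() are thin macros over the
existing allocators, defined in src/include/utils/palloc.h.  A minimal sketch
of their shape (paraphrased, not quoted verbatim from the header) is:

#define palloc_object(type)         ((type *) palloc(sizeof(type)))
#define palloc0_object(type)        ((type *) palloc0(sizeof(type)))
#define palloc_array(type, count)   ((type *) palloc(sizeof(type) * (count)))
#define palloc0_array(type, count)  ((type *) palloc0(sizeof(type) * (count)))
#define repalloc_array(pointer, type, count) \
	((type *) repalloc(pointer, sizeof(type) * (count)))

Because both the sizeof() argument and the result pointer type are derived
from the same type name, the compiler flags any mismatch between the allocated
element type and the variable the result is assigned to, which plain palloc()
with a manual cast cannot do.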
Committed by Michael Paquier, 2025-12-10 07:36:46 +09:00
parent c507ba55f5
commit 1b105f9472
333 changed files with 1357 additions and 1464 deletions
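
The hunks below all follow the same mechanical pattern: palloc(sizeof(X))
becomes palloc_object(X), palloc(sizeof(X) * n) becomes palloc_array(X, n)
(and likewise for the zeroing and repalloc variants), with now-redundant casts
dropped.  The following sketch, using hypothetical structs Foo and Bar that
are not part of this commit, illustrates the kind of mistake the new style
catches:

#include "postgres.h"

typedef struct Foo { int a; } Foo;      /* hypothetical */
typedef struct Bar { double b; } Bar;   /* hypothetical */

static void
allocation_demo(void)
{
	/* Old style: the explicit cast silently accepts a mismatched sizeof,
	 * so this compiles even though it allocates sizeof(Foo) bytes for a Bar. */
	Bar *oops = (Bar *) palloc(sizeof(Foo));

	/* New style: the result type is derived from the named type, so e.g.
	 *     Bar *bad = palloc_object(Foo);
	 * is rejected (incompatible pointer types) at compile time. */
	Foo *one = palloc_object(Foo);      /* one Foo, not zeroed */
	Foo *many = palloc0_array(Foo, 16); /* sixteen zeroed Foo elements */

	(void) oops;
	(void) one;
	(void) many;
}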

View file

@ -318,7 +318,7 @@ initialize_brin_insertstate(Relation idxRel, IndexInfo *indexInfo)
MemoryContext oldcxt;
oldcxt = MemoryContextSwitchTo(indexInfo->ii_Context);
bistate = palloc0(sizeof(BrinInsertState));
bistate = palloc0_object(BrinInsertState);
bistate->bis_desc = brin_build_desc(idxRel);
bistate->bis_rmAccess = brinRevmapInitialize(idxRel,
&bistate->bis_pages_per_range);
@ -1185,7 +1185,7 @@ brinbuild(Relation heap, Relation index, IndexInfo *indexInfo)
{
SortCoordinate coordinate;
coordinate = (SortCoordinate) palloc0(sizeof(SortCoordinateData));
coordinate = palloc0_object(SortCoordinateData);
coordinate->isWorker = false;
coordinate->nParticipants =
state->bs_leader->nparticipanttuplesorts;
@ -2384,7 +2384,7 @@ _brin_begin_parallel(BrinBuildState *buildstate, Relation heap, Relation index,
Size estsort;
BrinShared *brinshared;
Sharedsort *sharedsort;
BrinLeader *brinleader = (BrinLeader *) palloc0(sizeof(BrinLeader));
BrinLeader *brinleader = palloc0_object(BrinLeader);
WalUsage *walusage;
BufferUsage *bufferusage;
bool leaderparticipates = true;
@ -2828,7 +2828,7 @@ _brin_parallel_scan_and_build(BrinBuildState *state,
IndexInfo *indexInfo;
/* Initialize local tuplesort coordination state */
coordinate = palloc0(sizeof(SortCoordinateData));
coordinate = palloc0_object(SortCoordinateData);
coordinate->isWorker = true;
coordinate->nParticipants = -1;
coordinate->sharedsort = sharedsort;

View file

@ -1340,7 +1340,7 @@ build_distances(FmgrInfo *distanceFn, Oid colloid,
return NULL;
ndistances = (neranges - 1);
distances = (DistanceValue *) palloc0(sizeof(DistanceValue) * ndistances);
distances = palloc0_array(DistanceValue, ndistances);
/*
* Walk through the ranges once and compute the distance between the
@ -1504,7 +1504,7 @@ reduce_expanded_ranges(ExpandedRange *eranges, int neranges,
/* allocate space for the boundary values */
nvalues = 0;
values = (Datum *) palloc(sizeof(Datum) * max_values);
values = palloc_array(Datum, max_values);
/* add the global min/max values, from the first/last range */
values[nvalues++] = eranges[0].minval;

View file

@ -79,7 +79,7 @@ brinRevmapInitialize(Relation idxrel, BlockNumber *pagesPerRange)
page = BufferGetPage(meta);
metadata = (BrinMetaPageData *) PageGetContents(page);
revmap = palloc(sizeof(BrinRevmap));
revmap = palloc_object(BrinRevmap);
revmap->rm_irel = idxrel;
revmap->rm_pagesPerRange = metadata->pagesPerRange;
revmap->rm_lastRevmapPage = metadata->lastRevmapPage;

View file

@ -119,13 +119,12 @@ brin_form_tuple(BrinDesc *brdesc, BlockNumber blkno, BrinMemTuple *tuple,
Assert(brdesc->bd_totalstored > 0);
values = (Datum *) palloc(sizeof(Datum) * brdesc->bd_totalstored);
nulls = (bool *) palloc0(sizeof(bool) * brdesc->bd_totalstored);
phony_nullbitmap = (bits8 *)
palloc(sizeof(bits8) * BITMAPLEN(brdesc->bd_totalstored));
values = palloc_array(Datum, brdesc->bd_totalstored);
nulls = palloc0_array(bool, brdesc->bd_totalstored);
phony_nullbitmap = palloc_array(bits8, BITMAPLEN(brdesc->bd_totalstored));
#ifdef TOAST_INDEX_HACK
untoasted_values = (Datum *) palloc(sizeof(Datum) * brdesc->bd_totalstored);
untoasted_values = palloc_array(Datum, brdesc->bd_totalstored);
#endif
/*
@ -488,9 +487,9 @@ brin_new_memtuple(BrinDesc *brdesc)
sizeof(BrinValues) * brdesc->bd_tupdesc->natts);
dtup = palloc0(basesize + sizeof(Datum) * brdesc->bd_totalstored);
dtup->bt_values = palloc(sizeof(Datum) * brdesc->bd_totalstored);
dtup->bt_allnulls = palloc(sizeof(bool) * brdesc->bd_tupdesc->natts);
dtup->bt_hasnulls = palloc(sizeof(bool) * brdesc->bd_tupdesc->natts);
dtup->bt_values = palloc_array(Datum, brdesc->bd_totalstored);
dtup->bt_allnulls = palloc_array(bool, brdesc->bd_tupdesc->natts);
dtup->bt_hasnulls = palloc_array(bool, brdesc->bd_tupdesc->natts);
dtup->bt_empty_range = true;

View file

@ -41,9 +41,9 @@ make_attrmap(int maplen)
{
AttrMap *res;
res = (AttrMap *) palloc0(sizeof(AttrMap));
res = palloc0_object(AttrMap);
res->maplen = maplen;
res->attnums = (AttrNumber *) palloc0(sizeof(AttrNumber) * maplen);
res->attnums = palloc0_array(AttrNumber, maplen);
return res;
}

View file

@ -1230,8 +1230,8 @@ heap_modify_tuple(HeapTuple tuple,
* O(N^2) if there are many non-replaced columns, so it seems better to
* err on the side of linear cost.
*/
values = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
isnull = (bool *) palloc(numberOfAttributes * sizeof(bool));
values = palloc_array(Datum, numberOfAttributes);
isnull = palloc_array(bool, numberOfAttributes);
heap_deform_tuple(tuple, tupleDesc, values, isnull);
@ -1292,8 +1292,8 @@ heap_modify_tuple_by_cols(HeapTuple tuple,
* allocate and fill values and isnull arrays from the tuple, then replace
* selected columns from the input arrays.
*/
values = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
isnull = (bool *) palloc(numberOfAttributes * sizeof(bool));
values = palloc_array(Datum, numberOfAttributes);
isnull = palloc_array(bool, numberOfAttributes);
heap_deform_tuple(tuple, tupleDesc, values, isnull);

View file

@ -71,7 +71,7 @@ typedef struct
DestReceiver *
printtup_create_DR(CommandDest dest)
{
DR_printtup *self = (DR_printtup *) palloc0(sizeof(DR_printtup));
DR_printtup *self = palloc0_object(DR_printtup);
self->pub.receiveSlot = printtup; /* might get changed later */
self->pub.rStartup = printtup_startup;

View file

@ -776,7 +776,7 @@ register_reloptions_validator(local_relopts *relopts, relopts_validator validato
static void
add_local_reloption(local_relopts *relopts, relopt_gen *newoption, int offset)
{
local_relopt *opt = palloc(sizeof(*opt));
local_relopt *opt = palloc_object(local_relopt);
Assert(offset < relopts->relopt_struct_size);
@ -1570,7 +1570,7 @@ static relopt_value *
parseLocalRelOptions(local_relopts *relopts, Datum options, bool validate)
{
int nopts = list_length(relopts->options);
relopt_value *values = palloc(sizeof(*values) * nopts);
relopt_value *values = palloc_array(relopt_value, nopts);
ListCell *lc;
int i = 0;
@ -1991,7 +1991,7 @@ void *
build_local_reloptions(local_relopts *relopts, Datum options, bool validate)
{
int noptions = list_length(relopts->options);
relopt_parse_elt *elems = palloc(sizeof(*elems) * noptions);
relopt_parse_elt *elems = palloc_array(relopt_parse_elt, noptions);
relopt_value *vals;
void *opts;
int i = 0;

View file

@ -166,7 +166,7 @@ TidStoreCreateLocal(size_t max_bytes, bool insert_only)
size_t minContextSize = ALLOCSET_DEFAULT_MINSIZE;
size_t maxBlockSize = ALLOCSET_DEFAULT_MAXSIZE;
ts = palloc0(sizeof(TidStore));
ts = palloc0_object(TidStore);
/* choose the maxBlockSize to be no larger than 1/16 of max_bytes */
while (16 * maxBlockSize > max_bytes)
@ -212,7 +212,7 @@ TidStoreCreateShared(size_t max_bytes, int tranche_id)
size_t dsa_init_size = DSA_DEFAULT_INIT_SEGMENT_SIZE;
size_t dsa_max_size = DSA_MAX_SEGMENT_SIZE;
ts = palloc0(sizeof(TidStore));
ts = palloc0_object(TidStore);
/*
* Choose the initial and maximum DSA segment sizes to be no longer than
@ -250,7 +250,7 @@ TidStoreAttach(dsa_handle area_handle, dsa_pointer handle)
Assert(DsaPointerIsValid(handle));
/* create per-backend state */
ts = palloc0(sizeof(TidStore));
ts = palloc0_object(TidStore);
area = dsa_attach(area_handle);
@ -472,7 +472,7 @@ TidStoreBeginIterate(TidStore *ts)
{
TidStoreIter *iter;
iter = palloc0(sizeof(TidStoreIter));
iter = palloc0_object(TidStoreIter);
iter->ts = ts;
if (TidStoreIsShared(ts))

View file

@ -568,7 +568,7 @@ toast_open_indexes(Relation toastrel,
*num_indexes = list_length(indexlist);
/* Open all the index relations */
*toastidxs = (Relation *) palloc(*num_indexes * sizeof(Relation));
*toastidxs = palloc_array(Relation, *num_indexes);
foreach(lc, indexlist)
(*toastidxs)[i++] = index_open(lfirst_oid(lc), lock);

View file

@ -75,17 +75,17 @@ convert_tuples_by_position(TupleDesc indesc,
}
/* Prepare the map structure */
map = (TupleConversionMap *) palloc(sizeof(TupleConversionMap));
map = palloc_object(TupleConversionMap);
map->indesc = indesc;
map->outdesc = outdesc;
map->attrMap = attrMap;
/* preallocate workspace for Datum arrays */
n = outdesc->natts + 1; /* +1 for NULL */
map->outvalues = (Datum *) palloc(n * sizeof(Datum));
map->outisnull = (bool *) palloc(n * sizeof(bool));
map->outvalues = palloc_array(Datum, n);
map->outisnull = palloc_array(bool, n);
n = indesc->natts + 1; /* +1 for NULL */
map->invalues = (Datum *) palloc(n * sizeof(Datum));
map->inisnull = (bool *) palloc(n * sizeof(bool));
map->invalues = palloc_array(Datum, n);
map->inisnull = palloc_array(bool, n);
map->invalues[0] = (Datum) 0; /* set up the NULL entry */
map->inisnull[0] = true;
@ -132,16 +132,16 @@ convert_tuples_by_name_attrmap(TupleDesc indesc,
Assert(attrMap != NULL);
/* Prepare the map structure */
map = (TupleConversionMap *) palloc(sizeof(TupleConversionMap));
map = palloc_object(TupleConversionMap);
map->indesc = indesc;
map->outdesc = outdesc;
map->attrMap = attrMap;
/* preallocate workspace for Datum arrays */
map->outvalues = (Datum *) palloc(n * sizeof(Datum));
map->outisnull = (bool *) palloc(n * sizeof(bool));
map->outvalues = palloc_array(Datum, n);
map->outisnull = palloc_array(bool, n);
n = indesc->natts + 1; /* +1 for NULL */
map->invalues = (Datum *) palloc(n * sizeof(Datum));
map->inisnull = (bool *) palloc(n * sizeof(bool));
map->invalues = palloc_array(Datum, n);
map->inisnull = palloc_array(bool, n);
map->invalues[0] = (Datum) 0; /* set up the NULL entry */
map->inisnull[0] = true;

View file

@ -361,7 +361,7 @@ CreateTupleDescCopyConstr(TupleDesc tupdesc)
/* Copy the TupleConstr data structure, if any */
if (constr)
{
TupleConstr *cpy = (TupleConstr *) palloc0(sizeof(TupleConstr));
TupleConstr *cpy = palloc0_object(TupleConstr);
cpy->has_not_null = constr->has_not_null;
cpy->has_generated_stored = constr->has_generated_stored;

View file

@ -85,7 +85,7 @@ ginFindLeafPage(GinBtree btree, bool searchMode,
{
GinBtreeStack *stack;
stack = (GinBtreeStack *) palloc(sizeof(GinBtreeStack));
stack = palloc_object(GinBtreeStack);
stack->blkno = btree->rootBlkno;
stack->buffer = ReadBuffer(btree->index, btree->rootBlkno);
stack->parent = NULL;
@ -152,7 +152,7 @@ ginFindLeafPage(GinBtree btree, bool searchMode,
}
else
{
GinBtreeStack *ptr = (GinBtreeStack *) palloc(sizeof(GinBtreeStack));
GinBtreeStack *ptr = palloc_object(GinBtreeStack);
ptr->parent = stack;
stack = ptr;
@ -246,7 +246,7 @@ ginFindParents(GinBtree btree, GinBtreeStack *stack)
blkno = root->blkno;
buffer = root->buffer;
ptr = (GinBtreeStack *) palloc(sizeof(GinBtreeStack));
ptr = palloc_object(GinBtreeStack);
for (;;)
{

View file

@ -93,7 +93,7 @@ ginAllocEntryAccumulator(void *arg)
*/
if (accum->entryallocator == NULL || accum->eas_used >= DEF_NENTRY)
{
accum->entryallocator = palloc(sizeof(GinEntryAccumulator) * DEF_NENTRY);
accum->entryallocator = palloc_array(GinEntryAccumulator, DEF_NENTRY);
accum->allocatedMemory += GetMemoryChunkSpace(accum->entryallocator);
accum->eas_used = 0;
}
@ -177,8 +177,7 @@ ginInsertBAEntry(BuildAccumulator *accum,
ea->maxcount = DEF_NPTR;
ea->count = 1;
ea->shouldSort = false;
ea->list =
(ItemPointerData *) palloc(sizeof(ItemPointerData) * DEF_NPTR);
ea->list = palloc_array(ItemPointerData, DEF_NPTR);
ea->list[0] = *heapptr;
accum->allocatedMemory += GetMemoryChunkSpace(ea->list);
}

View file

@ -1332,7 +1332,7 @@ dataSplitPageInternal(GinBtree btree, Buffer origbuf,
static void *
dataPrepareDownlink(GinBtree btree, Buffer lbuf)
{
PostingItem *pitem = palloc(sizeof(PostingItem));
PostingItem *pitem = palloc_object(PostingItem);
Page lpage = BufferGetPage(lbuf);
PostingItemSetBlockNumber(pitem, BufferGetBlockNumber(lbuf));
@ -1374,7 +1374,7 @@ disassembleLeaf(Page page)
char *segbegin;
char *segend;
leaf = palloc0(sizeof(disassembledLeaf));
leaf = palloc0_object(disassembledLeaf);
dlist_init(&leaf->segments);
if (GinPageIsCompressed(page))
@ -1387,7 +1387,7 @@ disassembleLeaf(Page page)
segend = segbegin + GinDataLeafPageGetPostingListSize(page);
while ((char *) seg < segend)
{
leafSegmentInfo *seginfo = palloc(sizeof(leafSegmentInfo));
leafSegmentInfo *seginfo = palloc_object(leafSegmentInfo);
seginfo->action = GIN_SEGMENT_UNMODIFIED;
seginfo->seg = seg;
@ -1414,7 +1414,7 @@ disassembleLeaf(Page page)
if (nuncompressed > 0)
{
seginfo = palloc(sizeof(leafSegmentInfo));
seginfo = palloc_object(leafSegmentInfo);
seginfo->action = GIN_SEGMENT_REPLACE;
seginfo->seg = NULL;
@ -1455,7 +1455,7 @@ addItemsToLeaf(disassembledLeaf *leaf, ItemPointer newItems, int nNewItems)
*/
if (dlist_is_empty(&leaf->segments))
{
newseg = palloc(sizeof(leafSegmentInfo));
newseg = palloc_object(leafSegmentInfo);
newseg->seg = NULL;
newseg->items = newItems;
newseg->nitems = nNewItems;
@ -1512,7 +1512,7 @@ addItemsToLeaf(disassembledLeaf *leaf, ItemPointer newItems, int nNewItems)
cur->seg != NULL &&
SizeOfGinPostingList(cur->seg) >= GinPostingListSegmentTargetSize)
{
newseg = palloc(sizeof(leafSegmentInfo));
newseg = palloc_object(leafSegmentInfo);
newseg->seg = NULL;
newseg->items = nextnew;
newseg->nitems = nthis;
@ -1629,7 +1629,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining)
if (seginfo->action != GIN_SEGMENT_INSERT)
seginfo->action = GIN_SEGMENT_REPLACE;
nextseg = palloc(sizeof(leafSegmentInfo));
nextseg = palloc_object(leafSegmentInfo);
nextseg->action = GIN_SEGMENT_INSERT;
nextseg->seg = NULL;
nextseg->items = &seginfo->items[npacked];

View file

@ -183,7 +183,7 @@ ginReadTuple(GinState *ginstate, OffsetNumber attnum, IndexTuple itup,
}
else
{
ipd = (ItemPointer) palloc(sizeof(ItemPointerData) * nipd);
ipd = palloc_array(ItemPointerData, nipd);
memcpy(ipd, ptr, sizeof(ItemPointerData) * nipd);
}
*nitems = nipd;
@ -708,7 +708,7 @@ entryPrepareDownlink(GinBtree btree, Buffer lbuf)
itup = getRightMostTuple(lpage);
insertData = palloc(sizeof(GinBtreeEntryInsertData));
insertData = palloc_object(GinBtreeEntryInsertData);
insertData->entry = GinFormInteriorTuple(itup, lpage, lblkno);
insertData->isDelete = false;

View file

@ -552,7 +552,7 @@ startScanKey(GinState *ginstate, GinScanOpaque so, GinScanKey key)
{
MemoryContextSwitchTo(so->tempCtx);
entryIndexes = (int *) palloc(sizeof(int) * key->nentries);
entryIndexes = palloc_array(int, key->nentries);
for (i = 0; i < key->nentries; i++)
entryIndexes[i] = i;
qsort_arg(entryIndexes, key->nentries, sizeof(int),
@ -1873,7 +1873,7 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
LockBuffer(pos.pendingBuffer, GIN_SHARE);
pos.firstOffset = FirstOffsetNumber;
UnlockReleaseBuffer(metabuffer);
pos.hasMatchKey = palloc(sizeof(bool) * so->nkeys);
pos.hasMatchKey = palloc_array(bool, so->nkeys);
/*
* loop for each heap row. scanGetCandidate returns full row or row's

View file

@ -707,7 +707,7 @@ ginbuild(Relation heap, Relation index, IndexInfo *indexInfo)
{
SortCoordinate coordinate;
coordinate = (SortCoordinate) palloc0(sizeof(SortCoordinateData));
coordinate = palloc0_object(SortCoordinateData);
coordinate->isWorker = false;
coordinate->nParticipants =
state->bs_leader->nparticipanttuplesorts;
@ -791,7 +791,7 @@ ginbuild(Relation heap, Relation index, IndexInfo *indexInfo)
/*
* Return statistics
*/
result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
result = palloc_object(IndexBuildResult);
result->heap_tuples = reltuples;
result->index_tuples = buildstate.indtuples;
@ -867,7 +867,7 @@ gininsert(Relation index, Datum *values, bool *isnull,
if (ginstate == NULL)
{
oldCtx = MemoryContextSwitchTo(indexInfo->ii_Context);
ginstate = (GinState *) palloc(sizeof(GinState));
ginstate = palloc_object(GinState);
initGinState(ginstate, index);
indexInfo->ii_AmCache = ginstate;
MemoryContextSwitchTo(oldCtx);
@ -934,7 +934,7 @@ _gin_begin_parallel(GinBuildState *buildstate, Relation heap, Relation index,
Size estsort;
GinBuildShared *ginshared;
Sharedsort *sharedsort;
GinLeader *ginleader = (GinLeader *) palloc0(sizeof(GinLeader));
GinLeader *ginleader = palloc0_object(GinLeader);
WalUsage *walusage;
BufferUsage *bufferusage;
bool leaderparticipates = true;
@ -1259,7 +1259,7 @@ AssertCheckGinBuffer(GinBuffer *buffer)
static GinBuffer *
GinBufferInit(Relation index)
{
GinBuffer *buffer = palloc0(sizeof(GinBuffer));
GinBuffer *buffer = palloc0_object(GinBuffer);
int i,
nKeys;
TupleDesc desc = RelationGetDescr(index);
@ -1273,7 +1273,7 @@ GinBufferInit(Relation index)
nKeys = IndexRelationGetNumberOfKeyAttributes(index);
buffer->ssup = palloc0(sizeof(SortSupportData) * nKeys);
buffer->ssup = palloc0_array(SortSupportData, nKeys);
/*
* Lookup ordering operator for the index key data type, and initialize
@ -2030,7 +2030,7 @@ _gin_parallel_scan_and_build(GinBuildState *state,
IndexInfo *indexInfo;
/* Initialize local tuplesort coordination state */
coordinate = palloc0(sizeof(SortCoordinateData));
coordinate = palloc0_object(SortCoordinateData);
coordinate->isWorker = true;
coordinate->nParticipants = -1;
coordinate->sharedsort = sharedsort;
@ -2279,7 +2279,7 @@ _gin_build_tuple(OffsetNumber attrnum, unsigned char category,
while (ncompressed < nitems)
{
int cnt;
GinSegmentInfo *seginfo = palloc(sizeof(GinSegmentInfo));
GinSegmentInfo *seginfo = palloc_object(GinSegmentInfo);
seginfo->seg = ginCompressPostingList(&items[ncompressed],
(nitems - ncompressed),

View file

@ -33,7 +33,7 @@ ginbeginscan(Relation rel, int nkeys, int norderbys)
scan = RelationGetIndexScan(rel, nkeys, norderbys);
/* allocate private workspace */
so = (GinScanOpaque) palloc(sizeof(GinScanOpaqueData));
so = (GinScanOpaque) palloc_object(GinScanOpaqueData);
so->keys = NULL;
so->nkeys = 0;
so->tempCtx = AllocSetContextCreate(CurrentMemoryContext,
@ -98,7 +98,7 @@ ginFillScanEntry(GinScanOpaque so, OffsetNumber attnum,
}
/* Nope, create a new entry */
scanEntry = (GinScanEntry) palloc(sizeof(GinScanEntryData));
scanEntry = palloc_object(GinScanEntryData);
scanEntry->queryKey = queryKey;
scanEntry->queryCategory = queryCategory;
scanEntry->isPartialMatch = isPartialMatch;
@ -123,8 +123,7 @@ ginFillScanEntry(GinScanOpaque so, OffsetNumber attnum,
if (so->totalentries >= so->allocentries)
{
so->allocentries *= 2;
so->entries = (GinScanEntry *)
repalloc(so->entries, so->allocentries * sizeof(GinScanEntry));
so->entries = repalloc_array(so->entries, GinScanEntry, so->allocentries);
}
so->entries[so->totalentries++] = scanEntry;
@ -170,10 +169,8 @@ ginFillScanKey(GinScanOpaque so, OffsetNumber attnum,
key->nuserentries = nQueryValues;
/* Allocate one extra array slot for possible "hidden" entry */
key->scanEntry = (GinScanEntry *) palloc(sizeof(GinScanEntry) *
(nQueryValues + 1));
key->entryRes = (GinTernaryValue *) palloc0(sizeof(GinTernaryValue) *
(nQueryValues + 1));
key->scanEntry = palloc_array(GinScanEntry, nQueryValues + 1);
key->entryRes = palloc0_array(GinTernaryValue, nQueryValues + 1);
key->query = query;
key->queryValues = queryValues;

View file

@ -500,9 +500,9 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
if (isNull)
{
*nentries = 1;
entries = (Datum *) palloc(sizeof(Datum));
entries = palloc_object(Datum);
entries[0] = (Datum) 0;
*categories = (GinNullCategory *) palloc(sizeof(GinNullCategory));
*categories = palloc_object(GinNullCategory);
(*categories)[0] = GIN_CAT_NULL_ITEM;
return entries;
}
@ -522,9 +522,9 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
if (entries == NULL || *nentries <= 0)
{
*nentries = 1;
entries = (Datum *) palloc(sizeof(Datum));
entries = palloc_object(Datum);
entries[0] = (Datum) 0;
*categories = (GinNullCategory *) palloc(sizeof(GinNullCategory));
*categories = palloc_object(GinNullCategory);
(*categories)[0] = GIN_CAT_EMPTY_ITEM;
return entries;
}
@ -548,7 +548,7 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
keyEntryData *keydata;
cmpEntriesArg arg;
keydata = (keyEntryData *) palloc(*nentries * sizeof(keyEntryData));
keydata = palloc_array(keyEntryData, *nentries);
for (i = 0; i < *nentries; i++)
{
keydata[i].datum = entries[i];

View file

@ -65,7 +65,7 @@ ginVacuumItemPointers(GinVacuumState *gvs, ItemPointerData *items,
* First TID to be deleted: allocate memory to hold the
* remaining items.
*/
tmpitems = palloc(sizeof(ItemPointerData) * nitem);
tmpitems = palloc_array(ItemPointerData, nitem);
memcpy(tmpitems, items, sizeof(ItemPointerData) * i);
}
}
@ -260,7 +260,7 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
{
if (!parent->child)
{
me = (DataPageDeleteStack *) palloc0(sizeof(DataPageDeleteStack));
me = palloc0_object(DataPageDeleteStack);
me->parent = parent;
parent->child = me;
me->leftBuffer = InvalidBuffer;
@ -584,7 +584,7 @@ ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
if (stats == NULL)
{
/* Yes, so initialize stats to zeroes */
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
stats = palloc0_object(IndexBulkDeleteResult);
/*
* and cleanup any pending inserts
@ -714,7 +714,7 @@ ginvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
*/
if (stats == NULL)
{
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
stats = palloc0_object(IndexBulkDeleteResult);
initGinState(&ginstate, index);
ginInsertCleanup(&ginstate, !AmAutoVacuumWorkerProcess(),
false, true, stats);

View file

@ -43,7 +43,7 @@ static void gistprunepage(Relation rel, Page page, Buffer buffer,
#define ROTATEDIST(d) do { \
SplitPageLayout *tmp = (SplitPageLayout *) palloc0(sizeof(SplitPageLayout)); \
SplitPageLayout *tmp = palloc0_object(SplitPageLayout); \
tmp->block.blkno = InvalidBlockNumber; \
tmp->buffer = InvalidBuffer; \
tmp->next = (d); \
@ -392,7 +392,7 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate,
/* Prepare a vector of all the downlinks */
for (ptr = dist; ptr; ptr = ptr->next)
ndownlinks++;
downlinks = palloc(sizeof(IndexTuple) * ndownlinks);
downlinks = palloc_array(IndexTuple, ndownlinks);
for (i = 0, ptr = dist; ptr; ptr = ptr->next)
downlinks[i++] = ptr->itup;
@ -410,7 +410,7 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate,
/* Prepare split-info to be returned to caller */
for (ptr = dist; ptr; ptr = ptr->next)
{
GISTPageSplitInfo *si = palloc(sizeof(GISTPageSplitInfo));
GISTPageSplitInfo *si = palloc_object(GISTPageSplitInfo);
si->buf = ptr->buffer;
si->downlink = ptr->itup;
@ -823,7 +823,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace,
xlocked = false;
/* descend to the chosen child */
item = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
item = palloc0_object(GISTInsertStack);
item->blkno = childblkno;
item->parent = stack;
item->downlinkoffnum = downlinkoffnum;
@ -923,7 +923,7 @@ gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
*ptr;
BlockNumber blkno;
top = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
top = palloc0_object(GISTInsertStack);
top->blkno = GIST_ROOT_BLKNO;
top->downlinkoffnum = InvalidOffsetNumber;
@ -975,7 +975,7 @@ gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
* leaf pages, and we assume that there can't be any non-leaf
* pages behind leaf pages.
*/
ptr = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
ptr = palloc0_object(GISTInsertStack);
ptr->blkno = GistPageGetOpaque(page)->rightlink;
ptr->downlinkoffnum = InvalidOffsetNumber;
ptr->parent = top->parent;
@ -1000,7 +1000,7 @@ gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
else
{
/* Append this child to the list of pages to visit later */
ptr = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
ptr = palloc0_object(GISTInsertStack);
ptr->blkno = blkno;
ptr->downlinkoffnum = i;
ptr->parent = top;
@ -1218,7 +1218,7 @@ gistfixsplit(GISTInsertState *state, GISTSTATE *giststate)
*/
for (;;)
{
GISTPageSplitInfo *si = palloc(sizeof(GISTPageSplitInfo));
GISTPageSplitInfo *si = palloc_object(GISTPageSplitInfo);
IndexTuple downlink;
page = BufferGetPage(buf);
@ -1482,8 +1482,8 @@ gistSplit(Relation r,
gistSplitByKey(r, page, itup, len, giststate, &v, 0);
/* form left and right vector */
lvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * (len + 1));
rvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * (len + 1));
lvectup = palloc_array(IndexTuple, len + 1);
rvectup = palloc_array(IndexTuple, len + 1);
for (i = 0; i < v.splitVector.spl_nleft; i++)
lvectup[i] = itup[v.splitVector.spl_left[i] - 1];
@ -1552,7 +1552,7 @@ initGISTstate(Relation index)
oldCxt = MemoryContextSwitchTo(scanCxt);
/* Create and fill in the GISTSTATE */
giststate = (GISTSTATE *) palloc(sizeof(GISTSTATE));
giststate = palloc_object(GISTSTATE);
giststate->scanCxt = scanCxt;
giststate->tempCxt = scanCxt; /* caller must change this if needed */

View file

@ -346,7 +346,7 @@ gistbuild(Relation heap, Relation index, IndexInfo *indexInfo)
/*
* Return statistics
*/
result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
result = palloc_object(IndexBuildResult);
result->heap_tuples = reltuples;
result->index_tuples = (double) buildstate.indtuples;
@ -409,7 +409,7 @@ gist_indexsortbuild(GISTBuildState *state)
state->bulkstate = smgr_bulk_start_rel(state->indexrel, MAIN_FORKNUM);
/* Allocate a temporary buffer for the first leaf page batch. */
levelstate = palloc0(sizeof(GistSortedBuildLevelState));
levelstate = palloc0_object(GistSortedBuildLevelState);
levelstate->pages[0] = palloc(BLCKSZ);
levelstate->parent = NULL;
gistinitpage(levelstate->pages[0], F_LEAF);
@ -526,7 +526,7 @@ gist_indexsortbuild_levelstate_flush(GISTBuildState *state,
else
{
/* Create split layout from single page */
dist = (SplitPageLayout *) palloc0(sizeof(SplitPageLayout));
dist = palloc0_object(SplitPageLayout);
union_tuple = gistunion(state->indexrel, itvec, vect_len,
state->giststate);
dist->itup = union_tuple;
@ -597,7 +597,7 @@ gist_indexsortbuild_levelstate_flush(GISTBuildState *state,
parent = levelstate->parent;
if (parent == NULL)
{
parent = palloc0(sizeof(GistSortedBuildLevelState));
parent = palloc0_object(GistSortedBuildLevelState);
parent->pages[0] = palloc(BLCKSZ);
parent->parent = NULL;
gistinitpage(parent->pages[0], 0);
@ -1154,7 +1154,7 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
/* Create an array of all the downlink tuples */
ndownlinks = list_length(splitinfo);
downlinks = (IndexTuple *) palloc(sizeof(IndexTuple) * ndownlinks);
downlinks = palloc_array(IndexTuple, ndownlinks);
i = 0;
foreach(lc, splitinfo)
{

View file

@ -46,7 +46,7 @@ gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel)
GISTBuildBuffers *gfbb;
HASHCTL hashCtl;
gfbb = palloc(sizeof(GISTBuildBuffers));
gfbb = palloc_object(GISTBuildBuffers);
gfbb->pagesPerBuffer = pagesPerBuffer;
gfbb->levelStep = levelStep;
@ -60,7 +60,7 @@ gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel)
/* Initialize free page management. */
gfbb->nFreeBlocks = 0;
gfbb->freeBlocksLen = 32;
gfbb->freeBlocks = (long *) palloc(gfbb->freeBlocksLen * sizeof(long));
gfbb->freeBlocks = palloc_array(long, gfbb->freeBlocksLen);
/*
* Current memory context will be used for all in-memory data structures
@ -87,8 +87,7 @@ gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel)
* buffers are inserted here when they are created.
*/
gfbb->buffersOnLevelsLen = 1;
gfbb->buffersOnLevels = (List **) palloc(sizeof(List *) *
gfbb->buffersOnLevelsLen);
gfbb->buffersOnLevels = palloc_array(List *, gfbb->buffersOnLevelsLen);
gfbb->buffersOnLevels[0] = NIL;
/*
@ -96,8 +95,7 @@ gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel)
* into main memory.
*/
gfbb->loadedBuffersLen = 32;
gfbb->loadedBuffers = (GISTNodeBuffer **) palloc(gfbb->loadedBuffersLen *
sizeof(GISTNodeBuffer *));
gfbb->loadedBuffers = palloc_array(GISTNodeBuffer *, gfbb->loadedBuffersLen);
gfbb->loadedBuffersCount = 0;
gfbb->rootlevel = maxLevel;
@ -582,9 +580,7 @@ gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate,
* Allocate memory for information about relocation buffers.
*/
splitPagesCount = list_length(splitinfo);
relocationBuffersInfos =
(RelocationBufferInfo *) palloc(sizeof(RelocationBufferInfo) *
splitPagesCount);
relocationBuffersInfos = palloc_array(RelocationBufferInfo, splitPagesCount);
/*
* Fill relocation buffers information for node buffers of pages produced

View file

@ -171,7 +171,7 @@ gist_box_union(PG_FUNCTION_ARGS)
*pageunion;
numranges = entryvec->n;
pageunion = (BOX *) palloc(sizeof(BOX));
pageunion = palloc_object(BOX);
cur = DatumGetBoxP(entryvec->vector[0].key);
memcpy(pageunion, cur, sizeof(BOX));
@ -237,7 +237,7 @@ fallbackSplit(GistEntryVector *entryvec, GIST_SPLITVEC *v)
v->spl_left[v->spl_nleft] = i;
if (unionL == NULL)
{
unionL = (BOX *) palloc(sizeof(BOX));
unionL = palloc_object(BOX);
*unionL = *cur;
}
else
@ -250,7 +250,7 @@ fallbackSplit(GistEntryVector *entryvec, GIST_SPLITVEC *v)
v->spl_right[v->spl_nright] = i;
if (unionR == NULL)
{
unionR = (BOX *) palloc(sizeof(BOX));
unionR = palloc_object(BOX);
*unionR = *cur;
}
else
@ -698,8 +698,8 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
v->spl_nright = 0;
/* Allocate bounding boxes of left and right groups */
leftBox = palloc0(sizeof(BOX));
rightBox = palloc0(sizeof(BOX));
leftBox = palloc0_object(BOX);
rightBox = palloc0_object(BOX);
/*
* Allocate an array for "common entries" - entries which can be placed to
@ -1042,10 +1042,10 @@ gist_poly_compress(PG_FUNCTION_ARGS)
POLYGON *in = DatumGetPolygonP(entry->key);
BOX *r;
r = (BOX *) palloc(sizeof(BOX));
r = palloc_object(BOX);
memcpy(r, &(in->boundbox), sizeof(BOX));
retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
retval = palloc_object(GISTENTRY);
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page,
entry->offset, false);
@ -1107,13 +1107,13 @@ gist_circle_compress(PG_FUNCTION_ARGS)
CIRCLE *in = DatumGetCircleP(entry->key);
BOX *r;
r = (BOX *) palloc(sizeof(BOX));
r = palloc_object(BOX);
r->high.x = float8_pl(in->center.x, in->radius);
r->low.x = float8_mi(in->center.x, in->radius);
r->high.y = float8_pl(in->center.y, in->radius);
r->low.y = float8_mi(in->center.y, in->radius);
retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
retval = palloc_object(GISTENTRY);
gistentryinit(*retval, PointerGetDatum(r),
entry->rel, entry->page,
entry->offset, false);
@ -1171,9 +1171,9 @@ gist_point_compress(PG_FUNCTION_ARGS)
if (entry->leafkey) /* Point, actually */
{
BOX *box = palloc(sizeof(BOX));
BOX *box = palloc_object(BOX);
Point *point = DatumGetPointP(entry->key);
GISTENTRY *retval = palloc(sizeof(GISTENTRY));
GISTENTRY *retval = palloc_object(GISTENTRY);
box->high = box->low = *point;
@ -1200,9 +1200,9 @@ gist_point_fetch(PG_FUNCTION_ARGS)
Point *r;
GISTENTRY *retval;
retval = palloc(sizeof(GISTENTRY));
retval = palloc_object(GISTENTRY);
r = (Point *) palloc(sizeof(Point));
r = palloc_object(Point);
r->x = in->high.x;
r->y = in->high.y;
gistentryinit(*retval, PointerGetDatum(r),

View file

@ -90,7 +90,7 @@ gistbeginscan(Relation r, int nkeys, int norderbys)
oldCxt = MemoryContextSwitchTo(giststate->scanCxt);
/* initialize opaque data */
so = (GISTScanOpaque) palloc0(sizeof(GISTScanOpaqueData));
so = palloc0_object(GISTScanOpaqueData);
so->giststate = giststate;
giststate->tempCxt = createTempGistContext();
so->queue = NULL;
@ -101,8 +101,8 @@ gistbeginscan(Relation r, int nkeys, int norderbys)
so->qual_ok = true; /* in case there are zero keys */
if (scan->numberOfOrderBys > 0)
{
scan->xs_orderbyvals = palloc0(sizeof(Datum) * scan->numberOfOrderBys);
scan->xs_orderbynulls = palloc(sizeof(bool) * scan->numberOfOrderBys);
scan->xs_orderbyvals = palloc0_array(Datum, scan->numberOfOrderBys);
scan->xs_orderbynulls = palloc_array(bool, scan->numberOfOrderBys);
memset(scan->xs_orderbynulls, true, sizeof(bool) * scan->numberOfOrderBys);
}

View file

@ -51,7 +51,7 @@ gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec,
int i,
cleanedLen = 0;
cleanedItVec = (IndexTuple *) palloc(sizeof(IndexTuple) * gsvp->len);
cleanedItVec = palloc_array(IndexTuple, gsvp->len);
for (i = 0; i < gsvp->len; i++)
{
@ -501,7 +501,7 @@ gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVec
* Locate don't-care tuples, if any. If there are none, the split is
* optimal, so just fall out and return false.
*/
v->spl_dontcare = (bool *) palloc0(sizeof(bool) * (entryvec->n + 1));
v->spl_dontcare = palloc0_array(bool, entryvec->n + 1);
NumDontCare = findDontCares(r, giststate, entryvec->vector, v, attno);
@ -738,9 +738,9 @@ gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len,
* call will overwrite that with its own result.
*/
backupSplit = v->splitVector;
backupSplit.spl_left = (OffsetNumber *) palloc(sizeof(OffsetNumber) * len);
backupSplit.spl_left = palloc_array(OffsetNumber, len);
memcpy(backupSplit.spl_left, v->splitVector.spl_left, sizeof(OffsetNumber) * v->splitVector.spl_nleft);
backupSplit.spl_right = (OffsetNumber *) palloc(sizeof(OffsetNumber) * len);
backupSplit.spl_right = palloc_array(OffsetNumber, len);
memcpy(backupSplit.spl_right, v->splitVector.spl_right, sizeof(OffsetNumber) * v->splitVector.spl_nright);
/* Recursively decide how to split the don't-care tuples */

View file

@ -100,7 +100,7 @@ gistextractpage(Page page, int *len /* out */ )
maxoff = PageGetMaxOffsetNumber(page);
*len = maxoff;
itvec = palloc(sizeof(IndexTuple) * maxoff);
itvec = palloc_array(IndexTuple, maxoff);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
itvec[i - FirstOffsetNumber] = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
@ -113,7 +113,7 @@ gistextractpage(Page page, int *len /* out */ )
IndexTuple *
gistjoinvector(IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen)
{
itvec = (IndexTuple *) repalloc(itvec, sizeof(IndexTuple) * ((*len) + addlen));
itvec = repalloc_array(itvec, IndexTuple, (*len) + addlen);
memmove(&itvec[*len], additvec, sizeof(IndexTuple) * addlen);
*len += addlen;
return itvec;

View file

@ -61,7 +61,7 @@ gistbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
{
/* allocate stats if first time through, else re-use existing struct */
if (stats == NULL)
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
stats = palloc0_object(IndexBulkDeleteResult);
gistvacuumscan(info, stats, callback, callback_state);
@ -85,7 +85,7 @@ gistvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
*/
if (stats == NULL)
{
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
stats = palloc0_object(IndexBulkDeleteResult);
gistvacuumscan(info, stats, NULL, NULL);
}

View file

@ -193,7 +193,7 @@ hashbuild(Relation heap, Relation index, IndexInfo *indexInfo)
/*
* Return statistics
*/
result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
result = palloc_object(IndexBuildResult);
result->heap_tuples = reltuples;
result->index_tuples = buildstate.indtuples;
@ -318,8 +318,7 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir)
* entries.
*/
if (so->killedItems == NULL)
so->killedItems = (int *)
palloc(MaxIndexTuplesPerPage * sizeof(int));
so->killedItems = palloc_array(int, MaxIndexTuplesPerPage);
if (so->numKilled < MaxIndexTuplesPerPage)
so->killedItems[so->numKilled++] = so->currPos.itemIndex;
@ -381,7 +380,7 @@ hashbeginscan(Relation rel, int nkeys, int norderbys)
scan = RelationGetIndexScan(rel, nkeys, norderbys);
so = (HashScanOpaque) palloc(sizeof(HashScanOpaqueData));
so = (HashScanOpaque) palloc_object(HashScanOpaqueData);
HashScanPosInvalidate(so->currPos);
so->hashso_bucket_buf = InvalidBuffer;
so->hashso_split_bucket_buf = InvalidBuffer;
@ -633,7 +632,7 @@ loop_top:
/* return statistics */
if (stats == NULL)
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
stats = palloc0_object(IndexBulkDeleteResult);
stats->estimated_count = false;
stats->num_index_tuples = num_index_tuples;
stats->tuples_removed += tuples_removed;

View file

@ -59,7 +59,7 @@ struct HSpool
HSpool *
_h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
{
HSpool *hspool = (HSpool *) palloc0(sizeof(HSpool));
HSpool *hspool = palloc0_object(HSpool);
hspool->index = index;

View file

@ -1133,7 +1133,7 @@ heap_beginscan(Relation relation, Snapshot snapshot,
*/
if (flags & SO_TYPE_BITMAPSCAN)
{
BitmapHeapScanDesc bscan = palloc(sizeof(BitmapHeapScanDescData));
BitmapHeapScanDesc bscan = palloc_object(BitmapHeapScanDescData);
/*
* Bitmap Heap scans do not have any fields that a normal Heap Scan
@ -1142,7 +1142,7 @@ heap_beginscan(Relation relation, Snapshot snapshot,
scan = (HeapScanDesc) bscan;
}
else
scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
scan = (HeapScanDesc) palloc_object(HeapScanDescData);
scan->rs_base.rs_rd = relation;
scan->rs_base.rs_snapshot = snapshot;
@ -1201,7 +1201,7 @@ heap_beginscan(Relation relation, Snapshot snapshot,
* when doing a parallel scan.
*/
if (parallel_scan != NULL)
scan->rs_parallelworkerdata = palloc(sizeof(ParallelBlockTableScanWorkerData));
scan->rs_parallelworkerdata = palloc_object(ParallelBlockTableScanWorkerData);
else
scan->rs_parallelworkerdata = NULL;
@ -1210,7 +1210,7 @@ heap_beginscan(Relation relation, Snapshot snapshot,
* initscan() and we don't want to allocate memory again
*/
if (nkeys > 0)
scan->rs_base.rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
scan->rs_base.rs_key = palloc_array(ScanKeyData, nkeys);
else
scan->rs_base.rs_key = NULL;
@ -2037,7 +2037,7 @@ GetBulkInsertState(void)
{
BulkInsertState bistate;
bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
bistate = (BulkInsertState) palloc_object(BulkInsertStateData);
bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
bistate->current_buf = InvalidBuffer;
bistate->next_free = InvalidBlockNumber;
@ -6895,7 +6895,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
* even member XIDs >= OldestXmin often won't be kept by second pass.
*/
nnewmembers = 0;
newmembers = palloc(sizeof(MultiXactMember) * nmembers);
newmembers = palloc_array(MultiXactMember, nmembers);
has_lockers = false;
update_xid = InvalidTransactionId;
update_committed = false;
@ -8711,7 +8711,7 @@ bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
Assert(delstate->ndeltids > 0);
/* Calculate per-heap-block count of TIDs */
blockgroups = palloc(sizeof(IndexDeleteCounts) * delstate->ndeltids);
blockgroups = palloc_array(IndexDeleteCounts, delstate->ndeltids);
for (int i = 0; i < delstate->ndeltids; i++)
{
TM_IndexDelete *ideltid = &delstate->deltids[i];

View file

@ -81,7 +81,7 @@ heapam_slot_callbacks(Relation relation)
static IndexFetchTableData *
heapam_index_fetch_begin(Relation rel)
{
IndexFetchHeapData *hscan = palloc0(sizeof(IndexFetchHeapData));
IndexFetchHeapData *hscan = palloc0_object(IndexFetchHeapData);
hscan->xs_base.rel = rel;
hscan->xs_cbuf = InvalidBuffer;
@ -717,8 +717,8 @@ heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
/* Preallocate values/isnull arrays */
natts = newTupDesc->natts;
values = (Datum *) palloc(natts * sizeof(Datum));
isnull = (bool *) palloc(natts * sizeof(bool));
values = palloc_array(Datum, natts);
isnull = palloc_array(bool, natts);
/* Initialize the rewrite operation */
rwstate = begin_heap_rewrite(OldHeap, NewHeap, OldestXmin, *xid_cutoff,

View file

@ -249,7 +249,7 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm
old_cxt = MemoryContextSwitchTo(rw_cxt);
/* Create and fill in the state struct */
state = palloc0(sizeof(RewriteStateData));
state = palloc0_object(RewriteStateData);
state->rs_old_rel = old_heap;
state->rs_new_rel = new_heap;

View file

@ -685,7 +685,7 @@ heap_vacuum_rel(Relation rel, const VacuumParams params,
* of each rel. It's convenient for code in lazy_scan_heap to always use
* these temp copies.
*/
vacrel = (LVRelState *) palloc0(sizeof(LVRelState));
vacrel = palloc0_object(LVRelState);
vacrel->dbname = get_database_name(MyDatabaseId);
vacrel->relnamespace = get_namespace_name(RelationGetNamespace(rel));
vacrel->relname = pstrdup(RelationGetRelationName(rel));
@ -705,7 +705,7 @@ heap_vacuum_rel(Relation rel, const VacuumParams params,
if (instrument && vacrel->nindexes > 0)
{
/* Copy index names used by instrumentation (not error reporting) */
indnames = palloc(sizeof(char *) * vacrel->nindexes);
indnames = palloc_array(char *, vacrel->nindexes);
for (int i = 0; i < vacrel->nindexes; i++)
indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i]));
}
@ -3582,7 +3582,7 @@ dead_items_alloc(LVRelState *vacrel, int nworkers)
* locally.
*/
dead_items_info = (VacDeadItemsInfo *) palloc(sizeof(VacDeadItemsInfo));
dead_items_info = palloc_object(VacDeadItemsInfo);
dead_items_info->max_bytes = vac_work_mem * (Size) 1024;
dead_items_info->num_items = 0;
vacrel->dead_items_info = dead_items_info;

View file

@ -118,7 +118,7 @@ identify_opfamily_groups(CatCList *oprlist, CatCList *proclist)
}
/* Time for a new group */
thisgroup = (OpFamilyOpFuncGroup *) palloc(sizeof(OpFamilyOpFuncGroup));
thisgroup = palloc_object(OpFamilyOpFuncGroup);
if (oprform &&
(!procform ||
(oprform->amoplefttype < procform->amproclefttype ||

View file

@ -81,7 +81,7 @@ RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys)
{
IndexScanDesc scan;
scan = (IndexScanDesc) palloc(sizeof(IndexScanDescData));
scan = palloc_object(IndexScanDescData);
scan->heapRelation = NULL; /* may be set later */
scan->xs_heapfetch = NULL;
@ -94,11 +94,11 @@ RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys)
* We allocate key workspace here, but it won't get filled until amrescan.
*/
if (nkeys > 0)
scan->keyData = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
scan->keyData = palloc_array(ScanKeyData, nkeys);
else
scan->keyData = NULL;
if (norderbys > 0)
scan->orderByData = (ScanKey) palloc(sizeof(ScanKeyData) * norderbys);
scan->orderByData = palloc_array(ScanKeyData, norderbys);
else
scan->orderByData = NULL;
@ -310,8 +310,8 @@ index_compute_xid_horizon_for_tuples(Relation irel,
delstate.bottomup = false;
delstate.bottomupfreespace = 0;
delstate.ndeltids = 0;
delstate.deltids = palloc(nitems * sizeof(TM_IndexDelete));
delstate.status = palloc(nitems * sizeof(TM_IndexStatus));
delstate.deltids = palloc_array(TM_IndexDelete, nitems);
delstate.status = palloc_array(TM_IndexStatus, nitems);
/* identify what the index tuples about to be deleted point to */
for (int i = 0; i < nitems; i++)
@ -401,7 +401,7 @@ systable_beginscan(Relation heapRelation,
else
irel = NULL;
sysscan = (SysScanDesc) palloc(sizeof(SysScanDescData));
sysscan = palloc_object(SysScanDescData);
sysscan->heap_rel = heapRelation;
sysscan->irel = irel;
@ -667,7 +667,7 @@ systable_beginscan_ordered(Relation heapRelation,
elog(WARNING, "using index \"%s\" despite IgnoreSystemIndexes",
RelationGetRelationName(indexRelation));
sysscan = (SysScanDesc) palloc(sizeof(SysScanDescData));
sysscan = palloc_object(SysScanDescData);
sysscan->heap_rel = heapRelation;
sysscan->irel = indexRelation;

View file

@ -82,7 +82,7 @@ _bt_dedup_pass(Relation rel, Buffer buf, IndexTuple newitem, Size newitemsz,
* That ought to leave us with a good split point when pages full of
* duplicates can be split several times.
*/
state = (BTDedupState) palloc(sizeof(BTDedupStateData));
state = palloc_object(BTDedupStateData);
state->deduplicate = true;
state->nmaxitems = 0;
state->maxpostingsize = Min(BTMaxItemSize / 2, INDEX_SIZE_MASK);
@ -321,7 +321,7 @@ _bt_bottomupdel_pass(Relation rel, Buffer buf, Relation heapRel,
newitemsz += sizeof(ItemIdData);
/* Initialize deduplication state */
state = (BTDedupState) palloc(sizeof(BTDedupStateData));
state = palloc_object(BTDedupStateData);
state->deduplicate = true;
state->nmaxitems = 0;
state->maxpostingsize = BLCKSZ; /* We're not really deduplicating */
@ -355,8 +355,8 @@ _bt_bottomupdel_pass(Relation rel, Buffer buf, Relation heapRel,
delstate.bottomup = true;
delstate.bottomupfreespace = Max(BLCKSZ / 16, newitemsz);
delstate.ndeltids = 0;
delstate.deltids = palloc(MaxTIDsPerBTreePage * sizeof(TM_IndexDelete));
delstate.status = palloc(MaxTIDsPerBTreePage * sizeof(TM_IndexStatus));
delstate.deltids = palloc_array(TM_IndexDelete, MaxTIDsPerBTreePage);
delstate.status = palloc_array(TM_IndexStatus, MaxTIDsPerBTreePage);
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);

View file

@ -2963,7 +2963,7 @@ _bt_deadblocks(Page page, OffsetNumber *deletable, int ndeletable,
*/
spacentids = ndeletable + 1;
ntids = 0;
tidblocks = (BlockNumber *) palloc(sizeof(BlockNumber) * spacentids);
tidblocks = palloc_array(BlockNumber, spacentids);
/*
* First add the table block for the incoming newitem. This is the one

View file

@ -2982,7 +2982,7 @@ _bt_pendingfsm_init(Relation rel, BTVacState *vstate, bool cleanuponly)
vstate->maxbufsize = (int) maxbufsize;
/* Allocate buffer, indicate that there are currently 0 pending pages */
vstate->pendingpages = palloc(sizeof(BTPendingFSM) * vstate->bufsize);
vstate->pendingpages = palloc_array(BTPendingFSM, vstate->bufsize);
vstate->npendingpages = 0;
}

View file

@ -261,8 +261,7 @@ btgettuple(IndexScanDesc scan, ScanDirection dir)
* just forget any excess entries.
*/
if (so->killedItems == NULL)
so->killedItems = (int *)
palloc(MaxTIDsPerBTreePage * sizeof(int));
so->killedItems = palloc_array(int, MaxTIDsPerBTreePage);
if (so->numKilled < MaxTIDsPerBTreePage)
so->killedItems[so->numKilled++] = so->currPos.itemIndex;
}
@ -346,7 +345,7 @@ btbeginscan(Relation rel, int nkeys, int norderbys)
scan = RelationGetIndexScan(rel, nkeys, norderbys);
/* allocate private workspace */
so = (BTScanOpaque) palloc(sizeof(BTScanOpaqueData));
so = palloc_object(BTScanOpaqueData);
BTScanPosInvalidate(so->currPos);
BTScanPosInvalidate(so->markPos);
if (scan->numberOfKeys > 0)
@ -1140,7 +1139,7 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
/* allocate stats if first time through, else re-use existing struct */
if (stats == NULL)
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
stats = palloc0_object(IndexBulkDeleteResult);
/* Establish the vacuum cycle ID to use for this scan */
/* The ENSURE stuff ensures we clean up shared memory on failure */
@ -1201,7 +1200,7 @@ btvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
* We handle the problem by making num_index_tuples an estimate in
* cleanup-only case.
*/
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
stats = palloc0_object(IndexBulkDeleteResult);
btvacuumscan(info, stats, NULL, NULL, 0);
stats->estimated_count = true;
}

View file

@ -159,7 +159,7 @@ _bt_search(Relation rel, Relation heaprel, BTScanInsert key, Buffer *bufP,
* page one level down, it usually ends up inserting a new pivot
* tuple/downlink immediately after the location recorded here.
*/
new_stack = (BTStack) palloc(sizeof(BTStackData));
new_stack = (BTStack) palloc_object(BTStackData);
new_stack->bts_blkno = BufferGetBlockNumber(*bufP);
new_stack->bts_offset = offnum;
new_stack->bts_parent = stack_in;

View file

@ -335,7 +335,7 @@ btbuild(Relation heap, Relation index, IndexInfo *indexInfo)
if (buildstate.btleader)
_bt_end_parallel(buildstate.btleader);
result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
result = palloc_object(IndexBuildResult);
result->heap_tuples = reltuples;
result->index_tuples = buildstate.indtuples;
@ -366,7 +366,7 @@ static double
_bt_spools_heapscan(Relation heap, Relation index, BTBuildState *buildstate,
IndexInfo *indexInfo)
{
BTSpool *btspool = (BTSpool *) palloc0(sizeof(BTSpool));
BTSpool *btspool = palloc0_object(BTSpool);
SortCoordinate coordinate = NULL;
double reltuples = 0;
@ -399,7 +399,7 @@ _bt_spools_heapscan(Relation heap, Relation index, BTBuildState *buildstate,
*/
if (buildstate->btleader)
{
coordinate = (SortCoordinate) palloc0(sizeof(SortCoordinateData));
coordinate = palloc0_object(SortCoordinateData);
coordinate->isWorker = false;
coordinate->nParticipants =
buildstate->btleader->nparticipanttuplesorts;
@ -440,7 +440,7 @@ _bt_spools_heapscan(Relation heap, Relation index, BTBuildState *buildstate,
*/
if (indexInfo->ii_Unique)
{
BTSpool *btspool2 = (BTSpool *) palloc0(sizeof(BTSpool));
BTSpool *btspool2 = palloc0_object(BTSpool);
SortCoordinate coordinate2 = NULL;
/* Initialize secondary spool */
@ -457,7 +457,7 @@ _bt_spools_heapscan(Relation heap, Relation index, BTBuildState *buildstate,
* tuplesort_begin_index_btree() about the basic high level
* coordination of a parallel sort.
*/
coordinate2 = (SortCoordinate) palloc0(sizeof(SortCoordinateData));
coordinate2 = palloc0_object(SortCoordinateData);
coordinate2->isWorker = false;
coordinate2->nParticipants =
buildstate->btleader->nparticipanttuplesorts;
@ -648,7 +648,7 @@ _bt_blwritepage(BTWriteState *wstate, BulkWriteBuffer buf, BlockNumber blkno)
static BTPageState *
_bt_pagestate(BTWriteState *wstate, uint32 level)
{
BTPageState *state = (BTPageState *) palloc0(sizeof(BTPageState));
BTPageState *state = palloc0_object(BTPageState);
/* create initial page for level */
state->btps_buf = _bt_blnewpage(wstate, level);
@ -1002,7 +1002,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup,
if (last_off == P_HIKEY)
{
Assert(state->btps_lowkey == NULL);
state->btps_lowkey = palloc0(sizeof(IndexTupleData));
state->btps_lowkey = palloc0_object(IndexTupleData);
state->btps_lowkey->t_info = sizeof(IndexTupleData);
BTreeTupleSetNAtts(state->btps_lowkey, 0, false);
}
@ -1164,7 +1164,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
itup2 = tuplesort_getindextuple(btspool2->sortstate, true);
/* Prepare SortSupport data for each column */
sortKeys = (SortSupport) palloc0(keysz * sizeof(SortSupportData));
sortKeys = palloc0_array(SortSupportData, keysz);
for (i = 0; i < keysz; i++)
{
@ -1266,7 +1266,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
/* merge is unnecessary, deduplicate into posting lists */
BTDedupState dstate;
dstate = (BTDedupState) palloc(sizeof(BTDedupStateData));
dstate = palloc_object(BTDedupStateData);
dstate->deduplicate = true; /* unused */
dstate->nmaxitems = 0; /* unused */
dstate->maxpostingsize = 0; /* set later */
@ -1404,7 +1404,7 @@ _bt_begin_parallel(BTBuildState *buildstate, bool isconcurrent, int request)
Sharedsort *sharedsort;
Sharedsort *sharedsort2;
BTSpool *btspool = buildstate->spool;
BTLeader *btleader = (BTLeader *) palloc0(sizeof(BTLeader));
BTLeader *btleader = palloc0_object(BTLeader);
WalUsage *walusage;
BufferUsage *bufferusage;
bool leaderparticipates = true;
@ -1693,7 +1693,7 @@ _bt_leader_participate_as_worker(BTBuildState *buildstate)
int sortmem;
/* Allocate memory and initialize private spool */
leaderworker = (BTSpool *) palloc0(sizeof(BTSpool));
leaderworker = palloc0_object(BTSpool);
leaderworker->heap = buildstate->spool->heap;
leaderworker->index = buildstate->spool->index;
leaderworker->isunique = buildstate->spool->isunique;
@ -1705,7 +1705,7 @@ _bt_leader_participate_as_worker(BTBuildState *buildstate)
else
{
/* Allocate memory for worker's own private secondary spool */
leaderworker2 = (BTSpool *) palloc0(sizeof(BTSpool));
leaderworker2 = palloc0_object(BTSpool);
/* Initialize worker's own secondary spool */
leaderworker2->heap = leaderworker->heap;
@ -1796,7 +1796,7 @@ _bt_parallel_build_main(dsm_segment *seg, shm_toc *toc)
indexRel = index_open(btshared->indexrelid, indexLockmode);
/* Initialize worker's own spool */
btspool = (BTSpool *) palloc0(sizeof(BTSpool));
btspool = palloc0_object(BTSpool);
btspool->heap = heapRel;
btspool->index = indexRel;
btspool->isunique = btshared->isunique;
@ -1813,7 +1813,7 @@ _bt_parallel_build_main(dsm_segment *seg, shm_toc *toc)
else
{
/* Allocate memory for worker's own private secondary spool */
btspool2 = (BTSpool *) palloc0(sizeof(BTSpool));
btspool2 = palloc0_object(BTSpool);
/* Initialize worker's own secondary spool */
btspool2->heap = btspool->heap;
@ -1874,7 +1874,7 @@ _bt_parallel_scan_and_sort(BTSpool *btspool, BTSpool *btspool2,
IndexInfo *indexInfo;
/* Initialize local tuplesort coordination state */
coordinate = palloc0(sizeof(SortCoordinateData));
coordinate = palloc0_object(SortCoordinateData);
coordinate->isWorker = true;
coordinate->nParticipants = -1;
coordinate->sharedsort = sharedsort;
@ -1901,7 +1901,7 @@ _bt_parallel_scan_and_sort(BTSpool *btspool, BTSpool *btspool2,
* worker). Worker processes are generally permitted to allocate
* work_mem independently.
*/
coordinate2 = palloc0(sizeof(SortCoordinateData));
coordinate2 = palloc0_object(SortCoordinateData);
coordinate2->isWorker = true;
coordinate2->nParticipants = -1;
coordinate2->sharedsort = sharedsort2;

View file

@ -197,7 +197,7 @@ _bt_findsplitloc(Relation rel,
* between tuples will be legal).
*/
state.maxsplits = maxoff;
state.splits = palloc(sizeof(SplitPoint) * state.maxsplits);
state.splits = palloc_array(SplitPoint, state.maxsplits);
state.nsplits = 0;
/*

View file

@ -469,7 +469,7 @@ btree_xlog_dedup(XLogReaderState *record)
BTDedupInterval *intervals;
Page newpage;
state = (BTDedupState) palloc(sizeof(BTDedupStateData));
state = palloc_object(BTDedupStateData);
state->deduplicate = true; /* unused */
state->nmaxitems = 0; /* unused */
/* Conservatively use larger maxpostingsize than primary */

View file

@ -89,7 +89,7 @@ addNode(SpGistState *state, SpGistInnerTuple tuple, Datum label, int offset)
else if (offset > tuple->nNodes)
elog(ERROR, "invalid offset for adding node to SPGiST inner tuple");
nodes = palloc(sizeof(SpGistNodeTuple) * (tuple->nNodes + 1));
nodes = palloc_array(SpGistNodeTuple, tuple->nNodes + 1);
SGITITERATE(tuple, i, node)
{
if (i < offset)
@ -409,8 +409,8 @@ moveLeafs(Relation index, SpGistState *state,
/* Locate the tuples to be moved, and count up the space needed */
i = PageGetMaxOffsetNumber(current->page);
toDelete = (OffsetNumber *) palloc(sizeof(OffsetNumber) * i);
toInsert = (OffsetNumber *) palloc(sizeof(OffsetNumber) * (i + 1));
toDelete = palloc_array(OffsetNumber, i);
toInsert = palloc_array(OffsetNumber, i + 1);
size = newLeafTuple->size + sizeof(ItemIdData);
@ -634,7 +634,7 @@ checkAllTheSame(spgPickSplitIn *in, spgPickSplitOut *out, bool tooBig,
{
Datum theLabel = out->nodeLabels[theNode];
out->nodeLabels = (Datum *) palloc(sizeof(Datum) * out->nNodes);
out->nodeLabels = palloc_array(Datum, out->nNodes);
for (i = 0; i < out->nNodes; i++)
out->nodeLabels[i] = theLabel;
}
@ -717,12 +717,12 @@ doPickSplit(Relation index, SpGistState *state,
*/
max = PageGetMaxOffsetNumber(current->page);
n = max + 1;
in.datums = (Datum *) palloc(sizeof(Datum) * n);
toDelete = (OffsetNumber *) palloc(sizeof(OffsetNumber) * n);
toInsert = (OffsetNumber *) palloc(sizeof(OffsetNumber) * n);
oldLeafs = (SpGistLeafTuple *) palloc(sizeof(SpGistLeafTuple) * n);
newLeafs = (SpGistLeafTuple *) palloc(sizeof(SpGistLeafTuple) * n);
leafPageSelect = (uint8 *) palloc(sizeof(uint8) * n);
in.datums = palloc_array(Datum, n);
toDelete = palloc_array(OffsetNumber, n);
toInsert = palloc_array(OffsetNumber, n);
oldLeafs = palloc_array(SpGistLeafTuple, n);
newLeafs = palloc_array(SpGistLeafTuple, n);
leafPageSelect = palloc_array(uint8, n);
STORE_STATE(state, xlrec.stateSrc);
@ -858,7 +858,7 @@ doPickSplit(Relation index, SpGistState *state,
out.hasPrefix = false;
out.nNodes = 1;
out.nodeLabels = NULL;
out.mapTuplesToNodes = palloc0(sizeof(int) * in.nTuples);
out.mapTuplesToNodes = palloc0_array(int, in.nTuples);
/*
* Form new leaf tuples and count up the total space needed.
@ -914,8 +914,8 @@ doPickSplit(Relation index, SpGistState *state,
* out.nNodes with a value larger than the number of tuples on the input
* page, we can't allocate these arrays before here.
*/
nodes = (SpGistNodeTuple *) palloc(sizeof(SpGistNodeTuple) * out.nNodes);
leafSizes = (int *) palloc0(sizeof(int) * out.nNodes);
nodes = palloc_array(SpGistNodeTuple, out.nNodes);
leafSizes = palloc0_array(int, out.nNodes);
/*
* Form nodes of inner tuple and inner tuple itself
@ -1054,7 +1054,7 @@ doPickSplit(Relation index, SpGistState *state,
* do so, even if totalLeafSizes is less than the available space,
* because we can't split a group across pages.
*/
nodePageSelect = (uint8 *) palloc(sizeof(uint8) * out.nNodes);
nodePageSelect = palloc_array(uint8, out.nNodes);
curspace = currentFreeSpace;
newspace = PageGetExactFreeSpace(BufferGetPage(newLeafBuffer));
@ -1740,8 +1740,7 @@ spgSplitNodeAction(Relation index, SpGistState *state,
* Construct new prefix tuple with requested number of nodes. We'll fill
* in the childNodeN'th node's downlink below.
*/
nodes = (SpGistNodeTuple *) palloc(sizeof(SpGistNodeTuple) *
out->result.splitTuple.prefixNNodes);
nodes = palloc_array(SpGistNodeTuple, out->result.splitTuple.prefixNNodes);
for (i = 0; i < out->result.splitTuple.prefixNNodes; i++)
{
@ -1769,7 +1768,7 @@ spgSplitNodeAction(Relation index, SpGistState *state,
* same node datums, but with the prefix specified by the picksplit
* function.
*/
nodes = palloc(sizeof(SpGistNodeTuple) * innerTuple->nNodes);
nodes = palloc_array(SpGistNodeTuple, innerTuple->nNodes);
SGITITERATE(innerTuple, i, node)
{
nodes[i] = node;

View file

@ -140,7 +140,7 @@ spgbuild(Relation heap, Relation index, IndexInfo *indexInfo)
true);
}
result = (IndexBuildResult *) palloc0(sizeof(IndexBuildResult));
result = palloc0_object(IndexBuildResult);
result->heap_tuples = reltuples;
result->index_tuples = buildstate.indtuples;

View file

@ -114,7 +114,7 @@ spg_kd_picksplit(PG_FUNCTION_ARGS)
SortedPoint *sorted;
double coord;
sorted = palloc(sizeof(*sorted) * in->nTuples);
sorted = palloc_array(SortedPoint, in->nTuples);
for (i = 0; i < in->nTuples; i++)
{
sorted[i].p = DatumGetPointP(in->datums[i]);
@ -132,8 +132,8 @@ spg_kd_picksplit(PG_FUNCTION_ARGS)
out->nNodes = 2;
out->nodeLabels = NULL; /* we don't need node labels */
out->mapTuplesToNodes = palloc(sizeof(int) * in->nTuples);
out->leafTupleDatums = palloc(sizeof(Datum) * in->nTuples);
out->mapTuplesToNodes = palloc_array(int, in->nTuples);
out->leafTupleDatums = palloc_array(Datum, in->nTuples);
/*
* Note: points that have coordinates exactly equal to coord may get
@ -259,7 +259,7 @@ spg_kd_inner_consistent(PG_FUNCTION_ARGS)
if (!which)
PG_RETURN_VOID();
out->nodeNumbers = (int *) palloc(sizeof(int) * 2);
out->nodeNumbers = palloc_array(int, 2);
/*
* When ordering scan keys are specified, we've to calculate distance for
@ -273,8 +273,8 @@ spg_kd_inner_consistent(PG_FUNCTION_ARGS)
BOX infArea;
BOX *area;
out->distances = (double **) palloc(sizeof(double *) * in->nNodes);
out->traversalValues = (void **) palloc(sizeof(void *) * in->nNodes);
out->distances = palloc_array(double *, in->nNodes);
out->traversalValues = palloc_array(void *, in->nNodes);
if (in->level == 0)
{
@ -335,7 +335,7 @@ spg_kd_inner_consistent(PG_FUNCTION_ARGS)
}
/* Set up level increments, too */
out->levelAdds = (int *) palloc(sizeof(int) * 2);
out->levelAdds = palloc_array(int, 2);
out->levelAdds[0] = 1;
out->levelAdds[1] = 1;

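As the spg_kd_inner_consistent() hunks above show, the element type handed to these macros can itself be a pointer type, as in palloc_array(double *, in->nNodes) and palloc_array(void *, in->nNodes), and a later hunk passes a bare struct tag (palloc_object(struct typmap) in populate_typ_list()). A minimal standalone sketch under the same assumed macro shapes as before:

/* Standalone sketch; palloc is stubbed with malloc and the macro shapes are
 * the same assumption as in the earlier example. */
#include <stdlib.h>

static void *palloc(size_t size) { return malloc(size); }

#define palloc_object(type)        ((type *) palloc(sizeof(type)))
#define palloc_array(type, count)  ((type *) palloc(sizeof(type) * (count)))

struct tag { int x; };    /* stand-in for struct typmap */

int main(void)
{
    double    **distances = palloc_array(double *, 4);    /* casts to (double **) */
    struct tag *t = palloc_object(struct tag);             /* bare struct tags work too */

    distances[0] = NULL;
    t->x = 0;
    free(distances);
    free(t);
    return 0;
}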
View file

@ -64,7 +64,7 @@ spg_key_orderbys_distances(Datum key, bool isLeaf,
ScanKey orderbys, int norderbys)
{
int sk_num;
double *distances = (double *) palloc(norderbys * sizeof(double)),
double *distances = palloc_array(double, norderbys),
*distance = distances;
for (sk_num = 0; sk_num < norderbys; ++sk_num, ++orderbys, ++distance)
@ -81,7 +81,7 @@ spg_key_orderbys_distances(Datum key, bool isLeaf,
BOX *
box_copy(BOX *orig)
{
BOX *result = palloc(sizeof(BOX));
BOX *result = palloc_object(BOX);
*result = *orig;
return result;

View file

@ -82,7 +82,7 @@ getQuadrant(Point *centroid, Point *tst)
static BOX *
getQuadrantArea(BOX *bbox, Point *centroid, int quadrant)
{
BOX *result = (BOX *) palloc(sizeof(BOX));
BOX *result = palloc_object(BOX);
switch (quadrant)
{
@ -177,11 +177,11 @@ spg_quad_picksplit(PG_FUNCTION_ARGS)
/* Use the median values of x and y as the centroid point */
Point **sorted;
sorted = palloc(sizeof(*sorted) * in->nTuples);
sorted = palloc_array(Point *, in->nTuples);
for (i = 0; i < in->nTuples; i++)
sorted[i] = DatumGetPointP(in->datums[i]);
centroid = palloc(sizeof(*centroid));
centroid = palloc_object(Point);
qsort(sorted, in->nTuples, sizeof(*sorted), x_cmp);
centroid->x = sorted[in->nTuples >> 1]->x;
@ -189,7 +189,7 @@ spg_quad_picksplit(PG_FUNCTION_ARGS)
centroid->y = sorted[in->nTuples >> 1]->y;
#else
/* Use the average values of x and y as the centroid point */
centroid = palloc0(sizeof(*centroid));
centroid = palloc0_object(Point);
for (i = 0; i < in->nTuples; i++)
{
@ -207,8 +207,8 @@ spg_quad_picksplit(PG_FUNCTION_ARGS)
out->nNodes = 4;
out->nodeLabels = NULL; /* we don't need node labels */
out->mapTuplesToNodes = palloc(sizeof(int) * in->nTuples);
out->leafTupleDatums = palloc(sizeof(Datum) * in->nTuples);
out->mapTuplesToNodes = palloc_array(int, in->nTuples);
out->leafTupleDatums = palloc_array(Datum, in->nTuples);
for (i = 0; i < in->nTuples; i++)
{
@ -246,8 +246,8 @@ spg_quad_inner_consistent(PG_FUNCTION_ARGS)
*/
if (in->norderbys > 0)
{
out->distances = (double **) palloc(sizeof(double *) * in->nNodes);
out->traversalValues = (void **) palloc(sizeof(void *) * in->nNodes);
out->distances = palloc_array(double *, in->nNodes);
out->traversalValues = palloc_array(void *, in->nNodes);
if (in->level == 0)
{
@ -270,7 +270,7 @@ spg_quad_inner_consistent(PG_FUNCTION_ARGS)
{
/* Report that all nodes should be visited */
out->nNodes = in->nNodes;
out->nodeNumbers = (int *) palloc(sizeof(int) * in->nNodes);
out->nodeNumbers = palloc_array(int, in->nNodes);
for (i = 0; i < in->nNodes; i++)
{
out->nodeNumbers[i] = i;
@ -368,12 +368,12 @@ spg_quad_inner_consistent(PG_FUNCTION_ARGS)
break; /* no need to consider remaining conditions */
}
out->levelAdds = palloc(sizeof(int) * 4);
out->levelAdds = palloc_array(int, 4);
for (i = 0; i < 4; ++i)
out->levelAdds[i] = 1;
/* We must descend into the quadrant(s) identified by which */
out->nodeNumbers = (int *) palloc(sizeof(int) * 4);
out->nodeNumbers = palloc_array(int, 4);
out->nNodes = 0;
for (i = 1; i <= 4; i++)

View file

@ -309,9 +309,9 @@ spgbeginscan(Relation rel, int keysz, int orderbysz)
scan = RelationGetIndexScan(rel, keysz, orderbysz);
so = (SpGistScanOpaque) palloc0(sizeof(SpGistScanOpaqueData));
so = palloc0_object(SpGistScanOpaqueData);
if (keysz > 0)
so->keyData = (ScanKey) palloc(sizeof(ScanKeyData) * keysz);
so->keyData = palloc_array(ScanKeyData, keysz);
else
so->keyData = NULL;
initSpGistState(&so->state, scan->indexRelation);
@ -336,16 +336,12 @@ spgbeginscan(Relation rel, int keysz, int orderbysz)
if (scan->numberOfOrderBys > 0)
{
/* This will be filled in spgrescan, but allocate the space here */
so->orderByTypes = (Oid *)
palloc(sizeof(Oid) * scan->numberOfOrderBys);
so->nonNullOrderByOffsets = (int *)
palloc(sizeof(int) * scan->numberOfOrderBys);
so->orderByTypes = palloc_array(Oid, scan->numberOfOrderBys);
so->nonNullOrderByOffsets = palloc_array(int, scan->numberOfOrderBys);
/* These arrays have constant contents, so we can fill them now */
so->zeroDistances = (double *)
palloc(sizeof(double) * scan->numberOfOrderBys);
so->infDistances = (double *)
palloc(sizeof(double) * scan->numberOfOrderBys);
so->zeroDistances = palloc_array(double, scan->numberOfOrderBys);
so->infDistances = palloc_array(double, scan->numberOfOrderBys);
for (i = 0; i < scan->numberOfOrderBys; i++)
{
@ -353,10 +349,8 @@ spgbeginscan(Relation rel, int keysz, int orderbysz)
so->infDistances[i] = get_float8_infinity();
}
scan->xs_orderbyvals = (Datum *)
palloc0(sizeof(Datum) * scan->numberOfOrderBys);
scan->xs_orderbynulls = (bool *)
palloc(sizeof(bool) * scan->numberOfOrderBys);
scan->xs_orderbyvals = palloc0_array(Datum, scan->numberOfOrderBys);
scan->xs_orderbynulls = palloc_array(bool, scan->numberOfOrderBys);
memset(scan->xs_orderbynulls, true,
sizeof(bool) * scan->numberOfOrderBys);
}
@ -690,7 +684,7 @@ spgInnerTest(SpGistScanOpaque so, SpGistSearchItem *item,
{
/* force all children to be visited */
out.nNodes = nNodes;
out.nodeNumbers = (int *) palloc(sizeof(int) * nNodes);
out.nodeNumbers = palloc_array(int, nNodes);
for (i = 0; i < nNodes; i++)
out.nodeNumbers[i] = i;
}
@ -703,7 +697,7 @@ spgInnerTest(SpGistScanOpaque so, SpGistSearchItem *item,
{
/* collect node pointers */
SpGistNodeTuple node;
SpGistNodeTuple *nodes = (SpGistNodeTuple *) palloc(sizeof(SpGistNodeTuple) * nNodes);
SpGistNodeTuple *nodes = palloc_array(SpGistNodeTuple, nNodes);
SGITITERATE(innerTuple, i, node)
{
@ -972,8 +966,8 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr,
so->distances[so->nPtrs] = NULL;
else
{
IndexOrderByDistance *distances =
palloc(sizeof(distances[0]) * so->numberOfOrderBys);
IndexOrderByDistance *distances = palloc_array(IndexOrderByDistance,
so->numberOfOrderBys);
int i;
for (i = 0; i < so->numberOfOrderBys; i++)

View file

@ -230,8 +230,7 @@ spg_text_choose(PG_FUNCTION_ARGS)
formTextDatum(prefixStr, commonLen);
}
out->result.splitTuple.prefixNNodes = 1;
out->result.splitTuple.prefixNodeLabels =
(Datum *) palloc(sizeof(Datum));
out->result.splitTuple.prefixNodeLabels = palloc_object(Datum);
out->result.splitTuple.prefixNodeLabels[0] =
Int16GetDatum(*(unsigned char *) (prefixStr + commonLen));
@ -303,7 +302,7 @@ spg_text_choose(PG_FUNCTION_ARGS)
out->result.splitTuple.prefixHasPrefix = in->hasPrefix;
out->result.splitTuple.prefixPrefixDatum = in->prefixDatum;
out->result.splitTuple.prefixNNodes = 1;
out->result.splitTuple.prefixNodeLabels = (Datum *) palloc(sizeof(Datum));
out->result.splitTuple.prefixNodeLabels = palloc_object(Datum);
out->result.splitTuple.prefixNodeLabels[0] = Int16GetDatum(-2);
out->result.splitTuple.childNodeN = 0;
out->result.splitTuple.postfixHasPrefix = false;
@ -371,7 +370,7 @@ spg_text_picksplit(PG_FUNCTION_ARGS)
}
/* Extract the node label (first non-common byte) from each value */
nodes = (spgNodePtr *) palloc(sizeof(spgNodePtr) * in->nTuples);
nodes = palloc_array(spgNodePtr, in->nTuples);
for (i = 0; i < in->nTuples; i++)
{
@ -394,9 +393,9 @@ spg_text_picksplit(PG_FUNCTION_ARGS)
/* And emit results */
out->nNodes = 0;
out->nodeLabels = (Datum *) palloc(sizeof(Datum) * in->nTuples);
out->mapTuplesToNodes = (int *) palloc(sizeof(int) * in->nTuples);
out->leafTupleDatums = (Datum *) palloc(sizeof(Datum) * in->nTuples);
out->nodeLabels = palloc_array(Datum, in->nTuples);
out->mapTuplesToNodes = palloc_array(int, in->nTuples);
out->leafTupleDatums = palloc_array(Datum, in->nTuples);
for (i = 0; i < in->nTuples; i++)
{
@ -476,9 +475,9 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS)
* and see if it's consistent with the query. If so, emit an entry into
* the output arrays.
*/
out->nodeNumbers = (int *) palloc(sizeof(int) * in->nNodes);
out->levelAdds = (int *) palloc(sizeof(int) * in->nNodes);
out->reconstructedValues = (Datum *) palloc(sizeof(Datum) * in->nNodes);
out->nodeNumbers = palloc_array(int, in->nNodes);
out->levelAdds = palloc_array(int, in->nNodes);
out->reconstructedValues = palloc_array(Datum, in->nNodes);
out->nNodes = 0;
for (i = 0; i < in->nNodes; i++)

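One conversion in spg_text_choose() above deserves a note: the one-entry prefixNodeLabels array, formerly palloc(sizeof(Datum)), becomes palloc_object(Datum) rather than palloc_array(Datum, 1). Under the assumed macro shape both spellings request the same size and yield the same Datum * result, so indexing the allocation as [0] still works; the small stub below, with Datum stood in by uintptr_t, just illustrates that equivalence.

/* Standalone stub; same assumed macro shapes, with Datum stood in by
 * uintptr_t (the backend's Datum is also pointer-sized). */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

static void *palloc(size_t size) { return malloc(size); }

#define palloc_object(type)        ((type *) palloc(sizeof(type)))
#define palloc_array(type, count)  ((type *) palloc(sizeof(type) * (count)))

typedef uintptr_t Datum;

int main(void)
{
    Datum  *labels = palloc_object(Datum);    /* same size and type as palloc_array(Datum, 1) */

    labels[0] = (Datum) 42;
    assert(sizeof(*labels) == sizeof(Datum));
    free(labels);
    return 0;
}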
View file

@ -1177,7 +1177,7 @@ spgExtractNodeLabels(SpGistState *state, SpGistInnerTuple innerTuple)
}
else
{
nodeLabels = (Datum *) palloc(sizeof(Datum) * innerTuple->nNodes);
nodeLabels = palloc_array(Datum, innerTuple->nNodes);
SGITITERATE(innerTuple, i, node)
{
if (IndexTupleHasNulls(node))

View file

@ -76,7 +76,7 @@ spgAddPendingTID(spgBulkDeleteState *bds, const ItemPointerData *tid)
listLink = &pitem->next;
}
/* not there, so append new entry */
pitem = (spgVacPendingItem *) palloc(sizeof(spgVacPendingItem));
pitem = palloc_object(spgVacPendingItem);
pitem->tid = *tid;
pitem->done = false;
pitem->next = NULL;
@ -954,7 +954,7 @@ spgbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
/* allocate stats if first time through, else re-use existing struct */
if (stats == NULL)
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
stats = palloc0_object(IndexBulkDeleteResult);
bds.info = info;
bds.stats = stats;
bds.callback = callback;
@ -994,7 +994,7 @@ spgvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
*/
if (stats == NULL)
{
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
stats = palloc0_object(IndexBulkDeleteResult);
bds.info = info;
bds.stats = stats;
bds.callback = dummy_callback;

View file

@ -901,7 +901,7 @@ spgRedoVacuumRedirect(XLogReaderState *record)
int max = PageGetMaxOffsetNumber(page);
OffsetNumber *toDelete;
toDelete = palloc(sizeof(OffsetNumber) * max);
toDelete = palloc_array(OffsetNumber, max);
for (i = xldata->firstPlaceholder; i <= max; i++)
toDelete[i - xldata->firstPlaceholder] = i;

View file

@ -129,7 +129,7 @@ system_samplescangetsamplesize(PlannerInfo *root,
static void
system_initsamplescan(SampleScanState *node, int eflags)
{
node->tsm_state = palloc0(sizeof(SystemSamplerData));
node->tsm_state = palloc0_object(SystemSamplerData);
}
/*

View file

@ -419,8 +419,7 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
* Note we have the same race condition here as above: j could be 0 at the
* end of the loop.
*/
newMembers = (MultiXactMember *)
palloc(sizeof(MultiXactMember) * (nmembers + 1));
newMembers = palloc_array(MultiXactMember, nmembers + 1);
for (i = 0, j = 0; i < nmembers; i++)
{

View file

@ -186,7 +186,7 @@ CreateParallelContext(const char *library_name, const char *function_name,
oldcontext = MemoryContextSwitchTo(TopTransactionContext);
/* Initialize a new ParallelContext. */
pcxt = palloc0(sizeof(ParallelContext));
pcxt = palloc0_object(ParallelContext);
pcxt->subid = GetCurrentSubTransactionId();
pcxt->nworkers = nworkers;
pcxt->nworkers_to_launch = nworkers;
@ -453,7 +453,7 @@ InitializeParallelDSM(ParallelContext *pcxt)
clientconninfospace);
/* Allocate space for worker information. */
pcxt->worker = palloc0(sizeof(ParallelWorkerInfo) * pcxt->nworkers);
pcxt->worker = palloc0_array(ParallelWorkerInfo, pcxt->nworkers);
/*
* Establish error queues in dynamic shared memory.
@ -648,8 +648,7 @@ LaunchParallelWorkers(ParallelContext *pcxt)
*/
if (pcxt->nworkers_launched > 0)
{
pcxt->known_attached_workers =
palloc0(sizeof(bool) * pcxt->nworkers_launched);
pcxt->known_attached_workers = palloc0_array(bool, pcxt->nworkers_launched);
pcxt->nknown_attached_workers = 0;
}

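These parallel.c hunks also show how the zeroing variants are chosen: known_attached_workers wants an all-false array, so palloc0_array(bool, ...) is enough, whereas xs_orderbynulls in the earlier spgbeginscan() hunk must start out all-true, so it keeps palloc_array() followed by an explicit memset() to true. A standalone sketch of that distinction, under the same assumed macro shapes and stubs:

/* Standalone sketch of the zero-vs-explicit-initialization distinction; same
 * assumed macro shapes, stubbed with malloc/calloc. */
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static void *palloc(size_t size)  { return malloc(size); }
static void *palloc0(size_t size) { return calloc(1, size); }

#define palloc_array(type, count)   ((type *) palloc(sizeof(type) * (count)))
#define palloc0_array(type, count)  ((type *) palloc0(sizeof(type) * (count)))

int main(void)
{
    int     n = 4;
    bool   *known_attached = palloc0_array(bool, n);   /* zeroed: all false already */
    bool   *orderbynulls = palloc_array(bool, n);       /* true is not zero... */

    memset(orderbynulls, true, sizeof(bool) * n);       /* ...so it still needs a memset */
    (void) known_attached[0];
    free(known_attached);
    free(orderbynulls);
    return 0;
}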
View file

@ -87,7 +87,7 @@ readTimeLineHistory(TimeLineID targetTLI)
/* Timeline 1 does not have a history file, so no need to check */
if (targetTLI == 1)
{
entry = (TimeLineHistoryEntry *) palloc(sizeof(TimeLineHistoryEntry));
entry = palloc_object(TimeLineHistoryEntry);
entry->tli = targetTLI;
entry->begin = entry->end = InvalidXLogRecPtr;
return list_make1(entry);
@ -110,7 +110,7 @@ readTimeLineHistory(TimeLineID targetTLI)
(errcode_for_file_access(),
errmsg("could not open file \"%s\": %m", path)));
/* Not there, so assume no parents */
entry = (TimeLineHistoryEntry *) palloc(sizeof(TimeLineHistoryEntry));
entry = palloc_object(TimeLineHistoryEntry);
entry->tli = targetTLI;
entry->begin = entry->end = InvalidXLogRecPtr;
return list_make1(entry);
@ -175,7 +175,7 @@ readTimeLineHistory(TimeLineID targetTLI)
lasttli = tli;
entry = (TimeLineHistoryEntry *) palloc(sizeof(TimeLineHistoryEntry));
entry = palloc_object(TimeLineHistoryEntry);
entry->tli = tli;
entry->begin = prevend;
entry->end = ((uint64) (switchpoint_hi)) << 32 | (uint64) switchpoint_lo;
@ -198,7 +198,7 @@ readTimeLineHistory(TimeLineID targetTLI)
* Create one more entry for the "tip" of the timeline, which has no entry
* in the history file.
*/
entry = (TimeLineHistoryEntry *) palloc(sizeof(TimeLineHistoryEntry));
entry = palloc_object(TimeLineHistoryEntry);
entry->tli = targetTLI;
entry->begin = prevend;
entry->end = InvalidXLogRecPtr;

View file

@ -684,7 +684,7 @@ GetPreparedTransactionList(GlobalTransaction *gxacts)
}
num = TwoPhaseState->numPrepXacts;
array = (GlobalTransaction) palloc(sizeof(GlobalTransactionData) * num);
array = palloc_array(GlobalTransactionData, num);
*gxacts = array;
for (i = 0; i < num; i++)
memcpy(array + i, TwoPhaseState->prepXacts[i],
@ -750,7 +750,7 @@ pg_prepared_xact(PG_FUNCTION_ARGS)
* Collect all the 2PC status information that we will format and send
* out as a result set.
*/
status = (Working_State *) palloc(sizeof(Working_State));
status = palloc_object(Working_State);
funcctx->user_fctx = status;
status->ngxacts = GetPreparedTransactionList(&status->array);
@ -1027,7 +1027,7 @@ save_state_data(const void *data, uint32 len)
if (padlen > records.bytes_free)
{
records.tail->next = palloc0(sizeof(StateFileChunk));
records.tail->next = palloc0_object(StateFileChunk);
records.tail = records.tail->next;
records.tail->len = 0;
records.tail->next = NULL;
@ -1062,7 +1062,7 @@ StartPrepare(GlobalTransaction gxact)
SharedInvalidationMessage *invalmsgs;
/* Initialize linked list */
records.head = palloc0(sizeof(StateFileChunk));
records.head = palloc0_object(StateFileChunk);
records.head->len = 0;
records.head->next = NULL;
@ -1453,7 +1453,7 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len)
if (len != NULL)
*len = XLogRecGetDataLen(xlogreader);
*buf = palloc(sizeof(char) * XLogRecGetDataLen(xlogreader));
*buf = palloc_array(char, XLogRecGetDataLen(xlogreader));
memcpy(*buf, XLogRecGetData(xlogreader), sizeof(char) * XLogRecGetDataLen(xlogreader));
XLogReaderFree(xlogreader);

View file

@ -664,7 +664,7 @@ AssignTransactionId(TransactionState s)
TransactionState *parents;
size_t parentOffset = 0;
parents = palloc(sizeof(TransactionState) * s->nestingLevel);
parents = palloc_array(TransactionState, s->nestingLevel);
while (p != NULL && !FullTransactionIdIsValid(p->fullTransactionId))
{
parents[parentOffset++] = p;

View file

@ -4904,7 +4904,7 @@ void
LocalProcessControlFile(bool reset)
{
Assert(reset || ControlFile == NULL);
ControlFile = palloc(sizeof(ControlFileData));
ControlFile = palloc_object(ControlFileData);
ReadControlFile();
}
@ -9133,7 +9133,7 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces,
continue;
}
ti = palloc(sizeof(tablespaceinfo));
ti = palloc_object(tablespaceinfo);
ti->oid = tsoid;
ti->path = pstrdup(linkpath);
ti->rpath = relpath;

View file

@ -90,7 +90,7 @@ pg_backup_start(PG_FUNCTION_ARGS)
}
oldcontext = MemoryContextSwitchTo(backupcontext);
backup_state = (BackupState *) palloc0(sizeof(BackupState));
backup_state = palloc0_object(BackupState);
tablespace_map = makeStringInfo();
MemoryContextSwitchTo(oldcontext);

View file

@ -364,7 +364,7 @@ XLogPrefetcherAllocate(XLogReaderState *reader)
XLogPrefetcher *prefetcher;
HASHCTL ctl;
prefetcher = palloc0(sizeof(XLogPrefetcher));
prefetcher = palloc0_object(XLogPrefetcher);
prefetcher->reader = reader;
ctl.keysize = sizeof(RelFileLocator);

View file

@ -559,7 +559,7 @@ InitWalRecovery(ControlFileData *ControlFile, bool *wasShutdown_ptr,
* Set the WAL reading processor now, as it will be needed when reading
* the checkpoint record required (backup_label or not).
*/
private = palloc0(sizeof(XLogPageReadPrivate));
private = palloc0_object(XLogPageReadPrivate);
xlogreader =
XLogReaderAllocate(wal_segment_size, NULL,
XL_ROUTINE(.page_read = &XLogPageRead,
@ -1416,7 +1416,7 @@ read_tablespace_map(List **tablespaces)
errmsg("invalid data in file \"%s\"", TABLESPACE_MAP)));
str[n++] = '\0';
ti = palloc0(sizeof(tablespaceinfo));
ti = palloc0_object(tablespaceinfo);
errno = 0;
ti->oid = strtoul(str, &endp, 10);
if (*endp != '\0' || errno == EINVAL || errno == ERANGE)
@ -1467,7 +1467,7 @@ read_tablespace_map(List **tablespaces)
EndOfWalRecoveryInfo *
FinishWalRecovery(void)
{
EndOfWalRecoveryInfo *result = palloc(sizeof(EndOfWalRecoveryInfo));
EndOfWalRecoveryInfo *result = palloc_object(EndOfWalRecoveryInfo);
XLogRecPtr lastRec;
TimeLineID lastRecTLI;
XLogRecPtr endOfLog;

View file

@ -574,7 +574,7 @@ CreateFakeRelcacheEntry(RelFileLocator rlocator)
Relation rel;
/* Allocate the Relation struct and all related space in one block. */
fakeentry = palloc0(sizeof(FakeRelCacheEntryData));
fakeentry = palloc0_object(FakeRelCacheEntryData);
rel = (Relation) fakeentry;
rel->rd_rel = &fakeentry->pgc;

View file

@ -262,7 +262,7 @@ perform_base_backup(basebackup_options *opt, bbsink *sink,
total_checksum_failures = 0;
/* Allocate backup related variables. */
backup_state = (BackupState *) palloc0(sizeof(BackupState));
backup_state = palloc0_object(BackupState);
initStringInfo(&tablespace_map);
basebackup_progress_wait_checkpoint();
@ -289,7 +289,7 @@ perform_base_backup(basebackup_options *opt, bbsink *sink,
PrepareForIncrementalBackup(ib, backup_state);
/* Add a node for the base directory at the end */
newti = palloc0(sizeof(tablespaceinfo));
newti = palloc0_object(tablespaceinfo);
newti->size = -1;
state.tablespaces = lappend(state.tablespaces, newti);
@ -1206,7 +1206,7 @@ sendDir(bbsink *sink, const char *path, int basepathlen, bool sizeonly,
* But we don't need it at all if this is not an incremental backup.
*/
if (ib != NULL)
relative_block_numbers = palloc(sizeof(BlockNumber) * RELSEG_SIZE);
relative_block_numbers = palloc_array(BlockNumber, RELSEG_SIZE);
/*
* Determine if the current path is a database directory that can contain

View file

@ -107,7 +107,7 @@ static const bbsink_ops bbsink_copystream_ops = {
bbsink *
bbsink_copystream_new(bool send_to_client)
{
bbsink_copystream *sink = palloc0(sizeof(bbsink_copystream));
bbsink_copystream *sink = palloc0_object(bbsink_copystream);
*((const bbsink_ops **) &sink->base.bbs_ops) = &bbsink_copystream_ops;
sink->send_to_client = send_to_client;

View file

@ -76,7 +76,7 @@ bbsink_gzip_new(bbsink *next, pg_compress_specification *compress)
Assert((compresslevel >= 1 && compresslevel <= 9) ||
compresslevel == Z_DEFAULT_COMPRESSION);
sink = palloc0(sizeof(bbsink_gzip));
sink = palloc0_object(bbsink_gzip);
*((const bbsink_ops **) &sink->base.bbs_ops) = &bbsink_gzip_ops;
sink->base.bbs_next = next;
sink->compresslevel = compresslevel;

View file

@ -157,7 +157,7 @@ CreateIncrementalBackupInfo(MemoryContext mcxt)
oldcontext = MemoryContextSwitchTo(mcxt);
ib = palloc0(sizeof(IncrementalBackupInfo));
ib = palloc0_object(IncrementalBackupInfo);
ib->mcxt = mcxt;
initStringInfo(&ib->buf);
@ -169,7 +169,7 @@ CreateIncrementalBackupInfo(MemoryContext mcxt)
*/
ib->manifest_files = backup_file_create(mcxt, 10000, NULL);
context = palloc0(sizeof(JsonManifestParseContext));
context = palloc0_object(JsonManifestParseContext);
/* Parse the manifest. */
context->private_data = ib;
context->version_cb = manifest_process_version;
@ -993,7 +993,7 @@ manifest_process_wal_range(JsonManifestParseContext *context,
XLogRecPtr end_lsn)
{
IncrementalBackupInfo *ib = context->private_data;
backup_wal_range *range = palloc(sizeof(backup_wal_range));
backup_wal_range *range = palloc_object(backup_wal_range);
range->tli = tli;
range->start_lsn = start_lsn;

View file

@ -75,7 +75,7 @@ bbsink_lz4_new(bbsink *next, pg_compress_specification *compress)
compresslevel = compress->level;
Assert(compresslevel >= 0 && compresslevel <= 12);
sink = palloc0(sizeof(bbsink_lz4));
sink = palloc0_object(bbsink_lz4);
*((const bbsink_ops **) &sink->base.bbs_ops) = &bbsink_lz4_ops;
sink->base.bbs_next = next;
sink->compresslevel = compresslevel;

View file

@ -62,7 +62,7 @@ bbsink_progress_new(bbsink *next, bool estimate_backup_size, bool incremental)
Assert(next != NULL);
sink = palloc0(sizeof(bbsink));
sink = palloc0_object(bbsink);
*((const bbsink_ops **) &sink->bbs_ops) = &bbsink_progress_ops;
sink->bbs_next = next;

View file

@ -59,7 +59,7 @@ static const bbsink_ops bbsink_server_ops = {
bbsink *
bbsink_server_new(bbsink *next, char *pathname)
{
bbsink_server *sink = palloc0(sizeof(bbsink_server));
bbsink_server *sink = palloc0_object(bbsink_server);
*((const bbsink_ops **) &sink->base.bbs_ops) = &bbsink_server_ops;
sink->pathname = pathname;

View file

@ -96,7 +96,7 @@ BaseBackupAddTarget(char *name,
* name into a newly-allocated chunk of memory.
*/
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
newtype = palloc(sizeof(BaseBackupTargetType));
newtype = palloc_object(BaseBackupTargetType);
newtype->name = pstrdup(name);
newtype->check_detail = check_detail;
newtype->get_sink = get_sink;
@ -132,7 +132,7 @@ BaseBackupGetTargetHandle(char *target, char *target_detail)
BaseBackupTargetHandle *handle;
/* Found the target. */
handle = palloc(sizeof(BaseBackupTargetHandle));
handle = palloc_object(BaseBackupTargetHandle);
handle->type = ttype;
handle->detail_arg = ttype->check_detail(target, target_detail);

View file

@ -72,7 +72,7 @@ bbsink_throttle_new(bbsink *next, uint32 maxrate)
Assert(next != NULL);
Assert(maxrate > 0);
sink = palloc0(sizeof(bbsink_throttle));
sink = palloc0_object(bbsink_throttle);
*((const bbsink_ops **) &sink->base.bbs_ops) = &bbsink_throttle_ops;
sink->base.bbs_next = next;

View file

@ -70,7 +70,7 @@ bbsink_zstd_new(bbsink *next, pg_compress_specification *compress)
Assert(next != NULL);
sink = palloc0(sizeof(bbsink_zstd));
sink = palloc0_object(bbsink_zstd);
*((const bbsink_ops **) &sink->base.bbs_ops) = &bbsink_zstd_ops;
sink->base.bbs_next = next;
sink->compress = compress;

View file

@ -73,7 +73,7 @@ GetWalSummaries(TimeLineID tli, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
continue;
/* Add it to the list. */
ws = palloc(sizeof(WalSummaryFile));
ws = palloc_object(WalSummaryFile);
ws->tli = file_tli;
ws->start_lsn = file_start_lsn;
ws->end_lsn = file_end_lsn;

View file

@ -742,7 +742,7 @@ populate_typ_list(void)
Form_pg_type typForm = (Form_pg_type) GETSTRUCT(tup);
struct typmap *newtyp;
newtyp = (struct typmap *) palloc(sizeof(struct typmap));
newtyp = palloc_object(struct typmap);
Typ = lappend(Typ, newtyp);
newtyp->am_oid = typForm->oid;
@ -951,10 +951,10 @@ index_register(Oid heap,
oldcxt = MemoryContextSwitchTo(nogc);
newind = (IndexList *) palloc(sizeof(IndexList));
newind = palloc_object(IndexList);
newind->il_heap = heap;
newind->il_ind = ind;
newind->il_info = (IndexInfo *) palloc(sizeof(IndexInfo));
newind->il_info = palloc_object(IndexInfo);
memcpy(newind->il_info, indexInfo, sizeof(IndexInfo));
/* expressions will likely be null, but may as well copy it */

View file

@ -801,8 +801,7 @@ findDependentObjects(const ObjectAddress *object,
* regression testing.)
*/
maxDependentObjects = 128; /* arbitrary initial allocation */
dependentObjects = (ObjectAddressAndFlags *)
palloc(maxDependentObjects * sizeof(ObjectAddressAndFlags));
dependentObjects = palloc_array(ObjectAddressAndFlags, maxDependentObjects);
numDependentObjects = 0;
ScanKeyInit(&key[0],
@ -2616,12 +2615,11 @@ new_object_addresses(void)
{
ObjectAddresses *addrs;
addrs = palloc(sizeof(ObjectAddresses));
addrs = palloc_object(ObjectAddresses);
addrs->numrefs = 0;
addrs->maxrefs = 32;
addrs->refs = (ObjectAddress *)
palloc(addrs->maxrefs * sizeof(ObjectAddress));
addrs->refs = palloc_array(ObjectAddress, addrs->maxrefs);
addrs->extras = NULL; /* until/unless needed */
return addrs;

View file

@ -732,7 +732,7 @@ InsertPgAttributeTuples(Relation pg_attribute_rel,
/* Initialize the number of slots to use */
nslots = Min(tupdesc->natts,
(MAX_CATALOG_MULTI_INSERT_BYTES / sizeof(FormData_pg_attribute)));
slot = palloc(sizeof(TupleTableSlot *) * nslots);
slot = palloc_array(TupleTableSlot *, nslots);
for (int i = 0; i < nslots; i++)
slot[i] = MakeSingleTupleTableSlot(td, &TTSOpsHeapTuple);
@ -2459,7 +2459,7 @@ AddRelationNewConstraints(Relation rel,
defOid = StoreAttrDefault(rel, colDef->attnum, expr, is_internal);
cooked = (CookedConstraint *) palloc(sizeof(CookedConstraint));
cooked = palloc_object(CookedConstraint);
cooked->contype = CONSTR_DEFAULT;
cooked->conoid = defOid;
cooked->name = NULL;
@ -2593,7 +2593,7 @@ AddRelationNewConstraints(Relation rel,
numchecks++;
cooked = (CookedConstraint *) palloc(sizeof(CookedConstraint));
cooked = palloc_object(CookedConstraint);
cooked->contype = CONSTR_CHECK;
cooked->conoid = constrOid;
cooked->name = ccname;
@ -2669,7 +2669,7 @@ AddRelationNewConstraints(Relation rel,
inhcount,
cdef->is_no_inherit);
nncooked = (CookedConstraint *) palloc(sizeof(CookedConstraint));
nncooked = palloc_object(CookedConstraint);
nncooked->contype = CONSTR_NOTNULL;
nncooked->conoid = constrOid;
nncooked->name = nnname;

View file

@ -1414,7 +1414,7 @@ index_concurrently_create_copy(Relation heapRelation, Oid oldIndexId,
}
/* Extract opclass options for each attribute */
opclassOptions = palloc0(sizeof(Datum) * newInfo->ii_NumIndexAttrs);
opclassOptions = palloc0_array(Datum, newInfo->ii_NumIndexAttrs);
for (int i = 0; i < newInfo->ii_NumIndexAttrs; i++)
opclassOptions[i] = get_attoptions(oldIndexId, i + 1);
@ -2678,9 +2678,9 @@ BuildSpeculativeIndexInfo(Relation index, IndexInfo *ii)
*/
Assert(ii->ii_Unique);
ii->ii_UniqueOps = (Oid *) palloc(sizeof(Oid) * indnkeyatts);
ii->ii_UniqueProcs = (Oid *) palloc(sizeof(Oid) * indnkeyatts);
ii->ii_UniqueStrats = (uint16 *) palloc(sizeof(uint16) * indnkeyatts);
ii->ii_UniqueOps = palloc_array(Oid, indnkeyatts);
ii->ii_UniqueProcs = palloc_array(Oid, indnkeyatts);
ii->ii_UniqueStrats = palloc_array(uint16, indnkeyatts);
/*
* We have to look up the operator's strategy number. This provides a

View file

@ -3929,7 +3929,7 @@ GetSearchPathMatcher(MemoryContext context)
oldcxt = MemoryContextSwitchTo(context);
result = (SearchPathMatcher *) palloc0(sizeof(SearchPathMatcher));
result = palloc0_object(SearchPathMatcher);
schemas = list_copy(activeSearchPath);
while (schemas && linitial_oid(schemas) != activeCreationNamespace)
{
@ -3960,7 +3960,7 @@ CopySearchPathMatcher(SearchPathMatcher *path)
{
SearchPathMatcher *result;
result = (SearchPathMatcher *) palloc(sizeof(SearchPathMatcher));
result = palloc_object(SearchPathMatcher);
result->schemas = list_copy(path->schemas);
result->addCatalog = path->addCatalog;
result->addTemp = path->addTemp;

View file

@ -6144,8 +6144,8 @@ strlist_to_textarray(List *list)
ALLOCSET_DEFAULT_SIZES);
oldcxt = MemoryContextSwitchTo(memcxt);
datums = (Datum *) palloc(sizeof(Datum) * list_length(list));
nulls = palloc(sizeof(bool) * list_length(list));
datums = palloc_array(Datum, list_length(list));
nulls = palloc_array(bool, list_length(list));
foreach(cell, list)
{

View file

@ -846,7 +846,7 @@ RelationGetNotNullConstraints(Oid relid, bool cooked, bool include_noinh)
{
CookedConstraint *cooked;
cooked = (CookedConstraint *) palloc(sizeof(CookedConstraint));
cooked = palloc_object(CookedConstraint);
cooked->contype = CONSTR_NOTNULL;
cooked->conoid = conForm->oid;

View file

@ -88,7 +88,7 @@ recordMultipleDependencies(const ObjectAddress *depender,
*/
max_slots = Min(nreferenced,
MAX_CATALOG_MULTI_INSERT_BYTES / sizeof(FormData_pg_depend));
slot = palloc(sizeof(TupleTableSlot *) * max_slots);
slot = palloc_array(TupleTableSlot *, max_slots);
/* Don't open indexes unless we need to make an update */
indstate = NULL;

View file

@ -120,7 +120,7 @@ EnumValuesCreate(Oid enumTypeOid, List *vals)
* allocating the next), trouble could only occur if the OID counter wraps
* all the way around before we finish. Which seems unlikely.
*/
oids = (Oid *) palloc(num_elems * sizeof(Oid));
oids = palloc_array(Oid, num_elems);
for (elemno = 0; elemno < num_elems; elemno++)
{
@ -148,7 +148,7 @@ EnumValuesCreate(Oid enumTypeOid, List *vals)
/* allocate the slots to use and initialize them */
nslots = Min(num_elems,
MAX_CATALOG_MULTI_INSERT_BYTES / sizeof(FormData_pg_enum));
slot = palloc(sizeof(TupleTableSlot *) * nslots);
slot = palloc_array(TupleTableSlot *, nslots);
for (int i = 0; i < nslots; i++)
slot[i] = MakeSingleTupleTableSlot(RelationGetDescr(pg_enum),
&TTSOpsHeapTuple);
@ -375,7 +375,7 @@ restart:
nelems = list->n_members;
/* Sort the existing members by enumsortorder */
existing = (HeapTuple *) palloc(nelems * sizeof(HeapTuple));
existing = palloc_array(HeapTuple, nelems);
for (i = 0; i < nelems; i++)
existing[i] = &(list->members[i]->tuple);

View file

@ -1086,7 +1086,7 @@ GetPublication(Oid pubid)
pubform = (Form_pg_publication) GETSTRUCT(tup);
pub = (Publication *) palloc(sizeof(Publication));
pub = palloc_object(Publication);
pub->oid = pubid;
pub->name = pstrdup(NameStr(pubform->pubname));
pub->alltables = pubform->puballtables;
@ -1196,7 +1196,7 @@ pg_get_publication_tables(PG_FUNCTION_ARGS)
*/
foreach(lc, pub_elem_tables)
{
published_rel *table_info = (published_rel *) palloc(sizeof(published_rel));
published_rel *table_info = palloc_object(published_rel);
table_info->relid = lfirst_oid(lc);
table_info->pubid = pub_elem->oid;
@ -1299,7 +1299,7 @@ pg_get_publication_tables(PG_FUNCTION_ARGS)
TupleDesc desc = RelationGetDescr(rel);
int i;
attnums = (int16 *) palloc(desc->natts * sizeof(int16));
attnums = palloc_array(int16, desc->natts);
for (i = 0; i < desc->natts; i++)
{

View file

@ -791,7 +791,7 @@ checkSharedDependencies(Oid classId, Oid objectId,
}
if (!stored)
{
dep = (remoteDep *) palloc(sizeof(remoteDep));
dep = palloc_object(remoteDep);
dep->dbOid = sdepForm->dbid;
dep->count = 1;
remDeps = lappend(remDeps, dep);
@ -913,7 +913,7 @@ copyTemplateDependencies(Oid templateDbId, Oid newDbId)
* know that they will be used.
*/
max_slots = MAX_CATALOG_MULTI_INSERT_BYTES / sizeof(FormData_pg_shdepend);
slot = palloc(sizeof(TupleTableSlot *) * max_slots);
slot = palloc_array(TupleTableSlot *, max_slots);
indstate = CatalogOpenIndexes(sdepRel);

View file

@ -89,7 +89,7 @@ GetSubscription(Oid subid, bool missing_ok)
subform = (Form_pg_subscription) GETSTRUCT(tup);
sub = (Subscription *) palloc(sizeof(Subscription));
sub = palloc_object(Subscription);
sub->oid = subid;
sub->dbid = subform->subdbid;
sub->skiplsn = subform->subskiplsn;
@ -618,7 +618,7 @@ GetSubscriptionRelations(Oid subid, bool tables, bool sequences,
relkind == RELKIND_PARTITIONED_TABLE) && !tables)
continue;
relstate = (SubscriptionRelState *) palloc(sizeof(SubscriptionRelState));
relstate = palloc_object(SubscriptionRelState);
relstate->relid = subrel->srrelid;
relstate->state = subrel->srsubstate;
d = SysCacheGetAttr(SUBSCRIPTIONRELMAP, tup,

View file

@ -707,12 +707,12 @@ smgrDoPendingDeletes(bool isCommit)
if (maxrels == 0)
{
maxrels = 8;
srels = palloc(sizeof(SMgrRelation) * maxrels);
srels = palloc_array(SMgrRelation, maxrels);
}
else if (maxrels <= nrels)
{
maxrels *= 2;
srels = repalloc(srels, sizeof(SMgrRelation) * maxrels);
srels = repalloc_array(srels, SMgrRelation, maxrels);
}
srels[nrels++] = srel;
@ -829,12 +829,12 @@ smgrDoPendingSyncs(bool isCommit, bool isParallelWorker)
if (maxrels == 0)
{
maxrels = 8;
srels = palloc(sizeof(SMgrRelation) * maxrels);
srels = palloc_array(SMgrRelation, maxrels);
}
else if (maxrels <= nrels)
{
maxrels *= 2;
srels = repalloc(srels, sizeof(SMgrRelation) * maxrels);
srels = repalloc_array(srels, SMgrRelation, maxrels);
}
srels[nrels++] = srel;
@ -909,7 +909,7 @@ smgrGetPendingDeletes(bool forCommit, RelFileLocator **ptr)
*ptr = NULL;
return 0;
}
rptr = (RelFileLocator *) palloc(nrels * sizeof(RelFileLocator));
rptr = palloc_array(RelFileLocator, nrels);
*ptr = rptr;
for (pending = pendingDeletes; pending != NULL; pending = pending->next)
{

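The smgrDoPendingDeletes() and smgrDoPendingSyncs() hunks above additionally convert the matching repalloc() calls, so the element type stays written next to the count when the array is doubled. A standalone sketch of that grow-by-doubling pattern, with palloc/repalloc stubbed by malloc/realloc and the repalloc_array shape assumed from how it is used here:

/* Standalone sketch of the doubling pattern above; palloc/repalloc are stubbed
 * with malloc/realloc, and the repalloc_array shape is an assumption inferred
 * from its use in this diff. */
#include <stdlib.h>

static void *palloc(size_t size)              { return malloc(size); }
static void *repalloc(void *ptr, size_t size) { return realloc(ptr, size); }

#define palloc_array(type, count)           ((type *) palloc(sizeof(type) * (count)))
#define repalloc_array(ptr, type, count)    ((type *) repalloc(ptr, sizeof(type) * (count)))

typedef struct Item { int id; } Item;    /* stand-in for SMgrRelation entries */

int main(void)
{
    Item   *items = NULL;
    int     nitems = 0;
    int     maxitems = 0;

    for (int i = 0; i < 100; i++)
    {
        if (maxitems == 0)
        {
            maxitems = 8;
            items = palloc_array(Item, maxitems);
        }
        else if (maxitems <= nitems)
        {
            maxitems *= 2;
            items = repalloc_array(items, Item, maxitems);
        }
        items[nitems++].id = i;
    }
    free(items);
    return 0;
}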
View file

@ -1079,7 +1079,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr)
/*
* Create the VacAttrStats struct.
*/
stats = (VacAttrStats *) palloc0(sizeof(VacAttrStats));
stats = palloc0_object(VacAttrStats);
stats->attstattarget = attstattarget;
/*
@ -1910,7 +1910,7 @@ std_typanalyze(VacAttrStats *stats)
NULL);
/* Save the operator info for compute_stats routines */
mystats = (StdAnalyzeData *) palloc(sizeof(StdAnalyzeData));
mystats = palloc_object(StdAnalyzeData);
mystats->eqopr = eqopr;
mystats->eqfunc = OidIsValid(eqopr) ? get_opcode(eqopr) : InvalidOid;
mystats->ltopr = ltopr;
@ -2865,7 +2865,7 @@ compute_scalar_stats(VacAttrStatsP stats,
/* Must copy the target values into anl_context */
old_context = MemoryContextSwitchTo(stats->anl_context);
corrs = (float4 *) palloc(sizeof(float4));
corrs = palloc_object(float4);
MemoryContextSwitchTo(old_context);
/*----------

View file

@ -1672,7 +1672,7 @@ get_tables_to_cluster(MemoryContext cluster_context)
/* Use a permanent memory context for the result list */
old_context = MemoryContextSwitchTo(cluster_context);
rtc = (RelToCluster *) palloc(sizeof(RelToCluster));
rtc = palloc_object(RelToCluster);
rtc->tableOid = index->indrelid;
rtc->indexOid = index->indexrelid;
rtcs = lappend(rtcs, rtc);
@ -1726,7 +1726,7 @@ get_tables_to_cluster_partitioned(MemoryContext cluster_context, Oid indexOid)
/* Use a permanent memory context for the result list */
old_context = MemoryContextSwitchTo(cluster_context);
rtc = (RelToCluster *) palloc(sizeof(RelToCluster));
rtc = palloc_object(RelToCluster);
rtc->tableOid = relid;
rtc->indexOid = indexrelid;
rtcs = lappend(rtcs, rtc);

View file

@ -561,7 +561,7 @@ ProcessCopyOptions(ParseState *pstate,
/* Support external use for option sanity checking */
if (opts_out == NULL)
opts_out = (CopyFormatOptions *) palloc0(sizeof(CopyFormatOptions));
opts_out = palloc0_object(CopyFormatOptions);
opts_out->file_encoding = -1;

View file

@ -364,7 +364,7 @@ CopyMultiInsertBufferInit(ResultRelInfo *rri)
{
CopyMultiInsertBuffer *buffer;
buffer = (CopyMultiInsertBuffer *) palloc(sizeof(CopyMultiInsertBuffer));
buffer = palloc_object(CopyMultiInsertBuffer);
memset(buffer->slots, 0, sizeof(TupleTableSlot *) * MAX_BUFFERED_TUPLES);
buffer->resultRelInfo = rri;
buffer->bistate = (rri->ri_FdwRoutine == NULL) ? GetBulkInsertState() : NULL;
@ -1558,7 +1558,7 @@ BeginCopyFrom(ParseState *pstate,
};
/* Allocate workspace and zero all fields */
cstate = (CopyFromStateData *) palloc0(sizeof(CopyFromStateData));
cstate = palloc0_object(CopyFromStateData);
/*
* We allocate everything used by a cstate in a new memory context. This

View file

@ -720,7 +720,7 @@ BeginCopyTo(ParseState *pstate,
/* Allocate workspace and zero all fields */
cstate = (CopyToStateData *) palloc0(sizeof(CopyToStateData));
cstate = palloc0_object(CopyToStateData);
/*
* We allocate everything used by a cstate in a new memory context. This
@ -1527,7 +1527,7 @@ copy_dest_destroy(DestReceiver *self)
DestReceiver *
CreateCopyDestReceiver(void)
{
DR_copy *self = (DR_copy *) palloc(sizeof(DR_copy));
DR_copy *self = palloc_object(DR_copy);
self->pub.receiveSlot = copy_dest_receive;
self->pub.rStartup = copy_dest_startup;

View file

@ -439,7 +439,7 @@ CreateTableAsRelExists(CreateTableAsStmt *ctas)
DestReceiver *
CreateIntoRelDestReceiver(IntoClause *intoClause)
{
DR_intorel *self = (DR_intorel *) palloc0(sizeof(DR_intorel));
DR_intorel *self = palloc0_object(DR_intorel);
self->pub.receiveSlot = intorel_receive;
self->pub.rStartup = intorel_startup;

View file

@ -431,7 +431,7 @@ ScanSourceDatabasePgClassTuple(HeapTupleData *tuple, Oid tbid, Oid dbid,
classForm->oid);
/* Prepare a rel info element and add it to the list. */
relinfo = (CreateDBRelInfo *) palloc(sizeof(CreateDBRelInfo));
relinfo = palloc_object(CreateDBRelInfo);
if (OidIsValid(classForm->reltablespace))
relinfo->rlocator.spcOid = classForm->reltablespace;
else

View file

@ -364,7 +364,7 @@ filter_list_to_array(List *filterlist)
int i = 0,
l = list_length(filterlist);
data = (Datum *) palloc(l * sizeof(Datum));
data = palloc_array(Datum, l);
foreach(lc, filterlist)
{
@ -1286,7 +1286,7 @@ EventTriggerSQLDropAddObject(const ObjectAddress *object, bool original, bool no
oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt);
obj = palloc0(sizeof(SQLDropObject));
obj = palloc0_object(SQLDropObject);
obj->address = *object;
obj->original = original;
obj->normal = normal;
@ -1726,7 +1726,7 @@ EventTriggerCollectSimpleCommand(ObjectAddress address,
oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt);
command = palloc(sizeof(CollectedCommand));
command = palloc_object(CollectedCommand);
command->type = SCT_Simple;
command->in_extension = creating_extension;
@ -1762,7 +1762,7 @@ EventTriggerAlterTableStart(Node *parsetree)
oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt);
command = palloc(sizeof(CollectedCommand));
command = palloc_object(CollectedCommand);
command->type = SCT_AlterTable;
command->in_extension = creating_extension;
@ -1818,7 +1818,7 @@ EventTriggerCollectAlterTableSubcmd(Node *subcmd, ObjectAddress address)
oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt);
newsub = palloc(sizeof(CollectedATSubcmd));
newsub = palloc_object(CollectedATSubcmd);
newsub->address = address;
newsub->parsetree = copyObject(subcmd);
@ -1892,7 +1892,7 @@ EventTriggerCollectGrant(InternalGrant *istmt)
/*
* This is tedious, but necessary.
*/
icopy = palloc(sizeof(InternalGrant));
icopy = palloc_object(InternalGrant);
memcpy(icopy, istmt, sizeof(InternalGrant));
icopy->objects = list_copy(istmt->objects);
icopy->grantees = list_copy(istmt->grantees);
@ -1901,7 +1901,7 @@ EventTriggerCollectGrant(InternalGrant *istmt)
icopy->col_privs = lappend(icopy->col_privs, copyObject(lfirst(cell)));
/* Now collect it, using the copied InternalGrant */
command = palloc(sizeof(CollectedCommand));
command = palloc_object(CollectedCommand);
command->type = SCT_Grant;
command->in_extension = creating_extension;
command->d.grant.istmt = icopy;
@ -1932,7 +1932,7 @@ EventTriggerCollectAlterOpFam(AlterOpFamilyStmt *stmt, Oid opfamoid,
oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt);
command = palloc(sizeof(CollectedCommand));
command = palloc_object(CollectedCommand);
command->type = SCT_AlterOpFamily;
command->in_extension = creating_extension;
ObjectAddressSet(command->d.opfam.address,
@ -1965,7 +1965,7 @@ EventTriggerCollectCreateOpClass(CreateOpClassStmt *stmt, Oid opcoid,
oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt);
command = palloc0(sizeof(CollectedCommand));
command = palloc0_object(CollectedCommand);
command->type = SCT_CreateOpClass;
command->in_extension = creating_extension;
ObjectAddressSet(command->d.createopc.address,
@ -1999,12 +1999,12 @@ EventTriggerCollectAlterTSConfig(AlterTSConfigurationStmt *stmt, Oid cfgId,
oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt);
command = palloc0(sizeof(CollectedCommand));
command = palloc0_object(CollectedCommand);
command->type = SCT_AlterTSConfig;
command->in_extension = creating_extension;
ObjectAddressSet(command->d.atscfg.address,
TSConfigRelationId, cfgId);
command->d.atscfg.dictIds = palloc(sizeof(Oid) * ndicts);
command->d.atscfg.dictIds = palloc_array(Oid, ndicts);
memcpy(command->d.atscfg.dictIds, dictIds, sizeof(Oid) * ndicts);
command->d.atscfg.ndicts = ndicts;
command->parsetree = (Node *) copyObject(stmt);
@ -2033,7 +2033,7 @@ EventTriggerCollectAlterDefPrivs(AlterDefaultPrivilegesStmt *stmt)
oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt);
command = palloc0(sizeof(CollectedCommand));
command = palloc0_object(CollectedCommand);
command->type = SCT_AlterDefaultPrivileges;
command->d.defprivs.objtype = stmt->action->objtype;
command->in_extension = creating_extension;

View file

@ -4980,7 +4980,7 @@ ExplainCreateWorkersState(int num_workers)
{
ExplainWorkersState *wstate;
wstate = (ExplainWorkersState *) palloc(sizeof(ExplainWorkersState));
wstate = palloc_object(ExplainWorkersState);
wstate->num_workers = num_workers;
wstate->worker_inited = (bool *) palloc0(num_workers * sizeof(bool));
wstate->worker_str = (StringInfoData *)

View file

@ -276,7 +276,7 @@ CreateExplainSerializeDestReceiver(ExplainState *es)
{
SerializeDestReceiver *self;
self = (SerializeDestReceiver *) palloc0(sizeof(SerializeDestReceiver));
self = palloc0_object(SerializeDestReceiver);
self->pub.receiveSlot = serializeAnalyzeReceive;
self->pub.rStartup = serializeAnalyzeStartup;

Some files were not shown because too many files have changed in this diff.