/*
 * Memory management functions.
 *
 * Copyright 2000-2007 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <errno.h>

#include <haproxy/activity-t.h>
#include <haproxy/api.h>
#include <haproxy/applet-t.h>
#include <haproxy/cfgparse.h>
#include <haproxy/channel.h>
#include <haproxy/cli.h>
#include <haproxy/errors.h>
#include <haproxy/global.h>
#include <haproxy/list.h>
#include <haproxy/pool.h>
#include <haproxy/stats-t.h>
#include <haproxy/stream_interface.h>
#include <haproxy/thread.h>
#include <haproxy/tools.h>

#ifdef CONFIG_HAP_LOCAL_POOLS
/* These are the most common pools, expected to be initialized first. These
 * ones are allocated from an array, allowing to map them to an index.
 */
struct pool_head pool_base_start[MAX_BASE_POOLS] = { };
unsigned int pool_base_count = 0;

/* These ones are initialized per-thread on startup by init_pools() */
struct pool_cache_head pool_cache[MAX_THREADS][MAX_BASE_POOLS];
THREAD_LOCAL size_t pool_cache_bytes = 0; /* total cache size */
THREAD_LOCAL size_t pool_cache_count = 0; /* #cache objects   */
#endif

static struct list pools = LIST_HEAD_INIT(pools);
int mem_poison_byte = -1;

#ifdef DEBUG_FAIL_ALLOC
static int mem_fail_rate = 0;
static int mem_should_fail(const struct pool_head *);
#endif

/* Tries to find an existing shared pool with the same characteristics and
 * returns it, otherwise creates a new one. NULL is returned if no memory
 * is available for a new creation. Two flags are supported:
 *   - MEM_F_SHARED to indicate that the pool may be shared with other users
 *   - MEM_F_EXACT to indicate that the size must not be rounded up
 */
struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
{
	struct pool_head *pool;
	struct pool_head *entry;
	struct list *start;
	unsigned int align;
	int idx __maybe_unused;

	/* We need to store a (void *) at the end of the chunks. Since we know
	 * that the malloc() function will never return such a small size,
	 * let's round the size up to something slightly bigger, in order to
	 * ease merging of entries. Note that the rounding is a power of two.
	 * This extra (void *) is not accounted for in the size computation
	 * so that the visible parts outside are not affected.
	 *
	 * Note: for the LRU cache, we need to store 2 doubly-linked lists.
	 */
	if (!(flags & MEM_F_EXACT)) {
		align = 4 * sizeof(void *); // 2 lists = 4 pointers min
		size  = ((size + POOL_EXTRA + align - 1) & -align) - POOL_EXTRA;
	}
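
	/* Example of the rounding above (a sketch assuming 64-bit pointers,
	 * hence align = 32, and a non-debug build where POOL_EXTRA is 0):
	 * a request for 100 bytes becomes ((100 + 0 + 31) & -32) - 0 = 128,
	 * so neighbouring sizes collapse onto the same rounded size and can
	 * be merged when MEM_F_SHARED is set.
	 */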

	/* TODO: thread: we do not lock pool list for now because all pools are
	 * created during HAProxy startup (so before threads creation) */
	start = &pools;
	pool = NULL;

	list_for_each_entry(entry, &pools, list) {
		if (entry->size == size) {
			/* either we can share this place and we take it, or
			 * we look for a shareable one or for the next position
			 * before which we will insert a new one.
			 */
			if (flags & entry->flags & MEM_F_SHARED) {
				/* we can share this one */
				pool = entry;
				DPRINTF(stderr, "Sharing %s with %s\n", name, pool->name);
				break;
			}
		}
		else if (entry->size > size) {
			/* insert before this one */
			start = &entry->list;
			break;
		}
	}

	if (!pool) {
#ifdef CONFIG_HAP_LOCAL_POOLS
		if (pool_base_count < MAX_BASE_POOLS)
			pool = &pool_base_start[pool_base_count++];

		if (!pool) {
			/* look for a freed entry */
			for (entry = pool_base_start; entry != pool_base_start + MAX_BASE_POOLS; entry++) {
				if (!entry->size) {
					pool = entry;
					break;
				}
			}
		}
#endif

		if (!pool)
			pool = calloc(1, sizeof(*pool));

		if (!pool)
			return NULL;
		if (name)
			strlcpy2(pool->name, name, sizeof(pool->name));
		pool->size = size;
		pool->flags = flags;
		LIST_ADDQ(start, &pool->list);

#ifdef CONFIG_HAP_LOCAL_POOLS
		/* update per-thread pool cache if necessary */
		idx = pool_get_index(pool);
		if (idx >= 0) {
			int thr;

			for (thr = 0; thr < MAX_THREADS; thr++)
				pool_cache[thr][idx].size = size;
		}
#endif
		HA_SPIN_INIT(&pool->lock);
	}
	pool->users++;
	return pool;
}
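
/* Usage sketch (names and sizes are hypothetical, and assume a non-debug
 * 64-bit build where POOL_EXTRA is 0): two shared requests whose sizes
 * round to the same value end up on the same pool:
 *
 *	struct pool_head *p1 = create_pool("demo1", 100, MEM_F_SHARED);
 *	struct pool_head *p2 = create_pool("demo2", 120, MEM_F_SHARED);
 *	// both round to 128 bytes, so p2 == p1 and p1->users == 2
 */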

#ifdef CONFIG_HAP_LOCAL_POOLS
/* Evicts some of the oldest objects from the local cache, pushing them to the
 * global pool.
 */
void pool_evict_from_cache()
{
	struct pool_cache_item *item;
	struct pool_cache_head *ph;

	do {
		item = LIST_PREV(&ti->pool_lru_head, struct pool_cache_item *, by_lru);
		/* note: by definition we remove oldest objects so they also are the
		 * oldest in their own pools, thus their next is the pool's head.
		 */
		ph = LIST_NEXT(&item->by_pool, struct pool_cache_head *, list);
		LIST_DEL(&item->by_pool);
		LIST_DEL(&item->by_lru);
		ph->count--;
		pool_cache_count--;
		pool_cache_bytes -= ph->size;
		__pool_free(pool_base_start + (ph - pool_cache[tid]), item);
	} while (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 7 / 8);
}
#endif
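
/* For instance (illustrative figure only): if CONFIG_HAP_POOL_CACHE_SIZE were
 * 512 kB, the do/while in pool_evict_from_cache() would evict at least one
 * object and keep going until the thread's cache drops to 7/8 of that limit
 * (448 kB), leaving some hysteresis so that eviction is not retriggered
 * immediately at the boundary.
 */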

#ifdef CONFIG_HAP_LOCKLESS_POOLS
/* Allocates new entries for pool <pool> until there are at least <avail> + 1
 * available, then returns the last one for immediate use, so that at least
 * <avail> are left available in the pool upon return. NULL is returned if the
 * last entry could not be allocated. It's important to note that at least one
 * allocation is always performed even if there are enough entries in the pool.
 * A call to the garbage collector is performed at most once in case malloc()
 * returns an error, before returning NULL.
 */
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr = NULL, **free_list;
	int failed = 0;
	int size = pool->size;
	int limit = pool->limit;
	int allocated = pool->allocated, allocated_orig = allocated;

	/* stop point */
	avail += pool->used;

	while (1) {
		if (limit && allocated >= limit) {
			_HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
			activity[tid].pool_fail++;
			return NULL;
		}

		swrate_add_scaled(&pool->needed_avg, POOL_AVG_SAMPLES, pool->allocated, POOL_AVG_SAMPLES/4);

		ptr = pool_alloc_area(size + POOL_EXTRA);
		if (!ptr) {
			_HA_ATOMIC_ADD(&pool->failed, 1);
			if (failed) {
				activity[tid].pool_fail++;
				return NULL;
			}
			failed++;
			pool_gc(pool);
			continue;
		}
		if (++allocated > avail)
			break;

		free_list = pool->free_list;
		do {
			*POOL_LINK(pool, ptr) = free_list;
			__ha_barrier_store();
		} while (_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr) == 0);
	}
	__ha_barrier_atomic_store();

	_HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
	_HA_ATOMIC_ADD(&pool->used, 1);

#ifdef DEBUG_MEMORY_POOLS
	/* keep track of where the element was allocated from */
	*POOL_LINK(pool, ptr) = (void *)pool;
#endif
	return ptr;
}
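
/* The push loop in __pool_refill_alloc() above is a lock-free stack (a
 * "Treiber stack"): each extra chunk is linked to the current head and then
 * published with a compare-and-swap, equivalent in spirit to:
 *
 *	do {
 *		*POOL_LINK(pool, ptr) = free_list;   // link node to current head
 *	} while (!CAS(&pool->free_list, &free_list, ptr)); // publish or retry
 *
 * The store barrier before the CAS guarantees the link is visible before the
 * new head is, so a concurrent consumer never follows an unset pointer.
 */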

void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr;

	ptr = __pool_refill_alloc(pool, avail);
	return ptr;
}

/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
	struct pool_free_list cmp, new;
	void **next, *temp;
	int removed = 0;

	if (!pool)
		return;
	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	do {
		cmp.free_list = pool->free_list;
		cmp.seq = pool->seq;
		new.free_list = NULL;
		new.seq = cmp.seq + 1;
	} while (!_HA_ATOMIC_DWCAS(&pool->free_list, &cmp, &new));
	__ha_barrier_atomic_store();
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	next = cmp.free_list;
	while (next) {
		temp = next;
		next = *POOL_LINK(pool, temp);
		removed++;
		pool_free_area(temp, pool->size + POOL_EXTRA);
	}
	pool->free_list = next;
	_HA_ATOMIC_SUB(&pool->allocated, removed);
	/* here, we should have pool->allocated == pool->used */
}
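
/* The <seq> counter carried through the DWCAS in pool_flush() above is the
 * classic guard against the ABA problem: if another thread popped the head A
 * and pushed it back between our read and our swap, the pointer alone would
 * still compare equal and the CAS would wrongly succeed on a rebuilt list.
 * Bumping <seq> on every update makes such a recycled head compare unequal:
 *
 *	T1: reads head == A, seq == S
 *	T2: pops A, pops B, pushes A back (seq is now S+2)
 *	T1: DWCAS{A,S} fails thanks to the sequence number and is retried
 */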

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It makes sure to be alone to
 * run by using thread_isolate(). <pool_ctx> is unused.
 */
void pool_gc(struct pool_head *pool_ctx)
{
	struct pool_head *entry;
	int isolated = thread_isolated();

	if (!isolated)
		thread_isolate();

	list_for_each_entry(entry, &pools, list) {
		while ((int)((volatile int)entry->allocated - (volatile int)entry->used) > (int)entry->minavail) {
			struct pool_free_list cmp, new;

			cmp.seq = entry->seq;
			__ha_barrier_load();
			cmp.free_list = entry->free_list;
			__ha_barrier_load();
			if (cmp.free_list == NULL)
				break;
			new.free_list = *POOL_LINK(entry, cmp.free_list);
			new.seq = cmp.seq + 1;
			if (HA_ATOMIC_DWCAS(&entry->free_list, &cmp, &new) == 0)
				continue;
			pool_free_area(cmp.free_list, entry->size + POOL_EXTRA);
			_HA_ATOMIC_SUB(&entry->allocated, 1);
		}
	}

	if (!isolated)
		thread_release();

#if defined(HA_HAVE_MALLOC_TRIM)
	malloc_trim(0);
#endif
}

#else /* CONFIG_HAP_LOCKLESS_POOLS */

/* Allocates new entries for pool <pool> until there are at least <avail> + 1
 * available, then returns the last one for immediate use, so that at least
 * <avail> are left available in the pool upon return. NULL is returned if the
 * last entry could not be allocated. It's important to note that at least one
 * allocation is always performed even if there are enough entries in the pool.
 * A call to the garbage collector is performed at most once in case malloc()
 * returns an error, before returning NULL.
 */
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr = NULL;
	int failed = 0;

#ifdef DEBUG_FAIL_ALLOC
	if (mem_should_fail(pool))
		return NULL;
#endif
	/* stop point */
	avail += pool->used;

	while (1) {
		if (pool->limit && pool->allocated >= pool->limit) {
			activity[tid].pool_fail++;
			return NULL;
		}

		swrate_add_scaled(&pool->needed_avg, POOL_AVG_SAMPLES, pool->allocated, POOL_AVG_SAMPLES/4);
		HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
		ptr = pool_alloc_area(pool->size + POOL_EXTRA);
#ifdef DEBUG_MEMORY_POOLS
		/* keep track of where the element was allocated from. This
		 * is done out of the lock so that the system really allocates
		 * the data without harming other threads waiting on the lock.
		 */
		if (ptr)
			*POOL_LINK(pool, ptr) = (void *)pool;
#endif
		HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
		if (!ptr) {
			pool->failed++;
			if (failed) {
				activity[tid].pool_fail++;
				return NULL;
			}
			failed++;
			pool_gc(pool);
			continue;
		}
		if (++pool->allocated > avail)
			break;

		*POOL_LINK(pool, ptr) = (void *)pool->free_list;
		pool->free_list = ptr;
	}
	pool->used++;
	return ptr;
}

void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ptr;

	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
	ptr = __pool_refill_alloc(pool, avail);
	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	return ptr;
}

/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
	void *temp;

	if (!pool)
		return;

	while (1) {
		HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
		temp = pool->free_list;
		if (!temp) {
			HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
			break;
		}
		pool->free_list = *POOL_LINK(pool, temp);
		pool->allocated--;
		HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
		pool_free_area(temp, pool->size + POOL_EXTRA);
	}
	/* here, we should have pool->allocated == pool->used */
}

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It makes sure to be alone to
 * run by using thread_isolate(). <pool_ctx> is unused.
 */
void pool_gc(struct pool_head *pool_ctx)
{
	struct pool_head *entry;
	int isolated = thread_isolated();

	if (!isolated)
		thread_isolate();

	list_for_each_entry(entry, &pools, list) {
		void *temp;
		//qfprintf(stderr, "Flushing pool %s\n", entry->name);
		while (entry->free_list &&
		       (int)(entry->allocated - entry->used) > (int)entry->minavail) {
			temp = entry->free_list;
			entry->free_list = *POOL_LINK(entry, temp);
			entry->allocated--;
			pool_free_area(temp, entry->size + POOL_EXTRA);
		}
	}

	if (!isolated)
		thread_release();
}
#endif

/*
 * This function destroys a pool by freeing it completely, unless it's still
 * in use. This should be called only under extreme circumstances. It always
 * returns NULL if the resulting pool is empty, easing the clearing of the old
 * pointer, otherwise it returns the pool.
 */
void *pool_destroy(struct pool_head *pool)
{
	if (pool) {
		pool_flush(pool);
		if (pool->used)
			return pool;
		pool->users--;
		if (!pool->users) {
			LIST_DEL(&pool->list);
#ifndef CONFIG_HAP_LOCKLESS_POOLS
			HA_SPIN_DESTROY(&pool->lock);
#endif

#ifdef CONFIG_HAP_LOCAL_POOLS
			if ((pool - pool_base_start) < MAX_BASE_POOLS)
				memset(pool, 0, sizeof(*pool));
			else
#endif
				free(pool);
		}
	}
	return NULL;
}
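
/* Typical call pattern (pool_demo is a hypothetical pool pointer): assigning
 * the return value back to the pointer clears it when the pool was really
 * released, and preserves it when objects are still in use:
 *
 *	pool_demo = pool_destroy(pool_demo);
 */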

/* This destroys all pools on exit. It is *not* thread safe. */
void pool_destroy_all()
{
	struct pool_head *entry, *back;

	list_for_each_entry_safe(entry, back, &pools, list)
		pool_destroy(entry);
}

/* This function dumps memory usage information into the trash buffer. */
void dump_pools_to_trash()
{
	struct pool_head *entry;
	unsigned long allocated, used;
	int nbpools;

	allocated = used = nbpools = 0;
	chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
	list_for_each_entry(entry, &pools, list) {
#ifndef CONFIG_HAP_LOCKLESS_POOLS
		HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
#endif
		chunk_appendf(&trash, " - Pool %s (%u bytes) : %u allocated (%u bytes), %u used, needed_avg %u, %u failures, %u users, @%p=%02d%s\n",
			      entry->name, entry->size, entry->allocated,
			      entry->size * entry->allocated, entry->used,
			      swrate_avg(entry->needed_avg, POOL_AVG_SAMPLES), entry->failed,
			      entry->users, entry, (int)pool_get_index(entry),
			      (entry->flags & MEM_F_SHARED) ? " [SHARED]" : "");

		allocated += entry->allocated * entry->size;
		used += entry->used * entry->size;
		nbpools++;
#ifndef CONFIG_HAP_LOCKLESS_POOLS
		HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
#endif
	}
	chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n",
		      nbpools, allocated, used);
}
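
/* Sample of what this produces (the values here are purely illustrative,
 * only the layout follows the format strings above):
 *
 *   Dumping pools usage. Use SIGQUIT to flush them.
 *    - Pool buffer (16384 bytes) : 620 allocated (10158080 bytes), 600 used, needed_avg 610, 0 failures, 1 users, @0x53ef00=03 [SHARED]
 *   Total: 1 pools, 10158080 bytes allocated, 9830400 used.
 */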

/* Dump statistics on pools usage. */
void dump_pools(void)
{
	dump_pools_to_trash();
	qfprintf(stderr, "%s", trash.area);
}

/* This function returns the total number of failed pool allocations */
int pool_total_failures()
{
	struct pool_head *entry;
	int failed = 0;

	list_for_each_entry(entry, &pools, list)
		failed += entry->failed;
	return failed;
}

/* This function returns the total amount of memory allocated in pools (in bytes) */
unsigned long pool_total_allocated()
{
	struct pool_head *entry;
	unsigned long allocated = 0;

	list_for_each_entry(entry, &pools, list)
		allocated += entry->allocated * entry->size;
	return allocated;
}

/* This function returns the total amount of memory used in pools (in bytes) */
unsigned long pool_total_used()
{
	struct pool_head *entry;
	unsigned long used = 0;

	list_for_each_entry(entry, &pools, list)
		used += entry->used * entry->size;
	return used;
}

/* This function dumps memory usage information onto the stream interface's
 * read buffer. It returns 0 as long as it does not complete, non-zero upon
 * completion. No state is used.
 */
static int cli_io_handler_dump_pools(struct appctx *appctx)
{
	struct stream_interface *si = appctx->owner;

	dump_pools_to_trash();
	if (ci_putchk(si_ic(si), &trash) == -1) {
		si_rx_room_blk(si);
		return 0;
	}
	return 1;
}

/* callback used to create early pool <name> of size <size> and store the
 * resulting pointer into <ptr>. If the allocation fails, it quits after
 * emitting an error message.
 */
void create_pool_callback(struct pool_head **ptr, char *name, unsigned int size)
{
	*ptr = create_pool(name, size, MEM_F_SHARED);
	if (!*ptr) {
		ha_alert("Failed to allocate pool '%s' of size %u : %s. Aborting.\n",
			 name, size, strerror(errno));
		exit(1);
	}
}

/* Initializes all per-thread arrays on startup */
static void init_pools()
{
#ifdef CONFIG_HAP_LOCAL_POOLS
	int thr, idx;

	for (thr = 0; thr < MAX_THREADS; thr++) {
		for (idx = 0; idx < MAX_BASE_POOLS; idx++) {
			LIST_INIT(&pool_cache[thr][idx].list);
			pool_cache[thr][idx].size = 0;
		}
		LIST_INIT(&ha_thread_info[thr].pool_lru_head);
	}
#endif
}

INITCALL0(STG_PREPARE, init_pools);

/* register cli keywords */
static struct cli_kw_list cli_kws = {{ },{
	{ { "show", "pools", NULL }, "show pools : report information about the memory pools usage", NULL, cli_io_handler_dump_pools },
	{{},}
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);

#ifdef DEBUG_FAIL_ALLOC
#define MEM_FAIL_MAX_CHAR 32
#define MEM_FAIL_MAX_STR 128
static int mem_fail_cur_idx;
static char mem_fail_str[MEM_FAIL_MAX_CHAR * MEM_FAIL_MAX_STR];
__decl_thread(static HA_SPINLOCK_T mem_fail_lock);

int mem_should_fail(const struct pool_head *pool)
{
	int ret = 0;
	int n;

	if (mem_fail_rate > 0 && !(global.mode & MODE_STARTING)) {
		int randnb = ha_random() % 100;

		if (mem_fail_rate > randnb)
			ret = 1;
		else
			ret = 0;
	}
	HA_SPIN_LOCK(POOL_LOCK, &mem_fail_lock);
	n = snprintf(&mem_fail_str[mem_fail_cur_idx * MEM_FAIL_MAX_CHAR],
	             MEM_FAIL_MAX_CHAR - 2,
	             "%d %.18s %d %d", mem_fail_cur_idx, pool->name, ret, tid);
	while (n < MEM_FAIL_MAX_CHAR - 1)
		mem_fail_str[mem_fail_cur_idx * MEM_FAIL_MAX_CHAR + n++] = ' ';
	if (mem_fail_cur_idx < MEM_FAIL_MAX_STR - 1)
		mem_fail_str[mem_fail_cur_idx * MEM_FAIL_MAX_CHAR + n] = '\n';
	else
		mem_fail_str[mem_fail_cur_idx * MEM_FAIL_MAX_CHAR + n] = 0;
	mem_fail_cur_idx++;
	if (mem_fail_cur_idx == MEM_FAIL_MAX_STR)
		mem_fail_cur_idx = 0;
	HA_SPIN_UNLOCK(POOL_LOCK, &mem_fail_lock);
	return ret;
}

/* config parser for global "tune.fail-alloc" */
static int mem_parse_global_fail_alloc(char **args, int section_type, struct proxy *curpx,
                                       struct proxy *defpx, const char *file, int line,
                                       char **err)
{
	if (too_many_args(1, args, err, NULL))
		return -1;
	mem_fail_rate = atoi(args[1]);
	if (mem_fail_rate < 0 || mem_fail_rate > 100) {
		memprintf(err, "'%s' expects a numeric value between 0 and 100.", args[0]);
		return -1;
	}
	return 0;
}
#endif
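
/* Illustrative configuration stanza exercising the parser above (it only has
 * an effect in builds compiled with DEBUG_FAIL_ALLOC):
 *
 *	global
 *		tune.fail-alloc 10   # make roughly 10% of pool allocations fail
 */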

/* register global config keywords */
static struct cfg_kw_list mem_cfg_kws = {ILH, {
#ifdef DEBUG_FAIL_ALLOC
	{ CFG_GLOBAL, "tune.fail-alloc", mem_parse_global_fail_alloc },
#endif
	{ 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &mem_cfg_kws);

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */