mirror of
https://github.com/haproxy/haproxy.git
synced 2026-03-23 10:53:04 -04:00
MEDIUM: pools: respect pool alignment in allocations
Now pool_alloc_area() takes the alignment in argument and makes use of ha_aligned_malloc() instead of malloc(). pool_alloc_area_uaf() simply applies the alignment before returning the mapped area. The pool_free() function calls ha_aligned_free() so as to permit using a specific API for aligned alloc/free like mingw requires. Note that it's possible to see warnings about mismatched sizes during pool_free() since we know both the pool and the type. In pool_free, adding just this is sufficient to detect potential offenders: WARN_ON(__alignof__(*__ptr) > pool->align);
This commit is contained in:
parent
f0d0922aa1
commit
ef915e672a
2 changed files with 13 additions and 12 deletions
|
|
@ -25,6 +25,7 @@
|
||||||
#include <sys/mman.h>
|
#include <sys/mman.h>
|
||||||
#include <stdlib.h>
|
#include <stdlib.h>
|
||||||
#include <haproxy/api.h>
|
#include <haproxy/api.h>
|
||||||
|
#include <haproxy/tools.h>
|
||||||
|
|
||||||
|
|
||||||
/************* normal allocator *************/
|
/************* normal allocator *************/
|
||||||
|
|
@ -32,9 +33,9 @@
|
||||||
/* allocates an area of size <size> and returns it. The semantics are similar
|
/* allocates an area of size <size> and returns it. The semantics are similar
|
||||||
* to those of malloc().
|
* to those of malloc().
|
||||||
*/
|
*/
|
||||||
static forceinline void *pool_alloc_area(size_t size)
|
static forceinline void *pool_alloc_area(size_t size, size_t align)
|
||||||
{
|
{
|
||||||
return malloc(size);
|
return ha_aligned_alloc(align, size);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
|
/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
|
||||||
|
|
@ -43,8 +44,7 @@ static forceinline void *pool_alloc_area(size_t size)
|
||||||
*/
|
*/
|
||||||
static forceinline void pool_free_area(void *area, size_t __maybe_unused size)
|
static forceinline void pool_free_area(void *area, size_t __maybe_unused size)
|
||||||
{
|
{
|
||||||
will_free(area, size);
|
ha_aligned_free_size(area, size);
|
||||||
free(area);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/************* use-after-free allocator *************/
|
/************* use-after-free allocator *************/
|
||||||
|
|
@ -52,14 +52,15 @@ static forceinline void pool_free_area(void *area, size_t __maybe_unused size)
|
||||||
/* allocates an area of size <size> and returns it. The semantics are similar
|
/* allocates an area of size <size> and returns it. The semantics are similar
|
||||||
* to those of malloc(). However the allocation is rounded up to 4kB so that a
|
* to those of malloc(). However the allocation is rounded up to 4kB so that a
|
||||||
* full page is allocated. This ensures the object can be freed alone so that
|
* full page is allocated. This ensures the object can be freed alone so that
|
||||||
* future dereferences are easily detected. The returned object is always
|
* future dereferences are easily detected. The returned object is always at
|
||||||
* 16-bytes aligned to avoid issues with unaligned structure objects. In case
|
* least 16-bytes aligned to avoid issues with unaligned structure objects, and
|
||||||
* some padding is added, the area's start address is copied at the end of the
|
* in any case, is always at least aligned as required by the pool, though no
|
||||||
* padding to help detect underflows.
|
* more than 4096. In case some padding is added, the area's start address is
|
||||||
|
* copied at the end of the padding to help detect underflows.
|
||||||
*/
|
*/
|
||||||
static inline void *pool_alloc_area_uaf(size_t size)
|
static inline void *pool_alloc_area_uaf(size_t size, size_t align)
|
||||||
{
|
{
|
||||||
size_t pad = (4096 - size) & 0xFF0;
|
size_t pad = (4096 - size) & 0xFF0 & -align;
|
||||||
void *ret;
|
void *ret;
|
||||||
|
|
||||||
ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
|
ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
|
||||||
|
|
|
||||||
|
|
@ -490,9 +490,9 @@ void *pool_get_from_os_noinc(struct pool_head *pool)
|
||||||
void *ptr;
|
void *ptr;
|
||||||
|
|
||||||
if ((pool_debugging & POOL_DBG_UAF) || (pool->flags & MEM_F_UAF))
|
if ((pool_debugging & POOL_DBG_UAF) || (pool->flags & MEM_F_UAF))
|
||||||
ptr = pool_alloc_area_uaf(pool->alloc_sz);
|
ptr = pool_alloc_area_uaf(pool->alloc_sz, pool->align);
|
||||||
else
|
else
|
||||||
ptr = pool_alloc_area(pool->alloc_sz);
|
ptr = pool_alloc_area(pool->alloc_sz, pool->align);
|
||||||
if (ptr)
|
if (ptr)
|
||||||
return ptr;
|
return ptr;
|
||||||
_HA_ATOMIC_INC(&pool->buckets[pool_tbucket()].failed);
|
_HA_ATOMIC_INC(&pool->buckets[pool_tbucket()].failed);
|
||||||
|
|
|
||||||
Loading…
Reference in a new issue