CLEANUP: more conversions and cleanups for alignment

- Convert additional cases to use the automatic alignment feature of
  the THREAD_ALIGN(ED) macros. This includes some less obviously
  correct cases where it seems the intent was to align only in the
  USE_THREAD case but the thread-specific macros were not being used.
- Also move some alignment requirements to the structure definition
  instead of carrying them on each variable declaration.
Maxime Henrion, 2025-12-09 11:26:02 -05:00, committed by Olivier Houchard
parent bc8e14ec23
commit 6eedd0d485
8 changed files with 17 additions and 17 deletions
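
For context, the "automatic alignment feature" referenced above means that THREAD_ALIGNED() with no argument falls back to a cache-line (64-byte) default, while non-threaded builds get no padding at all. A minimal sketch of such a macro, assuming __VA_OPT__ support (GCC 8+/C2x) — this is an illustration, not haproxy's actual definition:

/* Sketch only, not the actual haproxy definition. With no argument the
 * alignment defaults to 64 (one cache line); with an argument, e.g.
 * THREAD_ALIGNED(128), the given value is used instead. Without
 * USE_THREAD the macro expands to nothing, so single-threaded builds
 * stay unpadded. */
#ifdef USE_THREAD
# define THREAD_ALIGNED(...) \
	__attribute__((aligned(64 __VA_OPT__(* 0 +) __VA_ARGS__)))
#else
# define THREAD_ALIGNED(...)
#endif

/* usage: the whole struct is rounded and aligned to 64 bytes when threaded */
struct example_ctx {
	unsigned int counter;
} THREAD_ALIGNED();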


@@ -125,8 +125,8 @@ struct activity {
 	unsigned int ctr2; // general purposee debug counter
 #endif
 	char __pad[0]; // unused except to check remaining room
-	char __end[0] __attribute__((aligned(64))); // align size to 64.
-};
+	char __end[0] THREAD_ALIGNED();
+} THREAD_ALIGNED();

 /* 256 entries for callers * callees should be highly sufficient (~45 seen usually) */
 #define SCHED_ACT_HASH_BITS 8

@@ -146,7 +146,7 @@ struct sched_activity {
 	uint64_t lkw_time; /* lock waiting time */
 	uint64_t lkd_time; /* locked time */
 	uint64_t mem_time; /* memory ops wait time */
-};
+} THREAD_ALIGNED();

 #endif /* _HAPROXY_ACTIVITY_T_H */
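
The `__end[0]` pattern converted above is the classic GNU-C trick for rounding a structure's size up to a full cache line: the zero-length array occupies no space, but its alignment raises the struct's alignment, and sizeof must be a multiple of that. A small standalone demo (GNU C; struct and field names are hypothetical):

#include <stdio.h>

struct padded {
	unsigned int ctr;                           /* 4 bytes of payload */
	char __pad[0];                              /* marks remaining room */
	char __end[0] __attribute__((aligned(64))); /* forces sizeof up to 64 */
};

int main(void)
{
	/* prints 64: the zero-size member costs nothing but rounds the size */
	printf("sizeof(struct padded) = %zu\n", sizeof(struct padded));
	return 0;
}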


@@ -231,7 +231,7 @@ const char *listener_state_str(const struct listener *l);
 struct task *accept_queue_process(struct task *t, void *context, unsigned int state);
 struct task *manage_global_listener_queue(struct task *t, void *context, unsigned int state);
-extern struct accept_queue_ring accept_queue_rings[MAX_THREADS] __attribute__((aligned(64)));
+extern struct accept_queue_ring accept_queue_rings[MAX_THREADS] THREAD_ALIGNED();
 extern const char* li_status_st[LI_STATE_COUNT];
 enum li_status get_li_status(struct listener *l);


@@ -156,8 +156,8 @@ struct pool_head {
 		unsigned int failed; /* failed allocations (indexed by hash of TID) */
 	} buckets[CONFIG_HAP_POOL_BUCKETS];
-	struct pool_cache_head cache[MAX_THREADS] THREAD_ALIGNED(64); /* pool caches */
-} __attribute__((aligned(64)));
+	struct pool_cache_head cache[MAX_THREADS] THREAD_ALIGNED(); /* pool caches */
+} THREAD_ALIGNED();

 #endif /* _HAPROXY_POOL_T_H */


@@ -72,7 +72,7 @@
 /* declare a self-initializing spinlock, aligned on a cache line */
 #define __decl_aligned_spinlock(lock) \
-	HA_SPINLOCK_T (lock) __attribute__((aligned(64))); \
+	HA_SPINLOCK_T (lock) THREAD_ALIGNED(); \
 	INITCALL1(STG_LOCK, ha_spin_init, &(lock))

 /* declare a self-initializing rwlock */

@@ -82,7 +82,7 @@
 /* declare a self-initializing rwlock, aligned on a cache line */
 #define __decl_aligned_rwlock(lock) \
-	HA_RWLOCK_T (lock) __attribute__((aligned(64))); \
+	HA_RWLOCK_T (lock) THREAD_ALIGNED(); \
 	INITCALL1(STG_LOCK, ha_rwlock_init, &(lock))

 #endif /* USE_THREAD */
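
Both macros keep each lock on its own cache line so that contended lock traffic does not false-share with neighbouring hot data. A standalone illustration of the same idea with a plain pthread mutex (names hypothetical, not haproxy code):

#include <pthread.h>
#include <stdio.h>

/* Without the alignment, `lock` could share a 64-byte line with
 * `hot_counter`, and every lock/unlock would bounce the counter's
 * cache line between cores (false sharing). */
static pthread_mutex_t lock __attribute__((aligned(64))) = PTHREAD_MUTEX_INITIALIZER;
static unsigned long hot_counter;

int main(void)
{
	printf("lock    at %p\n", (void *)&lock);
	printf("counter at %p\n", (void *)&hot_counter);
	return 0;
}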


@@ -86,7 +86,7 @@ struct tgroup_info {
 	/* pad to cache line (64B) */
 	char __pad[0]; /* unused except to check remaining room */
-	char __end[0] __attribute__((aligned(64)));
+	char __end[0] THREAD_ALIGNED();
 };

 /* This structure describes the group-specific context (e.g. active threads

@@ -103,7 +103,7 @@ struct tgroup_ctx {
 	/* pad to cache line (64B) */
 	char __pad[0]; /* unused except to check remaining room */
-	char __end[0] __attribute__((aligned(64)));
+	char __end[0] THREAD_ALIGNED();
 };

 /* This structure describes all the per-thread info we need. When threads are

@@ -124,7 +124,7 @@ struct thread_info {
 	/* pad to cache line (64B) */
 	char __pad[0]; /* unused except to check remaining room */
-	char __end[0] __attribute__((aligned(64)));
+	char __end[0] THREAD_ALIGNED();
 };

 /* This structure describes all the per-thread context we need. This is


@@ -60,10 +60,10 @@ uint64_t prof_mem_start_ns = 0;
 uint64_t prof_mem_stop_ns = 0;

 /* One struct per thread containing all collected measurements */
-struct activity activity[MAX_THREADS] __attribute__((aligned(64))) = { };
+struct activity activity[MAX_THREADS] = { };

 /* One struct per function pointer hash entry (SCHED_ACT_HASH_BUCKETS values, 0=collision) */
-struct sched_activity sched_activity[SCHED_ACT_HASH_BUCKETS] __attribute__((aligned(64))) = { };
+struct sched_activity sched_activity[SCHED_ACT_HASH_BUCKETS] = { };

 #ifdef USE_MEMORY_PROFILING

@@ -949,7 +949,7 @@ struct sched_activity *sched_activity_entry(struct sched_activity *array, const
 static int cli_io_handler_show_profiling(struct appctx *appctx)
 {
 	struct show_prof_ctx *ctx = appctx->svcctx;
-	struct sched_activity tmp_activity[SCHED_ACT_HASH_BUCKETS] __attribute__((aligned(64)));
+	struct sched_activity tmp_activity[SCHED_ACT_HASH_BUCKETS];
 #ifdef USE_MEMORY_PROFILING
 	struct memprof_stats tmp_memstats[MEMPROF_HASH_BUCKETS + 1];
 	unsigned long long tot_alloc_calls, tot_free_calls;

@@ -1329,7 +1329,7 @@ static int cli_parse_show_profiling(char **args, char *payload, struct appctx *a
  */
 static int cli_io_handler_show_tasks(struct appctx *appctx)
 {
-	struct sched_activity tmp_activity[SCHED_ACT_HASH_BUCKETS] __attribute__((aligned(64)));
+	struct sched_activity tmp_activity[SCHED_ACT_HASH_BUCKETS];
 	struct buffer *name_buffer = get_trash_chunk();
 	struct sched_activity *entry;
 	const struct tasklet *tl;
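
These per-variable attributes can be dropped because struct activity and struct sched_activity are now THREAD_ALIGNED() at the type level (see the header diff above), so arrays and locals of those types inherit the alignment automatically — which also matches the commit's intent of aligning only when USE_THREAD is set. A minimal demonstration of that inheritance (type and field names hypothetical):

#include <stdio.h>

/* alignment on the type: every object of this type, including array
 * elements, is 64-byte aligned with no attribute at the declaration */
struct per_thread {
	unsigned long long calls;
} __attribute__((aligned(64)));

static struct per_thread stats[4]; /* no aligned(64) needed here anymore */

int main(void)
{
	/* prints 64 and 64: the type carries the alignment, and sizeof is
	 * rounded up, so consecutive elements sit on distinct cache lines */
	printf("alignof = %zu, element stride = %zu\n",
	       _Alignof(struct per_thread),
	       (size_t)((char *)&stats[1] - (char *)&stats[0]));
	return 0;
}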


@@ -66,7 +66,7 @@ const char* li_status_st[LI_STATE_COUNT] = {
 #if defined(USE_THREAD)

-struct accept_queue_ring accept_queue_rings[MAX_THREADS] __attribute__((aligned(64))) = { };
+struct accept_queue_ring accept_queue_rings[MAX_THREADS] THREAD_ALIGNED();

 /* dequeue and process a pending connection from the local accept queue (single
  * consumer). Returns the accepted connection or NULL if none was found.


@@ -44,7 +44,7 @@ __decl_aligned_rwlock(wq_lock);

 /* used to detect if the scheduler looks stuck (for warnings) */
 static struct {
-	int sched_stuck ALIGNED(64);
+	int sched_stuck THREAD_ALIGNED();
 } sched_ctx[MAX_THREADS];

 /* Flags the task <t> for immediate destruction and puts it into its first