MEDIUM: sched: do not run a same task multiple times in series

There's always a risk that some tasks run multiple times if they wake
each other up. Now we include the loop counter in the task struct and
stop processing the queue it's in when meeting a task that has already
run. We only keep 16 bits since that's all that remains free in the
task common part, so from time to time (once every 65536 loops) it will
be possible to wrongly match a task as having already run and to stop
evaluating its queue, but this is rare enough not to matter, because it
will be resolved on the next iteration.
This commit is contained in:
Willy Tarreau 2026-03-21 14:47:09 +00:00 committed by Willy Tarreau
parent 8f6cb8f452
commit 7d40b3134a
2 changed files with 29 additions and 19 deletions

View file

@@ -130,20 +130,22 @@ struct notification {
* on return.
*/
#define TASK_COMMON \
struct { \
unsigned int state; /* task state : bitfield of TASK_ */ \
int tid; /* tid of task/tasklet. <0 = local for tasklet, unbound for task */ \
struct task *(*process)(struct task *t, void *ctx, unsigned int state); /* the function which processes the task */ \
void *context; /* the task's context */ \
const struct ha_caller *caller; /* call place of last wakeup(); 0 on init, -1 on free */ \
uint32_t wake_date; /* date of the last task wakeup */ \
unsigned int calls; /* number of times process was called */ \
TASK_DEBUG_STORAGE; \
}
unsigned int state; /* task state : bitfield of TASK_ */ \
int tid; /* tid of task/tasklet. <0 = local for tasklet, unbound for task */ \
struct task *(*process)(struct task *t, void *ctx, unsigned int state); /* the function which processes the task */ \
void *context; /* the task's context */ \
const struct ha_caller *caller; /* call place of last wakeup(); 0 on init, -1 on free */ \
uint32_t wake_date; /* date of the last task wakeup */ \
unsigned int calls; /* number of times process was called */ \
TASK_DEBUG_STORAGE; \
short last_run; /* 16-bit snapshot of the thread's loop counter at last run */
/* a 16- or 48-bit hole remains here and is used by task */
/* The base for all tasks */
struct task {
TASK_COMMON; /* must be at the beginning! */
short nice; /* task prio from -1024 to +1024 */
int expire; /* next expiration date for this task, in ticks */
struct eb32_node rq; /* ebtree node used to hold the task in the run queue */
/* WARNING: the struct task is often aliased as a struct tasklet when
* it is NOT in the run queue. The tasklet has its struct list here
@@ -151,14 +153,12 @@ struct task {
* ever reorder these fields without taking this into account!
*/
struct eb32_node wq; /* ebtree node used to hold the task in the wait queue */
int expire; /* next expiration date for this task, in ticks */
short nice; /* task prio from -1024 to +1024 */
/* 16-bit hole here */
};
/* lightweight tasks, without priority, mainly used for I/Os */
struct tasklet {
TASK_COMMON; /* must be at the beginning! */
/* 48-bit hole here */
struct list list;
/* WARNING: the struct task is often aliased as a struct tasklet when
* it is not in the run queue. The task has its struct rq here where

View file

@@ -563,14 +563,23 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
continue;
}
budgets[queue]--;
activity[tid].ctxsw++;
t = (struct task *)LIST_ELEM(tl_queues[queue].n, struct tasklet *, list);
/* check if this task has already run during this loop */
if ((uint16_t)t->last_run == (uint16_t)activity[tid].loops) {
activity[tid].ctr1++;
budget_mask &= ~(1 << queue);
queue++;
continue;
}
t->last_run = activity[tid].loops;
ctx = t->context;
process = t->process;
t->calls++;
budgets[queue]--;
activity[tid].ctxsw++;
th_ctx->lock_wait_total = 0;
th_ctx->mem_wait_total = 0;
th_ctx->locked_total = 0;
@@ -734,7 +743,7 @@ void process_runnable_tasks()
int max_processed;
int lpicked, gpicked;
int heavy_queued = 0;
int budget;
int budget, done;
_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_STUCK); // this thread is still running
@@ -904,10 +913,11 @@ void process_runnable_tasks()
}
/* execute tasklets in each queue */
max_processed -= run_tasks_from_lists(max);
done = run_tasks_from_lists(max);
max_processed -= done;
/* some tasks may have woken other ones up */
if (max_processed > 0 && thread_has_tasks())
if (done && max_processed > 0 && thread_has_tasks())
goto not_done_yet;
leave: