haproxy/src/task.c


/*
* Task management functions.
*
* Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <string.h>
#include <common/config.h>
#include <common/memory.h>
#include <common/mini-clist.h>
#include <common/standard.h>
#include <common/time.h>
#include <eb32sctree.h>
#include <eb32tree.h>
#include <proto/proxy.h>
#include <proto/stream.h>
#include <proto/task.h>
struct pool_head *pool_head_task;
/* This is the memory pool containing all the signal structs. These
 * structs are used to store each required signal between two tasks.
*/
struct pool_head *pool_head_notification;
unsigned int nb_tasks = 0;
unsigned long active_tasks_mask = 0; /* Mask of threads with active tasks */
unsigned int tasks_run_queue = 0;
unsigned int tasks_run_queue_cur = 0; /* copy of the run queue size */
unsigned int nb_tasks_cur = 0; /* copy of the tasks count */
unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */
THREAD_LOCAL struct task *curr_task = NULL; /* task currently running or NULL */
__decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) rq_lock); /* spin lock related to run queue */
__decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) wq_lock); /* spin lock related to wait queue */
static struct eb_root timers; /* sorted timers tree */
static struct eb_root rqueue; /* tree constituting the run queue */
static unsigned int rqueue_ticks; /* insertion count */
/* Puts the task <t> in run queue at a position depending on t->nice. <t> is
 * returned. The nice value assigns boosts in 1/32 steps of the run queue size. A
* nice value of -1024 sets the task to -tasks_run_queue*32, while a nice value
* of 1024 sets the task to tasks_run_queue*32. The state flags are cleared, so
* the caller will have to set its flags after this call.
* The task must not already be in the run queue. If unsure, use the safer
* task_wakeup() function.
*/
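/* Worked example for the boost above (the figures are assumptions chosen
 * for illustration only): with tasks_run_queue == 1000, a task with
 * nice == +512 gets 1000 * 512 / 32 = 16000 added to its key, delaying
 * it, while nice == -512 subtracts 16000 and lets it run earlier.
 */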
struct task *__task_wakeup(struct task *t)
{
tasks_run_queue++;
active_tasks_mask |= t->thread_mask;
t->rq.key = ++rqueue_ticks;
if (likely(t->nice)) {
int offset;
niced_tasks++;
if (likely(t->nice > 0))
offset = (unsigned)((tasks_run_queue * (unsigned int)t->nice) / 32U);
else
offset = -(unsigned)((tasks_run_queue * (unsigned int)-t->nice) / 32U);
t->rq.key += offset;
}
/* Reset the state flags to the pending ones.
 * Note: __task_wakeup() must not be called
 * if the task is running.
*/
t->state = t->pending_state;
eb32sc_insert(&rqueue, &t->rq, t->thread_mask);
return t;
}
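/* Typical callers go through the task_wakeup() inline wrapper rather than
 * calling __task_wakeup() directly; a minimal sketch (the wake-up reason
 * flag below is only an example value):
 *
 *     task_wakeup(t, TASK_WOKEN_MSG);
 */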
/*
* __task_queue()
*
* Inserts a task into the wait queue at the position given by its expiration
* date. It does not matter if the task was already in the wait queue or not,
* as it will be unlinked. The task must not have an infinite expiration timer.
* Last, tasks must not be queued further than the end of the tree, which is
* between <now_ms> and <now_ms> + 2^31 ms (now+24days in 32bit).
*
* This function should not be used directly, it is meant to be called by the
* inline version of task_queue() which performs a few cheap preliminary tests
* before deciding to call __task_queue().
*/
void __task_queue(struct task *task)
{
if (likely(task_in_wq(task)))
__task_unlink_wq(task);
/* the task is not in the queue now */
task->wq.key = task->expire;
#ifdef DEBUG_CHECK_INVALID_EXPIRATION_DATES
if (tick_is_lt(task->wq.key, now_ms))
/* we're queuing too far away or in the past (most likely) */
return;
#endif
eb32_insert(&timers, &task->wq);
return;
}
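/* Minimal usage sketch for the wrapper path (the 5000 ms timeout is an
 * assumed example; tick_add() and MS_TO_TICKS() are the usual tick
 * helpers):
 *
 *     t->expire = tick_add(now_ms, MS_TO_TICKS(5000));
 *     task_queue(t);    (cheap inline checks, then __task_queue() if needed)
 */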
/*
 * Extracts all expired timers from the timer queue and wakes up all the
 * associated tasks. Returns the date of the next event (or eternity).
*/
int wake_expired_tasks()
{
struct task *task;
struct eb32_node *eb;
int ret = TICK_ETERNITY;
while (1) {
HA_SPIN_LOCK(TASK_WQ_LOCK, &wq_lock);
lookup_next:
eb = eb32_lookup_ge(&timers, now_ms - TIMER_LOOK_BACK);
if (!eb) {
/* we might have reached the end of the tree, typically because
* <now_ms> is in the first half and we're first scanning the last
* half. Let's loop back to the beginning of the tree now.
*/
eb = eb32_first(&timers);
if (likely(!eb))
break;
}
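/* Illustration of the wrap handling above (values are assumptions): with
 * now_ms == 10 just after a 32-bit wrap, the lookup starts at
 * 10 - TIMER_LOOK_BACK, i.e. in the upper half of the key space, so a
 * timer queued just before the wrap (key near 0xFFFFFFF0) is still found
 * directly, while an expired timer keyed at 5 is only reached through
 * the eb32_first() fallback.
 */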
if (tick_is_lt(now_ms, eb->key)) {
/* timer not expired yet, revisit it later */
ret = eb->key;
break;
}
/* timer looks expired, detach it from the queue */
task = eb32_entry(eb, struct task, wq);
__task_unlink_wq(task);
/* It is possible that this task was left at an earlier place in the
* tree because a recent call to task_queue() has not moved it. This
* happens when the new expiration date is later than the old one.
* Since it is very unlikely that we reach a timeout anyway, it's a
* lot cheaper to proceed like this because we almost never update
* the tree. We may also find disabled expiration dates there. Since
* we have detached the task from the tree, we simply call task_queue
 * to take care of this. Note that we might occasionally requeue it at
 * the same place, just before <eb>; rather than adjusting <eb> we simply
 * restart the lookup (lookup_next) after requeuing so that nothing gets
 * skipped. We may also not requeue the task at all if its expiration
 * time is not set.
*/
if (!tick_is_expired(task->expire, now_ms)) {
if (tick_isset(task->expire))
__task_queue(task);
goto lookup_next;
}
task_wakeup(task, TASK_WOKEN_TIMER);
HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
}
HA_SPIN_UNLOCK(TASK_WQ_LOCK, &wq_lock);
return ret;
}
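/* Rough sketch of how the return value is meant to be consumed; the exact
 * sequence lives in the main polling loop and is only assumed here:
 *
 *     next = wake_expired_tasks();
 *     process_runnable_tasks();
 *     cur_poller.poll(&cur_poller, next);    (sleep at most until <next>)
 */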
/* The run queue is chronologically sorted in a tree. An insertion counter is
* used to assign a position to each task. This counter may be combined with
* other variables (eg: nice value) to set the final position in the tree. The
* counter may wrap without a problem, of course. We then limit the number of
* tasks processed to 200 in any case, so that general latency remains low and
* so that task positions have a chance to be considered.
 *
 * The next expiration date is not handled here; it is reported to the
 * caller by wake_expired_tasks() instead.
*/
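/* For illustration (assumed numbers): with 1000 runnable tasks scoped to
 * this thread, a single call processes at most 200 of them, in batches of
 * up to 16 picked under the run queue lock when several threads are used,
 * then leaves the thread's bit set in active_tasks_mask so the remaining
 * tasks are handled on the next invocation.
 */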
void process_runnable_tasks()
{
struct task *t;
int i;
int max_processed;
struct eb32sc_node *rq_next;
struct task *local_tasks[16];
int local_tasks_count;
int final_tasks_count;
tasks_run_queue_cur = tasks_run_queue; /* keep a copy for reporting */
nb_tasks_cur = nb_tasks;
max_processed = 200;
if (unlikely(global.nbthread <= 1)) {
/* when no lock is needed, this loop is much faster */
if (!(active_tasks_mask & tid_bit)) {
activity[tid].empty_rq++;
return;
}
active_tasks_mask &= ~tid_bit;
rq_next = eb32sc_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
while (1) {
if (!rq_next) {
/* we might have reached the end of the tree, typically because
* <rqueue_ticks> is in the first half and we're first scanning
* the last half. Let's loop back to the beginning of the tree now.
*/
rq_next = eb32sc_first(&rqueue, tid_bit);
if (!rq_next)
break;
}
t = eb32sc_entry(rq_next, struct task, rq);
rq_next = eb32sc_next(rq_next, tid_bit);
__task_unlink_rq(t);
t->state |= TASK_RUNNING;
t->pending_state = 0;
t->calls++;
curr_task = t;
/* This is an optimisation to help the processor's branch
* predictor take this most common call.
*/
if (likely(t->process == process_stream))
t = process_stream(t);
else
t = t->process(t);
curr_task = NULL;
if (likely(t != NULL)) {
t->state &= ~TASK_RUNNING;
/* If there is a pending state we have to wake up the task
 * immediately, otherwise we defer it to the wait queue.
 */
if (t->pending_state)
__task_wakeup(t);
else
task_queue(t);
}
max_processed--;
if (max_processed <= 0) {
active_tasks_mask |= tid_bit;
activity[tid].long_rq++;
break;
}
}
return;
}
HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
if (!(active_tasks_mask & tid_bit)) {
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
activity[tid].empty_rq++;
return;
}
active_tasks_mask &= ~tid_bit;
while (1) {
/* Note: this loop is one of the fastest code paths in
* the whole program. It should not be re-arranged
* without a good reason.
*/
/* we have to restart looking up after every batch */
rq_next = eb32sc_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
for (local_tasks_count = 0; local_tasks_count < 16; local_tasks_count++) {
if (unlikely(!rq_next)) {
/* either we just started or we reached the end
* of the tree, typically because <rqueue_ticks>
* is in the first half and we're first scanning
* the last half. Let's loop back to the beginning
* of the tree now.
*/
rq_next = eb32sc_first(&rqueue, tid_bit);
if (!rq_next)
break;
}
t = eb32sc_entry(rq_next, struct task, rq);
rq_next = eb32sc_next(rq_next, tid_bit);
/* detach the task from the queue */
__task_unlink_rq(t);
local_tasks[local_tasks_count] = t;
t->state |= TASK_RUNNING;
t->pending_state = 0;
t->calls++;
max_processed--;
}
if (!local_tasks_count)
break;
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
final_tasks_count = 0;
for (i = 0; i < local_tasks_count ; i++) {
t = local_tasks[i];
/* This is an optimisation to help the processor's branch
* predictor take this most common call.
*/
curr_task = t;
if (likely(t->process == process_stream))
t = process_stream(t);
else
t = t->process(t);
curr_task = NULL;
if (t)
local_tasks[final_tasks_count++] = t;
}
for (i = 0; i < final_tasks_count ; i++) {
t = local_tasks[i];
/* If there is a pending state we have to wake up the task
 * immediately, otherwise we defer it to the wait queue.
 */
HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
t->state &= ~TASK_RUNNING;
if (t->pending_state) {
__task_wakeup(t);
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
}
else {
/* we must never hold the RQ lock before the WQ lock */
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
task_queue(t);
}
}
HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
if (max_processed <= 0) {
active_tasks_mask |= tid_bit;
activity[tid].long_rq++;
break;
}
}
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
}
/* perform minimal initializations; returns 0 in case of error, 1 if OK. */
int init_task()
{
memset(&timers, 0, sizeof(timers));
memset(&rqueue, 0, sizeof(rqueue));
HA_SPIN_INIT(&wq_lock);
HA_SPIN_INIT(&rq_lock);
pool_head_task = create_pool("task", sizeof(struct task), MEM_F_SHARED);
if (!pool_head_task)
return 0;
pool_head_notification = create_pool("notification", sizeof(struct notification), MEM_F_SHARED);
if (!pool_head_notification)
return 0;
return 1;
}
/*
* Local variables:
* c-indent-level: 8
* c-basic-offset: 8
* End:
*/