DEBUG: stream: Display the currently running rule in stream dump

Since 2.5, when a stream's info is dumped, it is possible to print the
yielding rule, if any. This was useful to detect buggy rules causing
spinning loops. But it was not possible to detect a rule consuming too much
CPU per execution. We could see that a rule was executing in the backtrace
reported by the watchdog, but we were unable to spot the specific rule.

Thanks to this patch, it is now possible. When a dump is emitted, the
running or yielding rule is now displayed with its current state (RUNNING
or YIELDING).

This patch could be backported as far as 3.2 because it could be useful to
spot such issues.
This commit is contained in:
Christopher Faulet 2026-03-04 08:53:20 +01:00
parent 4939f18ff7
commit 9f1e9ee0ed
3 changed files with 29 additions and 18 deletions

View file

@ -2838,7 +2838,6 @@ static enum rule_result http_req_get_intercept_rule(struct proxy *px, struct lis
int forced = s->flags & SF_RULE_FYIELD;
rule = s->current_rule;
s->current_rule = NULL;
s->flags &= ~SF_RULE_FYIELD;
if (s->current_rule_list == rules || (def_rules && s->current_rule_list == def_rules)) {
if (forced)
@ -2854,11 +2853,12 @@ static enum rule_result http_req_get_intercept_rule(struct proxy *px, struct lis
list_for_each_entry(rule, s->current_rule_list, list) {
resume_rule:
s->current_rule = rule;
/* check if budget is exceeded and we need to continue on the next
* polling loop, unless we know that we cannot yield
*/
if (s->rules_bcount++ >= global.tune.max_rules_at_once && !(act_opts & ACT_OPT_FINAL)) {
s->current_rule = rule;
s->flags |= SF_RULE_FYIELD;
rule_ret = HTTP_RULE_RES_FYIELD;
task_wakeup(s->task, TASK_WOKEN_MSG);
@ -2890,7 +2890,6 @@ static enum rule_result http_req_get_intercept_rule(struct proxy *px, struct lis
s->last_entity.ptr = rule;
goto end;
case ACT_RET_YIELD:
s->current_rule = rule;
if (act_opts & ACT_OPT_FINAL) {
send_log(s->be, LOG_WARNING,
"Internal error: action yields while it is no long allowed "
@ -2980,13 +2979,17 @@ static enum rule_result http_req_get_intercept_rule(struct proxy *px, struct lis
if (def_rules && s->current_rule_list == def_rules) {
s->current_rule_list = rules;
s->current_rule = NULL;
goto restart;
}
end:
/* if the ruleset evaluation is finished reset the strict mode */
if (rule_ret != HTTP_RULE_RES_YIELD && rule_ret != HTTP_RULE_RES_FYIELD)
if (rule_ret != HTTP_RULE_RES_YIELD && rule_ret != HTTP_RULE_RES_FYIELD) {
s->current_rule_list = NULL;
s->current_rule = NULL;
txn->req.flags &= ~HTTP_MSGF_SOFT_RW;
}
/* we reached the end of the rules, nothing to report */
return rule_ret;
@ -3026,7 +3029,6 @@ static enum rule_result http_res_get_intercept_rule(struct proxy *px, struct lis
int forced = s->flags & SF_RULE_FYIELD;
rule = s->current_rule;
s->current_rule = NULL;
s->flags &= ~SF_RULE_FYIELD;
if (s->current_rule_list == rules || (def_rules && s->current_rule_list == def_rules)) {
if (forced)
@ -3043,11 +3045,12 @@ static enum rule_result http_res_get_intercept_rule(struct proxy *px, struct lis
list_for_each_entry(rule, s->current_rule_list, list) {
resume_rule:
s->current_rule = rule;
/* check if budget is exceeded and we need to continue on the next
* polling loop, unless we know that we cannot yield
*/
if (s->rules_bcount++ >= global.tune.max_rules_at_once && !(act_opts & ACT_OPT_FINAL)) {
s->current_rule = rule;
s->flags |= SF_RULE_FYIELD;
rule_ret = HTTP_RULE_RES_FYIELD;
task_wakeup(s->task, TASK_WOKEN_MSG);
@ -3079,7 +3082,6 @@ resume_execution:
s->last_entity.ptr = rule;
goto end;
case ACT_RET_YIELD:
s->current_rule = rule;
if (act_opts & ACT_OPT_FINAL) {
send_log(s->be, LOG_WARNING,
"Internal error: action yields while it is no long allowed "
@ -3159,13 +3161,17 @@ resume_execution:
if (def_rules && s->current_rule_list == def_rules) {
s->current_rule_list = rules;
s->current_rule = NULL;
goto restart;
}
end:
/* if the ruleset evaluation is finished reset the strict mode */
if (rule_ret != HTTP_RULE_RES_YIELD && rule_ret != HTTP_RULE_RES_FYIELD)
if (rule_ret != HTTP_RULE_RES_YIELD && rule_ret != HTTP_RULE_RES_FYIELD) {
s->current_rule_list = NULL;
s->current_rule = NULL;
txn->rsp.flags &= ~HTTP_MSGF_SOFT_RW;
}
/* we reached the end of the rules, nothing to report */
return rule_ret;

View file

@ -1117,10 +1117,8 @@ enum act_return process_use_service(struct act_rule *rule, struct proxy *px,
return ACT_RET_ERR;
/* Finish initialisation of the context. */
s->current_rule = rule;
if (appctx_init(appctx) == -1)
return ACT_RET_ERR;
s->current_rule = NULL;
}
else
appctx = __sc_appctx(s->scb);
@ -3842,7 +3840,9 @@ static void __strm_dump_to_buffer(struct buffer *buf, const struct show_sess_ctx
if (strm->current_rule_list && strm->current_rule) {
const struct act_rule *rule = strm->current_rule;
chunk_appendf(buf, "%s current_rule=\"%s\" [%s:%d]\n", pfx, rule->kw->kw, rule->conf.file, rule->conf.line);
chunk_appendf(buf, "%s current_rule=\"%s\" [%s:%d] (%s)\n",
pfx, rule->kw->kw, rule->conf.file, rule->conf.line,
(rule == strm->waiting_entity.ptr) ? "YIELDING" : "RUNNING");
}
}

View file

@ -155,11 +155,12 @@ int tcp_inspect_request(struct stream *s, struct channel *req, int an_bit)
restart:
list_for_each_entry(rule, s->current_rule_list, list) {
resume_rule:
s->current_rule = rule;
/* check if budget is exceeded and we need to continue on the next
* polling loop, unless we know that we cannot yield
*/
if (s->rules_bcount++ >= global.tune.max_rules_at_once && !(act_opts & ACT_OPT_FINAL)) {
s->current_rule = rule;
s->flags |= SF_RULE_FYIELD;
task_wakeup(s->task, TASK_WOKEN_MSG);
goto missing_data;
@ -169,8 +170,10 @@ int tcp_inspect_request(struct stream *s, struct channel *req, int an_bit)
enum acl_test_res ret = ACL_TEST_PASS;
ret = acl_exec_cond(rule->cond, s->be, sess, s, SMP_OPT_DIR_REQ | partial);
if (ret == ACL_TEST_MISS)
if (ret == ACL_TEST_MISS) {
s->current_rule = NULL;
goto missing_data;
}
ret = acl_pass(ret);
if (rule->cond->pol == ACL_COND_UNLESS)
@ -193,7 +196,6 @@ resume_execution:
s->last_entity.ptr = rule;
goto end;
case ACT_RET_YIELD:
s->current_rule = rule;
if (act_opts & ACT_OPT_FINAL) {
send_log(s->be, LOG_WARNING,
"Internal error: yield not allowed if the inspect-delay expired "
@ -241,6 +243,7 @@ resume_execution:
if (def_rules && s->current_rule_list == def_rules) {
s->current_rule_list = rules;
s->current_rule = NULL;
goto restart;
}
@ -354,7 +357,6 @@ int tcp_inspect_response(struct stream *s, struct channel *rep, int an_bit)
int forced = s->flags & SF_RULE_FYIELD;
rule = s->current_rule;
s->current_rule = NULL;
s->flags &= ~SF_RULE_FYIELD;
if (!(rep->flags & SC_FL_ERROR) && !(rep->flags & (CF_READ_TIMEOUT|CF_WRITE_TIMEOUT))) {
s->waiting_entity.type = STRM_ENTITY_NONE;
@ -371,11 +373,12 @@ int tcp_inspect_response(struct stream *s, struct channel *rep, int an_bit)
restart:
list_for_each_entry(rule, s->current_rule_list, list) {
resume_rule:
s->current_rule = rule;
/* check if budget is exceeded and we need to continue on the next
* polling loop, unless we know that we cannot yield
*/
if (s->rules_bcount++ >= global.tune.max_rules_at_once && !(act_opts & ACT_OPT_FINAL)) {
s->current_rule = rule;
s->flags |= SF_RULE_FYIELD;
task_wakeup(s->task, TASK_WOKEN_MSG);
goto missing_data;
@ -385,8 +388,10 @@ int tcp_inspect_response(struct stream *s, struct channel *rep, int an_bit)
enum acl_test_res ret = ACL_TEST_PASS;
ret = acl_exec_cond(rule->cond, s->be, sess, s, SMP_OPT_DIR_RES | partial);
if (ret == ACL_TEST_MISS)
if (ret == ACL_TEST_MISS) {
s->current_rule = NULL;
goto missing_data;
}
ret = acl_pass(ret);
if (rule->cond->pol == ACL_COND_UNLESS)
@ -409,7 +414,6 @@ resume_execution:
s->last_entity.ptr = rule;
goto end;
case ACT_RET_YIELD:
s->current_rule = rule;
if (act_opts & ACT_OPT_FINAL) {
send_log(s->be, LOG_WARNING,
"Internal error: yield not allowed if the inspect-delay expired "
@ -466,6 +470,7 @@ resume_execution:
if (def_rules && s->current_rule_list == def_rules) {
s->current_rule_list = rules;
s->current_rule = NULL;
goto restart;
}