MINOR: tinfo: store the number of committed extra streams in the tgroup

In order to be able to enforce global streams limitations, we'll first
have to be able to account how many streams we promised to serve via
frontend muxes. We'll always need to support at least one stream, which
is why here we're only counting extra streams beyond the first one. It
also has the benefit of leaving H1 out of this, and of saving it from updating
a variable. Also, in order to avoid a significant update cost, we're storing
this value per thread group. For now only H2 is implemented, but QUIC
should follow shortly and should only count bidirectional streams.
This commit is contained in:
Willy Tarreau 2026-05-09 16:19:54 +02:00
parent 2a1599297b
commit 7f17512d18
2 changed files with 20 additions and 0 deletions

View file

@ -138,6 +138,7 @@ struct tgroup_ctx {
struct eb_root timers; /* wait queue (sorted timers tree, global, accessed under wq_lock) */
uint niced_tasks; /* number of niced tasks in this group's run queues */
uint committed_extra_streams; /* sum of extra front streams committed by muxes in this group */
/* pad to cache line (64B) */
char __pad[0]; /* unused except to check remaining room */

View file

@ -1446,6 +1446,10 @@ static int h2_init(struct connection *conn, struct proxy *prx, struct session *s
h2c->st0 = H2_CS_PREFACE;
h2c->conn = conn;
h2c->streams_limit = h2c->streams_hard_limit = h2c_max_concurrent_streams(h2c);
if (!(h2c->flags & H2_CF_IS_BACK) && h2c->streams_hard_limit > 1)
_HA_ATOMIC_ADD(&tg_ctx->committed_extra_streams, h2c->streams_hard_limit - 1);
nb_rxbufs = (h2c->flags & H2_CF_IS_BACK) ? h2_be_rxbuf : h2_fe_rxbuf;
nb_rxbufs = (nb_rxbufs + global.tune.bufsize - 9 - 1) / (global.tune.bufsize - 9);
nb_rxbufs = MAX(nb_rxbufs, h2c->streams_limit);
@ -1513,6 +1517,8 @@ static int h2_init(struct connection *conn, struct proxy *prx, struct session *s
TRACE_LEAVE(H2_EV_H2C_NEW, conn);
return 0;
fail_stream:
if (!(h2c->flags & H2_CF_IS_BACK) && h2c->streams_hard_limit > 1)
_HA_ATOMIC_SUB(&tg_ctx->committed_extra_streams, h2c->streams_hard_limit - 1);
hpack_dht_free(h2c->ddht);
fail:
task_destroy(t);
@ -1591,6 +1597,9 @@ static void h2_release(struct h2c *h2c)
if (!conn || !conn_is_reverse(conn))
HA_ATOMIC_DEC(&h2c->px_counters->open_conns);
if (!(h2c->flags & H2_CF_IS_BACK) && h2c->streams_hard_limit > 1)
_HA_ATOMIC_SUB(&tg_ctx->committed_extra_streams, h2c->streams_hard_limit - 1);
pool_free(pool_head_h2_rx_bufs, h2c->shared_rx_bufs);
pool_free(pool_head_h2c, h2c);
@ -4175,6 +4184,12 @@ static int h2_conn_reverse(struct h2c *h2c)
struct server *srv = __objt_server(h2c->conn->target);
struct proxy *prx = srv->proxy;
/* the connection was accounted as frontend streams before
* reversal, we must undo that accounting now.
*/
if (h2c->streams_hard_limit > 1)
_HA_ATOMIC_SUB(&tg_ctx->committed_extra_streams, h2c->streams_hard_limit - 1);
h2c->flags |= H2_CF_IS_BACK;
h2c->shut_timeout = h2c->timeout = prx->timeout.server;
@ -4193,6 +4208,10 @@ static int h2_conn_reverse(struct h2c *h2c)
struct listener *l = __objt_listener(h2c->conn->target);
struct proxy *prx = l->bind_conf->frontend;
/* backend connections becoming frontend need accounting. */
if (h2c->streams_hard_limit > 1)
_HA_ATOMIC_ADD(&tg_ctx->committed_extra_streams, h2c->streams_hard_limit - 1);
h2c->flags &= ~H2_CF_IS_BACK;
/* Must manually init max_id so that GOAWAY can be emitted. */
h2c->max_id = 0;