2015-04-03 07:53:24 -04:00
|
|
|
/*
|
2015-04-04 12:50:31 -04:00
|
|
|
* Session management functions.
|
2015-04-03 07:53:24 -04:00
|
|
|
*
|
2015-04-04 12:50:31 -04:00
|
|
|
* Copyright 2000-2015 Willy Tarreau <w@1wt.eu>
|
2015-04-03 07:53:24 -04:00
|
|
|
*
|
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
|
* as published by the Free Software Foundation; either version
|
|
|
|
|
* 2 of the License, or (at your option) any later version.
|
|
|
|
|
*
|
|
|
|
|
*/
|
|
|
|
|
|
2023-05-12 11:13:46 -04:00
|
|
|
#include <haproxy/ssl_sock-t.h>
|
|
|
|
|
|
2020-05-27 06:58:42 -04:00
|
|
|
#include <haproxy/api.h>
|
2020-06-04 12:02:10 -04:00
|
|
|
#include <haproxy/connection.h>
|
2020-06-04 11:05:57 -04:00
|
|
|
#include <haproxy/global.h>
|
2020-06-02 13:11:26 -04:00
|
|
|
#include <haproxy/http.h>
|
2020-06-04 08:58:24 -04:00
|
|
|
#include <haproxy/listener.h>
|
2020-06-04 16:01:04 -04:00
|
|
|
#include <haproxy/log.h>
|
2020-06-02 03:38:52 -04:00
|
|
|
#include <haproxy/pool.h>
|
2024-10-24 08:20:01 -04:00
|
|
|
#include <haproxy/protocol.h>
|
2020-06-04 16:29:18 -04:00
|
|
|
#include <haproxy/proxy.h>
|
2020-06-04 12:58:52 -04:00
|
|
|
#include <haproxy/session.h>
|
2020-06-04 11:42:48 -04:00
|
|
|
#include <haproxy/tcp_rules.h>
|
2025-08-08 09:54:21 -04:00
|
|
|
#include <haproxy/thread.h>
|
2021-05-08 07:03:04 -04:00
|
|
|
#include <haproxy/tools.h>
|
2024-08-06 10:12:11 -04:00
|
|
|
#include <haproxy/trace.h>
|
2020-06-04 10:25:31 -04:00
|
|
|
#include <haproxy/vars.h>
|
2015-04-03 07:53:24 -04:00
|
|
|
|
2015-04-04 10:31:16 -04:00
|
|
|
|
MEDIUM: tree-wide: replace most DECLARE_POOL with DECLARE_TYPED_POOL
This will make the pools size and alignment automatically inherit
the type declaration. It was done like this:
sed -i -e 's:DECLARE_POOL(\([^,]*,[^,]*,\s*\)sizeof(\([^)]*\))):DECLARE_TYPED_POOL(\1\2):g' $(git grep -lw DECLARE_POOL src addons)
sed -i -e 's:DECLARE_STATIC_POOL(\([^,]*,[^,]*,\s*\)sizeof(\([^)]*\))):DECLARE_STATIC_TYPED_POOL(\1\2):g' $(git grep -lw DECLARE_STATIC_POOL src addons)
81 replacements were made. The only remaining ones are those which set
their own size without depending on a structure. The few ones with an
extra size were manually handled.
It also means that the requested alignments are now checked against the
type's. Given that none is specified for now, no issue is reported.
It was verified with "show pools detailed" that the definitions are
exactly the same, and that the binaries are similar.
2025-08-06 10:43:27 -04:00
|
|
|
/* Pool of struct session objects; size/alignment are inherited from the type. */
DECLARE_TYPED_POOL(pool_head_session, "session", struct session);

/* Pool of per-session private connection list nodes (see sess->priv_conns). */
DECLARE_TYPED_POOL(pool_head_sess_priv_conns, "session priv conns list", struct sess_priv_conns);

/* Forward declaration: finishes session setup once the connection handshake
 * is done (defined elsewhere, used by session_accept_fd() below).
 */
int conn_complete_session(struct connection *conn);
|
2015-04-04 12:50:31 -04:00
|
|
|
|
2024-08-06 10:12:11 -04:00
|
|
|
/* The known trace events of the "session" trace source. Each event must own
 * a distinct bit so that enabling/filtering one event never matches another.
 */
static const struct trace_event sess_trace_events[] = {
#define SESS_EV_NEW (1ULL << 0)
	{ .mask = SESS_EV_NEW, .name = "sess_new", .desc = "new session creation" },
#define SESS_EV_END (1ULL << 1)
	{ .mask = SESS_EV_END, .name = "sess_end", .desc = "session termination" },
#define SESS_EV_ERR (1ULL << 2) /* BUG FIX: was (1ULL << 1), colliding with SESS_EV_END */
	{ .mask = SESS_EV_ERR, .name = "sess_err", .desc = "session error" },
	{ }
};
|
|
|
|
|
|
|
|
|
|
/* Descriptions of the optional "lock-on" arguments for session traces. Only
 * the first slot (the session itself) is meaningful here; the remaining ones
 * are intentionally left empty.
 */
static const struct name_desc sess_trace_lockon_args[4] = {
	/* arg1 */ { /* already used by the session */ },
	/* arg2 */ { },
	/* arg3 */ { },
	/* arg4 */ { }
};
|
|
|
|
|
|
|
|
|
|
/* Registration descriptor of the "session" trace source: binds the event
 * table and lock-on arguments above, and reports every event by default.
 */
static struct trace_source trace_sess __read_mostly = {
	.name = IST("session"),
	.desc = "client session management",
	.arg_def = TRC_ARG1_SESS, // TRACE()'s first argument is always a session
	.known_events = sess_trace_events,
	.lockon_args = sess_trace_lockon_args,
	.report_events = ~0, // report everything by default
};
|
|
|
|
|
|
|
|
|
|
/* TRACE() calls in this file implicitly use the "session" trace source. */
#define TRACE_SOURCE &trace_sess
INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
|
|
|
|
|
|
2015-04-04 18:38:48 -04:00
|
|
|
/* Create a new session and assign it to frontend <fe>, listener <li>,
 * origin <origin>, set the current date and clear the stick counters pointers.
 * Returns the session upon success or NULL. The session may be released using
 * session_free(). Note: <li> may be NULL.
 */
struct session *session_new(struct proxy *fe, struct listener *li, enum obj_type *origin)
{
	struct session *sess;

	TRACE_ENTER(SESS_EV_NEW);

	/* pool memory is not zeroed: every field read later must be set here */
	sess = pool_alloc(pool_head_session);
	if (sess) {
		sess->listener = li;
		sess->fe = fe;
		sess->origin = origin;
		sess->accept_date = date; /* user-visible date for logging */
		sess->accept_ts = now_ns; /* corrected date for internal use */
		sess->stkctr = NULL;
		if (pool_head_stk_ctr) {
			/* array of nb_stk_ctr stick counters, zeroed so that
			 * stkctr_entry() reports "not tracked" for each slot
			 */
			sess->stkctr = pool_alloc(pool_head_stk_ctr);
			if (!sess->stkctr)
				goto out_fail_alloc;
			memset(sess->stkctr, 0, sizeof(sess->stkctr[0]) * global.tune.nb_stk_ctr);
		}
		vars_init_head(&sess->vars, SCOPE_SESS);
		sess->task = NULL;
		sess->t_handshake = -1; /* handshake not done yet */
		sess->t_idle = -1;
		_HA_ATOMIC_INC(&totalconn);
		_HA_ATOMIC_INC(&jobs);
		LIST_INIT(&sess->priv_conns);
		sess->idle_conns = 0;
		sess->flags = SESS_FL_NONE;
		sess->src = NULL;
		sess->dst = NULL;
		/* cache the thread-group shared counters for the frontend and,
		 * when present, the listener (li may be NULL)
		 */
		sess->fe_tgcounters = sess->fe->fe_counters.shared.tg[tgid - 1];
		if (sess->listener && sess->listener->counters)
			sess->li_tgcounters = sess->listener->counters->shared.tg[tgid - 1];
		TRACE_STATE("new session", SESS_EV_NEW, sess);
	}
	TRACE_LEAVE(SESS_EV_NEW);
	return sess;

	/* only reached when the stick-counters allocation failed: release the
	 * session itself and report the failure to the caller
	 */
 out_fail_alloc:
	pool_free(pool_head_session, sess);
	TRACE_DEVEL("leaving in error", SESS_EV_NEW|SESS_EV_END|SESS_EV_ERR);
	return NULL;
}
|
|
|
|
|
|
2015-04-04 09:54:03 -04:00
|
|
|
/* Release session <sess> and every resource it owns: the listener slot when
 * SESS_FL_RELEASE_LI is set, stick counters, session variables, the origin
 * connection's mux, and all private backend connections. The jobs counter is
 * decremented at the end. Private connections are detached under the idle
 * conns lock but released outside of it.
 */
void session_free(struct session *sess)
{
	struct connection *conn, *conn_back;
	struct sess_priv_conns *pconns, *pconns_back;
	struct list conn_tmp_list = LIST_HEAD_INIT(conn_tmp_list);

	TRACE_ENTER(SESS_EV_END);
	TRACE_STATE("releasing session", SESS_EV_END, sess);

	if (sess->flags & SESS_FL_RELEASE_LI) {
		/* listener must be set for session used to account FE conns. */
		BUG_ON(!sess->listener);
		listener_release(sess->listener);
	}

	session_store_counters(sess);
	/* sess->stkctr may be NULL; pool_free() on NULL is a no-op */
	pool_free(pool_head_stk_ctr, sess->stkctr);
	vars_prune_per_sess(&sess->vars);
	/* if the session originates from a connection with an attached mux,
	 * tear the mux down through its own destroy callback
	 */
	conn = objt_conn(sess->origin);
	if (conn != NULL && conn->mux)
		conn->mux->destroy(conn->ctx);

	/* Detach all private backend connections under the idle-conns lock.
	 * They are moved to a temporary list so that conn_release() can be
	 * called later without holding the lock.
	 */
	HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	list_for_each_entry_safe(pconns, pconns_back, &sess->priv_conns, sess_el) {
		list_for_each_entry_safe(conn, conn_back, &pconns->conn_list, sess_el) {
			LIST_DEL_INIT(&conn->sess_el);
			/* break the back-pointer: the session is going away */
			conn->owner = NULL;
			conn->flags &= ~CO_FL_SESS_IDLE;
			LIST_APPEND(&conn_tmp_list, &conn->sess_el);
		}
		MT_LIST_DELETE(&pconns->srv_el);
		pool_free(pool_head_sess_priv_conns, pconns);
	}
	HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

	/* Release connections outside of idle lock. */
	while (!LIST_ISEMPTY(&conn_tmp_list)) {
		conn = LIST_ELEM(conn_tmp_list.n, struct connection *, sess_el);
		/* Del-init sess_el to prevent session_unown_conn() via conn_backend_deinit(). */
		LIST_DEL_INIT(&conn->sess_el);
		conn_release(conn);
	}

	/* sockaddr_free() handles NULL pointers */
	sockaddr_free(&sess->src);
	sockaddr_free(&sess->dst);
	pool_free(pool_head_session, sess);
	_HA_ATOMIC_DEC(&jobs);

	TRACE_LEAVE(SESS_EV_END);
}
|
|
|
|
|
|
2017-10-08 05:26:30 -04:00
|
|
|
/* callback used from the connection/mux layer to notify that a connection is
 * going to be released.
 */
void conn_session_free(struct connection *conn)
{
	session_free(conn->owner);
	/* clear the owner so no later code path can dereference the just-freed
	 * session through this connection (this ordering is a deliberate
	 * use-after-free fix around session_unown_conn(); do not reorder)
	 */
	conn->owner = NULL;
}
|
|
|
|
|
|
2015-04-08 12:10:49 -04:00
|
|
|
/* count a new session to keep frontend, listener and track stats up to date */
|
|
|
|
|
static void session_count_new(struct session *sess)
|
|
|
|
|
{
|
|
|
|
|
struct stkctr *stkctr;
|
|
|
|
|
void *ptr;
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
proxy_inc_fe_sess_ctr(sess->listener, sess->fe);
|
|
|
|
|
|
2023-01-06 10:09:58 -05:00
|
|
|
for (i = 0; i < global.tune.nb_stk_ctr; i++) {
|
2015-04-08 12:10:49 -04:00
|
|
|
stkctr = &sess->stkctr[i];
|
|
|
|
|
if (!stkctr_entry(stkctr))
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_SESS_CNT);
|
|
|
|
|
if (ptr)
|
2021-06-30 11:18:28 -04:00
|
|
|
HA_ATOMIC_INC(&stktable_data_cast(ptr, std_t_uint));
|
2015-04-08 12:10:49 -04:00
|
|
|
|
|
|
|
|
ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_SESS_RATE);
|
|
|
|
|
if (ptr)
|
2021-06-30 11:18:28 -04:00
|
|
|
update_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
|
2015-04-08 12:10:49 -04:00
|
|
|
stkctr->table->data_arg[STKTABLE_DT_SESS_RATE].u, 1);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-04-04 12:50:31 -04:00
|
|
|
/* This function is called from the protocol layer accept() in order to
 * instantiate a new session on behalf of a given listener and frontend. It
 * returns a positive value upon success, 0 if the connection can be ignored,
 * or a negative value upon critical failure. The accepted connection is
 * closed if we return <= 0. If no handshake is needed, it immediately tries
 * to instantiate a new stream. The connection must already have been filled
 * with the incoming connection handle (a fd), a target (the listener) and a
 * source address.
 */
int session_accept_fd(struct connection *cli_conn)
{
	struct listener *l = __objt_listener(cli_conn->target);
	struct proxy *p = l->bind_conf->frontend;
	int cfd = cli_conn->handle.fd;
	struct session *sess;
	int ret;

	ret = -1; /* assume unrecoverable error by default */

	cli_conn->proxy_netns = l->rx.settings->netns;

	/* Active reversed connection has already been initialized before being
	 * accepted. It must not be reset.
	 * TODO use a dedicated accept_fd callback for reverse protocol
	 */
	if (!cli_conn->xprt) {
		if (conn_prepare(cli_conn, l->rx.proto, l->bind_conf->xprt) < 0)
			goto out_free_conn;

		conn_ctrl_init(cli_conn);

		/* wait for a PROXY protocol header */
		if (l->bind_conf->options & BC_O_ACC_PROXY)
			cli_conn->flags |= CO_FL_ACCEPT_PROXY;

		/* wait for a NetScaler client IP insertion protocol header */
		if (l->bind_conf->options & BC_O_ACC_CIP)
			cli_conn->flags |= CO_FL_ACCEPT_CIP;

		/* Add the handshake pseudo-XPRT */
		if (cli_conn->flags & (CO_FL_ACCEPT_PROXY | CO_FL_ACCEPT_CIP)) {
			if (xprt_add_hs(cli_conn) != 0)
				goto out_free_conn;
		}
	}

	/* Reversed conns already have an assigned session, do not recreate it. */
	if (!(cli_conn->flags & CO_FL_REVERSED)) {
		sess = session_new(p, l, &cli_conn->obj_type);
		if (!sess)
			goto out_free_conn;

		conn_set_owner(cli_conn, sess, NULL);
	}
	else {
		sess = cli_conn->owner;
	}

	/* now evaluate the tcp-request layer4 rules. We only need a session
	 * and no stream for these rules.
	 */
	if (((sess->fe->defpx && !LIST_ISEMPTY(&sess->fe->defpx->tcp_req.l4_rules)) ||
	     !LIST_ISEMPTY(&p->tcp_req.l4_rules)) && !tcp_exec_l4_rules(sess)) {
		/* let's do a no-linger now to close with a single RST.
		 * FD-less connections (e.g. QUIC) have no socket to tweak.
		 */
		if (!(cli_conn->flags & CO_FL_FDLESS))
			setsockopt(cfd, SOL_SOCKET, SO_LINGER, (struct linger *) &nolinger, sizeof(struct linger));
		ret = 0; /* successful termination */
		goto out_free_sess;
	}

	/* TCP rules may flag the connection as needing proxy protocol, now that it's done we can start our xprt */
	if (conn_xprt_start(cli_conn) < 0)
		goto out_free_sess;

	/* FIXME/WTA: we should implement the setsockopt() calls at the proto
	 * level instead and let non-inet protocols implement their own equivalent.
	 */
	if (cli_conn->flags & CO_FL_FDLESS)
		goto skip_fd_setup;

	/* Adjust some socket options */
	if (l->rx.addr.ss_family == AF_INET || l->rx.addr.ss_family == AF_INET6) {
		setsockopt(cfd, IPPROTO_TCP, TCP_NODELAY, (char *) &one, sizeof(one));

		if (p->options & PR_O_TCP_CLI_KA) {
			setsockopt(cfd, SOL_SOCKET, SO_KEEPALIVE, (char *) &one, sizeof(one));

#ifdef TCP_KEEPCNT
			if (p->clitcpka_cnt)
				setsockopt(cfd, IPPROTO_TCP, TCP_KEEPCNT, &p->clitcpka_cnt, sizeof(p->clitcpka_cnt));
#endif

#ifdef TCP_KEEPIDLE
			if (p->clitcpka_idle)
				setsockopt(cfd, IPPROTO_TCP, TCP_KEEPIDLE, &p->clitcpka_idle, sizeof(p->clitcpka_idle));
#endif

#ifdef TCP_KEEPINTVL
			if (p->clitcpka_intvl)
				setsockopt(cfd, IPPROTO_TCP, TCP_KEEPINTVL, &p->clitcpka_intvl, sizeof(p->clitcpka_intvl));
#endif
		}

		if (p->options & PR_O_TCP_NOLING)
			HA_ATOMIC_OR(&fdtab[cfd].state, FD_LINGER_RISK);

#if defined(TCP_MAXSEG)
		if (l->bind_conf->maxseg < 0) {
			/* we just want to reduce the current MSS by that value */
			int mss;
			socklen_t mss_len = sizeof(mss);
			if (getsockopt(cfd, IPPROTO_TCP, TCP_MAXSEG, &mss, &mss_len) == 0) {
				mss += l->bind_conf->maxseg; /* remember, it's < 0 */
				setsockopt(cfd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss));
			}
		}
#endif
	}

	if (global.tune.client_sndbuf)
		setsockopt(cfd, SOL_SOCKET, SO_SNDBUF, &global.tune.client_sndbuf, sizeof(global.tune.client_sndbuf));

#if defined(TCP_NOTSENT_LOWAT)
	if (global.tune.client_notsent_lowat && (l->rx.addr.ss_family == AF_INET || l->rx.addr.ss_family == AF_INET6))
		setsockopt(cfd, IPPROTO_TCP, TCP_NOTSENT_LOWAT, &global.tune.client_notsent_lowat, sizeof(global.tune.client_notsent_lowat));
#endif

	if (global.tune.client_rcvbuf)
		setsockopt(cfd, SOL_SOCKET, SO_RCVBUF, &global.tune.client_rcvbuf, sizeof(global.tune.client_rcvbuf));

 skip_fd_setup:
	/* OK, now either we have a pending handshake to execute with and then
	 * we must return to the I/O layer, or we can proceed with the end of
	 * the stream initialization. In case of handshake, we also set the I/O
	 * timeout to the frontend's client timeout and register a task in the
	 * session for this purpose. The connection's owner is left to the
	 * session during this period.
	 *
	 * At this point we set the relation between sess/task/conn this way :
	 *
	 *                   +----------------- task
	 *                   |                    |
	 *          orig -- sess <-- context      |
	 *           |       ^           |        |
	 *           v       |           |        |
	 *          conn -- owner ---> task <-----+
	 */
	if (cli_conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS)) {
		int timeout;
		int clt_tmt = p->timeout.client;
		int hs_tmt = p->timeout.client_hs;

		if (unlikely((sess->task = task_new_here()) == NULL))
			goto out_free_sess;

		/* Handshake timeout as default timeout */
		timeout = hs_tmt ? hs_tmt : clt_tmt;
		sess->task->context = sess;
		sess->task->nice = l->bind_conf->nice;
		sess->task->process = session_expire_embryonic;
		sess->task->expire = tick_add_ifset(now_ms, timeout);
		task_queue(sess->task);

		/* Session is responsible to decrement listener conns counters. */
		sess->flags |= SESS_FL_RELEASE_LI;

		return 1;
	}

	/* OK let's complete stream initialization since there is no handshake */
	if (conn_complete_session(cli_conn) >= 0) {
		/* Session is responsible to decrement listener conns counters. */
		sess->flags |= SESS_FL_RELEASE_LI;
		return 1;
	}

	/* if we reach here we have deliberately decided not to keep this
	 * session (e.g. tcp-request rule), so that's not an error we should
	 * try to protect against.
	 */
	ret = 0;

	/* error unrolling */
 out_free_sess:
	/* SESS_FL_RELEASE_LI must not be set here as listener_release() is
	 * called manually for all errors.
	 */
	session_free(sess);

 out_free_conn:
	if (ret < 0 && l->bind_conf->xprt == xprt_get(XPRT_RAW) &&
	    p->mode == PR_MODE_HTTP && l->bind_conf->mux_proto == NULL &&
	    !(cli_conn->flags & CO_FL_FDLESS)) {
		/* critical error, no more memory, try to emit a 500 response */
		send(cfd, http_err_msgs[HTTP_ERR_500], strlen(http_err_msgs[HTTP_ERR_500]),
		     MSG_DONTWAIT|MSG_NOSIGNAL);
	}

	/* Mux is already initialized for active reversed connection. */
	conn_release(cli_conn);
	listener_release(l);
	return ret;
}
|
|
|
|
|
|
|
|
|
|
|
2024-02-21 10:38:46 -05:00
|
|
|
/* prepare <out> buffer with a log prefix for session <sess>. It only works with
|
2015-04-04 12:50:31 -04:00
|
|
|
* embryonic sessions based on a real connection. This function requires that
|
|
|
|
|
* at sess->origin points to the incoming connection.
|
|
|
|
|
*/
|
2024-02-21 10:38:46 -05:00
|
|
|
static void session_prepare_log_prefix(struct session *sess, struct buffer *out)
|
2015-04-04 12:50:31 -04:00
|
|
|
{
|
2021-10-22 11:47:14 -04:00
|
|
|
const struct sockaddr_storage *src;
|
2015-04-04 12:50:31 -04:00
|
|
|
struct tm tm;
|
|
|
|
|
char pn[INET6_ADDRSTRLEN];
|
|
|
|
|
int ret;
|
|
|
|
|
char *end;
|
|
|
|
|
|
2021-10-22 11:47:14 -04:00
|
|
|
src = sess_src(sess);
|
|
|
|
|
ret = (src ? addr_to_str(src, pn, sizeof(pn)) : 0);
|
2015-04-04 12:50:31 -04:00
|
|
|
if (ret <= 0)
|
2024-02-21 10:38:46 -05:00
|
|
|
chunk_printf(out, "unknown [");
|
MEDIUM: protocol: make abns a custom unix socket address family
This is a pre-requisite to adding the abnsz socket address family:
in this patch we make use of protocol API rework started by 732913f
("MINOR: protocol: properly assign the sock_domain and sock_family") in
order to implement a dedicated address family for ABNS sockets (based on
UNIX parent family).
Thanks to this, it will become trivial to implement a new ABNSZ (for abns
zero) family which is essentially the same as ABNS but with a slight
difference when it comes to path handling (ABNS uses the whole sun_path
length, while ABNSZ's path is zero terminated and evaluation stops at 0)
It was verified that this patch doesn't break reg-tests and behaves
properly (tests performed on the CLI with show sess and show fd).
Anywhere relevant, AF_CUST_ABNS is handled alongside AF_UNIX. If no
distinction needs to be made, real_family() is used to fetch the proper
real family type to handle it properly.
Both stream and dgram were converted, so no functional change should be
expected for this "internal" rework, except that proto will be displayed
as "abns_{stream,dgram}" instead of "unix_{stream,dgram}".
Before ("show sess" output):
0x64c35528aab0: proto=unix_stream src=unix:1 fe=GLOBAL be=<NONE> srv=<none> ts=00 epoch=0 age=0s calls=1 rate=0 cpu=0 lat=0 rq[f=848000h,i=0,an=00h,ax=] rp[f=80008000h,i=0,an=00h,ax=] scf=[8,0h,fd=21,rex=10s,wex=] scb=[8,1h,fd=-1,rex=,wex=] exp=10s rc=0 c_exp=
After:
0x619da7ad74c0: proto=abns_stream src=unix:1 fe=GLOBAL be=<NONE> srv=<none> ts=00 epoch=0 age=0s calls=1 rate=0 cpu=0 lat=0 rq[f=848000h,i=0,an=00h,ax=] rp[f=80008000h,i=0,an=00h,ax=] scf=[8,0h,fd=22,rex=10s,wex=] scb=[8,1h,fd=-1,rex=,wex=] exp=10s rc=0 c_exp=
Co-authored-by: Aurelien DARRAGON <adarragon@haproxy.com>
2024-08-09 12:48:14 -04:00
|
|
|
else if (real_family(ret) == AF_UNIX)
|
2024-02-21 10:38:46 -05:00
|
|
|
chunk_printf(out, "%s:%d [", pn, sess->listener->luid);
|
2015-04-04 12:50:31 -04:00
|
|
|
else
|
2024-02-21 10:38:46 -05:00
|
|
|
chunk_printf(out, "%s:%d [", pn, get_host_port(src));
|
2015-04-04 12:50:31 -04:00
|
|
|
|
|
|
|
|
get_localtime(sess->accept_date.tv_sec, &tm);
|
2024-02-21 10:38:46 -05:00
|
|
|
end = date2str_log(out->area + out->data, &tm, &(sess->accept_date),
|
|
|
|
|
out->size - out->data);
|
|
|
|
|
out->data = end - out->area;
|
2015-04-04 12:50:31 -04:00
|
|
|
if (sess->listener->name)
|
2024-02-21 10:38:46 -05:00
|
|
|
chunk_appendf(out, "] %s/%s", sess->fe->id, sess->listener->name);
|
2015-04-04 12:50:31 -04:00
|
|
|
else
|
2024-02-21 10:38:46 -05:00
|
|
|
chunk_appendf(out, "] %s/%d", sess->fe->id, sess->listener->luid);
|
2015-04-04 12:50:31 -04:00
|
|
|
}
|
|
|
|
|
|
2023-05-12 11:13:46 -04:00
|
|
|
|
2024-02-21 10:38:46 -05:00
|
|
|
/* fill <out> buffer with the string to use for send_log during
 * session_kill_embryonic(). Add log prefix and error string.
 *
 * It expects that the session originates from a connection.
 *
 * The function is able to dump an SSL error string when CO_ER_SSL_HANDSHAKE
 * is met.
 */
void session_embryonic_build_legacy_err(struct session *sess, struct buffer *out)
{
	struct connection *conn = objt_conn(sess->origin);
	const char *err_msg;
	struct ssl_sock_ctx __maybe_unused *ssl_ctx;

	/* the caller guarantees sess->origin is a connection */
	BUG_ON(!conn);

	err_msg = conn_err_code_str(conn);
	/* emit "<addr>:<port> [<date>] <frontend>/<listener>" first */
	session_prepare_log_prefix(sess, out);

#ifdef USE_OPENSSL
	ssl_ctx = conn_get_ssl_sock_ctx(conn);

	/* when the SSL error code is present and during a SSL Handshake failure,
	 * try to dump the error string from OpenSSL */
	if (conn->err_code == CO_ER_SSL_HANDSHAKE && ssl_ctx && ssl_ctx->error_code != 0) {
		chunk_appendf(out, ": SSL handshake failure (");
		/* ERR_error_string_n() writes directly into the buffer's free
		 * tail, so <data> must be resynced from the string length */
		ERR_error_string_n(ssl_ctx->error_code, b_orig(out)+b_data(out), b_room(out));
		out->data = strlen(b_orig(out));
		chunk_appendf(out, ")\n");
	}

	else
#endif /* ! USE_OPENSSL */

	/* generic case: known error string, or raw code/flags as last resort */
	if (err_msg)
		chunk_appendf(out, ": %s\n", err_msg);
	else
		chunk_appendf(out, ": unknown connection error (code=%d flags=%08x)\n",
		              conn->err_code, conn->flags);

	return;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2015-04-04 12:50:31 -04:00
|
|
|
/* This function kills an existing embryonic session. It stops the connection's
 * transport layer, releases assigned resources, resumes the listener if it was
 * disabled and finally kills the file descriptor. This function requires that
 * sess->origin points to the incoming connection.
 */
static void session_kill_embryonic(struct session *sess, unsigned int state)
{
	struct connection *conn = __objt_conn(sess->origin);
	struct task *task = sess->task;
	unsigned int log = sess->fe->to_log;

	if (log && (sess->fe->options & PR_O_NULLNOLOG)) {
		/* with "option dontlognull", we don't log connections with no transfer */
		if (!conn->err_code ||
		    conn->err_code == CO_ER_PRX_EMPTY || conn->err_code == CO_ER_PRX_ABORT ||
		    conn->err_code == CO_ER_CIP_EMPTY || conn->err_code == CO_ER_CIP_ABORT ||
		    conn->err_code == CO_ER_SSL_EMPTY || conn->err_code == CO_ER_SSL_ABORT)
			log = 0;
	}

	if (log) {
		/* on a pure timer wakeup with no error yet, refine the error
		 * code from whichever handshake was still pending */
		if (!conn->err_code && (state & TASK_WOKEN_TIMER)) {
			if (conn->flags & CO_FL_ACCEPT_PROXY)
				conn->err_code = CO_ER_PRX_TIMEOUT;
			else if (conn->flags & CO_FL_ACCEPT_CIP)
				conn->err_code = CO_ER_CIP_TIMEOUT;
			else if (conn->flags & CO_FL_SSL_WAIT_HS)
				conn->err_code = CO_ER_SSL_TIMEOUT;
		}

		sess_log_embryonic(sess);
	}

	/* kill the connection now */
	conn_stop_tracking(conn);
	conn_full_close(conn);
	conn_free(conn);
	/* the connection was just freed: drop the session's reference to it */
	sess->origin = NULL;

	task_destroy(task);
	session_free(sess);
}
|
|
|
|
|
|
|
|
|
|
/* Manages the embryonic session timeout. It is only called when the timeout
|
2021-01-29 06:27:57 -05:00
|
|
|
* strikes and performs the required cleanup. It's only exported to make it
|
|
|
|
|
* resolve in "show tasks".
|
2015-04-04 12:50:31 -04:00
|
|
|
*/
|
2021-03-02 10:09:26 -05:00
|
|
|
struct task *session_expire_embryonic(struct task *t, void *context, unsigned int state)
|
2015-04-04 12:50:31 -04:00
|
|
|
{
|
2018-05-25 08:04:04 -04:00
|
|
|
struct session *sess = context;
|
2015-04-04 12:50:31 -04:00
|
|
|
|
2018-08-16 13:03:50 -04:00
|
|
|
if (!(state & TASK_WOKEN_TIMER))
|
2015-04-04 12:50:31 -04:00
|
|
|
return t;
|
|
|
|
|
|
2018-11-05 09:09:47 -05:00
|
|
|
session_kill_embryonic(sess, state);
|
2015-04-04 12:50:31 -04:00
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Finish initializing a session from a connection, or kills it if the
 * connection shows an error. Returns <0 if the connection was killed. It may
 * be called either asynchronously when ssl handshake is done with an embryonic
 * session, or synchronously to finalize the session. The distinction is made
 * on sess->task which is only set in the embryonic session case.
 */
int conn_complete_session(struct connection *conn)
{
	struct session *sess = conn->owner;

	/* time spent between accept and handshake completion, in ms */
	sess->t_handshake = ns_to_ms(now_ns - sess->accept_ts);

	if (conn->flags & CO_FL_ERROR)
		goto fail;

	/* if logs require transport layer information, note it on the connection */
	if (sess->fe->to_log & LW_XPRT)
		conn->flags |= CO_FL_XPRT_TRACKED;

	/* we may have some tcp-request-session rules */
	if (((sess->fe->defpx && !LIST_ISEMPTY(&sess->fe->defpx->tcp_req.l5_rules)) ||
	     !LIST_ISEMPTY(&sess->fe->tcp_req.l5_rules)) && !tcp_exec_l5_rules(sess))
		goto fail;

	session_count_new(sess);
	/* install the frontend mux unless one is already set up */
	if (!conn->mux) {
		if (conn_install_mux_fe(conn, NULL) < 0)
			goto fail;
	}

	/* the embryonic session's task is not needed anymore */
	task_destroy(sess->task);
	sess->task = NULL;
	conn_set_owner(conn, sess, conn_session_free);

	return 0;

 fail:
	/* a non-NULL task means this is still an embryonic session which
	 * must be fully killed; otherwise the caller owns the cleanup */
	if (sess->task)
		session_kill_embryonic(sess, 0);
	return -1;
}
|
|
|
|
|
|
2024-01-19 11:25:18 -05:00
|
|
|
/* Add <inc> to the number of cumulated glitches in the tracked counters for
|
|
|
|
|
* session <sess> which is known for being tracked, and implicitly update the
|
|
|
|
|
* rate if also tracked.
|
|
|
|
|
*/
|
|
|
|
|
void __session_add_glitch_ctr(struct session *sess, uint inc)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < global.tune.nb_stk_ctr; i++)
|
|
|
|
|
stkctr_add_glitch_ctr(&sess->stkctr[i], inc);
|
|
|
|
|
}
|
|
|
|
|
|
2025-08-20 04:01:46 -04:00
|
|
|
|
|
|
|
|
/* Session management of backend connections. */
|
|
|
|
|
|
2025-08-21 09:59:33 -04:00
|
|
|
/* Allocate a storage element into <sess> session which refers to <target>
|
|
|
|
|
* endpoint. This storage can be used to attach new connections
|
|
|
|
|
* to the session.
|
|
|
|
|
*
|
|
|
|
|
* Returns the allocated element or NULL on failure.
|
|
|
|
|
*/
|
|
|
|
|
static struct sess_priv_conns *sess_alloc_sess_conns(struct session *sess,
|
|
|
|
|
enum obj_type *target)
|
|
|
|
|
{
|
|
|
|
|
struct sess_priv_conns *pconns;
|
|
|
|
|
struct server *srv;
|
|
|
|
|
|
|
|
|
|
pconns = pool_alloc(pool_head_sess_priv_conns);
|
|
|
|
|
if (!pconns)
|
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
pconns->target = target;
|
|
|
|
|
LIST_INIT(&pconns->conn_list);
|
|
|
|
|
LIST_APPEND(&sess->priv_conns, &pconns->sess_el);
|
|
|
|
|
|
|
|
|
|
MT_LIST_INIT(&pconns->srv_el);
|
|
|
|
|
/* If <target> endpoint is a server, also attach storage element into it. */
|
|
|
|
|
if ((srv = objt_server(target)))
|
|
|
|
|
MT_LIST_APPEND(&srv->sess_conns, &pconns->srv_el);
|
|
|
|
|
|
|
|
|
|
pconns->tid = tid;
|
|
|
|
|
|
|
|
|
|
return pconns;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Retrieve the backend connections storage element from <sess> session which
|
|
|
|
|
* refers to <target> endpoint.
|
|
|
|
|
*
|
|
|
|
|
* This function usage must be protected with idle_conns lock.
|
|
|
|
|
*
|
|
|
|
|
* Returns the storage element or NULL if not found;
|
|
|
|
|
*/
|
|
|
|
|
static struct sess_priv_conns *sess_get_sess_conns(struct session *sess,
|
|
|
|
|
enum obj_type *target)
|
|
|
|
|
{
|
|
|
|
|
struct sess_priv_conns *pconns;
|
|
|
|
|
|
|
|
|
|
list_for_each_entry(pconns, &sess->priv_conns, sess_el) {
|
|
|
|
|
if (pconns->target == target)
|
|
|
|
|
return pconns;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2025-08-20 04:01:46 -04:00
|
|
|
/* Add the connection <conn> to the private conns list of session <sess>. Each
 * connection is indexed by their respective target in the session. Nothing is
 * performed if the connection is already in the session list.
 *
 * Returns true if conn is inserted or already present else false if a failure
 * occurs during insertion.
 */
int session_add_conn(struct session *sess, struct connection *conn)
{
	struct sess_priv_conns *pconns;
	int ret = 0;

	/* Connection target is used to index it in the session. Only BE conns are expected in session list. */
	BUG_ON(!conn->target || objt_listener(conn->target));

	/* A connection cannot be attached already to another session.
	 *
	 * This is safe as BE connections are flagged as private immediately
	 * after being created during connect_server(). The only potential
	 * issue would be if a connection is turned private later on during its
	 * lifetime. Currently, this happens only on NTLM headers detection,
	 * however this case is only implemented with HTTP/1.1 which cannot
	 * multiplex several streams on the same connection.
	 */
	BUG_ON(conn->owner && conn->owner != sess);

	HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

	/* Already attached to the session: nothing to do, report success. */
	if (!LIST_ISEMPTY(&conn->sess_el)) {
		ret = 1;
		goto out;
	}

	/* get the per-target storage element, allocating it on first use */
	pconns = sess_get_sess_conns(sess, conn->target);
	if (!pconns) {
		pconns = sess_alloc_sess_conns(sess, conn->target);
		if (!pconns)
			goto out;
	}

	LIST_APPEND(&pconns->conn_list, &conn->sess_el);
	/* Ensure owner is set for connection. It could have been reset
	 * prior on after a session_add_conn() failure.
	 */
	conn->owner = sess;
	ret = 1;

 out:
	HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	return ret;
}
|
|
|
|
|
|
|
|
|
|
/* Check that session <sess> is able to keep idle connection <conn>. This must
 * be called each time a connection stored in a session becomes idle.
 *
 * Returns 0 if the connection is kept, else non-zero if the connection was
 * explicitly removed from session.
 */
int session_check_idle_conn(struct session *sess, struct connection *conn)
{
	/* Connection must be attached to session prior to this function call. */
	BUG_ON(!conn->owner || conn->owner != sess);

	/* Connection is not attached to a session.
	 * NOTE(review): presumably a runtime fallback for builds where the
	 * BUG_ON() above is compiled out — confirm before removing.
	 */
	if (!conn->owner)
		return 0;

	/* Ensure conn is not already accounted as idle to prevent sess idle count excess increment. */
	BUG_ON(conn->flags & CO_FL_SESS_IDLE);

	if (sess->idle_conns >= sess->fe->max_out_conns) {
		/* the session already holds its maximum of idle connections:
		 * detach this one instead of keeping it */
		session_unown_conn(sess, conn);
		conn->owner = NULL;
		return -1;
	}
	else {
		/* keep it: flag it idle and account for it */
		conn->flags |= CO_FL_SESS_IDLE;
		sess->idle_conns++;
	}

	return 0;
}
|
|
|
|
|
|
|
|
|
|
/* Look for an available connection matching the target <target> in the server
|
|
|
|
|
* list of the session <sess>. It returns a connection if found. Otherwise it
|
|
|
|
|
* returns NULL.
|
|
|
|
|
*/
|
|
|
|
|
struct connection *session_get_conn(struct session *sess, void *target, int64_t hash)
|
|
|
|
|
{
|
2025-08-21 09:59:33 -04:00
|
|
|
struct connection *srv_conn, *res = NULL;
|
2025-08-20 04:01:46 -04:00
|
|
|
struct sess_priv_conns *pconns;
|
|
|
|
|
|
2025-08-08 09:54:21 -04:00
|
|
|
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
|
|
|
|
|
|
2025-08-21 09:59:33 -04:00
|
|
|
pconns = sess_get_sess_conns(sess, target);
|
|
|
|
|
if (!pconns)
|
|
|
|
|
goto end;
|
|
|
|
|
|
|
|
|
|
/* Search into pconns for a connection with matching params and available streams. */
|
|
|
|
|
list_for_each_entry(srv_conn, &pconns->conn_list, sess_el) {
|
|
|
|
|
if ((srv_conn->hash_node && srv_conn->hash_node->node.key == hash) &&
|
|
|
|
|
srv_conn->mux &&
|
|
|
|
|
(srv_conn->mux->avail_streams(srv_conn) > 0) &&
|
|
|
|
|
!(srv_conn->flags & CO_FL_WAIT_XPRT)) {
|
|
|
|
|
if (srv_conn->flags & CO_FL_SESS_IDLE) {
|
|
|
|
|
srv_conn->flags &= ~CO_FL_SESS_IDLE;
|
|
|
|
|
sess->idle_conns--;
|
2025-08-20 04:01:46 -04:00
|
|
|
}
|
2025-08-21 09:59:33 -04:00
|
|
|
|
|
|
|
|
res = srv_conn;
|
|
|
|
|
break;
|
2025-08-20 04:01:46 -04:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
end:
|
2025-08-08 09:54:21 -04:00
|
|
|
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
|
2025-08-21 09:59:33 -04:00
|
|
|
return res;
|
2025-08-20 04:01:46 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Remove the connection from the session list, and destroy sess_priv_conns
 * element if it's now empty.
 */
void session_unown_conn(struct session *sess, struct connection *conn)
{
	struct sess_priv_conns *pconns = NULL;

	/* only backend connections may live in a session's private list */
	BUG_ON(objt_listener(conn->target));

	HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

	/* WT: this currently is a workaround for an inconsistency between
	 * the link status of the connection in the session list and the
	 * connection's owner. This should be removed as soon as all this
	 * is addressed. Right now it's possible to enter here with a non-null
	 * conn->owner that points to a dead session, but in this case the
	 * element is not linked.
	 */
	if (!LIST_INLIST(&conn->sess_el))
		goto out;

	/* an idle connection leaving the list must be deducted from the count */
	if (conn->flags & CO_FL_SESS_IDLE)
		sess->idle_conns--;
	LIST_DEL_INIT(&conn->sess_el);
	conn->owner = NULL;

	pconns = sess_get_sess_conns(sess, conn->target);
	BUG_ON(!pconns); /* if conn is attached to session, its sess_conn must exists. */
	if (LIST_ISEMPTY(&pconns->conn_list)) {
		/* last connection for this target: unlink the storage element
		 * from both the session and the server, then release it */
		LIST_DELETE(&pconns->sess_el);
		MT_LIST_DELETE(&pconns->srv_el);
		pool_free(pool_head_sess_priv_conns, pconns);
	}

 out:
	HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
}
|
|
|
|
|
|
|
|
|
|
|
2015-04-03 07:53:24 -04:00
|
|
|
/*
|
|
|
|
|
* Local variables:
|
|
|
|
|
* c-indent-level: 8
|
|
|
|
|
* c-basic-offset: 8
|
|
|
|
|
* End:
|
|
|
|
|
*/
|