2006-06-15 15:48:13 -04:00
|
|
|
/*
|
2020-06-04 16:29:18 -04:00
|
|
|
* include/haproxy/proxy.h
|
2009-10-04 17:12:44 -04:00
|
|
|
* This file defines function prototypes for proxy management.
|
|
|
|
|
*
|
2011-07-25 10:33:49 -04:00
|
|
|
* Copyright (C) 2000-2011 Willy Tarreau - w@1wt.eu
|
2009-10-04 17:12:44 -04:00
|
|
|
*
|
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
|
* License as published by the Free Software Foundation, version 2.1
|
|
|
|
|
* exclusively.
|
|
|
|
|
*
|
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
|
*
|
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
|
* License along with this library; if not, write to the Free Software
|
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
|
|
|
*/
|
2006-06-15 15:48:13 -04:00
|
|
|
|
2020-06-04 16:29:18 -04:00
|
|
|
#ifndef _HAPROXY_PROXY_H
|
|
|
|
|
#define _HAPROXY_PROXY_H
|
2006-06-25 20:48:02 -04:00
|
|
|
|
2025-08-23 13:57:29 -04:00
|
|
|
#include <import/ceb32_tree.h>
|
2025-08-23 13:45:03 -04:00
|
|
|
|
2020-05-27 06:58:42 -04:00
|
|
|
#include <haproxy/api.h>
|
2020-06-09 03:07:15 -04:00
|
|
|
#include <haproxy/applet-t.h>
|
2020-06-04 16:29:18 -04:00
|
|
|
#include <haproxy/freq_ctr.h>
|
2021-03-24 11:13:20 -04:00
|
|
|
#include <haproxy/list.h>
|
2020-06-04 08:58:24 -04:00
|
|
|
#include <haproxy/listener-t.h>
|
2020-06-04 16:29:18 -04:00
|
|
|
#include <haproxy/proxy-t.h>
|
2020-06-04 17:20:13 -04:00
|
|
|
#include <haproxy/server-t.h>
|
2020-06-02 12:15:32 -04:00
|
|
|
#include <haproxy/ticks.h>
|
2023-04-28 10:46:11 -04:00
|
|
|
#include <haproxy/thread.h>
|
2006-06-15 15:48:13 -04:00
|
|
|
|
2017-11-24 10:54:05 -05:00
|
|
|
extern struct proxy *proxies_list;
|
2025-05-09 10:02:09 -04:00
|
|
|
extern struct list proxies;
|
2025-08-23 13:57:29 -04:00
|
|
|
extern struct ceb_root *used_proxy_id; /* list of proxy IDs in use */
|
2014-03-15 02:22:35 -04:00
|
|
|
extern unsigned int error_snapshot_id; /* global ID assigned to each error then incremented */
|
2025-07-15 05:47:54 -04:00
|
|
|
extern struct ceb_root *proxy_by_name; /* tree of proxies sorted by name */
|
2014-03-15 02:22:35 -04:00
|
|
|
|
2018-11-11 09:40:36 -05:00
|
|
|
extern const struct cfg_opt cfg_opts[];
|
|
|
|
|
extern const struct cfg_opt cfg_opts2[];
|
2025-03-07 04:55:31 -05:00
|
|
|
extern const struct cfg_opt cfg_opts3[];
|
2018-11-11 09:40:36 -05:00
|
|
|
|
2021-03-02 10:09:26 -05:00
|
|
|
struct task *manage_proxy(struct task *t, void *context, unsigned int state);
|
2022-09-09 09:51:37 -04:00
|
|
|
void proxy_cond_pause(struct proxy *p);
|
|
|
|
|
void proxy_cond_resume(struct proxy *p);
|
2020-10-07 10:31:39 -04:00
|
|
|
void proxy_cond_disable(struct proxy *p);
|
2006-06-25 20:48:02 -04:00
|
|
|
void soft_stop(void);
|
2011-09-07 13:14:57 -04:00
|
|
|
int pause_proxy(struct proxy *p);
|
2011-09-07 16:37:44 -04:00
|
|
|
int resume_proxy(struct proxy *p);
|
2008-10-12 06:07:48 -04:00
|
|
|
void stop_proxy(struct proxy *p);
|
REORG/MAJOR: session: rename the "session" entity to "stream"
With HTTP/2, we'll have to support multiplexed streams. A stream is in
fact the largest part of what we currently call a session, it has buffers,
logs, etc.
In order to catch any error, this commit removes any reference to the
struct session and tries to rename most "session" occurrences in function
names to "stream" and "sess" to "strm" when that's related to a session.
The files stream.{c,h} were added and session.{c,h} removed.
The session will be reintroduced later and a few parts of the stream
will progressively be moved overthere. It will more or less contain
only what we need in an embryonic session.
Sample fetch functions and converters will have to change a bit so
that they'll use an L5 (session) instead of what's currently called
"L4" which is in fact L6 for now.
Once all changes are completed, we should see approximately this :
L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet
There will be at most one http_txn per stream, and a same session will
possibly be referenced by multiple streams. A connection will point to
a session and to a stream. The session will hold all the information
we need to keep even when we don't yet have a stream.
Some more cleanup is needed because some code was already far from
being clean. The server queue management still refers to sessions at
many places while comments talk about connections. This will have to
be cleaned up once we have a server-side connection pool manager.
Stream flags "SN_*" still need to be renamed, it doesn't seem like
any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
|
|
|
int stream_set_backend(struct stream *s, struct proxy *be);
|
2006-06-15 15:48:13 -04:00
|
|
|
|
2025-04-10 04:37:33 -04:00
|
|
|
void deinit_proxy(struct proxy *p);
|
2021-03-24 11:13:20 -04:00
|
|
|
void free_proxy(struct proxy *p);
|
2007-11-04 01:04:43 -05:00
|
|
|
const char *proxy_cap_str(int cap);
|
2007-11-03 18:41:58 -04:00
|
|
|
const char *proxy_mode_str(int mode);
|
2021-03-15 06:11:55 -04:00
|
|
|
const char *proxy_find_best_option(const char *word, const char **extra);
|
2025-08-23 13:24:21 -04:00
|
|
|
uint proxy_get_next_id(uint from);
|
2014-03-15 02:22:35 -04:00
|
|
|
void proxy_store_name(struct proxy *px);
|
2015-05-26 09:25:32 -04:00
|
|
|
struct proxy *proxy_find_by_id(int id, int cap, int table);
|
2015-05-26 05:24:42 -04:00
|
|
|
struct proxy *proxy_find_by_name(const char *name, int cap, int table);
|
2015-05-27 10:46:26 -04:00
|
|
|
struct proxy *proxy_find_best_match(int cap, const char *name, int id, int *diff);
|
2009-06-22 09:48:36 -04:00
|
|
|
int proxy_cfg_ensure_no_http(struct proxy *curproxy);
|
2023-11-15 06:18:52 -05:00
|
|
|
int proxy_cfg_ensure_no_log(struct proxy *curproxy);
|
2011-07-28 19:49:03 -04:00
|
|
|
void init_new_proxy(struct proxy *p);
|
2021-02-12 02:19:01 -05:00
|
|
|
void proxy_preset_defaults(struct proxy *defproxy);
|
2021-02-12 04:38:49 -05:00
|
|
|
void proxy_free_defaults(struct proxy *defproxy);
|
2021-02-12 07:52:11 -05:00
|
|
|
void proxy_destroy_defaults(struct proxy *px);
|
2021-10-13 03:50:53 -04:00
|
|
|
void proxy_destroy_all_unref_defaults(void);
|
|
|
|
|
void proxy_ref_defaults(struct proxy *px, struct proxy *defpx);
|
|
|
|
|
void proxy_unref_defaults(struct proxy *px);
|
2024-09-20 09:59:04 -04:00
|
|
|
void proxy_unref_or_destroy_defaults(struct proxy *px);
|
2025-04-09 15:05:35 -04:00
|
|
|
int setup_new_proxy(struct proxy *px, const char *name, unsigned int cap, char **errmsg);
|
2021-03-23 12:27:05 -04:00
|
|
|
struct proxy *alloc_new_proxy(const char *name, unsigned int cap,
|
|
|
|
|
char **errmsg);
|
|
|
|
|
struct proxy *parse_new_proxy(const char *name, unsigned int cap,
|
|
|
|
|
const char *file, int linenum,
|
|
|
|
|
const struct proxy *defproxy);
|
2018-09-07 11:43:26 -04:00
|
|
|
void proxy_capture_error(struct proxy *proxy, int is_back,
|
|
|
|
|
struct proxy *other_end, enum obj_type *target,
|
|
|
|
|
const struct session *sess,
|
|
|
|
|
const struct buffer *buf, long buf_ofs,
|
|
|
|
|
unsigned int buf_out, unsigned int err_pos,
|
|
|
|
|
const union error_snapshot_ctx *ctx,
|
|
|
|
|
void (*show)(struct buffer *, const struct error_snapshot *));
|
CLEANUP: tree-wide: fix prototypes for functions taking no arguments.
"f(void)" is the correct and preferred form for a function taking no
argument, while some places use the older "f()". These were reported
by clang's -Wmissing-prototypes, for example:
src/cpuset.c:111:5: warning: no previous prototype for function 'ha_cpuset_size' [-Wmissing-prototypes]
int ha_cpuset_size()
include/haproxy/cpuset.h:42:5: note: this declaration is not a prototype; add 'void' to make it a prototype for a zero-parameter function
int ha_cpuset_size();
^
void
This aggregate patch fixes this for the following functions:
ha_backtrace_to_stderr(), ha_cpuset_size(), ha_panic(), ha_random64(),
ha_thread_dump_all_to_trash(), get_exec_path(), check_config_validity(),
mworker_child_nb(), mworker_cli_proxy_(create|stop)(),
mworker_cleantasks(), mworker_cleanlisteners(), mworker_ext_launch_all(),
mworker_reload(), mworker_(env|proc_list)_to_(proc_list|env)(),
mworker_(un|)block_signals(), proxy_adjust_all_maxconn(),
proxy_destroy_all_defaults(), get_tainted(),
pool_total_(allocated|used)(), thread_isolate(_full|)(),
thread(_sync|)_release(), thread_harmless_till_end(),
thread_cpu_mask_forced(), dequeue_all_listeners(), next_timer_expiry(),
wake_expired_tasks(), process_runnable_tasks(), init_acl(),
init_buffer(), (de|)init_log_buffers(), (de|)init_pollers(),
fork_poller(), pool_destroy_all(), pool_evict_from_local_caches(),
pool_total_failures(), dump_pools_to_trash(), cfg_run_diagnostics(),
tv_init_(process|thread)_date(), __signal_process_queue(),
deinit_signals(), haproxy_unblock_signals()
2021-09-12 06:49:33 -04:00
|
|
|
void proxy_adjust_all_maxconn(void);
|
2016-11-24 06:02:29 -05:00
|
|
|
/* CLI helpers: resolve a frontend/backend from a CLI argument <arg>, emitting
 * an error on <appctx> and returning NULL when not found. The second line was
 * previously a duplicate declaration of cli_find_frontend(); it is fixed to
 * declare cli_find_backend() as implemented in src/proxy.c.
 */
struct proxy *cli_find_frontend(struct appctx *appctx, const char *arg);
struct proxy *cli_find_backend(struct appctx *appctx, const char *arg);
|
2023-08-08 05:37:59 -04:00
|
|
|
int resolve_stick_rule(struct proxy *curproxy, struct sticking_rule *mrule);
|
2023-11-16 05:29:58 -05:00
|
|
|
void free_stick_rules(struct list *rules);
|
2023-11-23 10:27:45 -05:00
|
|
|
void free_server_rules(struct list *srules);
|
2025-01-15 10:10:43 -05:00
|
|
|
int proxy_init_per_thr(struct proxy *px);
|
2006-06-25 20:48:02 -04:00
|
|
|
|
2007-11-04 01:04:43 -05:00
|
|
|
/*
|
|
|
|
|
* This function returns a string containing the type of the proxy in a format
|
|
|
|
|
* suitable for error messages, from its capabilities.
|
|
|
|
|
*/
|
|
|
|
|
static inline const char *proxy_type_str(struct proxy *proxy)
|
|
|
|
|
{
|
2023-04-22 18:04:36 -04:00
|
|
|
if (proxy->mode == PR_MODE_PEERS)
|
|
|
|
|
return "peers section";
|
2007-11-04 01:04:43 -05:00
|
|
|
return proxy_cap_str(proxy->cap);
|
|
|
|
|
}
|
|
|
|
|
|
2015-05-26 05:24:42 -04:00
|
|
|
/* Find the frontend having name <name>. The name may also start with a '#' to
|
|
|
|
|
* reference a numeric id. NULL is returned if not found.
|
|
|
|
|
*/
|
|
|
|
|
static inline struct proxy *proxy_fe_by_name(const char *name)
|
|
|
|
|
{
|
|
|
|
|
return proxy_find_by_name(name, PR_CAP_FE, 0);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Find the backend having name <name>. The name may also start with a '#' to
|
|
|
|
|
* reference a numeric id. NULL is returned if not found.
|
|
|
|
|
*/
|
|
|
|
|
static inline struct proxy *proxy_be_by_name(const char *name)
|
|
|
|
|
{
|
|
|
|
|
return proxy_find_by_name(name, PR_CAP_BE, 0);
|
|
|
|
|
}
|
|
|
|
|
|
2025-08-23 13:45:03 -04:00
|
|
|
/* index proxy <px>'s id into used_proxy_id */
static inline void proxy_index_id(struct proxy *px)
{
	/* Inserts <px> into the used_proxy_id ceb32 tree, keyed on the proxy's
	 * <uuid> member and using its embedded <conf.uuid_node> as the tree
	 * node (member names are passed to the ceb32_item_insert() helper
	 * macro, which resolves them relative to <px>).
	 */
	ceb32_item_insert(&used_proxy_id, conf.uuid_node, uuid, px);
}
|
|
|
|
|
|
2008-02-15 05:15:34 -05:00
|
|
|
/* this function initializes all timeouts for proxy p */
|
|
|
|
|
static inline void proxy_reset_timeouts(struct proxy *proxy)
|
|
|
|
|
{
|
2008-07-06 18:09:58 -04:00
|
|
|
proxy->timeout.client = TICK_ETERNITY;
|
|
|
|
|
proxy->timeout.tarpit = TICK_ETERNITY;
|
|
|
|
|
proxy->timeout.queue = TICK_ETERNITY;
|
|
|
|
|
proxy->timeout.connect = TICK_ETERNITY;
|
|
|
|
|
proxy->timeout.server = TICK_ETERNITY;
|
|
|
|
|
proxy->timeout.httpreq = TICK_ETERNITY;
|
|
|
|
|
proxy->timeout.check = TICK_ETERNITY;
|
2012-05-12 06:50:00 -04:00
|
|
|
proxy->timeout.tunnel = TICK_ETERNITY;
|
2008-02-15 05:15:34 -05:00
|
|
|
}
|
|
|
|
|
|
2025-10-08 04:27:45 -04:00
|
|
|
/* Return proxy's abortonclose status: 0=off, non-zero=on, with a default to
|
|
|
|
|
* <def> when neither choice was forced.
|
2025-10-08 04:18:35 -04:00
|
|
|
*/
|
2025-10-08 04:27:45 -04:00
|
|
|
static inline int proxy_abrt_close_def(const struct proxy *px, int def)
|
2025-10-07 09:36:54 -04:00
|
|
|
{
|
2025-10-08 04:18:35 -04:00
|
|
|
if (px->options & PR_O_ABRT_CLOSE)
|
|
|
|
|
return 1;
|
|
|
|
|
else if (px->no_options & PR_O_ABRT_CLOSE)
|
|
|
|
|
return 0;
|
|
|
|
|
/* When unset: 1 for HTTP, 0 for TCP */
|
2025-10-08 04:27:45 -04:00
|
|
|
return def;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* return proxy's abortonclose status: 0=off, non-zero=on.
|
|
|
|
|
* Considers the proxy's mode when neither on/off was set,
|
|
|
|
|
* and HTTP mode defaults to on.
|
|
|
|
|
*/
|
|
|
|
|
static inline int proxy_abrt_close(const struct proxy *px)
|
|
|
|
|
{
|
|
|
|
|
return proxy_abrt_close_def(px, px->mode == PR_MODE_HTTP);
|
2025-10-07 09:36:54 -04:00
|
|
|
}
|
|
|
|
|
|
2010-06-04 14:59:39 -04:00
|
|
|
/* increase the number of cumulated connections received on the designated frontend */
static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
{
	/* shared per-thread-group stats pointers may be NULL (shared stats
	 * not allocated for this side), so every dereference is guarded.
	 */
	if (fe->fe_counters.shared.tg[tgid - 1])
		_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_conn);
	/* the listener itself is optional and so are its counters */
	if (l && l->counters && l->counters->shared.tg[tgid - 1])
		_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_conn);
	if (fe->fe_counters.shared.tg[tgid - 1])
		update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->conn_per_sec, 1);
	/* cps_max is fed from the local-only ("_"-prefixed) freq counter so
	 * that the max is not skewed by the per-thread-group distribution of
	 * the shared counters.
	 */
	HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.cps_max,
			     update_freq_ctr(&fe->fe_counters._conn_per_sec, 1));
}
|
|
|
|
|
|
|
|
|
|
/* increase the number of cumulated connections accepted by the designated frontend */
static inline void proxy_inc_fe_sess_ctr(struct listener *l, struct proxy *fe)
{
	/* shared per-thread-group stats pointers may be NULL, guard each use */
	if (fe->fe_counters.shared.tg[tgid - 1])
		_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess);
	/* the listener itself is optional and so are its counters */
	if (l && l->counters && l->counters->shared.tg[tgid - 1])
		_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess);
	if (fe->fe_counters.shared.tg[tgid - 1])
		update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
	/* sps_max is fed from the local-only ("_"-prefixed) freq counter so
	 * that the max is not skewed by the per-thread-group distribution of
	 * the shared counters.
	 */
	HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.sps_max,
			     update_freq_ctr(&fe->fe_counters._sess_per_sec, 1));
}
|
|
|
|
|
|
2023-01-18 05:52:21 -05:00
|
|
|
/* increase the number of cumulated HTTP sessions on the designated frontend.
 * <http_ver> must be the HTTP version for such requests.
 */
static inline void proxy_inc_fe_cum_sess_ver_ctr(struct listener *l, struct proxy *fe,
                                                 unsigned int http_ver)
{
	/* <http_ver> is 1-based here; 0 (unknown) and out-of-range values are
	 * ignored. Note: sizeof operands are not evaluated, so the possibly
	 * NULL tg[] pointer is never dereferenced by this bounds check.
	 */
	if (http_ver == 0 ||
	    http_ver > sizeof(fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver) / sizeof(*fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver))
		return;

	/* shared per-thread-group stats pointers may be NULL, guard each use */
	if (fe->fe_counters.shared.tg[tgid - 1])
		_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
	/* the listener itself is optional and so are its counters */
	if (l && l->counters && l->counters->shared.tg[tgid - 1])
		_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
}
|
|
|
|
|
|
2024-04-04 12:08:46 -04:00
|
|
|
/* increase the number of cumulated streams on the designated backend */
static inline void proxy_inc_be_ctr(struct proxy *be)
{
	/* shared per-thread-group stats pointers may be NULL, guard each use */
	if (be->be_counters.shared.tg[tgid - 1])
		_HA_ATOMIC_INC(&be->be_counters.shared.tg[tgid - 1]->cum_sess);
	if (be->be_counters.shared.tg[tgid - 1])
		update_freq_ctr(&be->be_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
	/* sps_max is fed from the local-only ("_"-prefixed) freq counter so
	 * that the max is not skewed by the per-thread-group distribution of
	 * the shared counters.
	 */
	HA_ATOMIC_UPDATE_MAX(&be->be_counters.sps_max,
			     update_freq_ctr(&be->be_counters._sess_per_sec, 1));
}
|
|
|
|
|
|
2023-01-18 05:52:21 -05:00
|
|
|
/* increase the number of cumulated requests on the designated frontend.
 * <http_ver> must be the HTTP version for HTTP request. 0 may be provided
 * for others requests.
 */
static inline void proxy_inc_fe_req_ctr(struct listener *l, struct proxy *fe,
                                        unsigned int http_ver)
{
	/* cum_req[] is indexed by <http_ver> directly (slot 0 counts non-HTTP
	 * requests); out-of-range versions are ignored. sizeof operands are
	 * not evaluated, so the possibly NULL tg[] pointer is safe here.
	 */
	if (http_ver >= sizeof(fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req) / sizeof(*fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req))
		return;

	/* shared per-thread-group stats pointers may be NULL, guard each use */
	if (fe->fe_counters.shared.tg[tgid - 1])
		_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
	/* the listener itself is optional and so are its counters */
	if (l && l->counters && l->counters->shared.tg[tgid - 1])
		_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
	if (fe->fe_counters.shared.tg[tgid - 1])
		update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->req_per_sec, 1);
	/* rps_max is fed from the local-only ("_"-prefixed) freq counter so
	 * that the max is not skewed by the per-thread-group distribution of
	 * the shared counters.
	 */
	HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.p.http.rps_max,
			     update_freq_ctr(&fe->fe_counters.p.http._req_per_sec, 1));
}
|
|
|
|
|
|
2020-03-10 03:06:11 -04:00
|
|
|
/* Returns non-zero if the proxy is configured to retry a request if we got that status, 0 otherwise */
|
MEDIUM: streams: Add the ability to retry a request on L7 failure.
When running in HTX mode, if we sent the request, but failed to get the
answer, either because the server just closed its socket, we hit a server
timeout, or we get a 404, 408, 425, 500, 501, 502, 503 or 504 error,
attempt to retry the request, exactly as if we just failed to connect to
the server.
To do so, add a new backend keyword, "retry-on".
It accepts a list of keywords, which can be "none" (never retry),
"conn-failure" (we failed to connect, or to do the SSL handshake),
"empty-response" (the server closed the connection without answering),
"response-timeout" (we timed out while waiting for the server response),
or "404", "408", "425", "500", "501", "502", "503" and "504".
The default is "conn-failure".
2019-04-05 09:30:12 -04:00
|
|
|
static inline int l7_status_match(struct proxy *p, int status)
|
|
|
|
|
{
|
|
|
|
|
/* Just return 0 if no retry was configured for any status */
|
|
|
|
|
if (!(p->retry_type & PR_RE_STATUS_MASK))
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
switch (status) {
|
2020-11-12 05:14:05 -05:00
|
|
|
case 401:
|
|
|
|
|
return (p->retry_type & PR_RE_401);
|
|
|
|
|
case 403:
|
|
|
|
|
return (p->retry_type & PR_RE_403);
|
MEDIUM: streams: Add the ability to retry a request on L7 failure.
When running in HTX mode, if we sent the request, but failed to get the
answer, either because the server just closed its socket, we hit a server
timeout, or we get a 404, 408, 425, 500, 501, 502, 503 or 504 error,
attempt to retry the request, exactly as if we just failed to connect to
the server.
To do so, add a new backend keyword, "retry-on".
It accepts a list of keywords, which can be "none" (never retry),
"conn-failure" (we failed to connect, or to do the SSL handshake),
"empty-response" (the server closed the connection without answering),
"response-timeout" (we timed out while waiting for the server response),
or "404", "408", "425", "500", "501", "502", "503" and "504".
The default is "conn-failure".
2019-04-05 09:30:12 -04:00
|
|
|
case 404:
|
|
|
|
|
return (p->retry_type & PR_RE_404);
|
|
|
|
|
case 408:
|
|
|
|
|
return (p->retry_type & PR_RE_408);
|
2024-11-28 05:45:51 -05:00
|
|
|
case 421:
|
|
|
|
|
return (p->retry_type & PR_RE_421);
|
MEDIUM: streams: Add the ability to retry a request on L7 failure.
When running in HTX mode, if we sent the request, but failed to get the
answer, either because the server just closed its socket, we hit a server
timeout, or we get a 404, 408, 425, 500, 501, 502, 503 or 504 error,
attempt to retry the request, exactly as if we just failed to connect to
the server.
To do so, add a new backend keyword, "retry-on".
It accepts a list of keywords, which can be "none" (never retry),
"conn-failure" (we failed to connect, or to do the SSL handshake),
"empty-response" (the server closed the connection without answering),
"response-timeout" (we timed out while waiting for the server response),
or "404", "408", "425", "500", "501", "502", "503" and "504".
The default is "conn-failure".
2019-04-05 09:30:12 -04:00
|
|
|
case 425:
|
|
|
|
|
return (p->retry_type & PR_RE_425);
|
2024-08-30 06:11:03 -04:00
|
|
|
case 429:
|
|
|
|
|
return (p->retry_type & PR_RE_429);
|
MEDIUM: streams: Add the ability to retry a request on L7 failure.
When running in HTX mode, if we sent the request, but failed to get the
answer, either because the server just closed its socket, we hit a server
timeout, or we get a 404, 408, 425, 500, 501, 502, 503 or 504 error,
attempt to retry the request, exactly as if we just failed to connect to
the server.
To do so, add a new backend keyword, "retry-on".
It accepts a list of keywords, which can be "none" (never retry),
"conn-failure" (we failed to connect, or to do the SSL handshake),
"empty-response" (the server closed the connection without answering),
"response-timeout" (we timed out while waiting for the server response),
or "404", "408", "425", "500", "501", "502", "503" and "504".
The default is "conn-failure".
2019-04-05 09:30:12 -04:00
|
|
|
case 500:
|
|
|
|
|
return (p->retry_type & PR_RE_500);
|
|
|
|
|
case 501:
|
|
|
|
|
return (p->retry_type & PR_RE_501);
|
|
|
|
|
case 502:
|
|
|
|
|
return (p->retry_type & PR_RE_502);
|
|
|
|
|
case 503:
|
|
|
|
|
return (p->retry_type & PR_RE_503);
|
|
|
|
|
case 504:
|
|
|
|
|
return (p->retry_type & PR_RE_504);
|
|
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
2019-08-07 03:28:39 -04:00
|
|
|
|
|
|
|
|
/* Return 1 if <p> proxy is in <list> list of proxies which are also stick-tables,
|
|
|
|
|
* 0 if not.
|
|
|
|
|
*/
|
|
|
|
|
static inline int in_proxies_list(struct proxy *list, struct proxy *proxy)
|
|
|
|
|
{
|
|
|
|
|
struct proxy *p;
|
|
|
|
|
|
|
|
|
|
for (p = list; p; p = p->next_stkt_ref)
|
|
|
|
|
if (proxy == p)
|
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-28 10:46:11 -04:00
|
|
|
/* Add <bytes> to the global total bytes sent and adjust the send rate. Set
|
|
|
|
|
* <splice> if this was sent usigin splicing.
|
|
|
|
|
*/
|
|
|
|
|
static inline void increment_send_rate(uint64_t bytes, int splice)
|
|
|
|
|
{
|
|
|
|
|
/* We count the total bytes sent, and the send rate for 32-byte blocks.
|
|
|
|
|
* The reason for the latter is that freq_ctr are limited to 4GB and
|
|
|
|
|
* that it's not enough per second.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
if (splice)
|
|
|
|
|
_HA_ATOMIC_ADD(&th_ctx->spliced_out_bytes, bytes);
|
|
|
|
|
_HA_ATOMIC_ADD(&th_ctx->out_bytes, bytes);
|
|
|
|
|
update_freq_ctr(&th_ctx->out_32bps, (bytes + 16) / 32);
|
|
|
|
|
}
|
|
|
|
|
|
2020-06-04 16:29:18 -04:00
|
|
|
#endif /* _HAPROXY_PROXY_H */
|
2006-06-25 20:48:02 -04:00
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Local variables:
|
|
|
|
|
* c-indent-level: 8
|
|
|
|
|
* c-basic-offset: 8
|
|
|
|
|
* End:
|
|
|
|
|
*/
|