2006-06-25 20:48:02 -04:00
/*
* Configuration parser
*
2011-01-06 11:51:27 -05:00
* Copyright 2000 - 2011 Willy Tarreau < w @ 1 wt . eu >
2006-06-25 20:48:02 -04:00
*
* This program is free software ; you can redistribute it and / or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation ; either version
* 2 of the License , or ( at your option ) any later version .
*
*/
2022-08-07 10:55:07 -04:00
/* This is to have crypt() and sched_setaffinity() defined on Linux */
2014-08-29 14:20:02 -04:00
# define _GNU_SOURCE
2022-08-07 10:55:07 -04:00
# ifdef USE_LIBCRYPT
2019-05-22 13:24:06 -04:00
# ifdef USE_CRYPT_H
2014-08-29 14:20:02 -04:00
/* some platforms such as Solaris need this */
# include <crypt.h>
# endif
2019-05-22 13:24:06 -04:00
# endif /* USE_LIBCRYPT */
2014-08-29 14:20:02 -04:00
2021-03-26 13:20:47 -04:00
# include <dirent.h>
2006-06-25 20:48:02 -04:00
# include <stdio.h>
# include <stdlib.h>
# include <string.h>
# include <netdb.h>
# include <ctype.h>
2007-03-25 09:39:23 -04:00
# include <pwd.h>
# include <grp.h>
2007-03-25 18:18:40 -04:00
# include <errno.h>
2022-08-07 10:55:07 -04:00
# ifdef USE_CPU_AFFINITY
# include <sched.h>
# endif
2007-06-10 18:29:26 -04:00
# include <sys/types.h>
# include <sys/stat.h>
# include <unistd.h>
2006-06-25 20:48:02 -04:00
2020-06-04 13:11:43 -04:00
# include <haproxy/acl.h>
2021-03-25 12:19:04 -04:00
# include <haproxy/action.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/api.h>
2021-05-06 09:49:04 -04:00
# include <haproxy/arg.h>
2020-06-04 04:36:03 -04:00
# include <haproxy/auth.h>
2020-06-04 16:50:02 -04:00
# include <haproxy/backend.h>
2020-06-04 05:18:28 -04:00
# include <haproxy/capture.h>
2021-07-16 09:39:28 -04:00
# include <haproxy/cfgcond.h>
2020-06-04 18:00:29 -04:00
# include <haproxy/cfgparse.h>
2020-06-04 15:07:02 -04:00
# include <haproxy/channel.h>
2020-06-04 12:21:56 -04:00
# include <haproxy/check.h>
2020-06-02 04:22:45 -04:00
# include <haproxy/chunk.h>
2021-10-08 03:33:24 -04:00
# include <haproxy/clock.h>
MEDIUM: counters: manage shared counters using dedicated helpers
proxies, listeners and server shared counters are now managed via helpers
added in one of the previous commits.
When guid is not set (ie: when not yet assigned), shared counters pointer
is allocated using calloc() (local memory) and a flag is set on the shared
counters struct to know how to manipulate (and free it). Else if guid is
set, then it means that the counters may be shared so while for now we
don't actually use a shared memory location the API is ready for that.
The way it works, for proxies and servers (for which guid is not known
during creation), we first call counters_{fe,be}_shared_get with guid not
set, which results in local pointer being retrieved (as if we just
manually called calloc() to retrieve a pointer). Later (during postparsing)
if guid is set we try to upgrade the pointer from local to shared.
Lastly, since the memory location for some objects (proxies and servers
counters) may change from creation to postparsing, let's update
counters->last_change member directly under counters_{fe,be}_shared_get()
so we don't miss it.
No change of behavior is expected, this is only preparation work.
2025-05-07 17:42:04 -04:00
# include <haproxy/counters.h>
2021-04-23 10:58:08 -04:00
# ifdef USE_CPU_AFFINITY
2021-04-14 10:16:03 -04:00
# include <haproxy/cpuset.h>
2025-01-22 11:17:59 -05:00
# include <haproxy/cpu_topo.h>
2021-04-23 10:58:08 -04:00
# endif
2020-06-04 12:02:10 -04:00
# include <haproxy/connection.h>
2020-05-27 10:10:29 -04:00
# include <haproxy/errors.h>
2020-06-04 15:29:29 -04:00
# include <haproxy/filters.h>
2020-06-04 05:23:07 -04:00
# include <haproxy/frontend.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/global.h>
2020-06-04 15:21:03 -04:00
# include <haproxy/http_ana.h>
2020-06-04 05:40:28 -04:00
# include <haproxy/http_rules.h>
2020-06-04 08:34:27 -04:00
# include <haproxy/lb_chash.h>
2020-06-04 08:37:38 -04:00
# include <haproxy/lb_fas.h>
2020-06-04 08:41:04 -04:00
# include <haproxy/lb_fwlc.h>
2020-06-04 08:45:03 -04:00
# include <haproxy/lb_fwrr.h>
2020-06-04 14:22:59 -04:00
# include <haproxy/lb_map.h>
2024-03-28 12:24:53 -04:00
# include <haproxy/lb_ss.h>
2020-06-04 08:58:24 -04:00
# include <haproxy/listener.h>
2020-06-04 16:01:04 -04:00
# include <haproxy/log.h>
2022-09-13 10:16:30 -04:00
# include <haproxy/sink.h>
2020-06-05 05:40:38 -04:00
# include <haproxy/mailers.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/namespace.h>
2025-01-30 08:50:19 -05:00
# include <haproxy/quic_cc-t.h>
2022-01-25 11:48:47 -05:00
# include <haproxy/quic_sock.h>
2025-01-30 12:01:53 -05:00
# include <haproxy/quic_tune.h>
2020-06-04 05:29:21 -04:00
# include <haproxy/obj_type-t.h>
2023-04-19 04:41:55 -04:00
# include <haproxy/openssl-compat.h>
2020-06-04 12:38:21 -04:00
# include <haproxy/peers-t.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/peers.h>
2020-06-02 03:38:52 -04:00
# include <haproxy/pool.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/protocol.h>
2020-06-04 16:29:18 -04:00
# include <haproxy/proxy.h>
2021-02-12 13:42:55 -05:00
# include <haproxy/resolvers.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/sample.h>
# include <haproxy/server.h>
2020-06-04 12:58:52 -04:00
# include <haproxy/session.h>
2020-06-04 13:58:55 -04:00
# include <haproxy/stats-t.h>
2020-06-04 12:46:44 -04:00
# include <haproxy/stick_table.h>
2020-06-04 17:46:14 -04:00
# include <haproxy/stream.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/task.h>
2020-06-04 11:42:48 -04:00
# include <haproxy/tcp_rules.h>
2021-07-23 09:46:46 -04:00
# include <haproxy/tcpcheck.h>
2020-06-09 03:07:15 -04:00
# include <haproxy/thread.h>
# include <haproxy/tools.h>
2024-11-13 13:54:32 -05:00
# include <haproxy/uri_auth.h>
2006-06-25 20:48:02 -04:00
2014-03-18 08:54:18 -04:00
/* Used to chain configuration sections definitions. This list
* stores struct cfg_section
*/
struct list sections = LIST_HEAD_INIT ( sections ) ;
2017-10-23 08:36:34 -04:00
struct list postparsers = LIST_HEAD_INIT ( postparsers ) ;
2021-03-16 10:12:17 -04:00
extern struct proxy * mworker_proxy ;
2023-07-27 11:09:14 -04:00
/* curproxy is only valid during parsing and will be NULL afterwards. */
2023-08-01 05:18:00 -04:00
struct proxy * curproxy = NULL ;
2023-07-27 11:09:14 -04:00
2018-11-11 09:40:36 -05:00
char * cursection = NULL ;
MEDIUM: config: don't enforce a low frontend maxconn value anymore
Historically the default frontend's maxconn used to be quite low (2000),
which was sufficient two decades ago but often proved to be a problem
when users had purposely set the global maxconn value but forgot to set
the frontend's.
There is no point in keeping this arbitrary limit for frontends : when
the global maxconn is lower, it's already too high and when the global
maxconn is much higher, it becomes a limiting factor which causes trouble
in production.
This commit allows the value to be set to zero, which becomes the new
default value, to mean it's not directly limited, or in fact it's set
to the global maxconn. Since this operation used to be performed before
computing a possibly automatic global maxconn based on memory limits,
the calculation of the maxconn value and its propagation to the backends'
fullconn has now moved to a dedicated function, proxy_adjust_all_maxconn(),
which is called once the global maxconn is stabilized.
This comes with two benefits :
1) a configuration missing "maxconn" in the defaults section will not
limit itself to a magically hardcoded value but will scale up to the
global maxconn ;
2) when the global maxconn is not set and memory limits are used instead,
the frontends' maxconn automatically adapts, and the backends' fullconn
as well.
2019-02-27 11:25:52 -05:00
int cfg_maxpconn = 0 ; /* # of simultaneous connections per proxy (-N) */
2009-03-15 10:23:16 -04:00
int cfg_maxconn = 0 ; /* # of simultaneous connections, (-n) */
2016-11-04 17:36:15 -04:00
char * cfg_scope = NULL ; /* the current scope during the configuration parsing */
2022-11-18 09:46:06 -05:00
int non_global_section_parsed = 0 ;
2006-06-25 20:48:02 -04:00
2021-04-27 14:29:11 -04:00
/* how to handle default paths */
static enum default_path_mode {
DEFAULT_PATH_CURRENT = 0 , /* "current": paths are relative to CWD (this is the default) */
DEFAULT_PATH_CONFIG , /* "config": paths are relative to config file */
DEFAULT_PATH_PARENT , /* "parent": paths are relative to config file's ".." */
DEFAULT_PATH_ORIGIN , /* "origin": paths are relative to default_path_origin */
} default_path_mode ;
2025-03-04 05:04:01 -05:00
char initial_cwd [ PATH_MAX ] ;
2021-04-27 14:29:11 -04:00
static char current_cwd [ PATH_MAX ] ;
[MEDIUM] add support for configuration keyword registration
Any module which needs configuration keywords may now dynamically
register a keyword in a given section, and associate it with a
configuration parsing function using cfg_register_keywords() from
a constructor function. This makes the configuration parser more
modular because it is not required anymore to touch cfg_parse.c.
Example :
static int parse_global_blah(char **args, int section_type, struct proxy *curpx,
struct proxy *defpx, char *err, int errlen)
{
printf("parsing blah in global section\n");
return 0;
}
static int parse_listen_blah(char **args, int section_type, struct proxy *curpx,
struct proxy *defpx, char *err, int errlen)
{
printf("parsing blah in listen section\n");
if (*args[1]) {
snprintf(err, errlen, "missing arg for listen_blah!!!");
return -1;
}
return 0;
}
static struct cfg_kw_list cfg_kws = {{ },{
{ CFG_GLOBAL, "blah", parse_global_blah },
{ CFG_LISTEN, "blah", parse_listen_blah },
{ 0, NULL, NULL },
}};
__attribute__((constructor))
static void __module_init(void)
{
cfg_register_keywords(&cfg_kws);
}
2008-07-09 13:39:06 -04:00
/* List head of all known configuration keywords */
2018-11-11 09:19:52 -05:00
struct cfg_kw_list cfg_keywords = {
[MEDIUM] add support for configuration keyword registration
Any module which needs configuration keywords may now dynamically
register a keyword in a given section, and associate it with a
configuration parsing function using cfg_register_keywords() from
a constructor function. This makes the configuration parser more
modular because it is not required anymore to touch cfg_parse.c.
Example :
static int parse_global_blah(char **args, int section_type, struct proxy *curpx,
struct proxy *defpx, char *err, int errlen)
{
printf("parsing blah in global section\n");
return 0;
}
static int parse_listen_blah(char **args, int section_type, struct proxy *curpx,
struct proxy *defpx, char *err, int errlen)
{
printf("parsing blah in listen section\n");
if (*args[1]) {
snprintf(err, errlen, "missing arg for listen_blah!!!");
return -1;
}
return 0;
}
static struct cfg_kw_list cfg_kws = {{ },{
{ CFG_GLOBAL, "blah", parse_global_blah },
{ CFG_LISTEN, "blah", parse_listen_blah },
{ 0, NULL, NULL },
}};
__attribute__((constructor))
static void __module_init(void)
{
cfg_register_keywords(&cfg_kws);
}
2008-07-09 13:39:06 -04:00
. list = LIST_HEAD_INIT ( cfg_keywords . list )
} ;
2006-06-25 20:48:02 -04:00
/*
* converts < str > to a list of listeners which are dynamically allocated .
* The format is " {addr|'*'}:port[-end][,{addr|'*'}:port[-end]]* " , where :
* - < addr > can be empty or " * " to indicate INADDR_ANY ;
* - < port > is a numerical port from 1 to 65535 ;
* - < end > indicates to use the range from < port > to < end > instead ( inclusive ) .
* This can be repeated as many times as necessary , separated by a coma .
2012-09-20 14:01:39 -04:00
* Function returns 1 for success or 0 if error . In case of errors , if < err > is
* not NULL , it must be a valid pointer to either NULL or a freeable area that
* will be replaced with an error message .
2006-06-25 20:48:02 -04:00
*/
2012-09-20 14:01:39 -04:00
int str2listener ( char * str , struct proxy * curproxy , struct bind_conf * bind_conf , const char * file , int line , char * * err )
2006-06-25 20:48:02 -04:00
{
2020-09-16 11:58:55 -04:00
struct protocol * proto ;
2011-03-04 09:43:13 -05:00
char * next , * dupstr ;
2006-06-25 20:48:02 -04:00
int port , end ;
next = dupstr = strdup ( str ) ;
2009-10-04 09:43:17 -04:00
2006-06-25 20:48:02 -04:00
while ( next & & * next ) {
2017-09-15 02:10:44 -04:00
struct sockaddr_storage * ss2 ;
2013-03-10 18:51:38 -04:00
int fd = - 1 ;
2006-06-25 20:48:02 -04:00
str = next ;
/* 1) look for the end of the first address */
2009-01-27 10:57:08 -05:00
if ( ( next = strchr ( str , ' , ' ) ) ! = NULL ) {
2006-06-25 20:48:02 -04:00
* next + + = 0 ;
}
2023-11-09 05:19:24 -05:00
ss2 = str2sa_range ( str , NULL , & port , & end , & fd , & proto , NULL , err ,
2021-03-16 10:12:17 -04:00
( curproxy = = global . cli_fe | | curproxy = = mworker_proxy ) ? NULL : global . unix_bind . prefix ,
2024-08-26 05:50:24 -04:00
NULL , NULL , PA_O_RESOLVE | PA_O_PORT_OK | PA_O_PORT_MAND | PA_O_PORT_RANGE |
2020-09-16 10:28:08 -04:00
PA_O_SOCKET_FD | PA_O_STREAM | PA_O_XPRT ) ;
2013-03-06 09:45:03 -05:00
if ( ! ss2 )
goto fail ;
2006-06-25 20:48:02 -04:00
2023-11-21 05:10:34 -05:00
if ( ss2 - > ss_family = = AF_CUST_RHTTP_SRV ) {
2023-10-19 06:05:31 -04:00
/* Check if a previous non reverse HTTP present is
* already defined . If DGRAM or STREAM is set , this
* indicates that we are currently parsing the second
* or more address .
*/
if ( bind_conf - > options & ( BC_O_USE_SOCK_DGRAM | BC_O_USE_SOCK_STREAM ) & &
! ( bind_conf - > options & BC_O_REVERSE_HTTP ) ) {
memprintf ( err , " Cannot mix reverse HTTP bind with others. \n " ) ;
goto fail ;
}
2023-11-21 05:10:34 -05:00
bind_conf - > rhttp_srvname = strdup ( str + strlen ( " rhttp@ " ) ) ;
if ( ! bind_conf - > rhttp_srvname ) {
2023-10-19 06:05:31 -04:00
memprintf ( err , " Cannot allocate reverse HTTP bind. \n " ) ;
goto fail ;
}
bind_conf - > options | = BC_O_REVERSE_HTTP ;
}
else if ( bind_conf - > options & BC_O_REVERSE_HTTP ) {
/* Standard address mixed with a previous reverse HTTP one. */
memprintf ( err , " Cannot mix reverse HTTP bind with others. \n " ) ;
goto fail ;
}
2013-03-06 09:45:03 -05:00
/* OK the address looks correct */
2022-05-20 10:15:01 -04:00
if ( proto - > proto_type = = PROTO_TYPE_DGRAM )
bind_conf - > options | = BC_O_USE_SOCK_DGRAM ;
else
bind_conf - > options | = BC_O_USE_SOCK_STREAM ;
if ( proto - > xprt_type = = PROTO_TYPE_DGRAM )
bind_conf - > options | = BC_O_USE_XPRT_DGRAM ;
else
bind_conf - > options | = BC_O_USE_XPRT_STREAM ;
2020-11-23 08:23:21 -05:00
2020-09-16 11:58:55 -04:00
if ( ! create_listeners ( bind_conf , ss2 , port , end , fd , proto , err ) ) {
2017-09-15 02:10:44 -04:00
memprintf ( err , " %s for address '%s'. \n " , * err , str ) ;
goto fail ;
}
2006-06-25 20:48:02 -04:00
} /* end while(next) */
free ( dupstr ) ;
2009-10-04 09:43:17 -04:00
return 1 ;
2006-06-25 20:48:02 -04:00
fail :
free ( dupstr ) ;
2009-10-04 09:43:17 -04:00
return 0 ;
2006-06-25 20:48:02 -04:00
}
2020-09-16 09:13:04 -04:00
/*
* converts < str > to a list of datagram - oriented listeners which are dynamically
* allocated .
* The format is " {addr|'*'}:port[-end][,{addr|'*'}:port[-end]]* " , where :
* - < addr > can be empty or " * " to indicate INADDR_ANY ;
* - < port > is a numerical port from 1 to 65535 ;
* - < end > indicates to use the range from < port > to < end > instead ( inclusive ) .
* This can be repeated as many times as necessary , separated by a coma .
* Function returns 1 for success or 0 if error . In case of errors , if < err > is
* not NULL , it must be a valid pointer to either NULL or a freeable area that
* will be replaced with an error message .
*/
int str2receiver ( char * str , struct proxy * curproxy , struct bind_conf * bind_conf , const char * file , int line , char * * err )
{
2020-09-16 11:58:55 -04:00
struct protocol * proto ;
2020-09-16 09:13:04 -04:00
char * next , * dupstr ;
int port , end ;
next = dupstr = strdup ( str ) ;
while ( next & & * next ) {
struct sockaddr_storage * ss2 ;
int fd = - 1 ;
str = next ;
/* 1) look for the end of the first address */
if ( ( next = strchr ( str , ' , ' ) ) ! = NULL ) {
* next + + = 0 ;
}
2023-11-09 05:19:24 -05:00
ss2 = str2sa_range ( str , NULL , & port , & end , & fd , & proto , NULL , err ,
2021-03-13 05:00:33 -05:00
curproxy = = global . cli_fe ? NULL : global . unix_bind . prefix ,
2024-08-26 05:50:24 -04:00
NULL , NULL , PA_O_RESOLVE | PA_O_PORT_OK | PA_O_PORT_MAND | PA_O_PORT_RANGE |
2020-09-16 09:13:04 -04:00
PA_O_SOCKET_FD | PA_O_DGRAM | PA_O_XPRT ) ;
if ( ! ss2 )
goto fail ;
/* OK the address looks correct */
2020-09-16 11:58:55 -04:00
if ( ! create_listeners ( bind_conf , ss2 , port , end , fd , proto , err ) ) {
2020-09-16 09:13:04 -04:00
memprintf ( err , " %s for address '%s'. \n " , * err , str ) ;
goto fail ;
}
} /* end while(next) */
free ( dupstr ) ;
return 1 ;
fail :
free ( dupstr ) ;
return 0 ;
}
2021-05-08 13:58:37 -04:00
/*
* Sends a warning if proxy < proxy > does not have at least one of the
* capabilities in < cap > . An optional < hint > may be added at the end
* of the warning to help the user . Returns 1 if a warning was emitted
* or 0 if the condition is valid .
*/
int warnifnotcap ( struct proxy * proxy , int cap , const char * file , int line , const char * arg , const char * hint )
{
char * msg ;
switch ( cap ) {
case PR_CAP_BE : msg = " no backend " ; break ;
case PR_CAP_FE : msg = " no frontend " ; break ;
case PR_CAP_BE | PR_CAP_FE : msg = " neither frontend nor backend " ; break ;
default : msg = " not enough " ; break ;
}
if ( ! ( proxy - > cap & cap ) ) {
ha_warning ( " parsing [%s:%d] : '%s' ignored because %s '%s' has %s capability.%s \n " ,
file , line , arg , proxy_type_str ( proxy ) , proxy - > id , msg , hint ? hint : " " ) ;
return 1 ;
}
return 0 ;
}
/*
* Sends an alert if proxy < proxy > does not have at least one of the
* capabilities in < cap > . An optional < hint > may be added at the end
* of the alert to help the user . Returns 1 if an alert was emitted
* or 0 if the condition is valid .
*/
int failifnotcap ( struct proxy * proxy , int cap , const char * file , int line , const char * arg , const char * hint )
{
char * msg ;
switch ( cap ) {
case PR_CAP_BE : msg = " no backend " ; break ;
case PR_CAP_FE : msg = " no frontend " ; break ;
case PR_CAP_BE | PR_CAP_FE : msg = " neither frontend nor backend " ; break ;
default : msg = " not enough " ; break ;
}
if ( ! ( proxy - > cap & cap ) ) {
ha_alert ( " parsing [%s:%d] : '%s' not allowed because %s '%s' has %s capability.%s \n " ,
file , line , arg , proxy_type_str ( proxy ) , proxy - > id , msg , hint ? hint : " " ) ;
return 1 ;
}
return 0 ;
}
2016-12-21 16:41:44 -05:00
/*
* Report an error in < msg > when there are too many arguments . This version is
* intended to be used by keyword parsers so that the message will be included
* into the general error message . The index is the current keyword in args .
* Return 0 if the number of argument is correct , otherwise build a message and
* return 1. Fill err_code with an ERR_ALERT and an ERR_FATAL if not null . The
* message may also be null , it will simply not be produced ( useful to check only ) .
* < msg > and < err_code > are only affected on error .
*/
int too_many_args_idx ( int maxarg , int index , char * * args , char * * msg , int * err_code )
{
int i ;
if ( ! * args [ index + maxarg + 1 ] )
return 0 ;
if ( msg ) {
* msg = NULL ;
memprintf ( msg , " %s " , args [ 0 ] ) ;
for ( i = 1 ; i < = index ; i + + )
memprintf ( msg , " %s %s " , * msg , args [ i ] ) ;
memprintf ( msg , " '%s' cannot handle unexpected argument '%s'. " , * msg , args [ index + maxarg + 1 ] ) ;
}
if ( err_code )
* err_code | = ERR_ALERT | ERR_FATAL ;
return 1 ;
}
/*
 * Convenience wrapper around too_many_args_idx() using a zero index.
 */
int too_many_args(int maxarg, char **args, char **msg, int *err_code)
{
	return too_many_args_idx(maxarg, 0, args, msg, err_code);
}
2015-04-28 10:55:23 -04:00
/*
* Report a fatal Alert when there is too much arguments
* The index is the current keyword in args
* Return 0 if the number of argument is correct , otherwise emit an alert and return 1
* Fill err_code with an ERR_ALERT and an ERR_FATAL
*/
int alertif_too_many_args_idx ( int maxarg , int index , const char * file , int linenum , char * * args , int * err_code )
{
char * kw = NULL ;
int i ;
if ( ! * args [ index + maxarg + 1 ] )
return 0 ;
memprintf ( & kw , " %s " , args [ 0 ] ) ;
for ( i = 1 ; i < = index ; i + + ) {
memprintf ( & kw , " %s %s " , kw , args [ i ] ) ;
}
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : '%s' cannot handle unexpected argument '%s'. \n " , file , linenum , kw , args [ index + maxarg + 1 ] ) ;
2015-04-28 10:55:23 -04:00
free ( kw ) ;
* err_code | = ERR_ALERT | ERR_FATAL ;
return 1 ;
}
/*
 * Convenience wrapper around alertif_too_many_args_idx() using a zero index.
 */
int alertif_too_many_args(int maxarg, const char *file, int linenum, char **args, int *err_code)
{
	return alertif_too_many_args_idx(maxarg, 0, file, linenum, args, err_code);
}
2009-03-31 04:49:21 -04:00
2013-03-25 03:12:18 -04:00
/* Report it if a request ACL condition uses some keywords that are incompatible
* with the place where the ACL is used . It returns either 0 or ERR_WARN so that
* its result can be or ' ed with err_code . Note that < cond > may be NULL and then
* will be ignored .
2010-01-28 11:59:39 -05:00
*/
2018-11-11 09:40:36 -05:00
int warnif_cond_conflicts ( const struct acl_cond * cond , unsigned int where , const char * file , int line )
2010-01-28 11:59:39 -05:00
{
2013-03-25 03:12:18 -04:00
const struct acl * acl ;
2013-03-31 16:59:32 -04:00
const char * kw ;
2010-01-28 11:59:39 -05:00
2013-03-25 03:12:18 -04:00
if ( ! cond )
2010-01-28 11:59:39 -05:00
return 0 ;
2013-03-25 03:12:18 -04:00
acl = acl_cond_conflicts ( cond , where ) ;
if ( acl ) {
if ( acl - > name & & * acl - > name )
2017-11-24 10:50:31 -05:00
ha_warning ( " parsing [%s:%d] : acl '%s' will never match because it only involves keywords that are incompatible with '%s' \n " ,
file , line , acl - > name , sample_ckp_names ( where ) ) ;
2013-03-25 03:12:18 -04:00
else
2017-11-24 10:50:31 -05:00
ha_warning ( " parsing [%s:%d] : anonymous acl will never match because it uses keyword '%s' which is incompatible with '%s' \n " ,
file , line , LIST_ELEM ( acl - > expr . n , struct acl_expr * , list ) - > kw , sample_ckp_names ( where ) ) ;
2013-03-25 03:12:18 -04:00
return ERR_WARN ;
}
if ( ! acl_cond_kw_conflicts ( cond , where , & acl , & kw ) )
2010-01-31 09:43:27 -05:00
return 0 ;
2013-03-25 03:12:18 -04:00
if ( acl - > name & & * acl - > name )
2017-11-24 10:50:31 -05:00
ha_warning ( " parsing [%s:%d] : acl '%s' involves keywords '%s' which is incompatible with '%s' \n " ,
file , line , acl - > name , kw , sample_ckp_names ( where ) ) ;
2013-03-25 03:12:18 -04:00
else
2017-11-24 10:50:31 -05:00
ha_warning ( " parsing [%s:%d] : anonymous acl involves keyword '%s' which is incompatible with '%s' \n " ,
file , line , kw , sample_ckp_names ( where ) ) ;
2010-01-31 09:43:27 -05:00
return ERR_WARN ;
}
2021-03-26 05:02:46 -04:00
/* Report it if an ACL uses a L6 sample fetch from an HTTP proxy. It returns
* either 0 or ERR_WARN so that its result can be or ' ed with err_code . Note that
* < cond > may be NULL and then will be ignored .
*/
int warnif_tcp_http_cond ( const struct proxy * px , const struct acl_cond * cond )
{
if ( ! cond | | px - > mode ! = PR_MODE_HTTP )
return 0 ;
if ( cond - > use & ( SMP_USE_L6REQ | SMP_USE_L6RES ) ) {
ha_warning ( " Proxy '%s': L6 sample fetches ignored on HTTP proxies (declared at %s:%d). \n " ,
px - > id , cond - > file , cond - > line ) ;
return ERR_WARN ;
}
return 0 ;
}
2021-03-12 03:08:04 -05:00
/* Tries to find in <list> the word that looks closest to <word> by counting
 * transitions between letters, digits and other characters. Will return the
 * best matching word if found, otherwise NULL. An optional array of extra
 * words to compare may be passed in <extra>, but it must then be terminated
 * by a NULL entry. If unused it may be NULL.
 */
const char *cfg_find_best_match(const char *word, const struct list *list, int section, const char **extra)
{
	uint8_t word_sig[1024]; // 0..25=letter, 26=digit, 27=other, 28=begin, 29=end
	uint8_t cand_sig[1024];
	const struct cfg_kw_list *kwl;
	const char *best = NULL;
	int best_dist = INT_MAX;
	int dist;
	int idx;

	make_word_fingerprint(word_sig, word);

	/* first pass: all registered keywords belonging to <section> */
	list_for_each_entry(kwl, list, list) {
		for (idx = 0; kwl->kw[idx].kw != NULL; idx++) {
			if (kwl->kw[idx].section != section)
				continue;

			make_word_fingerprint(cand_sig, kwl->kw[idx].kw);
			dist = word_fingerprint_distance(word_sig, cand_sig);
			if (dist < best_dist) {
				best_dist = dist;
				best = kwl->kw[idx].kw;
			}
		}
	}

	/* second pass: the optional NULL-terminated extra words */
	for (; extra && *extra; extra++) {
		make_word_fingerprint(cand_sig, *extra);
		dist = word_fingerprint_distance(word_sig, cand_sig);
		if (dist < best_dist) {
			best_dist = dist;
			best = *extra;
		}
	}

	/* reject matches that are too far from either word to be plausible */
	if (best_dist > 2 * strlen(word) || (best && best_dist > 2 * strlen(best)))
		best = NULL;

	return best;
}
2017-10-16 09:49:32 -04:00
/* Parse a string representing a process number or a set of processes. It must
2019-01-26 07:25:14 -05:00
* be " all " , " odd " , " even " , a number between 1 and < max > or a range with
2017-11-22 05:21:58 -05:00
* two such numbers delimited by a dash ( ' - ' ) . On success , it returns
* 0. otherwise it returns 1 with an error message in < err > .
2017-10-16 09:49:32 -04:00
*
* Note : this function can also be used to parse a thread number or a set of
* threads .
*/
2019-01-26 07:25:14 -05:00
int parse_process_number ( const char * arg , unsigned long * proc , int max , int * autoinc , char * * err )
2017-10-16 09:49:32 -04:00
{
2017-11-22 09:01:51 -05:00
if ( autoinc ) {
* autoinc = 0 ;
if ( strncmp ( arg , " auto: " , 5 ) = = 0 ) {
arg + = 5 ;
* autoinc = 1 ;
}
}
2017-10-16 09:49:32 -04:00
if ( strcmp ( arg , " all " ) = = 0 )
2017-11-22 05:21:58 -05:00
* proc | = ~ 0UL ;
2017-10-16 09:49:32 -04:00
else if ( strcmp ( arg , " odd " ) = = 0 )
2017-11-22 05:21:58 -05:00
* proc | = ~ 0UL / 3UL ; /* 0x555....555 */
2017-10-16 09:49:32 -04:00
else if ( strcmp ( arg , " even " ) = = 0 )
2017-11-22 05:21:58 -05:00
* proc | = ( ~ 0UL / 3UL ) < < 1 ; /* 0xAAA...AAA */
2017-10-16 09:49:32 -04:00
else {
2019-02-07 10:29:41 -05:00
const char * p , * dash = NULL ;
2017-11-22 04:24:40 -05:00
unsigned int low , high ;
2019-02-07 10:29:41 -05:00
for ( p = arg ; * p ; p + + ) {
if ( * p = = ' - ' & & ! dash )
dash = p ;
2020-02-25 02:16:33 -05:00
else if ( ! isdigit ( ( unsigned char ) * p ) ) {
2019-02-07 10:29:41 -05:00
memprintf ( err , " '%s' is not a valid number/range. " , arg ) ;
return - 1 ;
}
2017-11-22 05:21:58 -05:00
}
2017-11-22 04:24:40 -05:00
low = high = str2uic ( arg ) ;
2019-02-07 10:29:41 -05:00
if ( dash )
2019-01-26 07:25:14 -05:00
high = ( ( ! * ( dash + 1 ) ) ? max : str2uic ( dash + 1 ) ) ;
2017-11-22 10:38:49 -05:00
2017-11-22 04:24:40 -05:00
if ( high < low ) {
unsigned int swap = low ;
low = high ;
high = swap ;
}
2019-01-26 07:25:14 -05:00
if ( low < 1 | | low > max | | high > max ) {
2017-11-22 10:50:41 -05:00
memprintf ( err , " '%s' is not a valid number/range. "
" It supports numbers from 1 to %d. \n " ,
2019-01-26 07:25:14 -05:00
arg , max ) ;
2017-11-22 05:21:58 -05:00
return 1 ;
}
2017-11-22 04:24:40 -05:00
for ( ; low < = high ; low + + )
2017-11-22 05:21:58 -05:00
* proc | = 1UL < < ( low - 1 ) ;
2017-10-16 09:49:32 -04:00
}
2019-01-26 07:25:14 -05:00
* proc & = ~ 0UL > > ( LONGBITS - max ) ;
2017-11-22 04:24:40 -05:00
2017-11-22 05:21:58 -05:00
return 0 ;
2017-10-16 09:49:32 -04:00
}
2019-01-11 05:07:15 -05:00
/* Allocate and initialize the frontend of a "peers" section found in
2022-07-25 09:10:44 -04:00
* file < file > at line < linenum > with < id > as ID .
2019-01-11 05:07:15 -05:00
* Return 0 if succeeded , - 1 if not .
2018-04-26 04:06:41 -04:00
* Note that this function may be called from " default-server "
* or " peer " lines .
2019-01-11 05:07:15 -05:00
*/
2022-07-25 09:10:44 -04:00
static int init_peers_frontend ( const char * file , int linenum ,
const char * id , struct peers * peers )
2019-01-11 05:07:15 -05:00
{
struct proxy * p ;
2025-04-09 15:57:39 -04:00
char * errmsg = NULL ;
2019-01-11 05:07:15 -05:00
2018-04-26 04:06:41 -04:00
if ( peers - > peers_fe ) {
p = peers - > peers_fe ;
goto out ;
}
2019-01-11 05:47:12 -05:00
2025-04-09 15:57:39 -04:00
p = alloc_new_proxy ( NULL , PR_CAP_FE | PR_CAP_BE , & errmsg ) ;
2019-01-11 05:07:15 -05:00
if ( ! p ) {
2025-04-09 15:57:39 -04:00
ha_alert ( " parsing [%s:%d] : %s \n " , file , linenum , errmsg ) ;
ha_free ( & errmsg ) ;
2019-01-11 05:07:15 -05:00
return - 1 ;
}
2018-04-26 04:06:41 -04:00
peers_setup_frontend ( p ) ;
2019-01-11 05:07:15 -05:00
p - > parent = peers ;
2018-04-26 04:06:41 -04:00
/* Finally store this frontend. */
peers - > peers_fe = p ;
out :
2022-07-25 09:10:44 -04:00
if ( id & & ! p - > id )
p - > id = strdup ( id ) ;
2024-09-19 09:35:11 -04:00
drop_file_name ( & p - > conf . file ) ;
p - > conf . args . file = p - > conf . file = copy_file_name ( file ) ;
2019-01-11 08:06:12 -05:00
if ( linenum ! = - 1 )
p - > conf . args . line = p - > conf . line = linenum ;
2019-01-11 05:07:15 -05:00
return 0 ;
}
2010-01-28 13:33:49 -05:00
2019-01-11 08:06:12 -05:00
/* Only change ->file, ->line and ->arg struct bind_conf member values
* if already present .
*/
static struct bind_conf * bind_conf_uniq_alloc ( struct proxy * p ,
const char * file , int line ,
const char * arg , struct xprt_ops * xprt )
{
struct bind_conf * bind_conf ;
if ( ! LIST_ISEMPTY ( & p - > conf . bind ) ) {
bind_conf = LIST_ELEM ( ( & p - > conf . bind ) - > n , typeof ( bind_conf ) , by_fe ) ;
2022-05-25 04:12:07 -04:00
/*
* We keep bind_conf - > file and bind_conf - > line unchanged
* to make them available for error messages
*/
2019-01-11 08:06:12 -05:00
if ( arg ) {
free ( bind_conf - > arg ) ;
bind_conf - > arg = strdup ( arg ) ;
}
}
else {
bind_conf = bind_conf_alloc ( p , file , line , arg , xprt ) ;
}
return bind_conf ;
}
/*
* Allocate a new struct peer parsed at line < linenum > in file < file >
* to be added to < peers > .
* Returns the new allocated structure if succeeded , NULL if not .
*/
static struct peer * cfg_peers_add_peer ( struct peers * peers ,
const char * file , int linenum ,
const char * id , int local )
{
struct peer * p ;
p = calloc ( 1 , sizeof * p ) ;
if ( ! p ) {
ha_alert ( " parsing [%s:%d] : out of memory. \n " , file , linenum ) ;
return NULL ;
}
/* the peers are linked backwards first */
peers - > count + + ;
2022-05-12 08:47:52 -04:00
p - > peers = peers ;
2019-01-11 08:06:12 -05:00
p - > next = peers - > remote ;
peers - > remote = p ;
p - > conf . file = strdup ( file ) ;
p - > conf . line = linenum ;
2023-04-28 03:16:15 -04:00
p - > last_change = ns_to_sec ( now_ns ) ;
2019-01-11 08:06:12 -05:00
HA_SPIN_INIT ( & p - > lock ) ;
if ( id )
p - > id = strdup ( id ) ;
if ( local ) {
p - > local = 1 ;
peers - > local = p ;
}
return p ;
}
2006-06-25 20:48:02 -04:00
/*
2015-04-14 10:35:22 -04:00
* Parse a line in a < listen > , < frontend > or < backend > section .
2009-07-23 07:19:11 -04:00
* Returns the error code , 0 if OK , or any combination of :
* - ERR_ABORT : must abort ASAP
* - ERR_FATAL : we can continue parsing but not start the service
* - ERR_WARN : a warning has been emitted
* - ERR_ALERT : an alert has been emitted
* Only the two first ones can stop processing , the two others are just
* indicators .
2006-06-25 20:48:02 -04:00
*/
2010-09-23 12:39:19 -04:00
int cfg_parse_peers ( const char * file , int linenum , char * * args , int kwm )
{
static struct peers * curpeers = NULL ;
2024-04-17 12:43:25 -04:00
static struct sockaddr_storage * bind_addr = NULL ;
2022-10-17 08:58:19 -04:00
static int nb_shards = 0 ;
2010-09-23 12:39:19 -04:00
struct peer * newpeer = NULL ;
const char * err ;
2012-09-20 10:48:07 -04:00
struct bind_conf * bind_conf ;
2010-09-23 12:39:19 -04:00
int err_code = 0 ;
2013-03-10 14:44:48 -04:00
char * errmsg = NULL ;
2019-01-11 08:06:12 -05:00
static int bind_line , peer_line ;
if ( strcmp ( args [ 0 ] , " bind " ) = = 0 | | strcmp ( args [ 0 ] , " default-bind " ) = = 0 ) {
int cur_arg ;
struct bind_conf * bind_conf ;
2022-05-20 09:44:17 -04:00
int ret ;
2019-01-11 08:06:12 -05:00
cur_arg = 1 ;
2010-09-23 12:39:19 -04:00
2022-07-25 09:10:44 -04:00
if ( init_peers_frontend ( file , linenum , NULL , curpeers ) ! = 0 ) {
2018-04-26 04:06:41 -04:00
err_code | = ERR_ALERT | ERR_ABORT ;
goto out ;
}
2019-01-11 08:06:12 -05:00
bind_conf = bind_conf_uniq_alloc ( curpeers - > peers_fe , file , linenum ,
2022-07-05 09:54:09 -04:00
args [ 1 ] , xprt_get ( XPRT_RAW ) ) ;
2022-07-06 08:30:23 -04:00
if ( ! bind_conf ) {
ha_alert ( " parsing [%s:%d] : '%s %s' : cannot allocate memory. \n " , file , linenum , args [ 0 ] , args [ 1 ] ) ;
err_code | = ERR_FATAL ;
goto out ;
}
2023-01-12 12:52:23 -05:00
bind_conf - > maxaccept = 1 ;
2023-01-12 13:10:17 -05:00
bind_conf - > accept = session_accept_fd ;
2023-01-12 13:58:42 -05:00
bind_conf - > options | = BC_O_UNLIMITED ; /* don't make the peers subject to global limits */
2023-01-12 12:52:23 -05:00
2019-01-11 08:06:12 -05:00
if ( * args [ 0 ] = = ' b ' ) {
struct listener * l ;
if ( peer_line ) {
ha_alert ( " parsing [%s:%d] : mixing \" peer \" and \" bind \" line is forbidden \n " , file , linenum ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
2022-05-25 04:12:07 -04:00
if ( ! LIST_ISEMPTY ( & bind_conf - > listeners ) ) {
ha_alert ( " parsing [%s:%d] : One listener per \" peers \" section is authorized but another is already configured at [%s:%d]. \n " , file , linenum , bind_conf - > file , bind_conf - > line ) ;
err_code | = ERR_FATAL ;
}
2019-01-11 08:06:12 -05:00
if ( ! str2listener ( args [ 1 ] , curpeers - > peers_fe , bind_conf , file , linenum , & errmsg ) ) {
if ( errmsg & & * errmsg ) {
indent_msg ( & errmsg , 2 ) ;
ha_alert ( " parsing [%s:%d] : '%s %s' : %s \n " , file , linenum , args [ 0 ] , args [ 1 ] , errmsg ) ;
}
else
ha_alert ( " parsing [%s:%d] : '%s %s' : error encountered while parsing listening address %s. \n " ,
2022-05-20 09:19:48 -04:00
file , linenum , args [ 0 ] , args [ 1 ] , args [ 1 ] ) ;
2019-01-11 08:06:12 -05:00
err_code | = ERR_FATAL ;
goto out ;
}
2023-01-12 13:58:42 -05:00
BUG/MINOR: peers: Improve detection of config errors in peers sections
There are several misuses in peers sections that are not detected during the
configuration parsing and that could lead to undefined behaviors or crashes.
First, only one listener is expected for a peers section. If several bind
lines or local peer definitions are used, an error is triggered. However, if
multiple addresses are set on the same bind line, there is no error while
only the last listener is properly configured. On the 2.8, there is no crash
but side effects are hardly predictable. On older version, HAProxy crashes
if an unconfigured listener is used.
Then, there is no check on remote peers name. It is unexpected to have same
name for several remote peers. There is now a test, performed during the
post-parsing, to verify all remote peer names are unique.
Finally, server parsing options for the peers sections are changed to be
sure a port is always defined, and not a port range or a port offset.
This patch fixes the issue #2066. It could be backported to all stable
versions.
2023-06-02 08:10:36 -04:00
/* Only one listener supported. Compare first listener
* against the last one . It must be the same one .
*/
if ( bind_conf - > listeners . n ! = bind_conf - > listeners . p ) {
ha_alert ( " parsing [%s:%d] : Only one listener per \" peers \" section is authorized. Multiple listening addresses or port range are not supported. \n " , file , linenum ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
2022-05-25 04:25:45 -04:00
/*
* Newly allocated listener is at the end of the list
*/
l = LIST_ELEM ( bind_conf - > listeners . p , typeof ( l ) , by_bind ) ;
2024-04-17 12:43:25 -04:00
bind_addr = & l - > rx . addr ;
2023-01-12 13:58:42 -05:00
2019-02-27 10:25:28 -05:00
global . maxsock + + ; /* for the listening socket */
2019-01-11 08:06:12 -05:00
bind_line = 1 ;
if ( cfg_peers - > local ) {
2024-04-17 12:43:25 -04:00
/* Local peer already defined using "server" line has no
* address yet , we should update its server ' s addr : port
* settings
*/
2019-01-11 08:06:12 -05:00
newpeer = cfg_peers - > local ;
2024-04-17 12:43:25 -04:00
BUG_ON ( ! newpeer - > srv ) ;
newpeer - > srv - > addr = * bind_addr ;
newpeer - > srv - > svc_port = get_host_port ( bind_addr ) ;
2019-01-11 08:06:12 -05:00
}
else {
/* This peer is local.
* Note that we do not set the peer ID . This latter is initialized
* when parsing " peer " or " server " line .
*/
newpeer = cfg_peers_add_peer ( curpeers , file , linenum , NULL , 1 ) ;
if ( ! newpeer ) {
err_code | = ERR_ALERT | ERR_ABORT ;
goto out ;
}
}
cur_arg + + ;
}
2022-05-20 09:44:17 -04:00
ret = bind_parse_args_list ( bind_conf , args , cur_arg , cursection , file , linenum ) ;
err_code | = ret ;
if ( ret ! = 0 )
2019-01-11 08:06:12 -05:00
goto out ;
}
else if ( strcmp ( args [ 0 ] , " default-server " ) = = 0 ) {
2022-07-25 09:10:44 -04:00
if ( init_peers_frontend ( file , - 1 , NULL , curpeers ) ! = 0 ) {
2019-01-11 08:06:12 -05:00
err_code | = ERR_ALERT | ERR_ABORT ;
goto out ;
}
2021-03-08 10:36:46 -05:00
err_code | = parse_server ( file , linenum , args , curpeers - > peers_fe , NULL ,
SRV_PARSE_DEFAULT_SERVER | SRV_PARSE_IN_PEER_SECTION | SRV_PARSE_INITIAL_RESOLVE ) ;
2018-04-26 04:06:41 -04:00
}
2019-11-05 03:57:45 -05:00
else if ( strcmp ( args [ 0 ] , " log " ) = = 0 ) {
2022-07-25 09:10:44 -04:00
if ( init_peers_frontend ( file , linenum , NULL , curpeers ) ! = 0 ) {
2019-11-05 03:57:45 -05:00
err_code | = ERR_ALERT | ERR_ABORT ;
goto out ;
}
MEDIUM: tree-wide: logsrv struct becomes logger
When 'log' directive was implemented, the internal representation was
named 'struct logsrv', because the 'log' directive would directly point
to the log target, which used to be a (UDP) log server exclusively at
that time, hence the name.
But things have become more complex, since today 'log' directive can point
to ring targets (implicit, or named) for example.
Indeed, a 'log' directive does no longer reference the "final" server to
which the log will be sent, but instead it describes which log API and
parameters to use for transporting the log messages to the proper log
destination.
So now the term 'logsrv' is rather confusing and prevents us from
introducing a new level of abstraction because they would be mixed
with logsrv.
So in order to better designate this 'log' directive, and make it more
generic, we chose the word 'logger' which now replaces logsrv everywhere
it was used in the code (including related comments).
This is internal rewording, so no functional change should be expected
on user-side.
2023-09-11 09:06:53 -04:00
if ( ! parse_logger ( args , & curpeers - > peers_fe - > loggers , ( kwm = = KWM_NO ) , file , linenum , & errmsg ) ) {
2019-11-05 03:57:45 -05:00
ha_alert ( " parsing [%s:%d] : %s : %s \n " , file , linenum , args [ 0 ] , errmsg ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
}
2018-04-26 04:06:41 -04:00
else if ( strcmp ( args [ 0 ] , " peers " ) = = 0 ) { /* new peers section */
2019-01-11 08:06:12 -05:00
/* Initialize these static variables when entering a new "peers" section*/
bind_line = peer_line = 0 ;
2024-04-17 12:43:25 -04:00
bind_addr = NULL ;
2013-03-05 05:31:55 -05:00
if ( ! * args [ 1 ] ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : missing name for peers section. \n " , file , linenum ) ;
2014-02-16 02:20:13 -05:00
err_code | = ERR_ALERT | ERR_ABORT ;
2013-03-05 05:31:55 -05:00
goto out ;
}
2010-09-23 12:39:19 -04:00
BUG/MINOR: cfgparse: fix NULL ptr dereference in cfg_parse_peers
When "peers" keyword is followed by more than one argument and it's the first
"peers" section in the config, cfg_parse_peers() detects it and exits with
"ERR_ALERT|ERR_FATAL" err_code.
So, the upper layer parser, parse_cfg(), continues and parses the next keyword
"peer" and then it tries to check the global cfg_peers, which should contain
"my_cluster". The global cfg_peers is still NULL, because after alerting a user
in alertif_too_many_args, cfg_parse_peers() exited.
peers my_cluster __some_wrong_data__
peer haproxy1 1.1.1.1 1000
In order to fix this, let's add ERR_ABORT, if the "peers" keyword is followed by
more than one argument. Like this, parse_cfg() will stop immediately and
terminate haproxy with the "too many args for peers my_cluster..." alert message.
It's more reliable than adding "if (cfg_peers != NULL)" checks in the "peer"
subparser, as we may have many "peers" sections.
peers my_another_cluster
peer haproxy1 1.1.1.2 1000
peers my_cluster __some_wrong_data__
peer haproxy1 1.1.1.1 1000
In addition, for the example above, parse_cfg() will parse all configuration
until the end and only then terminates haproxy with the alert
"too many args...". Peer haproxy1 will be wrongly associated with
my_another_cluster.
This fixes the issue #2872.
This should be backported in all stable versions.
2025-02-20 09:00:38 -05:00
if ( alertif_too_many_args ( 1 , file , linenum , args , & err_code ) ) {
err_code | = ERR_ABORT ;
2015-04-28 10:55:23 -04:00
goto out ;
BUG/MINOR: cfgparse: fix NULL ptr dereference in cfg_parse_peers
When "peers" keyword is followed by more than one argument and it's the first
"peers" section in the config, cfg_parse_peers() detects it and exits with
"ERR_ALERT|ERR_FATAL" err_code.
So, upper layer parser, parse_cfg(), continues and parses the next keyword
"peer" and then he tries to check the global cfg_peers, which should contain
"my_cluster". The global cfg_peers is still NULL, because after alerting a user
in alertif_too_many_args, cfg_parse_peers() exited.
peers my_cluster __some_wrong_data__
peer haproxy1 1.1.1.1 1000
In order to fix this, let's add ERR_ABORT, if "peers" keyword is followed by
more than one argument. Like this parse_cfg() will stops immediately and
terminates haproxy with "too many args for peers my_cluster..." alert message.
It's more reliable, than add checks "if (cfg_peers !=NULL)" in "peer"
subparser, as we may have many "peers" sections.
peers my_another_cluster
peer haproxy1 1.1.1.2 1000
peers my_cluster __some_wrong_data__
peer haproxy1 1.1.1.1 1000
In addition, for the example above, parse_cfg() will parse all configuration
until the end and only then terminates haproxy with the alert
"too many args...". Peer haproxy1 will be wrongly associated with
my_another_cluster.
This fixes the issue #2872.
This should be backported in all stable versions.
2025-02-20 09:00:38 -05:00
}
2015-04-28 10:55:23 -04:00
2010-09-23 12:39:19 -04:00
err = invalid_char ( args [ 1 ] ) ;
if ( err ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : character '%c' is not permitted in '%s' name '%s'. \n " ,
file , linenum , * err , args [ 0 ] , args [ 1 ] ) ;
2014-02-16 02:20:13 -05:00
err_code | = ERR_ALERT | ERR_ABORT ;
2013-03-05 05:31:55 -05:00
goto out ;
2010-09-23 12:39:19 -04:00
}
2017-07-13 03:07:09 -04:00
for ( curpeers = cfg_peers ; curpeers ! = NULL ; curpeers = curpeers - > next ) {
2010-09-23 12:39:19 -04:00
/*
* If there are two proxies with the same name only following
* combinations are allowed :
*/
if ( strcmp ( curpeers - > id , args [ 1 ] ) = = 0 ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " Parsing [%s:%d]: peers section '%s' has the same name as another peers section declared at %s:%d. \n " ,
file , linenum , args [ 1 ] , curpeers - > conf . file , curpeers - > conf . line ) ;
2015-05-26 04:35:50 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
2010-09-23 12:39:19 -04:00
}
}
2016-04-03 07:48:43 -04:00
if ( ( curpeers = calloc ( 1 , sizeof ( * curpeers ) ) ) = = NULL ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : out of memory. \n " , file , linenum ) ;
2010-09-23 12:39:19 -04:00
err_code | = ERR_ALERT | ERR_ABORT ;
goto out ;
}
2017-07-13 03:07:09 -04:00
curpeers - > next = cfg_peers ;
cfg_peers = curpeers ;
2012-10-04 02:01:43 -04:00
curpeers - > conf . file = strdup ( file ) ;
2010-09-23 12:39:19 -04:00
curpeers - > conf . line = linenum ;
2023-04-28 03:16:15 -04:00
curpeers - > last_change = ns_to_sec ( now_ns ) ;
2010-09-23 12:39:19 -04:00
curpeers - > id = strdup ( args [ 1 ] ) ;
2020-09-24 02:48:08 -04:00
curpeers - > disabled = 0 ;
2010-09-23 12:39:19 -04:00
}
2018-04-26 04:06:41 -04:00
else if ( strcmp ( args [ 0 ] , " peer " ) = = 0 | |
strcmp ( args [ 0 ] , " server " ) = = 0 ) { /* peer or server definition */
BUG/MINOR: cfgparse/peers: fix inconsistent check for missing peer server
In the "peers" section parser, right after parse_server() is called, we
used to check whether the curpeers->peers_fe->srv pointer was set or not
to know if parse_server() successfully added a server to the peers proxy,
server that we can then associate to the new peer.
However the check is wrong, as curpeers->peers_fe->srv points to the
last added server, if a server was successfully added before the
failing one, we cannot detect that the last parse_server() didn't
add a server. This is known to cause bug with bad "peer"/"server"
statements.
To fix the issue, we save a pointer on the last known
curpeers->peers_fe->srv before parse_server() is called, and we then
compare the save with the pointer after parse_server(), if the value
didn't change, then parse_server() didn't add a server. This makes
the check consistent in all situations.
It should be backported to all stable versions.
2025-03-06 03:05:23 -05:00
struct server * prev_srv ;
2019-01-31 00:48:16 -05:00
int local_peer , peer ;
2021-03-08 10:36:46 -05:00
int parse_addr = 0 ;
2010-09-23 12:39:19 -04:00
2019-01-31 00:48:16 -05:00
peer = * args [ 0 ] = = ' p ' ;
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
local_peer = strcmp ( args [ 1 ] , localpeer ) = = 0 ;
2019-01-11 08:06:12 -05:00
/* The local peer may have already partially been parsed on a "bind" line. */
if ( * args [ 0 ] = = ' p ' ) {
if ( bind_line ) {
ha_alert ( " parsing [%s:%d] : mixing \" peer \" and \" bind \" line is forbidden \n " , file , linenum ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
peer_line = 1 ;
}
if ( cfg_peers - > local & & ! cfg_peers - > local - > id & & local_peer ) {
/* The local peer has already been initialized on a "bind" line.
* Let ' s use it and store its ID .
*/
newpeer = cfg_peers - > local ;
newpeer - > id = strdup ( localpeer ) ;
}
else {
if ( local_peer & & cfg_peers - > local ) {
ha_alert ( " parsing [%s:%d] : '%s %s' : local peer name already referenced at %s:%d. %s \n " ,
file , linenum , args [ 0 ] , args [ 1 ] ,
curpeers - > peers_fe - > conf . file , curpeers - > peers_fe - > conf . line , cfg_peers - > local - > id ) ;
err_code | = ERR_FATAL ;
goto out ;
}
newpeer = cfg_peers_add_peer ( curpeers , file , linenum , args [ 1 ] , local_peer ) ;
if ( ! newpeer ) {
err_code | = ERR_ALERT | ERR_ABORT ;
goto out ;
}
}
2010-09-23 12:39:19 -04:00
2019-01-11 08:06:12 -05:00
/* Line number and peer ID are updated only if this peer is the local one. */
if ( init_peers_frontend ( file ,
newpeer - > local ? linenum : - 1 ,
2022-07-25 09:10:44 -04:00
newpeer - > local ? newpeer - > id : NULL ,
2019-01-11 08:06:12 -05:00
curpeers ) ! = 0 ) {
2018-04-26 04:06:41 -04:00
err_code | = ERR_ALERT | ERR_ABORT ;
2013-02-20 13:20:59 -05:00
goto out ;
2010-09-23 12:39:19 -04:00
}
2013-02-20 13:20:59 -05:00
2019-02-12 13:12:32 -05:00
/* This initializes curpeer->peers->peers_fe->srv.
* The server address is parsed only if we are parsing a " peer " line ,
* or if we are parsing a " server " line and the current peer is not the local one .
*/
2021-03-08 10:36:46 -05:00
parse_addr = ( peer | | ! local_peer ) ? SRV_PARSE_PARSE_ADDR : 0 ;
BUG/MINOR: cfgparse/peers: fix inconsistent check for missing peer server
In the "peers" section parser, right after parse_server() is called, we
used to check whether the curpeers->peers_fe->srv pointer was set or not
to know if parse_server() successfully added a server to the peers proxy,
server that we can then associate to the new peer.
However the check is wrong, as curpeers->peers_fe->srv points to the
last added server, if a server was successfully added before the
failing one, we cannot detect that the last parse_server() didn't
add a server. This is known to cause bug with bad "peer"/"server"
statements.
To fix the issue, we save a pointer on the last known
curpeers->peers_fe->srv before parse_server() is called, and we then
compare the save with the pointer after parse_server(), if the value
didn't change, then parse_server() didn't add a server. This makes
the check consistent in all situations.
It should be backported to all stable versions.
2025-03-06 03:05:23 -05:00
prev_srv = curpeers - > peers_fe - > srv ;
2021-03-08 10:36:46 -05:00
err_code | = parse_server ( file , linenum , args , curpeers - > peers_fe , NULL ,
SRV_PARSE_IN_PEER_SECTION | parse_addr | SRV_PARSE_INITIAL_RESOLVE ) ;
BUG/MINOR: cfgparse/peers: fix inconsistent check for missing peer server
In the "peers" section parser, right after parse_server() is called, we
used to check whether the curpeers->peers_fe->srv pointer was set or not
to know if parse_server() successfuly added a server to the peers proxy,
server that we can then associate to the new peer.
However the check is wrong, as curpeers->peers_fe->srv points to the
last added server, if a server was successfully added before the
failing one, we cannot detect that the last parse_server() didn't
add a server. This is known to cause bug with bad "peer"/"server"
statements.
To fix the issue, we save a pointer on the last known
curpeers->peers_fe->srv before parse_server() is called, and we then
compare the save with the pointer after parse_server(), if the value
didn't change, then parse_server() didn't add a server. This makes
the check consistent in all situations.
It should be backported to all stable versions.
2025-03-06 03:05:23 -05:00
if ( curpeers - > peers_fe - > srv = = prev_srv ) {
/* parse_server didn't add a server:
* Remove the newly allocated peer .
*/
2025-03-06 03:29:05 -05:00
struct peer * p ;
2025-03-07 03:30:47 -05:00
/* while it is tolerated to have a "server" line without address, it isn't
* the case for a " peer " line
*/
if ( peer ) {
ha_warning ( " parsing [%s:%d] : '%s %s' : ignoring invalid peer definition (missing address:port) \n " ,
file , linenum , args [ 0 ] , args [ 1 ] ) ;
err_code | = ERR_WARN ;
}
else {
ha_diag_warning ( " parsing [%s:%d] : '%s %s' : ignoring server (not a local peer, valid address:port is expected) \n " ,
file , linenum , args [ 0 ] , args [ 1 ] ) ;
}
2025-03-06 03:29:05 -05:00
p = curpeers - > remote ;
curpeers - > remote = curpeers - > remote - > next ;
free ( p - > id ) ;
free ( p ) ;
2025-03-07 03:30:47 -05:00
if ( local_peer ) {
/* we only get there with incomplete "peer"
* line for local peer ( missing address ) :
*
* reset curpeers and curpeers fields
2025-03-06 03:29:05 -05:00
* that are local peer related
*/
curpeers - > local = NULL ;
ha_free ( & curpeers - > peers_fe - > id ) ;
2020-04-03 03:43:47 -04:00
}
2010-09-23 12:39:19 -04:00
goto out ;
2020-04-03 03:43:47 -04:00
}
2010-09-23 12:39:19 -04:00
2024-04-17 12:43:25 -04:00
if ( ! parse_addr & & bind_addr ) {
/* local peer declared using "server": has name but no
* address : we use the known " bind " line addr settings
* as implicit server ' s addr and port .
*/
curpeers - > peers_fe - > srv - > addr = * bind_addr ;
curpeers - > peers_fe - > srv - > svc_port = get_host_port ( bind_addr ) ;
}
2022-10-17 08:58:19 -04:00
if ( nb_shards & & curpeers - > peers_fe - > srv - > shard > nb_shards ) {
ha_warning ( " parsing [%s:%d] : '%s %s' : %d peer shard greater value than %d shards value is ignored. \n " ,
file , linenum , args [ 0 ] , args [ 1 ] , curpeers - > peers_fe - > srv - > shard , nb_shards ) ;
curpeers - > peers_fe - > srv - > shard = 0 ;
err_code | = ERR_WARN ;
}
2022-05-31 03:42:44 -04:00
if ( curpeers - > peers_fe - > srv - > init_addr_methods | | curpeers - > peers_fe - > srv - > resolvers_id | |
curpeers - > peers_fe - > srv - > do_check | | curpeers - > peers_fe - > srv - > do_agent ) {
ha_warning ( " parsing [%s:%d] : '%s %s' : init_addr, resolvers, check and agent are ignored for peers. \n " , file , linenum , args [ 0 ] , args [ 1 ] ) ;
err_code | = ERR_WARN ;
}
2017-11-07 04:42:54 -05:00
HA_SPIN_INIT ( & newpeer - > lock ) ;
2012-05-07 12:12:14 -04:00
2019-02-12 13:12:32 -05:00
newpeer - > srv = curpeers - > peers_fe - > srv ;
if ( ! newpeer - > local )
2018-04-25 09:13:38 -04:00
goto out ;
2019-01-11 08:06:12 -05:00
/* The lines above are reserved to "peer" lines. */
if ( * args [ 0 ] = = ' s ' )
2018-04-25 09:32:18 -04:00
goto out ;
2019-01-11 05:27:16 -05:00
2019-01-11 08:06:12 -05:00
bind_conf = bind_conf_uniq_alloc ( curpeers - > peers_fe , file , linenum , args [ 2 ] , xprt_get ( XPRT_RAW ) ) ;
2022-07-06 08:30:23 -04:00
if ( ! bind_conf ) {
ha_alert ( " parsing [%s:%d] : '%s %s' : Cannot allocate memory. \n " , file , linenum , args [ 0 ] , args [ 1 ] ) ;
err_code | = ERR_FATAL ;
goto out ;
}
2019-01-11 05:27:16 -05:00
2023-01-12 12:52:23 -05:00
bind_conf - > maxaccept = 1 ;
2023-01-12 13:10:17 -05:00
bind_conf - > accept = session_accept_fd ;
2023-01-12 13:58:42 -05:00
bind_conf - > options | = BC_O_UNLIMITED ; /* don't make the peers subject to global limits */
2023-01-12 12:52:23 -05:00
2022-05-25 04:12:07 -04:00
if ( ! LIST_ISEMPTY ( & bind_conf - > listeners ) ) {
ha_alert ( " parsing [%s:%d] : One listener per \" peers \" section is authorized but another is already configured at [%s:%d]. \n " , file , linenum , bind_conf - > file , bind_conf - > line ) ;
err_code | = ERR_FATAL ;
}
2019-01-11 05:43:53 -05:00
if ( ! str2listener ( args [ 2 ] , curpeers - > peers_fe , bind_conf , file , linenum , & errmsg ) ) {
if ( errmsg & & * errmsg ) {
indent_msg ( & errmsg , 2 ) ;
ha_alert ( " parsing [%s:%d] : '%s %s' : %s \n " , file , linenum , args [ 0 ] , args [ 1 ] , errmsg ) ;
2019-01-11 05:27:16 -05:00
}
2019-01-11 05:43:53 -05:00
else
ha_alert ( " parsing [%s:%d] : '%s %s' : error encountered while parsing listening address %s. \n " ,
file , linenum , args [ 0 ] , args [ 1 ] , args [ 2 ] ) ;
err_code | = ERR_FATAL ;
goto out ;
}
2019-01-11 08:06:12 -05:00
2019-02-27 10:25:28 -05:00
global . maxsock + + ; /* for the listening socket */
2019-03-14 02:07:41 -04:00
}
2022-10-17 08:58:19 -04:00
else if ( strcmp ( args [ 0 ] , " shards " ) = = 0 ) {
char * endptr ;
if ( ! * args [ 1 ] ) {
ha_alert ( " parsing [%s:%d] : '%s' : missing value \n " , file , linenum , args [ 0 ] ) ;
err_code | = ERR_FATAL ;
goto out ;
}
curpeers - > nb_shards = strtol ( args [ 1 ] , & endptr , 10 ) ;
if ( * endptr ! = ' \0 ' ) {
ha_alert ( " parsing [%s:%d] : '%s' : expects an integer argument, found '%s' \n " ,
file , linenum , args [ 0 ] , args [ 1 ] ) ;
err_code | = ERR_FATAL ;
goto out ;
}
if ( ! curpeers - > nb_shards ) {
ha_alert ( " parsing [%s:%d] : '%s' : expects a strictly positive integer argument \n " ,
file , linenum , args [ 0 ] ) ;
err_code | = ERR_FATAL ;
goto out ;
}
nb_shards = curpeers - > nb_shards ;
}
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
else if ( strcmp ( args [ 0 ] , " table " ) = = 0 ) {
2019-03-14 02:07:41 -04:00
struct stktable * t , * other ;
char * id ;
2019-03-20 10:06:55 -04:00
size_t prefix_len ;
2019-03-14 02:07:41 -04:00
/* Line number and peer ID are updated only if this peer is the local one. */
2022-07-25 09:10:44 -04:00
if ( init_peers_frontend ( file , - 1 , NULL , curpeers ) ! = 0 ) {
2019-03-14 02:07:41 -04:00
err_code | = ERR_ALERT | ERR_ABORT ;
goto out ;
}
2019-03-20 10:06:55 -04:00
/* Build the stick-table name, concatenating the "peers" section name
* followed by a ' / ' character and the table name argument .
*/
chunk_reset ( & trash ) ;
2019-03-20 10:09:45 -04:00
if ( ! chunk_strcpy ( & trash , curpeers - > id ) ) {
2019-03-20 10:06:55 -04:00
ha_alert ( " parsing [%s:%d]: '%s %s' : stick-table name too long. \n " ,
file , linenum , args [ 0 ] , args [ 1 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
prefix_len = trash . data ;
2019-03-20 10:09:45 -04:00
if ( ! chunk_memcat ( & trash , " / " , 1 ) | | ! chunk_strcat ( & trash , args [ 1 ] ) ) {
2019-03-20 10:06:55 -04:00
ha_alert ( " parsing [%s:%d]: '%s %s' : stick-table name too long. \n " ,
file , linenum , args [ 0 ] , args [ 1 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
2019-03-14 02:07:41 -04:00
t = calloc ( 1 , sizeof * t ) ;
2019-03-20 10:06:55 -04:00
id = strdup ( trash . area ) ;
2019-03-14 02:07:41 -04:00
if ( ! t | | ! id ) {
ha_alert ( " parsing [%s:%d]: '%s %s' : memory allocation failed \n " ,
file , linenum , args [ 0 ] , args [ 1 ] ) ;
2020-09-18 05:55:17 -04:00
free ( t ) ;
free ( id ) ;
2019-03-14 02:07:41 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
2023-06-26 14:14:47 -04:00
other = stktable_find_by_name ( trash . area ) ;
if ( other ) {
ha_alert ( " parsing [%s:%d] : stick-table name '%s' conflicts with table declared in %s '%s' at %s:%d. \n " ,
file , linenum , args [ 1 ] ,
other - > proxy ? proxy_cap_str ( other - > proxy - > cap ) : " peers " ,
other - > proxy ? other - > id : other - > peers . p - > id ,
other - > conf . file , other - > conf . line ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
2019-03-20 10:06:55 -04:00
err_code | = parse_stick_table ( file , linenum , args , t , id , id + prefix_len , curpeers ) ;
2020-09-18 05:55:17 -04:00
if ( err_code & ERR_FATAL ) {
free ( t ) ;
free ( id ) ;
2019-03-14 02:07:41 -04:00
goto out ;
2020-09-18 05:55:17 -04:00
}
2019-03-14 02:07:41 -04:00
stktable_store_name ( t ) ;
t - > next = stktables_list ;
stktables_list = t ;
}
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
else if ( strcmp ( args [ 0 ] , " disabled " ) = = 0 ) { /* disables this peers section */
2021-10-06 08:24:19 -04:00
curpeers - > disabled | = PR_FL_DISABLED ;
2015-05-01 14:02:17 -04:00
}
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
else if ( strcmp ( args [ 0 ] , " enabled " ) = = 0 ) { /* enables this peers section (used to revert a disabled default) */
2020-09-24 02:48:08 -04:00
curpeers - > disabled = 0 ;
2015-05-01 14:02:17 -04:00
}
2010-09-23 12:39:19 -04:00
else if ( * args [ 0 ] ! = 0 ) {
2023-06-26 14:43:48 -04:00
struct peers_kw_list * pkwl ;
int index ;
int rc = - 1 ;
list_for_each_entry ( pkwl , & peers_keywords . list , list ) {
for ( index = 0 ; pkwl - > kw [ index ] . kw ! = NULL ; index + + ) {
if ( strcmp ( pkwl - > kw [ index ] . kw , args [ 0 ] ) = = 0 ) {
rc = pkwl - > kw [ index ] . parse ( args , curpeers , file , linenum , & errmsg ) ;
if ( rc < 0 ) {
ha_alert ( " parsing [%s:%d] : %s \n " , file , linenum , errmsg ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
else if ( rc > 0 ) {
ha_warning ( " parsing [%s:%d] : %s \n " , file , linenum , errmsg ) ;
err_code | = ERR_WARN ;
goto out ;
}
goto out ;
}
}
}
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : unknown keyword '%s' in '%s' section \n " , file , linenum , args [ 0 ] , cursection ) ;
2010-09-23 12:39:19 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
out :
2013-03-10 14:44:48 -04:00
free ( errmsg ) ;
2010-09-23 12:39:19 -04:00
return err_code ;
}
2015-01-29 21:22:58 -05:00
/*
2015-04-14 10:35:22 -04:00
* Parse a line in a < listen > , < frontend > or < backend > section .
2015-01-29 21:22:58 -05:00
* Returns the error code , 0 if OK , or any combination of :
* - ERR_ABORT : must abort ASAP
* - ERR_FATAL : we can continue parsing but not start the service
* - ERR_WARN : a warning has been emitted
* - ERR_ALERT : an alert has been emitted
* Only the two first ones can stop processing , the two others are just
* indicators .
*/
int cfg_parse_mailers ( const char * file , int linenum , char * * args , int kwm )
{
static struct mailers * curmailers = NULL ;
struct mailer * newmailer = NULL ;
const char * err ;
int err_code = 0 ;
char * errmsg = NULL ;
if ( strcmp ( args [ 0 ] , " mailers " ) = = 0 ) { /* new mailers section */
if ( ! * args [ 1 ] ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : missing name for mailers section. \n " , file , linenum ) ;
2015-01-29 21:22:58 -05:00
err_code | = ERR_ALERT | ERR_ABORT ;
goto out ;
}
err = invalid_char ( args [ 1 ] ) ;
if ( err ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : character '%c' is not permitted in '%s' name '%s'. \n " ,
file , linenum , * err , args [ 0 ] , args [ 1 ] ) ;
2015-01-29 21:22:58 -05:00
err_code | = ERR_ALERT | ERR_ABORT ;
goto out ;
}
for ( curmailers = mailers ; curmailers ! = NULL ; curmailers = curmailers - > next ) {
/*
* If there are two proxies with the same name only following
* combinations are allowed :
*/
if ( strcmp ( curmailers - > id , args [ 1 ] ) = = 0 ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " Parsing [%s:%d]: mailers section '%s' has the same name as another mailers section declared at %s:%d. \n " ,
file , linenum , args [ 1 ] , curmailers - > conf . file , curmailers - > conf . line ) ;
2015-05-26 04:35:50 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
2015-01-29 21:22:58 -05:00
}
}
2016-04-03 07:48:43 -04:00
if ( ( curmailers = calloc ( 1 , sizeof ( * curmailers ) ) ) = = NULL ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : out of memory. \n " , file , linenum ) ;
2015-01-29 21:22:58 -05:00
err_code | = ERR_ALERT | ERR_ABORT ;
goto out ;
}
curmailers - > next = mailers ;
mailers = curmailers ;
curmailers - > conf . file = strdup ( file ) ;
curmailers - > conf . line = linenum ;
curmailers - > id = strdup ( args [ 1 ] ) ;
2016-02-13 09:33:40 -05:00
curmailers - > timeout . mail = DEF_MAILALERTTIME ; /* XXX: Would like to Skip to the next alert, if any, ASAP.
* But need enough time so that timeouts don ' t occur
* during tcp procssing . For now just us an arbitrary default . */
2015-01-29 21:22:58 -05:00
}
else if ( strcmp ( args [ 0 ] , " mailer " ) = = 0 ) { /* mailer definition */
struct sockaddr_storage * sk ;
int port1 , port2 ;
struct protocol * proto ;
if ( ! * args [ 2 ] ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : '%s' expects <name> and <addr>[:<port>] as arguments. \n " ,
file , linenum , args [ 0 ] ) ;
2015-01-29 21:22:58 -05:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
err = invalid_char ( args [ 1 ] ) ;
if ( err ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : character '%c' is not permitted in server name '%s'. \n " ,
file , linenum , * err , args [ 1 ] ) ;
2015-01-29 21:22:58 -05:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
2016-04-03 07:48:43 -04:00
if ( ( newmailer = calloc ( 1 , sizeof ( * newmailer ) ) ) = = NULL ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : out of memory. \n " , file , linenum ) ;
2015-01-29 21:22:58 -05:00
err_code | = ERR_ALERT | ERR_ABORT ;
goto out ;
}
/* the mailers are linked backwards first */
curmailers - > count + + ;
newmailer - > next = curmailers - > mailer_list ;
curmailers - > mailer_list = newmailer ;
newmailer - > mailers = curmailers ;
newmailer - > conf . file = strdup ( file ) ;
newmailer - > conf . line = linenum ;
newmailer - > id = strdup ( args [ 1 ] ) ;
2023-11-09 05:19:24 -05:00
sk = str2sa_range ( args [ 2 ] , NULL , & port1 , & port2 , NULL , & proto , NULL ,
2024-08-26 05:50:24 -04:00
& errmsg , NULL , NULL , NULL ,
2020-09-16 13:17:08 -04:00
PA_O_RESOLVE | PA_O_PORT_OK | PA_O_PORT_MAND | PA_O_STREAM | PA_O_XPRT | PA_O_CONNECT ) ;
2015-01-29 21:22:58 -05:00
if ( ! sk ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : '%s %s' : %s \n " , file , linenum , args [ 0 ] , args [ 1 ] , errmsg ) ;
2015-01-29 21:22:58 -05:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
2020-09-16 13:17:08 -04:00
if ( proto - > sock_prot ! = IPPROTO_TCP ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : '%s %s' : TCP not supported for this address family. \n " ,
file , linenum , args [ 0 ] , args [ 1 ] ) ;
2015-01-29 21:22:58 -05:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
newmailer - > addr = * sk ;
newmailer - > proto = proto ;
2016-12-22 14:44:00 -05:00
newmailer - > xprt = xprt_get ( XPRT_RAW ) ;
2015-01-29 21:22:58 -05:00
newmailer - > sock_init_arg = NULL ;
2016-02-13 09:33:40 -05:00
}
else if ( strcmp ( args [ 0 ] , " timeout " ) = = 0 ) {
if ( ! * args [ 1 ] ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : '%s' expects 'mail' and <time> as arguments. \n " ,
file , linenum , args [ 0 ] ) ;
2016-02-13 09:33:40 -05:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
else if ( strcmp ( args [ 1 ] , " mail " ) = = 0 ) {
const char * res ;
unsigned int timeout_mail ;
if ( ! * args [ 2 ] ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : '%s %s' expects <time> as argument. \n " ,
file , linenum , args [ 0 ] , args [ 1 ] ) ;
2016-02-13 09:33:40 -05:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
res = parse_time_err ( args [ 2 ] , & timeout_mail , TIME_UNIT_MS ) ;
2019-06-07 13:00:37 -04:00
if ( res = = PARSE_TIME_OVER ) {
ha_alert ( " parsing [%s:%d]: timer overflow in argument <%s> to <%s %s>, maximum value is 2147483647 ms (~24.8 days). \n " ,
file , linenum , args [ 2 ] , args [ 0 ] , args [ 1 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
else if ( res = = PARSE_TIME_UNDER ) {
ha_alert ( " parsing [%s:%d]: timer underflow in argument <%s> to <%s %s>, minimum non-null value is 1 ms. \n " ,
file , linenum , args [ 2 ] , args [ 0 ] , args [ 1 ] ) ;
2016-02-13 09:33:40 -05:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
2019-06-07 13:00:37 -04:00
else if ( res ) {
ha_alert ( " parsing [%s:%d]: unexpected character '%c' in argument to <%s %s>. \n " ,
file , linenum , * res , args [ 0 ] , args [ 1 ] ) ;
2016-02-13 09:33:40 -05:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
curmailers - > timeout . mail = timeout_mail ;
} else {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : '%s' expects 'mail' and <time> as arguments got '%s'. \n " ,
2016-02-13 09:33:40 -05:00
file , linenum , args [ 0 ] , args [ 1 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
}
2015-01-29 21:22:58 -05:00
else if ( * args [ 0 ] ! = 0 ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : unknown keyword '%s' in '%s' section \n " , file , linenum , args [ 0 ] , cursection ) ;
2015-01-29 21:22:58 -05:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
out :
free ( errmsg ) ;
return err_code ;
}
2006-06-25 20:48:02 -04:00
2014-11-17 09:11:45 -05:00
int
cfg_parse_netns ( const char * file , int linenum , char * * args , int kwm )
{
2019-05-22 13:24:06 -04:00
# ifdef USE_NS
2014-11-17 09:11:45 -05:00
const char * err ;
const char * item = args [ 0 ] ;
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
if ( strcmp ( item , " namespace_list " ) = = 0 ) {
2014-11-17 09:11:45 -05:00
return 0 ;
}
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
else if ( strcmp ( item , " namespace " ) = = 0 ) {
2014-11-17 09:11:45 -05:00
size_t idx = 1 ;
const char * current ;
while ( * ( current = args [ idx + + ] ) ) {
err = invalid_char ( current ) ;
if ( err ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: character '%c' is not permitted in '%s' name '%s'. \n " ,
file , linenum , * err , item , current ) ;
2014-11-17 09:11:45 -05:00
return ERR_ALERT | ERR_FATAL ;
}
if ( netns_store_lookup ( current , strlen ( current ) ) ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: Namespace '%s' is already added. \n " ,
file , linenum , current ) ;
2014-11-17 09:11:45 -05:00
return ERR_ALERT | ERR_FATAL ;
}
if ( ! netns_store_insert ( current ) ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: Cannot open namespace '%s'. \n " ,
file , linenum , current ) ;
2014-11-17 09:11:45 -05:00
return ERR_ALERT | ERR_FATAL ;
}
}
}
return 0 ;
# else
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: namespace support is not compiled in. " ,
file , linenum ) ;
2014-11-17 09:11:45 -05:00
return ERR_ALERT | ERR_FATAL ;
# endif
}
2010-01-29 11:50:44 -05:00
int
cfg_parse_users ( const char * file , int linenum , char * * args , int kwm )
{
int err_code = 0 ;
const char * err ;
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
if ( strcmp ( args [ 0 ] , " userlist " ) = = 0 ) { /* new userlist */
2010-01-29 11:50:44 -05:00
struct userlist * newul ;
if ( ! * args [ 1 ] ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: '%s' expects <name> as arguments. \n " ,
file , linenum , args [ 0 ] ) ;
2010-01-29 11:50:44 -05:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
2015-04-28 10:55:23 -04:00
if ( alertif_too_many_args ( 1 , file , linenum , args , & err_code ) )
goto out ;
2010-01-29 11:50:44 -05:00
err = invalid_char ( args [ 1 ] ) ;
if ( err ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: character '%c' is not permitted in '%s' name '%s'. \n " ,
file , linenum , * err , args [ 0 ] , args [ 1 ] ) ;
2010-01-29 11:50:44 -05:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
for ( newul = userlist ; newul ; newul = newul - > next )
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
if ( strcmp ( newul - > name , args [ 1 ] ) = = 0 ) {
2017-11-24 10:50:31 -05:00
ha_warning ( " parsing [%s:%d]: ignoring duplicated userlist '%s'. \n " ,
file , linenum , args [ 1 ] ) ;
2010-01-29 11:50:44 -05:00
err_code | = ERR_WARN ;
goto out ;
}
2016-04-03 07:48:43 -04:00
newul = calloc ( 1 , sizeof ( * newul ) ) ;
2010-01-29 11:50:44 -05:00
if ( ! newul ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: out of memory. \n " , file , linenum ) ;
2010-01-29 11:50:44 -05:00
err_code | = ERR_ALERT | ERR_ABORT ;
goto out ;
}
newul - > name = strdup ( args [ 1 ] ) ;
2014-01-22 12:38:02 -05:00
if ( ! newul - > name ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: out of memory. \n " , file , linenum ) ;
2010-01-29 11:50:44 -05:00
err_code | = ERR_ALERT | ERR_ABORT ;
2016-04-08 05:35:26 -04:00
free ( newul ) ;
2010-01-29 11:50:44 -05:00
goto out ;
}
newul - > next = userlist ;
userlist = newul ;
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
} else if ( strcmp ( args [ 0 ] , " group " ) = = 0 ) { /* new group */
2014-01-22 12:38:02 -05:00
int cur_arg ;
2010-01-29 11:50:44 -05:00
const char * err ;
2014-01-22 12:38:02 -05:00
struct auth_groups * ag ;
2010-01-29 11:50:44 -05:00
if ( ! * args [ 1 ] ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: '%s' expects <name> as arguments. \n " ,
file , linenum , args [ 0 ] ) ;
2010-01-29 11:50:44 -05:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
err = invalid_char ( args [ 1 ] ) ;
if ( err ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: character '%c' is not permitted in '%s' name '%s'. \n " ,
file , linenum , * err , args [ 0 ] , args [ 1 ] ) ;
2010-01-29 11:50:44 -05:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
2015-05-28 12:03:51 -04:00
if ( ! userlist )
goto out ;
2014-01-22 12:38:02 -05:00
for ( ag = userlist - > groups ; ag ; ag = ag - > next )
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
if ( strcmp ( ag - > name , args [ 1 ] ) = = 0 ) {
2017-11-24 10:50:31 -05:00
ha_warning ( " parsing [%s:%d]: ignoring duplicated group '%s' in userlist '%s'. \n " ,
file , linenum , args [ 1 ] , userlist - > name ) ;
2010-01-29 11:50:44 -05:00
err_code | = ERR_ALERT ;
goto out ;
}
2014-01-22 12:38:02 -05:00
ag = calloc ( 1 , sizeof ( * ag ) ) ;
if ( ! ag ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: out of memory. \n " , file , linenum ) ;
2014-01-22 12:38:02 -05:00
err_code | = ERR_ALERT | ERR_ABORT ;
goto out ;
}
ag - > name = strdup ( args [ 1 ] ) ;
2016-08-22 18:27:42 -04:00
if ( ! ag - > name ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: out of memory. \n " , file , linenum ) ;
2014-01-22 12:38:02 -05:00
err_code | = ERR_ALERT | ERR_ABORT ;
2016-08-22 18:27:42 -04:00
free ( ag ) ;
2010-01-29 11:50:44 -05:00
goto out ;
}
cur_arg = 2 ;
while ( * args [ cur_arg ] ) {
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
if ( strcmp ( args [ cur_arg ] , " users " ) = = 0 ) {
2025-02-06 10:21:20 -05:00
if ( ag - > groupusers ) {
ha_alert ( " parsing [%s:%d]: 'users' option already defined in '%s' name '%s'. \n " ,
file , linenum , args [ 0 ] , args [ 1 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
free ( ag - > groupusers ) ;
free ( ag - > name ) ;
free ( ag ) ;
goto out ;
}
2014-01-22 12:38:02 -05:00
ag - > groupusers = strdup ( args [ cur_arg + 1 ] ) ;
2010-01-29 11:50:44 -05:00
cur_arg + = 2 ;
continue ;
} else {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: '%s' only supports 'users' option. \n " ,
file , linenum , args [ 0 ] ) ;
2010-01-29 11:50:44 -05:00
err_code | = ERR_ALERT | ERR_FATAL ;
2016-08-22 18:27:42 -04:00
free ( ag - > groupusers ) ;
free ( ag - > name ) ;
free ( ag ) ;
2010-01-29 11:50:44 -05:00
goto out ;
}
}
2014-01-22 12:38:02 -05:00
ag - > next = userlist - > groups ;
userlist - > groups = ag ;
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
} else if ( strcmp ( args [ 0 ] , " user " ) = = 0 ) { /* new user */
2010-01-29 11:50:44 -05:00
struct auth_users * newuser ;
int cur_arg ;
if ( ! * args [ 1 ] ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: '%s' expects <name> as arguments. \n " ,
file , linenum , args [ 0 ] ) ;
2010-01-29 11:50:44 -05:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
2015-05-28 12:03:51 -04:00
if ( ! userlist )
goto out ;
2010-01-29 11:50:44 -05:00
for ( newuser = userlist - > users ; newuser ; newuser = newuser - > next )
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
if ( strcmp ( newuser - > user , args [ 1 ] ) = = 0 ) {
2017-11-24 10:50:31 -05:00
ha_warning ( " parsing [%s:%d]: ignoring duplicated user '%s' in userlist '%s'. \n " ,
file , linenum , args [ 1 ] , userlist - > name ) ;
2010-01-29 11:50:44 -05:00
err_code | = ERR_ALERT ;
goto out ;
}
2016-04-03 07:48:43 -04:00
newuser = calloc ( 1 , sizeof ( * newuser ) ) ;
2010-01-29 11:50:44 -05:00
if ( ! newuser ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: out of memory. \n " , file , linenum ) ;
2010-01-29 11:50:44 -05:00
err_code | = ERR_ALERT | ERR_ABORT ;
goto out ;
}
newuser - > user = strdup ( args [ 1 ] ) ;
newuser - > next = userlist - > users ;
userlist - > users = newuser ;
cur_arg = 2 ;
while ( * args [ cur_arg ] ) {
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
if ( strcmp ( args [ cur_arg ] , " password " ) = = 0 ) {
2019-05-22 13:24:06 -04:00
# ifdef USE_LIBCRYPT
2014-08-29 14:20:02 -04:00
if ( ! crypt ( " " , args [ cur_arg + 1 ] ) ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: the encrypted password used for user '%s' is not supported by crypt(3). \n " ,
file , linenum , newuser - > user ) ;
2014-08-29 14:20:02 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
# else
2017-11-24 10:50:31 -05:00
ha_warning ( " parsing [%s:%d]: no crypt(3) support compiled, encrypted passwords will not work. \n " ,
file , linenum ) ;
2010-01-29 11:50:44 -05:00
err_code | = ERR_ALERT ;
# endif
newuser - > pass = strdup ( args [ cur_arg + 1 ] ) ;
cur_arg + = 2 ;
continue ;
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
} else if ( strcmp ( args [ cur_arg ] , " insecure-password " ) = = 0 ) {
2010-01-29 11:50:44 -05:00
newuser - > pass = strdup ( args [ cur_arg + 1 ] ) ;
newuser - > flags | = AU_O_INSECURE ;
cur_arg + = 2 ;
continue ;
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
} else if ( strcmp ( args [ cur_arg ] , " groups " ) = = 0 ) {
2014-01-22 12:38:02 -05:00
newuser - > u . groups_names = strdup ( args [ cur_arg + 1 ] ) ;
2010-01-29 11:50:44 -05:00
cur_arg + = 2 ;
continue ;
} else {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: '%s' only supports 'password', 'insecure-password' and 'groups' options. \n " ,
file , linenum , args [ 0 ] ) ;
2010-01-29 11:50:44 -05:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
}
} else {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: unknown keyword '%s' in '%s' section \n " , file , linenum , args [ 0 ] , " users " ) ;
2010-01-29 11:50:44 -05:00
err_code | = ERR_ALERT | ERR_FATAL ;
}
out :
return err_code ;
}
2006-06-25 20:48:02 -04:00
2016-11-04 17:36:15 -04:00
int
cfg_parse_scope ( const char * file , int linenum , char * line )
{
char * beg , * end , * scope = NULL ;
int err_code = 0 ;
const char * err ;
beg = line + 1 ;
end = strchr ( beg , ' ] ' ) ;
/* Detect end of scope declaration */
if ( ! end | | end = = beg ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : empty scope name is forbidden. \n " ,
file , linenum ) ;
2016-11-04 17:36:15 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
/* Get scope name and check its validity */
scope = my_strndup ( beg , end - beg ) ;
err = invalid_char ( scope ) ;
if ( err ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : character '%c' is not permitted in a scope name. \n " ,
file , linenum , * err ) ;
2016-11-04 17:36:15 -04:00
err_code | = ERR_ALERT | ERR_ABORT ;
goto out ;
}
/* Be sure to have a scope declaration alone on its line */
line = end + 1 ;
while ( isspace ( ( unsigned char ) * line ) )
line + + ;
if ( * line & & * line ! = ' # ' & & * line ! = ' \n ' & & * line ! = ' \r ' ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : character '%c' is not permitted after scope declaration. \n " ,
file , linenum , * line ) ;
2016-11-04 17:36:15 -04:00
err_code | = ERR_ALERT | ERR_ABORT ;
goto out ;
}
/* We have a valid scope declaration, save it */
free ( cfg_scope ) ;
cfg_scope = scope ;
scope = NULL ;
out :
free ( scope ) ;
return err_code ;
}
2018-01-29 06:05:07 -05:00
int
cfg_parse_track_sc_num ( unsigned int * track_sc_num ,
const char * arg , const char * end , char * * errmsg )
{
const char * p ;
unsigned int num ;
p = arg ;
num = read_uint64 ( & arg , end ) ;
if ( arg ! = end ) {
memprintf ( errmsg , " Wrong track-sc number '%s' " , p ) ;
return - 1 ;
}
2023-01-06 10:09:58 -05:00
if ( num > = global . tune . nb_stk_ctr ) {
if ( ! global . tune . nb_stk_ctr )
memprintf ( errmsg , " %u track-sc number not usable, stick-counters "
" are disabled by tune.stick-counters " , num ) ;
else
memprintf ( errmsg , " %u track-sc number exceeding "
" %d (tune.stick-counters-1) value " , num , global . tune . nb_stk_ctr - 1 ) ;
2018-01-29 06:05:07 -05:00
return - 1 ;
}
* track_sc_num = num ;
return 0 ;
}
2021-03-31 05:43:47 -04:00
/*
* Detect a global section after a non - global one and output a diagnostic
* warning .
*/
2022-11-18 09:46:06 -05:00
static void check_section_position ( char * section_name , const char * file , int linenum )
2021-03-31 05:43:47 -04:00
{
2021-10-16 11:48:15 -04:00
if ( strcmp ( section_name , " global " ) = = 0 ) {
2022-11-18 09:46:06 -05:00
if ( ( global . mode & MODE_DIAG ) & & non_global_section_parsed = = 1 )
2021-03-31 05:43:47 -04:00
_ha_diag_warning ( " parsing [%s:%d] : global section detected after a non-global one, the prevalence of their statements is unspecified \n " , file , linenum ) ;
}
2022-11-18 09:46:06 -05:00
else if ( non_global_section_parsed = = 0 ) {
non_global_section_parsed = 1 ;
2021-03-31 05:43:47 -04:00
}
}
2021-04-27 14:29:11 -04:00
/* apply the current default_path setting for config file <file>, and
* optionally replace the current path to < origin > if not NULL while the
* default - path mode is set to " origin " . Errors are returned into an
* allocated string passed to < err > if it ' s not NULL . Returns 0 on failure
* or non - zero on success .
*/
/* apply the current default_path setting for config file <file>, and
 * optionally replace the current path to <origin> if not NULL while the
 * default-path mode is set to "origin". Errors are returned into an
 * allocated string passed to <err> if it's not NULL. Returns 0 on failure
 * or non-zero on success.
 *
 * Fixes:
 *  - the snprintf() truncation check now uses ">=": snprintf() returns the
 *    length that would have been written, so a return equal to the buffer
 *    size also means truncation and was previously missed;
 *  - the final chdir() error message now reports <current_cwd>, which is
 *    the directory that actually failed, instead of <initial_cwd>.
 */
static int cfg_apply_default_path(const char *file, const char *origin, char **err)
{
	const char *beg, *end;

	/* make path start at <beg> and end before <end>, and switch it to ""
	 * if no slash was passed.
	 */
	beg = file;
	end = strrchr(beg, '/');
	if (!end)
		end = beg;

	if (!*initial_cwd) {
		/* first call: remember the startup directory */
		if (getcwd(initial_cwd, sizeof(initial_cwd)) == NULL) {
			if (err)
				memprintf(err, "Impossible to retrieve startup directory name: %s", strerror(errno));
			return 0;
		}
	}
	else if (chdir(initial_cwd) == -1) {
		if (err)
			memprintf(err, "Impossible to get back to initial directory '%s': %s", initial_cwd, strerror(errno));
		return 0;
	}

	/* OK now we're (back) to initial_cwd */

	switch (default_path_mode) {
	case DEFAULT_PATH_CURRENT:
		/* current_cwd never set, nothing to do */
		return 1;

	case DEFAULT_PATH_ORIGIN:
		/* current_cwd set in the config; ">=" catches the case where
		 * the formatted length exactly equals the buffer size, which
		 * also indicates truncation.
		 */
		if (origin &&
		    snprintf(current_cwd, sizeof(current_cwd), "%s", origin) >= sizeof(current_cwd)) {
			if (err)
				memprintf(err, "Absolute path too long: '%s'", origin);
			return 0;
		}
		break;

	case DEFAULT_PATH_CONFIG:
		/* use the config file's own directory */
		if (end - beg >= sizeof(current_cwd)) {
			if (err)
				memprintf(err, "Config file path too long, cannot use for relative paths: '%s'", file);
			return 0;
		}
		memcpy(current_cwd, beg, end - beg);
		current_cwd[end - beg] = 0;
		break;

	case DEFAULT_PATH_PARENT:
		/* use the parent of the config file's directory; reserve room
		 * for the appended "/.." (or "..") plus the NUL.
		 */
		if (end - beg + 3 >= sizeof(current_cwd)) {
			if (err)
				memprintf(err, "Config file path too long, cannot use for relative paths: '%s'", file);
			return 0;
		}
		memcpy(current_cwd, beg, end - beg);
		if (end > beg)
			memcpy(current_cwd + (end - beg), "/..\0", 4);
		else
			memcpy(current_cwd + (end - beg), "..\0", 3);
		break;
	}

	if (*current_cwd && chdir(current_cwd) == -1) {
		if (err)
			memprintf(err, "Impossible to get back to directory '%s': %s", current_cwd, strerror(errno));
		return 0;
	}

	return 1;
}
/* parses a global "default-path" directive. */
static int cfg_parse_global_def_path ( char * * args , int section_type , struct proxy * curpx ,
const struct proxy * defpx , const char * file , int line ,
char * * err )
{
int ret = - 1 ;
/* "current", "config", "parent", "origin <path>" */
if ( strcmp ( args [ 1 ] , " current " ) = = 0 )
default_path_mode = DEFAULT_PATH_CURRENT ;
else if ( strcmp ( args [ 1 ] , " config " ) = = 0 )
default_path_mode = DEFAULT_PATH_CONFIG ;
else if ( strcmp ( args [ 1 ] , " parent " ) = = 0 )
default_path_mode = DEFAULT_PATH_PARENT ;
else if ( strcmp ( args [ 1 ] , " origin " ) = = 0 )
default_path_mode = DEFAULT_PATH_ORIGIN ;
else {
memprintf ( err , " %s default-path mode '%s' for '%s', supported modes include 'current', 'config', 'parent', and 'origin'. " , * args [ 1 ] ? " unsupported " : " missing " , args [ 1 ] , args [ 0 ] ) ;
goto end ;
}
if ( default_path_mode = = DEFAULT_PATH_ORIGIN ) {
if ( ! * args [ 2 ] ) {
memprintf ( err , " '%s %s' expects a directory as an argument. " , args [ 0 ] , args [ 1 ] ) ;
goto end ;
}
if ( ! cfg_apply_default_path ( file , args [ 2 ] , err ) ) {
memprintf ( err , " couldn't set '%s' to origin '%s': %s. " , args [ 0 ] , args [ 2 ] , * err ) ;
goto end ;
}
}
else if ( ! cfg_apply_default_path ( file , NULL , err ) ) {
memprintf ( err , " couldn't set '%s' to '%s': %s. " , args [ 0 ] , args [ 1 ] , * err ) ;
goto end ;
}
/* note that once applied, the path is immediately updated */
ret = 0 ;
end :
return ret ;
}
2024-08-07 12:20:43 -04:00
/* append a copy of string <filename>, ptr to some allocated memory at the at
* the end of the list < li > .
2024-08-07 12:12:48 -04:00
* On failure : return 0 and < err > filled with an error message .
2024-08-07 12:20:43 -04:00
* The caller is responsible for freeing the < err > and < filename > copy
* memory area using free ( ) .
2024-08-07 12:12:48 -04:00
*/
2024-08-07 12:20:43 -04:00
int list_append_cfgfile ( struct list * li , const char * filename , char * * err )
2024-08-07 12:12:48 -04:00
{
2024-08-07 12:20:43 -04:00
struct cfgfile * entry = NULL ;
2024-08-07 12:12:48 -04:00
2024-08-07 12:20:43 -04:00
entry = calloc ( 1 , sizeof ( * entry ) ) ;
if ( ! entry ) {
2024-08-07 12:12:48 -04:00
memprintf ( err , " out of memory " ) ;
2024-08-07 12:20:43 -04:00
goto fail_entry ;
2024-08-07 12:12:48 -04:00
}
2024-08-07 12:20:43 -04:00
entry - > filename = strdup ( filename ) ;
if ( ! entry - > filename ) {
2024-08-07 12:12:48 -04:00
memprintf ( err , " out of memory " ) ;
2024-08-07 12:20:43 -04:00
goto fail_entry_name ;
2024-08-07 12:12:48 -04:00
}
2024-08-07 12:20:43 -04:00
LIST_APPEND ( li , & entry - > list ) ;
2024-08-07 12:12:48 -04:00
return 1 ;
2024-08-07 12:20:43 -04:00
fail_entry_name :
free ( entry - > filename ) ;
fail_entry :
free ( entry ) ;
2024-08-07 12:12:48 -04:00
return 0 ;
}
2024-08-05 04:03:39 -04:00
/* loads the content of the given file in memory. On success, returns the number
 * of bytes successfully stored at *cfg_content until EOF. On error, emits
 * alerts, performs needed clean-up routines and returns -1.
 */
ssize_t load_cfg_in_mem(char *filename, char **cfg_content)
{
	size_t bytes_to_read = LINESIZE;	/* size of the next fread() request */
	size_t chunk_size = 0;			/* currently allocated buffer size */
	size_t read_bytes = 0;			/* total bytes stored so far */
	struct stat file_stat;
	char *new_area;
	size_t ret = 0;
	FILE *f;

	/* let's try to obtain the size, if regular file */
	if (stat(filename, &file_stat) != 0) {
		ha_alert("stat() failed for configuration file %s : %s\n",
			 filename, strerror(errno));
		return -1;
	}

	/* when stat() reports a size (regular file), start with a single read
	 * of that size; otherwise keep the LINESIZE default (e.g. pipes and
	 * special files report st_size == 0).
	 */
	if (file_stat.st_size > chunk_size)
		bytes_to_read = file_stat.st_size;

	if ((f = fopen(filename, "r")) == NULL) {
		ha_alert("Could not open configuration file %s : %s\n",
			 filename, strerror(errno));
		return -1;
	}

	*cfg_content = NULL;

	while (1) {
		/* unknown-size inputs (st_size == 0) are bounded by
		 * MAX_CFG_SIZE to avoid unbounded memory growth; sized files
		 * are implicitly bounded by their stat() size.
		 */
		if (!file_stat.st_size && ((read_bytes + bytes_to_read) > MAX_CFG_SIZE)) {
			ha_alert("Loading %s: input is too large %ldMB, limited to %dMB. Exiting.\n",
				 filename, (long)(read_bytes + bytes_to_read) / (1024 * 1024),
				 MAX_CFG_SIZE / (1024 * 1024));
			goto free_mem;
		}

		/* grow the buffer geometrically (x2) whenever the pending read
		 * would not fit into the currently allocated area.
		 */
		if (read_bytes + bytes_to_read > chunk_size) {
			chunk_size = (read_bytes + bytes_to_read) * 2;
			new_area = realloc(*cfg_content, chunk_size);
			if (new_area == NULL) {
				ha_alert("Loading %s: file too long, cannot allocate memory.\n",
					 filename);
				goto free_mem;
			}
			*cfg_content = new_area;
		}

		/* fill all remaining free space in the buffer in one call */
		bytes_to_read = chunk_size - read_bytes;
		ret = fread(*cfg_content + read_bytes, sizeof(char), bytes_to_read, f);
		read_bytes += ret;

		/* NOTE(review): a short read due to ferror() also exits here
		 * and the bytes read so far are returned as success — verify
		 * whether a read error should instead be reported to caller.
		 */
		if (!ret || feof(f) || ferror(f))
			break;
	}
	fclose(f);

	return read_bytes;

free_mem:
	/* ha_free() releases *cfg_content and resets the pointer to NULL */
	ha_free(cfg_content);
	fclose(f);
	return -1;
}
2006-06-25 20:48:02 -04:00
/*
2024-08-07 10:53:50 -04:00
* This function parses the configuration file given in the argument .
* Returns the error code , 0 if OK , - 1 if we are run out of memory ,
2021-04-27 12:30:28 -04:00
* or any combination of :
2009-07-20 03:30:05 -04:00
* - ERR_ABORT : must abort ASAP
* - ERR_FATAL : we can continue parsing but not start the service
* - ERR_WARN : a warning has been emitted
* - ERR_ALERT : an alert has been emitted
* Only the two first ones can stop processing , the two others are just
* indicators .
2006-06-25 20:48:02 -04:00
*/
2024-08-05 04:04:03 -04:00
int parse_cfg ( const struct cfgfile * cfg )
2006-06-25 20:48:02 -04:00
{
2021-04-27 12:30:28 -04:00
char * thisline = NULL ;
2015-05-12 08:25:37 -04:00
int linesize = LINESIZE ;
2006-06-25 20:48:02 -04:00
int linenum = 0 ;
2009-07-20 03:30:05 -04:00
int err_code = 0 ;
2017-10-16 05:06:50 -04:00
struct cfg_section * cs = NULL , * pcs = NULL ;
2014-03-18 08:54:18 -04:00
struct cfg_section * ics ;
2015-05-12 08:25:37 -04:00
int readbytes = 0 ;
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parse, parse_line()".
2020-06-16 10:32:59 -04:00
char * outline = NULL ;
size_t outlen = 0 ;
size_t outlinesize = 0 ;
2020-06-16 11:14:33 -04:00
int fatal = 0 ;
2020-06-22 16:57:45 -04:00
int missing_lf = - 1 ;
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-nul integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
int nested_cond_lvl = 0 ;
enum nested_cond_state nested_conds [ MAXNESTEDCONDS ] ;
2021-04-27 14:29:11 -04:00
char * errmsg = NULL ;
2024-08-07 10:53:50 -04:00
const char * cur_position = cfg - > content ;
char * file = cfg - > filename ;
2015-05-12 08:25:37 -04:00
2021-05-06 04:04:45 -04:00
global . cfg_curr_line = 0 ;
global . cfg_curr_file = file ;
2015-05-12 08:25:37 -04:00
if ( ( thisline = malloc ( sizeof ( * thisline ) * linesize ) ) = = NULL ) {
2021-04-27 12:30:28 -04:00
ha_alert ( " Out of memory trying to allocate a buffer for a configuration line. \n " ) ;
err_code = - 1 ;
goto err ;
2015-05-12 08:25:37 -04:00
}
2014-03-18 08:54:18 -04:00
2021-04-27 14:29:11 -04:00
/* change to the new dir if required */
if ( ! cfg_apply_default_path ( file , NULL , & errmsg ) ) {
ha_alert ( " parsing [%s:%d]: failed to apply default-path: %s. \n " , file , linenum , errmsg ) ;
free ( errmsg ) ;
err_code = - 1 ;
goto err ;
}
2015-05-12 08:27:13 -04:00
next_line :
2024-08-07 10:53:50 -04:00
while ( fgets_from_mem ( thisline + readbytes , linesize - readbytes ,
& cur_position , cfg - > content + cfg - > size ) ) {
2009-06-14 05:39:52 -04:00
int arg , kwm = KWM_STD ;
2007-10-31 19:33:12 -04:00
char * end ;
char * args [ MAX_LINE_ARGS + 1 ] ;
char * line = thisline ;
2020-06-22 16:57:45 -04:00
if ( missing_lf ! = - 1 ) {
2020-08-18 16:00:04 -04:00
ha_alert ( " parsing [%s:%d]: Stray NUL character at position %d. \n " ,
file , linenum , ( missing_lf + 1 ) ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
2020-06-22 16:57:45 -04:00
missing_lf = - 1 ;
2020-08-18 16:00:04 -04:00
break ;
2020-06-22 16:57:45 -04:00
}
2006-06-25 20:48:02 -04:00
linenum + + ;
2021-05-06 04:04:45 -04:00
global . cfg_curr_line = linenum ;
2006-06-25 20:48:02 -04:00
2020-06-16 11:14:33 -04:00
if ( fatal > = 50 ) {
ha_alert ( " parsing [%s:%d]: too many fatal errors (%d), stopping now. \n " , file , linenum , fatal ) ;
break ;
}
2006-06-25 20:48:02 -04:00
end = line + strlen ( line ) ;
2015-05-12 08:25:37 -04:00
if ( end - line = = linesize - 1 & & * ( end - 1 ) ! = ' \n ' ) {
2007-10-31 19:33:12 -04:00
/* Check if we reached the limit and the last char is not \n.
* Watch out for the last line without the terminating ' \n ' !
*/
2015-05-12 08:25:37 -04:00
char * newline ;
int newlinesize = linesize * 2 ;
newline = realloc ( thisline , sizeof ( * thisline ) * newlinesize ) ;
if ( newline = = NULL ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: line too long, cannot allocate memory. \n " ,
file , linenum ) ;
2015-05-12 08:25:37 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
2020-06-16 11:14:33 -04:00
fatal + + ;
2020-06-25 03:37:54 -04:00
linenum - - ;
2015-05-12 08:25:37 -04:00
continue ;
}
readbytes = linesize - 1 ;
linesize = newlinesize ;
thisline = newline ;
2020-06-25 03:37:54 -04:00
linenum - - ;
2015-05-12 08:25:37 -04:00
continue ;
2007-10-31 19:33:12 -04:00
}
2015-05-12 08:25:37 -04:00
readbytes = 0 ;
2020-06-26 11:24:54 -04:00
if ( end > line & & * ( end - 1 ) = = ' \n ' ) {
2020-06-22 16:57:44 -04:00
/* kill trailing LF */
* ( end - 1 ) = 0 ;
}
2020-06-22 16:57:45 -04:00
else {
/* mark this line as truncated */
missing_lf = end - line ;
}
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
2006-06-25 20:48:02 -04:00
/* skip leading spaces */
2007-06-17 15:51:38 -04:00
while ( isspace ( ( unsigned char ) * line ) )
2006-06-25 20:48:02 -04:00
line + + ;
2015-05-05 11:37:14 -04:00
2018-11-15 17:04:19 -05:00
if ( * line = = ' [ ' ) { /* This is the beginning if a scope */
2016-11-04 17:36:15 -04:00
err_code | = cfg_parse_scope ( file , linenum , line ) ;
goto next_line ;
}
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
while ( 1 ) {
uint32_t err ;
2025-05-09 03:55:39 -04:00
const char * errptr = NULL ;
2025-05-02 09:47:41 -04:00
int check_arg ;
2006-06-25 20:48:02 -04:00
2021-06-05 18:50:20 -04:00
arg = sizeof ( args ) / sizeof ( * args ) ;
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
outlen = outlinesize ;
err = parse_line ( line , outline , & outlen , args , & arg ,
PARSE_OPT_ENV | PARSE_OPT_DQUOTE | PARSE_OPT_SQUOTE |
2020-10-01 08:32:35 -04:00
PARSE_OPT_BKSLASH | PARSE_OPT_SHARP | PARSE_OPT_WORD_EXPAND ,
& errptr ) ;
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
if ( err & PARSE_ERR_QUOTE ) {
2020-06-25 03:15:40 -04:00
size_t newpos = sanitize_for_printing ( line , errptr - line , 80 ) ;
ha_alert ( " parsing [%s:%d]: unmatched quote at position %d: \n "
" %s \n %*s \n " , file , linenum , ( int ) ( errptr - thisline + 1 ) , line , ( int ) ( newpos + 1 ) , " ^ " ) ;
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
2020-06-16 11:14:33 -04:00
fatal + + ;
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
goto next_line ;
2015-05-05 11:37:14 -04:00
}
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
if ( err & PARSE_ERR_BRACE ) {
2020-06-25 03:15:40 -04:00
size_t newpos = sanitize_for_printing ( line , errptr - line , 80 ) ;
ha_alert ( " parsing [%s:%d]: unmatched brace in environment variable name at position %d: \n "
" %s \n %*s \n " , file , linenum , ( int ) ( errptr - thisline + 1 ) , line , ( int ) ( newpos + 1 ) , " ^ " ) ;
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
2020-06-16 11:14:33 -04:00
fatal + + ;
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
goto next_line ;
2015-05-05 11:37:14 -04:00
}
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
if ( err & PARSE_ERR_VARNAME ) {
2020-06-25 03:15:40 -04:00
size_t newpos = sanitize_for_printing ( line , errptr - line , 80 ) ;
ha_alert ( " parsing [%s:%d]: forbidden first char in environment variable name at position %d: \n "
" %s \n %*s \n " , file , linenum , ( int ) ( errptr - thisline + 1 ) , line , ( int ) ( newpos + 1 ) , " ^ " ) ;
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
2020-06-16 11:14:33 -04:00
fatal + + ;
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
goto next_line ;
2006-06-25 20:48:02 -04:00
}
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
if ( err & PARSE_ERR_HEX ) {
2020-06-25 03:15:40 -04:00
size_t newpos = sanitize_for_printing ( line , errptr - line , 80 ) ;
ha_alert ( " parsing [%s:%d]: truncated or invalid hexadecimal sequence at position %d: \n "
" %s \n %*s \n " , file , linenum , ( int ) ( errptr - thisline + 1 ) , line , ( int ) ( newpos + 1 ) , " ^ " ) ;
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
2020-06-16 12:14:21 -04:00
fatal + + ;
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
goto next_line ;
2006-06-25 20:48:02 -04:00
}
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
2020-10-01 08:32:35 -04:00
if ( err & PARSE_ERR_WRONG_EXPAND ) {
size_t newpos = sanitize_for_printing ( line , errptr - line , 80 ) ;
ha_alert ( " parsing [%s:%d]: truncated or invalid word expansion sequence at position %d: \n "
" %s \n %*s \n " , file , linenum , ( int ) ( errptr - thisline + 1 ) , line , ( int ) ( newpos + 1 ) , " ^ " ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
fatal + + ;
goto next_line ;
}
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
if ( err & ( PARSE_ERR_TOOLARGE | PARSE_ERR_OVERLAP ) ) {
outlinesize = ( outlen + 1023 ) & - 1024 ;
2021-01-07 12:45:13 -05:00
outline = my_realloc2 ( outline , outlinesize ) ;
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
if ( outline = = NULL ) {
ha_alert ( " parsing [%s:%d]: line too long, cannot allocate memory. \n " ,
file , linenum ) ;
2022-05-20 03:13:38 -04:00
err_code | = ERR_ALERT | ERR_FATAL | ERR_ABORT ;
2020-06-16 11:14:33 -04:00
fatal + + ;
2022-05-18 10:22:43 -04:00
outlinesize = 0 ;
2022-05-20 03:13:38 -04:00
goto err ;
2015-05-12 08:27:13 -04:00
}
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
/* try again */
continue ;
2006-06-25 20:48:02 -04:00
}
2020-06-25 01:41:22 -04:00
if ( err & PARSE_ERR_TOOMANY ) {
/* only check this *after* being sure the output is allocated */
ha_alert ( " parsing [%s:%d]: too many words, truncating after word %d, position %ld: <%s>. \n " ,
file , linenum , MAX_LINE_ARGS , ( long ) ( args [ MAX_LINE_ARGS - 1 ] - outline + 1 ) , args [ MAX_LINE_ARGS - 1 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
fatal + + ;
goto next_line ;
}
2025-06-04 05:26:27 -04:00
if ( ( global . mode & MODE_DISCOVERY ) ) {
/* Only print empty arg warning in discovery mode to prevent double display. */
for ( check_arg = 0 ; check_arg < arg ; check_arg + + ) {
if ( ! * args [ check_arg ] ) {
size_t newpos ;
/* if an empty arg was found, its pointer should be in <errptr>, except
* for rare cases such as ' \x00 ' etc . We need to check errptr in any case
* and if it ' s not set , we ' ll fall back to args ' s position in the output
* string instead ( less accurate but still useful ) .
*/
if ( ! errptr ) {
newpos = args [ check_arg ] - outline ;
if ( newpos > = strlen ( line ) )
newpos = 0 ; // impossible to report anything, start at the beginning.
errptr = line + newpos ;
}
/* sanitize input line in-place */
newpos = sanitize_for_printing ( line , errptr - line , 80 ) ;
ha_warning ( " parsing [%s:%d]: argument number %d at position %d is empty and marks the end of the "
2025-06-04 09:36:13 -04:00
" argument list; all subsequent arguments will be ignored: \n %s \n %*s \n " ,
file , linenum , check_arg , ( int ) ( errptr - thisline + 1 ) , line , ( int ) ( newpos + 1 ) , " ^ " ) ;
2025-06-04 05:26:27 -04:00
break ;
2025-05-12 10:06:28 -04:00
}
2025-05-05 10:13:33 -04:00
}
2025-05-02 09:47:41 -04:00
}
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parser, parse_line()".
2020-06-16 10:32:59 -04:00
/* everything's OK */
break ;
2015-05-05 11:37:14 -04:00
}
2006-06-25 20:48:02 -04:00
2022-09-14 11:51:55 -04:00
/* dump cfg */
if ( global . mode & MODE_DUMP_CFG ) {
if ( args [ 0 ] ! = NULL ) {
struct cfg_section * sect ;
int is_sect = 0 ;
int i = 0 ;
uint32_t g_key = HA_ATOMIC_LOAD ( & global . anon_key ) ;
2022-09-29 04:34:04 -04:00
if ( global . mode & MODE_DUMP_NB_L )
qfprintf ( stdout , " %d \t " , linenum ) ;
2022-09-14 11:51:55 -04:00
/* if a word is in sections list, is_sect = 1 */
list_for_each_entry ( sect , & sections , list ) {
2025-02-10 09:07:05 -05:00
/* look for a section_name, but also a section_parser, because there might be
* only a post_section_parser */
if ( strcmp ( args [ 0 ] , sect - > section_name ) = = 0 & &
sect - > section_parser ) {
2022-09-14 11:51:55 -04:00
is_sect = 1 ;
break ;
}
}
if ( g_key = = 0 ) {
/* no anonymizing needed, dump the config as-is (but without comments).
* Note : tabs were lost during tokenizing , so we reinsert for non - section
* keywords .
*/
if ( ! is_sect )
qfprintf ( stdout , " \t " ) ;
for ( i = 0 ; i < arg ; i + + ) {
qfprintf ( stdout , " %s " , args [ i ] ) ;
}
qfprintf ( stdout , " \n " ) ;
continue ;
}
/* We're anonymizing */
if ( is_sect ) {
/* new sections are optionally followed by an identifier */
if ( arg > = 2 ) {
qfprintf ( stdout , " %s %s \n " , args [ 0 ] , HA_ANON_ID ( g_key , args [ 1 ] ) ) ;
}
else {
qfprintf ( stdout , " %s \n " , args [ 0 ] ) ;
}
continue ;
}
/* non-section keywords start indented */
qfprintf ( stdout , " \t " ) ;
/* some keywords deserve special treatment */
if ( ! * args [ 0 ] ) {
qfprintf ( stdout , " \n " ) ;
}
else if ( strcmp ( args [ 0 ] , " anonkey " ) = = 0 ) {
qfprintf ( stdout , " %s [...] \n " , args [ 0 ] ) ;
}
else if ( strcmp ( args [ 0 ] , " maxconn " ) = = 0 ) {
qfprintf ( stdout , " %s %s \n " , args [ 0 ] , args [ 1 ] ) ;
}
else if ( strcmp ( args [ 0 ] , " stats " ) = = 0 & &
( strcmp ( args [ 1 ] , " timeout " ) = = 0 | | strcmp ( args [ 1 ] , " maxconn " ) = = 0 ) ) {
qfprintf ( stdout , " %s %s %s \n " , args [ 0 ] , args [ 1 ] , args [ 2 ] ) ;
}
else if ( strcmp ( args [ 0 ] , " stats " ) = = 0 & & strcmp ( args [ 1 ] , " socket " ) = = 0 ) {
qfprintf ( stdout , " %s %s " , args [ 0 ] , args [ 1 ] ) ;
2022-09-29 04:30:00 -04:00
if ( arg > 2 ) {
2022-09-29 04:25:31 -04:00
qfprintf ( stdout , " %s " , hash_ipanon ( g_key , args [ 2 ] , 1 ) ) ;
2022-09-14 11:51:55 -04:00
2022-09-29 04:30:00 -04:00
if ( arg > 3 ) {
2022-09-14 11:51:55 -04:00
qfprintf ( stdout , " [...] \n " ) ;
}
else {
qfprintf ( stdout , " \n " ) ;
}
}
else {
qfprintf ( stdout , " \n " ) ;
}
}
else if ( strcmp ( args [ 0 ] , " timeout " ) = = 0 ) {
qfprintf ( stdout , " %s %s %s \n " , args [ 0 ] , args [ 1 ] , args [ 2 ] ) ;
}
else if ( strcmp ( args [ 0 ] , " mode " ) = = 0 ) {
qfprintf ( stdout , " %s %s \n " , args [ 0 ] , args [ 1 ] ) ;
}
2022-10-29 00:34:32 -04:00
/* It concerns user in global section and in userlist */
2022-09-14 11:51:55 -04:00
else if ( strcmp ( args [ 0 ] , " user " ) = = 0 ) {
qfprintf ( stdout , " %s %s " , args [ 0 ] , HA_ANON_ID ( g_key , args [ 1 ] ) ) ;
if ( arg > 2 ) {
qfprintf ( stdout , " [...] \n " ) ;
}
else {
qfprintf ( stdout , " \n " ) ;
}
}
else if ( strcmp ( args [ 0 ] , " bind " ) = = 0 ) {
qfprintf ( stdout , " %s " , args [ 0 ] ) ;
2022-09-29 04:25:31 -04:00
qfprintf ( stdout , " %s " , hash_ipanon ( g_key , args [ 1 ] , 1 ) ) ;
2022-09-14 11:51:55 -04:00
if ( arg > 2 ) {
qfprintf ( stdout , " [...] \n " ) ;
}
else {
qfprintf ( stdout , " \n " ) ;
}
}
else if ( strcmp ( args [ 0 ] , " server " ) = = 0 ) {
2022-09-29 04:31:18 -04:00
qfprintf ( stdout , " %s %s " , args [ 0 ] , HA_ANON_ID ( g_key , args [ 1 ] ) ) ;
2022-09-14 11:51:55 -04:00
if ( arg > 2 ) {
2022-09-29 04:25:31 -04:00
qfprintf ( stdout , " %s " , hash_ipanon ( g_key , args [ 2 ] , 1 ) ) ;
2022-09-14 11:51:55 -04:00
}
if ( arg > 3 ) {
qfprintf ( stdout , " [...] \n " ) ;
}
else {
qfprintf ( stdout , " \n " ) ;
}
}
else if ( strcmp ( args [ 0 ] , " redirect " ) = = 0 ) {
qfprintf ( stdout , " %s %s " , args [ 0 ] , args [ 1 ] ) ;
if ( strcmp ( args [ 1 ] , " prefix " ) = = 0 | | strcmp ( args [ 1 ] , " location " ) = = 0 ) {
qfprintf ( stdout , " %s " , HA_ANON_PATH ( g_key , args [ 2 ] ) ) ;
}
else {
qfprintf ( stdout , " %s " , args [ 2 ] ) ;
}
if ( arg > 3 ) {
qfprintf ( stdout , " [...] " ) ;
}
qfprintf ( stdout , " \n " ) ;
}
else if ( strcmp ( args [ 0 ] , " acl " ) = = 0 ) {
qfprintf ( stdout , " %s %s %s " , args [ 0 ] , HA_ANON_ID ( g_key , args [ 1 ] ) , args [ 2 ] ) ;
if ( arg > 3 ) {
qfprintf ( stdout , " [...] " ) ;
}
qfprintf ( stdout , " \n " ) ;
}
else if ( strcmp ( args [ 0 ] , " log " ) = = 0 ) {
qfprintf ( stdout , " log " ) ;
if ( strcmp ( args [ 1 ] , " global " ) = = 0 ) {
qfprintf ( stdout , " %s " , args [ 1 ] ) ;
}
else {
2022-09-29 04:25:31 -04:00
qfprintf ( stdout , " %s " , hash_ipanon ( g_key , args [ 1 ] , 1 ) ) ;
2022-09-14 11:51:55 -04:00
}
if ( arg > 2 ) {
qfprintf ( stdout , " [...] " ) ;
}
qfprintf ( stdout , " \n " ) ;
}
else if ( strcmp ( args [ 0 ] , " peer " ) = = 0 ) {
qfprintf ( stdout , " %s %s " , args [ 0 ] , HA_ANON_ID ( g_key , args [ 1 ] ) ) ;
2022-09-29 04:25:31 -04:00
qfprintf ( stdout , " %s " , hash_ipanon ( g_key , args [ 2 ] , 1 ) ) ;
2022-09-14 11:51:55 -04:00
if ( arg > 3 ) {
qfprintf ( stdout , " [...] " ) ;
}
qfprintf ( stdout , " \n " ) ;
}
else if ( strcmp ( args [ 0 ] , " use_backend " ) = = 0 ) {
qfprintf ( stdout , " %s %s " , args [ 0 ] , HA_ANON_ID ( g_key , args [ 1 ] ) ) ;
if ( arg > 2 ) {
qfprintf ( stdout , " [...] " ) ;
}
qfprintf ( stdout , " \n " ) ;
}
else if ( strcmp ( args [ 0 ] , " default_backend " ) = = 0 ) {
qfprintf ( stdout , " %s %s \n " , args [ 0 ] , HA_ANON_ID ( g_key , args [ 1 ] ) ) ;
}
2022-09-29 04:31:18 -04:00
else if ( strcmp ( args [ 0 ] , " source " ) = = 0 ) {
qfprintf ( stdout , " %s %s " , args [ 0 ] , hash_ipanon ( g_key , args [ 1 ] , 1 ) ) ;
if ( arg > 2 ) {
qfprintf ( stdout , " [...] " ) ;
}
qfprintf ( stdout , " \n " ) ;
}
else if ( strcmp ( args [ 0 ] , " nameserver " ) = = 0 ) {
qfprintf ( stdout , " %s %s %s " , args [ 0 ] ,
HA_ANON_ID ( g_key , args [ 1 ] ) , hash_ipanon ( g_key , args [ 2 ] , 1 ) ) ;
if ( arg > 3 ) {
qfprintf ( stdout , " [...] " ) ;
}
qfprintf ( stdout , " \n " ) ;
}
else if ( strcmp ( args [ 0 ] , " http-request " ) = = 0 ) {
qfprintf ( stdout , " %s %s " , args [ 0 ] , args [ 1 ] ) ;
if ( arg > 2 )
qfprintf ( stdout , " [...] " ) ;
qfprintf ( stdout , " \n " ) ;
}
else if ( strcmp ( args [ 0 ] , " http-response " ) = = 0 ) {
qfprintf ( stdout , " %s %s " , args [ 0 ] , args [ 1 ] ) ;
if ( arg > 2 )
qfprintf ( stdout , " [...] " ) ;
qfprintf ( stdout , " \n " ) ;
}
else if ( strcmp ( args [ 0 ] , " http-after-response " ) = = 0 ) {
qfprintf ( stdout , " %s %s " , args [ 0 ] , args [ 1 ] ) ;
if ( arg > 2 )
qfprintf ( stdout , " [...] " ) ;
qfprintf ( stdout , " \n " ) ;
}
else if ( strcmp ( args [ 0 ] , " filter " ) = = 0 ) {
qfprintf ( stdout , " %s %s " , args [ 0 ] , args [ 1 ] ) ;
if ( arg > 2 )
qfprintf ( stdout , " [...] " ) ;
qfprintf ( stdout , " \n " ) ;
}
else if ( strcmp ( args [ 0 ] , " errorfile " ) = = 0 ) {
qfprintf ( stdout , " %s %s %s \n " , args [ 0 ] , args [ 1 ] , HA_ANON_PATH ( g_key , args [ 2 ] ) ) ;
}
else if ( strcmp ( args [ 0 ] , " cookie " ) = = 0 ) {
qfprintf ( stdout , " %s %s " , args [ 0 ] , HA_ANON_ID ( g_key , args [ 1 ] ) ) ;
if ( arg > 2 )
qfprintf ( stdout , " %s " , args [ 2 ] ) ;
if ( arg > 3 )
qfprintf ( stdout , " [...] " ) ;
qfprintf ( stdout , " \n " ) ;
}
else if ( strcmp ( args [ 0 ] , " stats " ) = = 0 & & strcmp ( args [ 1 ] , " auth " ) = = 0 ) {
qfprintf ( stdout , " %s %s %s \n " , args [ 0 ] , args [ 1 ] , HA_ANON_STR ( g_key , args [ 2 ] ) ) ;
}
2022-09-14 11:51:55 -04:00
else {
/* display up to 3 words and mask the rest which might be confidential */
for ( i = 0 ; i < MIN ( arg , 3 ) ; i + + ) {
qfprintf ( stdout , " %s " , args [ i ] ) ;
}
if ( arg > 3 ) {
qfprintf ( stdout , " [...] " ) ;
}
qfprintf ( stdout , " \n " ) ;
}
}
continue ;
}
/* end of config dump */
2006-06-25 20:48:02 -04:00
/* empty line */
2024-08-09 03:25:37 -04:00
if ( ! * args | | ! * * args )
2006-06-25 20:48:02 -04:00
continue ;
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-null integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
/* check for config macros */
if ( * args [ 0 ] = = ' . ' ) {
if ( strcmp ( args [ 0 ] , " .if " ) = = 0 ) {
2021-05-06 09:07:10 -04:00
const char * errptr = NULL ;
2021-05-06 02:19:48 -04:00
char * errmsg = NULL ;
int cond ;
2021-07-16 10:38:58 -04:00
char * w ;
2021-05-06 02:19:48 -04:00
2021-07-16 10:38:58 -04:00
/* remerge all words into a single expression */
for ( w = * args ; ( w + = strlen ( w ) ) < outline + outlen - 1 ; * w = ' ' )
;
2021-05-26 11:45:33 -04:00
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-null integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
nested_cond_lvl + + ;
if ( nested_cond_lvl > = MAXNESTEDCONDS ) {
ha_alert ( " parsing [%s:%d]: too many nested '.if', max is %d. \n " , file , linenum , MAXNESTEDCONDS ) ;
err_code | = ERR_ALERT | ERR_FATAL | ERR_ABORT ;
goto err ;
}
2021-05-06 02:46:11 -04:00
if ( nested_cond_lvl > 1 & &
( nested_conds [ nested_cond_lvl - 1 ] = = NESTED_COND_IF_DROP | |
nested_conds [ nested_cond_lvl - 1 ] = = NESTED_COND_IF_SKIP | |
nested_conds [ nested_cond_lvl - 1 ] = = NESTED_COND_ELIF_DROP | |
nested_conds [ nested_cond_lvl - 1 ] = = NESTED_COND_ELIF_SKIP | |
nested_conds [ nested_cond_lvl - 1 ] = = NESTED_COND_ELSE_DROP ) ) {
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-null integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
nested_conds [ nested_cond_lvl ] = NESTED_COND_IF_SKIP ;
2021-05-06 02:19:48 -04:00
goto next_line ;
}
2021-05-06 09:07:10 -04:00
cond = cfg_eval_condition ( args + 1 , & errmsg , & errptr ) ;
2021-05-06 02:19:48 -04:00
if ( cond < 0 ) {
2021-05-06 09:07:10 -04:00
size_t newpos = sanitize_for_printing ( args [ 1 ] , errptr - args [ 1 ] , 76 ) ;
ha_alert ( " parsing [%s:%d]: %s in '.if' at position %d: \n .if %s \n %*s \n " ,
file , linenum , errmsg ,
( int ) ( errptr - args [ 1 ] + 1 ) , args [ 1 ] , ( int ) ( newpos + 5 ) , " ^ " ) ;
2021-05-06 02:19:48 -04:00
free ( errmsg ) ;
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-null integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
err_code | = ERR_ALERT | ERR_FATAL | ERR_ABORT ;
goto err ;
}
2021-05-06 02:19:48 -04:00
if ( cond )
nested_conds [ nested_cond_lvl ] = NESTED_COND_IF_TAKE ;
else
nested_conds [ nested_cond_lvl ] = NESTED_COND_IF_DROP ;
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-null integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
goto next_line ;
}
else if ( strcmp ( args [ 0 ] , " .elif " ) = = 0 ) {
2021-05-06 09:07:10 -04:00
const char * errptr = NULL ;
2021-05-06 02:19:48 -04:00
char * errmsg = NULL ;
int cond ;
2021-07-16 10:38:58 -04:00
char * w ;
2021-05-06 02:19:48 -04:00
2021-07-16 10:38:58 -04:00
/* remerge all words into a single expression */
for ( w = * args ; ( w + = strlen ( w ) ) < outline + outlen - 1 ; * w = ' ' )
;
2021-05-26 11:45:33 -04:00
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-null integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
if ( ! nested_cond_lvl ) {
ha_alert ( " parsing [%s:%d]: lone '.elif' with no matching '.if'. \n " , file , linenum ) ;
err_code | = ERR_ALERT | ERR_FATAL | ERR_ABORT ;
goto err ;
}
if ( nested_conds [ nested_cond_lvl ] = = NESTED_COND_ELSE_TAKE | |
nested_conds [ nested_cond_lvl ] = = NESTED_COND_ELSE_DROP ) {
ha_alert ( " parsing [%s:%d]: '.elif' after '.else' is not permitted. \n " , file , linenum ) ;
err_code | = ERR_ALERT | ERR_FATAL | ERR_ABORT ;
goto err ;
}
if ( nested_conds [ nested_cond_lvl ] = = NESTED_COND_IF_TAKE | |
nested_conds [ nested_cond_lvl ] = = NESTED_COND_IF_SKIP | |
2021-05-06 02:48:09 -04:00
nested_conds [ nested_cond_lvl ] = = NESTED_COND_ELIF_TAKE | |
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-null integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
nested_conds [ nested_cond_lvl ] = = NESTED_COND_ELIF_SKIP ) {
nested_conds [ nested_cond_lvl ] = NESTED_COND_ELIF_SKIP ;
2021-05-06 02:19:48 -04:00
goto next_line ;
}
2021-05-06 09:07:10 -04:00
cond = cfg_eval_condition ( args + 1 , & errmsg , & errptr ) ;
2021-05-06 02:19:48 -04:00
if ( cond < 0 ) {
2021-05-06 09:07:10 -04:00
size_t newpos = sanitize_for_printing ( args [ 1 ] , errptr - args [ 1 ] , 74 ) ;
ha_alert ( " parsing [%s:%d]: %s in '.elif' at position %d: \n .elif %s \n %*s \n " ,
file , linenum , errmsg ,
( int ) ( errptr - args [ 1 ] + 1 ) , args [ 1 ] , ( int ) ( newpos + 7 ) , " ^ " ) ;
2021-05-06 02:19:48 -04:00
free ( errmsg ) ;
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-null integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
err_code | = ERR_ALERT | ERR_FATAL | ERR_ABORT ;
goto err ;
}
2021-05-06 02:19:48 -04:00
if ( cond )
nested_conds [ nested_cond_lvl ] = NESTED_COND_ELIF_TAKE ;
else
nested_conds [ nested_cond_lvl ] = NESTED_COND_ELIF_DROP ;
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-null integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
goto next_line ;
}
else if ( strcmp ( args [ 0 ] , " .else " ) = = 0 ) {
2021-05-26 11:45:33 -04:00
if ( * args [ 1 ] ) {
2021-06-12 06:55:27 -04:00
ha_alert ( " parsing [%s:%d]: Unexpected argument '%s' for '%s'. \n " ,
2021-05-26 11:45:33 -04:00
file , linenum , args [ 1 ] , args [ 0 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL | ERR_ABORT ;
break ;
}
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-null integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
if ( ! nested_cond_lvl ) {
ha_alert ( " parsing [%s:%d]: lone '.else' with no matching '.if'. \n " , file , linenum ) ;
err_code | = ERR_ALERT | ERR_FATAL | ERR_ABORT ;
goto err ;
}
if ( nested_conds [ nested_cond_lvl ] = = NESTED_COND_ELSE_TAKE | |
nested_conds [ nested_cond_lvl ] = = NESTED_COND_ELSE_DROP ) {
ha_alert ( " parsing [%s:%d]: '.else' after '.else' is not permitted. \n " , file , linenum ) ;
err_code | = ERR_ALERT | ERR_FATAL | ERR_ABORT ;
goto err ;
}
if ( nested_conds [ nested_cond_lvl ] = = NESTED_COND_IF_TAKE | |
nested_conds [ nested_cond_lvl ] = = NESTED_COND_IF_SKIP | |
nested_conds [ nested_cond_lvl ] = = NESTED_COND_ELIF_TAKE | |
nested_conds [ nested_cond_lvl ] = = NESTED_COND_ELIF_SKIP ) {
nested_conds [ nested_cond_lvl ] = NESTED_COND_ELSE_DROP ;
} else {
/* otherwise we take the "else" */
nested_conds [ nested_cond_lvl ] = NESTED_COND_ELSE_TAKE ;
}
goto next_line ;
}
else if ( strcmp ( args [ 0 ] , " .endif " ) = = 0 ) {
2021-05-26 11:45:33 -04:00
if ( * args [ 1 ] ) {
2021-06-12 06:55:27 -04:00
ha_alert ( " parsing [%s:%d]: Unexpected argument '%s' for '%s'. \n " ,
2021-05-26 11:45:33 -04:00
file , linenum , args [ 1 ] , args [ 0 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL | ERR_ABORT ;
break ;
}
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-null integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
if ( ! nested_cond_lvl ) {
ha_alert ( " parsing [%s:%d]: lone '.endif' with no matching '.if'. \n " , file , linenum ) ;
2021-05-26 11:45:33 -04:00
err_code | = ERR_ALERT | ERR_FATAL | ERR_ABORT ;
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-nul integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
break ;
}
nested_cond_lvl - - ;
goto next_line ;
}
}
if ( nested_cond_lvl & &
( nested_conds [ nested_cond_lvl ] = = NESTED_COND_IF_DROP | |
nested_conds [ nested_cond_lvl ] = = NESTED_COND_IF_SKIP | |
nested_conds [ nested_cond_lvl ] = = NESTED_COND_ELIF_DROP | |
nested_conds [ nested_cond_lvl ] = = NESTED_COND_ELIF_SKIP | |
nested_conds [ nested_cond_lvl ] = = NESTED_COND_ELSE_DROP ) ) {
/* The current block is masked out by the conditions */
goto next_line ;
}
2021-05-07 02:59:50 -04:00
/* .warning/.error/.notice/.diag */
2025-04-01 03:06:25 -04:00
if ( * args [ 0 ] = = ' . ' & & ! ( global . mode & MODE_DISCOVERY ) ) {
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-nul integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
if ( strcmp ( args [ 0 ] , " .alert " ) = = 0 ) {
2021-05-26 11:45:33 -04:00
if ( * args [ 2 ] ) {
ha_alert ( " parsing [%s:%d]: Unexpected argument '%s' for '%s'. Use quotes if the message should contain spaces. \n " ,
file , linenum , args [ 2 ] , args [ 0 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto next_line ;
}
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-nul integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
ha_alert ( " parsing [%s:%d]: '%s'. \n " , file , linenum , args [ 1 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL | ERR_ABORT ;
goto err ;
}
else if ( strcmp ( args [ 0 ] , " .warning " ) = = 0 ) {
2021-05-26 11:45:33 -04:00
if ( * args [ 2 ] ) {
ha_alert ( " parsing [%s:%d]: Unexpected argument '%s' for '%s'. Use quotes if the message should contain spaces. \n " ,
file , linenum , args [ 2 ] , args [ 0 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto next_line ;
}
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-nul integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
ha_warning ( " parsing [%s:%d]: '%s'. \n " , file , linenum , args [ 1 ] ) ;
err_code | = ERR_WARN ;
goto next_line ;
}
else if ( strcmp ( args [ 0 ] , " .notice " ) = = 0 ) {
2021-05-26 11:45:33 -04:00
if ( * args [ 2 ] ) {
ha_alert ( " parsing [%s:%d]: Unexpected argument '%s' for '%s'. Use quotes if the message should contain spaces. \n " ,
file , linenum , args [ 2 ] , args [ 0 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto next_line ;
}
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-nul integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
ha_notice ( " parsing [%s:%d]: '%s'. \n " , file , linenum , args [ 1 ] ) ;
goto next_line ;
}
2021-05-07 02:59:50 -04:00
else if ( strcmp ( args [ 0 ] , " .diag " ) = = 0 ) {
2021-05-26 11:45:33 -04:00
if ( * args [ 2 ] ) {
ha_alert ( " parsing [%s:%d]: Unexpected argument '%s' for '%s'. Use quotes if the message should contain spaces. \n " ,
file , linenum , args [ 2 ] , args [ 0 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto next_line ;
}
2021-05-07 02:59:50 -04:00
ha_diag_warning ( " parsing [%s:%d]: '%s'. \n " , file , linenum , args [ 1 ] ) ;
goto next_line ;
}
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-nul integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
else {
ha_alert ( " parsing [%s:%d]: unknown directive '%s'. \n " , file , linenum , args [ 0 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
fatal + + ;
break ;
}
}
2009-06-14 05:39:52 -04:00
/* check for keyword modifiers "no" and "default" */
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
if ( strcmp ( args [ 0 ] , " no " ) = = 0 ) {
2011-10-12 11:50:54 -04:00
char * tmp ;
2009-06-14 05:39:52 -04:00
kwm = KWM_NO ;
2011-10-12 11:50:54 -04:00
tmp = args [ 0 ] ;
2009-06-14 05:39:52 -04:00
for ( arg = 0 ; * args [ arg + 1 ] ; arg + + )
args [ arg ] = args [ arg + 1 ] ; // shift args after inversion
2011-10-12 11:50:54 -04:00
* tmp = ' \0 ' ; // fix the next arg to \0
args [ arg ] = tmp ;
2009-06-14 05:39:52 -04:00
}
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
else if ( strcmp ( args [ 0 ] , " default " ) = = 0 ) {
2009-06-14 05:39:52 -04:00
kwm = KWM_DEF ;
2007-12-24 20:40:22 -05:00
for ( arg = 0 ; * args [ arg + 1 ] ; arg + + )
args [ arg ] = args [ arg + 1 ] ; // shift args after inversion
}
2019-10-27 15:08:10 -04:00
if ( kwm ! = KWM_STD & & strcmp ( args [ 0 ] , " option " ) ! = 0 & &
strcmp ( args [ 0 ] , " log " ) ! = 0 & & strcmp ( args [ 0 ] , " busy-polling " ) ! = 0 & &
MEDIUM: init: prevent process and thread creation at runtime
Some concerns are regularly raised about the risk to inherit some Lua
files which make use of a fork (e.g. via os.execute()) as well as
whether or not some of bugs we fix might or not be exploitable to run
some code. Given that haproxy is event-driven, any foreground activity
completely stops processing and is easy to detect, but background
activity is a different story. A Lua script could very well discretely
fork a sub-process connecting to a remote location and taking commands,
and some injected code could also try to hide its activity by creating
a process or a thread without blocking the rest of the processing. While
such activities should be extremely limited when run in an empty chroot
without any permission, it would be better to get a higher assurance
they cannot happen.
This patch introduces something very simple: it limits the number of
processes and threads to zero in the workers after the last thread was
created. By doing so, it effectively instructs the system to fail on
any fork() or clone() syscall. Thus any undesired activity has to happen
in the foreground and is way easier to detect.
This will obviously break external checks (whose concept is already
totally insecure), and for this reason a new option
"insecure-fork-wanted" was added to disable this protection, and it
is suggested in the fork() error report from the checks. It is
obviously recommended not to use it and to reconsider the reasons
leading to it being enabled in the first place.
If for any reason we fail to disable forks, we still start because it
could be imaginable that some operating systems refuse to set this
limit to zero, but in this case we emit a warning, that may or may not
be reported since we're after the fork point. Ideally over the long
term it should be conditionned by strict-limits and cause a hard fail.
2019-12-03 01:07:36 -05:00
strcmp ( args [ 0 ] , " set-dumpable " ) ! = 0 & & strcmp ( args [ 0 ] , " strict-limits " ) ! = 0 & &
2021-03-26 13:50:33 -04:00
strcmp ( args [ 0 ] , " insecure-fork-wanted " ) ! = 0 & &
strcmp ( args [ 0 ] , " numa-cpu-mapping " ) ! = 0 ) {
2019-10-27 15:08:10 -04:00
ha_alert ( " parsing [%s:%d]: negation/default currently "
2019-10-27 15:08:11 -04:00
" supported only for options, log, busy-polling, "
2021-03-26 13:50:33 -04:00
" set-dumpable, strict-limits, insecure-fork-wanted "
" and numa-cpu-mapping. \n " , file , linenum ) ;
2009-07-20 03:30:05 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
2020-06-16 11:14:33 -04:00
fatal + + ;
2007-12-24 20:40:22 -05:00
}
2014-03-18 08:54:18 -04:00
/* detect section start */
list_for_each_entry ( ics , & sections , list ) {
2025-02-10 09:07:05 -05:00
if ( strcmp ( args [ 0 ] , ics - > section_name ) = = 0 & & ics - > section_parser ) {
2014-03-18 08:54:18 -04:00
cursection = ics - > section_name ;
2018-11-30 07:50:47 -05:00
pcs = cs ;
2014-03-18 08:54:18 -04:00
cs = ics ;
2021-05-06 04:04:45 -04:00
free ( global . cfg_curr_section ) ;
global . cfg_curr_section = strdup ( * args [ 1 ] ? args [ 1 ] : args [ 0 ] ) ;
2022-11-18 09:46:06 -05:00
check_section_position ( args [ 0 ] , file , linenum ) ;
2014-03-18 08:54:18 -04:00
break ;
}
2010-09-23 12:39:19 -04:00
}
2025-02-10 09:07:05 -05:00
if ( pcs ) {
struct cfg_section * psect ;
2020-06-16 11:14:33 -04:00
int status ;
2024-10-01 10:11:01 -04:00
2025-02-10 09:07:05 -05:00
/* look for every post_section_parser for the previous section name */
list_for_each_entry ( psect , & sections , list ) {
if ( strcmp ( pcs - > section_name , psect - > section_name ) = = 0 & &
psect - > post_section_parser ) {
2020-06-16 11:14:33 -04:00
2025-02-10 09:07:05 -05:00
/* don't call post_section_parser in MODE_DISCOVERY */
if ( global . mode & MODE_DISCOVERY )
goto section_parser ;
status = psect - > post_section_parser ( ) ;
err_code | = status ;
if ( status & ERR_FATAL )
fatal + + ;
if ( err_code & ERR_ABORT )
goto err ;
}
}
2018-11-30 07:50:47 -05:00
}
2018-12-02 03:37:38 -05:00
pcs = NULL ;
2018-11-30 07:50:47 -05:00
2025-02-12 06:09:05 -05:00
section_parser :
2017-10-16 05:06:50 -04:00
if ( ! cs ) {
2024-10-01 10:11:01 -04:00
/* ignore unknown section names during the first read in MODE_DISCOVERY */
if ( global . mode & MODE_DISCOVERY )
continue ;
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: unknown keyword '%s' out of section. \n " , file , linenum , args [ 0 ] ) ;
2009-07-20 03:30:05 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
2020-06-16 11:14:33 -04:00
fatal + + ;
2017-10-16 05:06:50 -04:00
} else {
2020-06-16 11:14:33 -04:00
int status ;
2024-10-09 17:11:07 -04:00
/* read only the "global" and "program" sections in MODE_DISCOVERY */
if ( ( ( global . mode & MODE_DISCOVERY ) & & ( strcmp ( cs - > section_name , " global " ) ! = 0 )
& & ( strcmp ( cs - > section_name , " program " ) ! = 0 ) ) )
2024-10-01 10:11:01 -04:00
continue ;
2020-06-16 11:14:33 -04:00
status = cs - > section_parser ( file , linenum , args , kwm ) ;
err_code | = status ;
if ( status & ERR_FATAL )
fatal + + ;
2017-10-16 05:06:50 -04:00
if ( err_code & ERR_ABORT )
goto err ;
}
2006-06-25 20:48:02 -04:00
}
2017-10-16 05:06:50 -04:00
2020-06-22 16:57:45 -04:00
if ( missing_lf ! = - 1 ) {
2020-08-18 16:00:04 -04:00
ha_alert ( " parsing [%s:%d]: Missing LF on last line, file might have been truncated at position %d. \n " ,
file , linenum , ( missing_lf + 1 ) ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
2020-06-22 16:57:45 -04:00
}
2021-05-06 04:04:45 -04:00
ha_free ( & global . cfg_curr_section ) ;
2025-02-12 06:31:11 -05:00
/* call post_section_parser of the last section when there is no more lines */
2025-02-10 09:07:05 -05:00
if ( cs ) {
struct cfg_section * psect ;
2025-02-17 04:59:46 -05:00
int status ;
2025-02-10 09:07:05 -05:00
2025-02-12 06:31:11 -05:00
/* don't call post_section_parser in MODE_DISCOVERY */
2025-02-10 09:07:05 -05:00
if ( ! ( global . mode & MODE_DISCOVERY ) ) {
list_for_each_entry ( psect , & sections , list ) {
if ( strcmp ( cs - > section_name , psect - > section_name ) = = 0 & &
psect - > post_section_parser ) {
2025-02-17 04:59:46 -05:00
status = psect - > post_section_parser ( ) ;
if ( status & ERR_FATAL )
fatal + + ;
err_code | = status ;
if ( err_code & ERR_ABORT )
goto err ;
2025-02-10 09:07:05 -05:00
}
}
}
2025-02-12 06:31:11 -05:00
}
2017-10-16 05:06:50 -04:00
MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
Very often, especially since reg-tests, it would be desirable to be able
to conditionally comment out a config block, such as removing an SSL
binding when SSL is disabled, or enabling HTX only for certain versions,
etc.
This patch introduces a very simple nested block management which takes
".if", ".elif", ".else" and ".endif" directives to take or ignore a block.
For now the conditions are limited to empty string or "0" for false versus
a non-nul integer for true, which already suffices to test environment
variables. Still, it needs to be a bit more advanced with defines, versions
etc.
A set of ".notice", ".warning" and ".alert" statements are provided to
emit messages, often in order to provide advice about how to fix certain
conditions.
2021-02-12 11:59:10 -05:00
if ( nested_cond_lvl ) {
ha_alert ( " parsing [%s:%d]: non-terminated '.if' block. \n " , file , linenum ) ;
err_code | = ERR_ALERT | ERR_FATAL | ERR_ABORT ;
}
2021-04-27 14:29:11 -04:00
2017-10-16 05:06:50 -04:00
err :
2021-02-20 04:46:51 -05:00
ha_free ( & cfg_scope ) ;
2008-01-22 10:44:08 -05:00
cursection = NULL ;
2015-05-12 08:25:37 -04:00
free ( thisline ) ;
BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
Issue 22689 in oss-fuzz shows that specially crafted config files can take
a long time to process. This happens when variable expansion, backslash
escaping or unquoting causes calls to memmove() and possibly to realloc()
resulting in O(N^2) complexity with N following the line size.
By using parse_line() we now have a safe parser that remains in O(N)
regardless of the type of operation. Error reporting changed a little bit
since the errors are not reported anymore from the deepest parsing level.
As such we now report the beginning of the error. One benefit is that for
many invalid character sequences, the original line is shown and the first
bad char or sequence is designated with a caret ('^'), which tends to be
visually easier to spot, for example:
[ALERT] 167/170507 (14633) : parsing [mini5.cfg:19]: unmatched brace in environment variable name below:
"${VAR"}
^
or:
[ALERT] 167/170645 (14640) : parsing [mini5.cfg:18]: unmatched quote below:
timeout client 10s'
^
In case the target buffer is too short for the new line, the output buffer
is grown in 1kB chunks and kept till the end, so that it should not happen
too often.
Before this patch a test like below involving a 4 MB long line would take
138s to process, 98% of which were spent in __memmove_avx_unaligned_erms(),
and now it takes only 65 milliseconds:
$ perl -e 'print "\"\$A\""x1000000,"\n"' | ./haproxy -c -f /dev/stdin 2>/dev/null
This may be backported to stable versions after a long period of
observation to be sure nothing broke. It relies on patch "MINOR: tools:
add a new configurable line parse, parse_line()".
2020-06-16 10:32:59 -04:00
free ( outline ) ;
2021-05-06 04:04:45 -04:00
global . cfg_curr_line = 0 ;
global . cfg_curr_file = NULL ;
2009-07-20 03:30:05 -04:00
return err_code ;
2009-06-22 09:48:36 -04:00
}
2009-07-23 07:36:36 -04:00
/*
 * Returns the error code, 0 if OK, or any combination of:
 *  - ERR_ABORT: must abort ASAP
 *  - ERR_FATAL: we can continue parsing but not start the service
 *  - ERR_WARN: a warning has been emitted
 *  - ERR_ALERT: an alert has been emitted
 * Only the two first ones can stop processing, the two others are just
 * indicators.
 */
int check_config_validity ( )
{
int cfgerr = 0 ;
2022-08-18 09:53:21 -04:00
struct proxy * init_proxies_list = NULL ;
2019-03-14 02:07:41 -04:00
struct stktable * t ;
2009-06-22 09:48:36 -04:00
struct server * newsrv = NULL ;
2024-10-23 12:18:48 -04:00
struct mt_list back ;
2009-07-23 07:36:36 -04:00
int err_code = 0 ;
2009-10-04 17:04:08 -04:00
unsigned int next_pxid = 1 ;
2012-09-13 11:54:29 -04:00
struct bind_conf * bind_conf ;
2016-11-22 18:41:28 -05:00
char * err ;
2017-10-23 08:36:34 -04:00
struct cfg_postparser * postparser ;
2020-12-23 10:51:12 -05:00
struct resolvers * curr_resolvers = NULL ;
2021-03-08 11:31:39 -05:00
int i ;
2006-06-25 20:48:02 -04:00
2012-09-13 11:54:29 -04:00
bind_conf = NULL ;
2006-06-25 20:48:02 -04:00
/*
* Now , check for the integrity of all that we have collected .
*/
2012-11-21 18:17:38 -05:00
if ( ! global . tune . max_http_hdr )
global . tune . max_http_hdr = MAX_HTTP_HDR ;
if ( ! global . tune . cookie_len )
global . tune . cookie_len = CAPTURE_LEN ;
2017-05-18 02:58:41 -04:00
if ( ! global . tune . requri_len )
global . tune . requri_len = REQURI_LEN ;
2024-05-24 03:46:49 -04:00
if ( ! global . thread_limit )
global . thread_limit = MAX_THREADS ;
# if defined(USE_THREAD)
if ( thread_cpus_enabled_at_boot > global . thread_limit )
thread_cpus_enabled_at_boot = global . thread_limit ;
# endif
2023-07-20 11:22:35 -04:00
if ( global . nbthread > global . thread_limit ) {
2024-05-24 03:46:49 -04:00
ha_warning ( " nbthread forced to a higher value (%d) than the configured thread-hard-limit (%d), enforcing the limit. "
" Please fix either value to remove this warning. \n " ,
global . nbthread , global . thread_limit ) ;
global . nbthread = global . thread_limit ;
}
2019-01-26 08:27:06 -05:00
2023-07-20 11:22:35 -04:00
/* in the worst case these were supposed to be set in thread_detect_count() */
BUG_ON ( ! global . nbthread ) ;
BUG_ON ( ! global . nbtgroups ) ;
2021-09-22 06:07:23 -04:00
2021-09-27 04:10:26 -04:00
if ( thread_map_to_groups ( ) < 0 ) {
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
2017-11-24 11:34:44 -05:00
pool_head_requri = create_pool ( " requri " , global . tune . requri_len , MEM_F_SHARED ) ;
2017-07-05 07:33:16 -04:00
2017-11-24 11:34:44 -05:00
pool_head_capture = create_pool ( " capture " , global . tune . cookie_len , MEM_F_SHARED ) ;
2012-11-21 18:17:38 -05:00
2014-01-22 12:38:02 -05:00
/* Post initialisation of the users and groups lists. */
err_code = userlist_postinit ( ) ;
if ( err_code ! = ERR_NONE )
goto out ;
2009-03-15 09:51:53 -04:00
/* first, we will invert the proxy list order */
curproxy = NULL ;
2017-11-24 10:54:05 -05:00
while ( proxies_list ) {
2009-03-15 09:51:53 -04:00
struct proxy * next ;
2017-11-24 10:54:05 -05:00
next = proxies_list - > next ;
proxies_list - > next = curproxy ;
curproxy = proxies_list ;
2009-03-15 09:51:53 -04:00
if ( ! next )
break ;
2017-11-24 10:54:05 -05:00
proxies_list = next ;
2009-03-15 09:51:53 -04:00
}
2025-04-17 04:42:25 -04:00
/*
* we must finish to initialize certain things on the servers ,
2025-05-09 14:27:29 -04:00
* as some of the fields may be accessed soon
2025-04-17 04:42:25 -04:00
*/
MT_LIST_FOR_EACH_ENTRY_LOCKED ( newsrv , & servers_list , global_list , back ) {
2025-05-09 14:27:29 -04:00
if ( srv_init ( newsrv ) & ERR_CODE ) {
2025-04-17 04:42:25 -04:00
cfgerr + + ;
continue ;
}
}
2022-08-18 09:53:21 -04:00
/* starting to initialize the main proxies list */
init_proxies_list = proxies_list ;
init_proxies_list_stage1 :
for ( curproxy = init_proxies_list ; curproxy ; curproxy = curproxy - > next ) {
2007-06-17 13:56:27 -04:00
struct switching_rule * rule ;
2012-04-05 15:09:48 -04:00
struct server_rule * srule ;
2010-01-04 09:45:53 -05:00
struct sticking_rule * mrule ;
MEDIUM: tree-wide: logsrv struct becomes logger
When 'log' directive was implemented, the internal representation was
named 'struct logsrv', because the 'log' directive would directly point
to the log target, which used to be a (UDP) log server exclusively at
that time, hence the name.
But things have become more complex, since today 'log' directive can point
to ring targets (implicit, or named) for example.
Indeed, a 'log' directive does no longer reference the "final" server to
which the log will be sent, but instead it describes which log API and
parameters to use for transporting the log messages to the proper log
destination.
So now the term 'logsrv' is rather confusing and prevents us from
introducing a new level of abstraction because they would be mixed
with logsrv.
So in order to better designate this 'log' directive, and make it more
generic, we chose the word 'logger' which now replaces logsrv everywhere
it was used in the code (including related comments).
This is internal rewording, so no functional change should be expected
on user-side.
2023-09-11 09:06:53 -04:00
struct logger * tmplogger ;
2009-10-04 17:04:08 -04:00
unsigned int next_id ;
2025-04-17 11:05:07 -04:00
proxy_init_per_thr ( curproxy ) ;
2021-08-20 04:15:40 -04:00
if ( ! ( curproxy - > cap & PR_CAP_INT ) & & curproxy - > uuid < 0 ) {
2009-10-04 17:04:08 -04:00
/* proxy ID not set, use automatic numbering with first
2021-08-20 04:15:40 -04:00
* spare entry starting with next_pxid . We don ' t assign
* numbers for internal proxies as they may depend on
* build or config options and we don ' t want them to
* possibly reuse existing IDs .
2009-10-04 17:04:08 -04:00
*/
next_pxid = get_next_id ( & used_proxy_id , next_pxid ) ;
curproxy - > conf . id . key = curproxy - > uuid = next_pxid ;
eb32_insert ( & used_proxy_id , & curproxy - > conf . id ) ;
}
2010-02-05 14:58:27 -05:00
2021-08-26 09:59:44 -04:00
if ( curproxy - > mode = = PR_MODE_HTTP & & global . tune . bufsize > = ( 256 < < 20 ) & & ONLY_ONCE ( ) ) {
ha_alert ( " global.tune.bufsize must be below 256 MB when HTTP is in use (current value = %d). \n " ,
global . tune . bufsize ) ;
cfgerr + + ;
}
2021-08-20 04:15:40 -04:00
/* next IDs are shifted even if the proxy is disabled, this
* guarantees that a proxy that is temporarily disabled in the
* configuration doesn ' t cause a renumbering . Internal proxies
* that are not assigned a static ID must never shift the IDs
* either since they may appear in any order ( Lua , logs , etc ) .
* The GLOBAL proxy that carries the stats socket has its ID
* forced to zero .
*/
if ( curproxy - > uuid > = 0 )
next_pxid + + ;
2007-06-17 13:56:27 -04:00
2021-10-06 08:24:19 -04:00
if ( curproxy - > flags & PR_FL_DISABLED ) {
2020-10-07 12:36:54 -04:00
/* ensure we don't keep listeners uselessly bound. We
* can ' t disable their listeners yet ( fdtab not
* allocated yet ) but let ' s skip them .
*/
2019-05-07 08:16:18 -04:00
if ( curproxy - > table ) {
2021-02-20 04:46:51 -05:00
ha_free ( & curproxy - > table - > peers . name ) ;
2019-05-07 08:16:18 -04:00
curproxy - > table - > peers . p = NULL ;
}
2006-06-25 20:48:02 -04:00
continue ;
}
2021-10-13 05:04:10 -04:00
/* The current proxy is referencing a default proxy. We must
* finalize its config , but only once . If the default proxy is
* ready ( PR_FL_READY ) it means it was already fully configured .
*/
if ( curproxy - > defpx ) {
if ( ! ( curproxy - > defpx - > flags & PR_FL_READY ) ) {
2021-10-13 09:40:15 -04:00
/* check validity for 'tcp-request' layer 4/5/6/7 rules */
cfgerr + = check_action_rules ( & curproxy - > defpx - > tcp_req . l4_rules , curproxy - > defpx , & err_code ) ;
cfgerr + = check_action_rules ( & curproxy - > defpx - > tcp_req . l5_rules , curproxy - > defpx , & err_code ) ;
cfgerr + = check_action_rules ( & curproxy - > defpx - > tcp_req . inspect_rules , curproxy - > defpx , & err_code ) ;
cfgerr + = check_action_rules ( & curproxy - > defpx - > tcp_rep . inspect_rules , curproxy - > defpx , & err_code ) ;
cfgerr + = check_action_rules ( & curproxy - > defpx - > http_req_rules , curproxy - > defpx , & err_code ) ;
cfgerr + = check_action_rules ( & curproxy - > defpx - > http_res_rules , curproxy - > defpx , & err_code ) ;
cfgerr + = check_action_rules ( & curproxy - > defpx - > http_after_res_rules , curproxy - > defpx , & err_code ) ;
2021-10-13 05:04:10 -04:00
err = NULL ;
i = smp_resolve_args ( curproxy - > defpx , & err ) ;
cfgerr + = i ;
if ( i ) {
indent_msg ( & err , 8 ) ;
ha_alert ( " %s%s \n " , i > 1 ? " multiple argument resolution errors: " : " " , err ) ;
ha_free ( & err ) ;
}
else
cfgerr + = acl_find_targets ( curproxy - > defpx ) ;
/* default proxy is now ready. Set the right FE/BE capabilities */
curproxy - > defpx - > flags | = PR_FL_READY ;
}
}
2014-05-09 11:06:11 -04:00
/* check and reduce the bind-proc of each listener */
list_for_each_entry ( bind_conf , & curproxy - > conf . bind , by_fe ) {
2023-04-22 17:25:38 -04:00
int ret ;
2014-05-09 11:06:11 -04:00
2017-11-24 05:28:00 -05:00
/* HTTP frontends with "h2" as ALPN/NPN will work in
* HTTP / 2 and absolutely require buffers 16 kB or larger .
*/
# ifdef USE_OPENSSL
2023-04-19 02:28:40 -04:00
/* no-alpn ? If so, it's the right moment to remove it */
if ( bind_conf - > ssl_conf . alpn_str & & ! bind_conf - > ssl_conf . alpn_len ) {
2024-03-29 13:21:53 -04:00
ha_free ( & bind_conf - > ssl_conf . alpn_str ) ;
2023-04-19 02:28:40 -04:00
}
2023-04-19 03:12:33 -04:00
# ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
else if ( ! bind_conf - > ssl_conf . alpn_str & & ! bind_conf - > ssl_conf . npn_str & &
( ( bind_conf - > options & BC_O_USE_SSL ) | | bind_conf - > xprt = = xprt_get ( XPRT_QUIC ) ) & &
curproxy - > mode = = PR_MODE_HTTP & & global . tune . bufsize > = 16384 ) {
/* Neither ALPN nor NPN were explicitly set nor disabled, we're
* in HTTP mode with an SSL or QUIC listener , we can enable ALPN .
* Note that it ' s in binary form .
*/
if ( bind_conf - > xprt = = xprt_get ( XPRT_QUIC ) )
bind_conf - > ssl_conf . alpn_str = strdup ( " \002 h3 " ) ;
else
bind_conf - > ssl_conf . alpn_str = strdup ( " \002 h2 \010 http/1.1 " ) ;
if ( ! bind_conf - > ssl_conf . alpn_str ) {
ha_alert ( " Proxy '%s': out of memory while trying to allocate a default alpn string in 'bind %s' at [%s:%d]. \n " ,
curproxy - > id , bind_conf - > arg , bind_conf - > file , bind_conf - > line ) ;
cfgerr + + ;
err_code | = ERR_FATAL | ERR_ALERT ;
goto out ;
}
bind_conf - > ssl_conf . alpn_len = strlen ( bind_conf - > ssl_conf . alpn_str ) ;
}
# endif
2023-04-19 02:28:40 -04:00
2017-11-24 05:28:00 -05:00
if ( curproxy - > mode = = PR_MODE_HTTP & & global . tune . bufsize < 16384 ) {
# ifdef OPENSSL_NPN_NEGOTIATED
/* check NPN */
2018-11-11 04:36:25 -05:00
if ( bind_conf - > ssl_conf . npn_str & & strstr ( bind_conf - > ssl_conf . npn_str , " \002 h2 " ) ) {
2021-06-04 12:22:08 -04:00
ha_alert ( " HTTP frontend '%s' enables HTTP/2 via NPN at [%s:%d], so global.tune.bufsize must be at least 16384 bytes (%d now). \n " ,
2017-11-24 10:50:31 -05:00
curproxy - > id , bind_conf - > file , bind_conf - > line , global . tune . bufsize ) ;
2017-11-24 05:28:00 -05:00
cfgerr + + ;
}
# endif
# ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
/* check ALPN */
2018-11-11 04:36:25 -05:00
if ( bind_conf - > ssl_conf . alpn_str & & strstr ( bind_conf - > ssl_conf . alpn_str , " \002 h2 " ) ) {
2021-06-04 12:22:08 -04:00
ha_alert ( " HTTP frontend '%s' enables HTTP/2 via ALPN at [%s:%d], so global.tune.bufsize must be at least 16384 bytes (%d now). \n " ,
2017-11-24 10:50:31 -05:00
curproxy - > id , bind_conf - > file , bind_conf - > line , global . tune . bufsize ) ;
2017-11-24 05:28:00 -05:00
cfgerr + + ;
}
# endif
} /* HTTP && bufsize < 16384 */
# endif
2025-01-30 08:50:19 -05:00
# ifdef USE_QUIC
if ( bind_conf - > xprt = = xprt_get ( XPRT_QUIC ) ) {
const struct quic_cc_algo * cc_algo = bind_conf - > quic_cc_algo ?
bind_conf - > quic_cc_algo : default_quic_cc_algo ;
if ( ! ( cc_algo - > flags & QUIC_CC_ALGO_FL_OPT_PACING ) & &
2025-01-30 12:01:53 -05:00
quic_tune . options & QUIC_TUNE_NO_PACING ) {
2025-01-30 08:50:19 -05:00
ha_warning ( " Binding [%s:%d] for %s %s: using the selected congestion algorithm without pacing may cause slowdowns or high loss rates during transfers. \n " ,
bind_conf - > file , bind_conf - > line ,
proxy_type_str ( curproxy ) , curproxy - > id ) ;
err_code | = ERR_WARN ;
}
}
# endif /* USE_QUIC */
2023-04-22 17:25:38 -04:00
/* finish the bind setup */
ret = bind_complete_thread_setup ( bind_conf , & err_code ) ;
if ( ret ! = 0 ) {
cfgerr + = ret ;
if ( err_code & ERR_FATAL )
goto out ;
2021-10-12 02:47:54 -04:00
}
2024-04-02 04:44:08 -04:00
if ( bind_generate_guid ( bind_conf ) ) {
cfgerr + + ;
err_code | = ERR_FATAL | ERR_ALERT ;
goto out ;
}
2014-05-09 11:06:11 -04:00
}
2009-03-15 08:46:16 -04:00
switch ( curproxy - > mode ) {
case PR_MODE_TCP :
2009-06-22 09:48:36 -04:00
cfgerr + = proxy_cfg_ensure_no_http ( curproxy ) ;
2023-11-15 06:18:52 -05:00
cfgerr + = proxy_cfg_ensure_no_log ( curproxy ) ;
2009-03-15 08:46:16 -04:00
break ;
case PR_MODE_HTTP :
2023-11-15 06:18:52 -05:00
cfgerr + = proxy_cfg_ensure_no_log ( curproxy ) ;
2013-03-24 02:22:08 -04:00
curproxy - > http_needed = 1 ;
2009-03-15 08:46:16 -04:00
break ;
2018-10-26 08:47:40 -04:00
case PR_MODE_CLI :
cfgerr + = proxy_cfg_ensure_no_http ( curproxy ) ;
2023-11-15 06:18:52 -05:00
cfgerr + = proxy_cfg_ensure_no_log ( curproxy ) ;
2018-10-26 08:47:40 -04:00
break ;
2022-08-18 09:53:21 -04:00
2020-07-07 03:43:24 -04:00
case PR_MODE_SYSLOG :
2022-08-18 09:53:21 -04:00
/* this mode is initialized as the classic tcp proxy */
cfgerr + = proxy_cfg_ensure_no_http ( curproxy ) ;
break ;
2024-07-04 03:58:12 -04:00
case PR_MODE_SPOP :
cfgerr + = proxy_cfg_ensure_no_http ( curproxy ) ;
cfgerr + = proxy_cfg_ensure_no_log ( curproxy ) ;
break ;
2020-10-07 11:49:42 -04:00
case PR_MODE_PEERS :
2020-07-07 03:43:24 -04:00
case PR_MODES :
/* should not happen, bug gcc warn missing switch statement */
2022-08-18 09:53:21 -04:00
ha_alert ( " %s '%s' cannot initialize this proxy mode (peers) in this way. NOTE: PLEASE REPORT THIS TO DEVELOPERS AS YOU'RE NOT SUPPOSED TO BE ABLE TO CREATE A CONFIGURATION TRIGGERING THIS! \n " ,
2020-07-07 03:43:24 -04:00
proxy_type_str ( curproxy ) , curproxy - > id ) ;
cfgerr + + ;
break ;
2009-03-15 08:46:16 -04:00
}
2021-08-13 09:21:12 -04:00
if ( ! ( curproxy - > cap & PR_CAP_INT ) & & ( curproxy - > cap & PR_CAP_FE ) & & LIST_ISEMPTY ( & curproxy - > conf . listeners ) ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " %s '%s' has no 'bind' directive. Please declare it as a backend if this was intended. \n " ,
2017-11-24 10:50:31 -05:00
proxy_type_str ( curproxy ) , curproxy - > id ) ;
2015-08-11 05:36:45 -04:00
err_code | = ERR_WARN ;
}
2020-10-14 09:44:27 -04:00
if ( curproxy - > cap & PR_CAP_BE ) {
2009-10-03 06:21:20 -04:00
if ( curproxy - > lbprm . algo & BE_LB_KIND ) {
2009-03-15 09:06:41 -04:00
if ( curproxy - > options & PR_O_TRANSP ) {
2021-06-04 12:22:08 -04:00
ha_alert ( " %s '%s' cannot use both transparent and balance mode. \n " ,
2017-11-24 10:50:31 -05:00
proxy_type_str ( curproxy ) , curproxy - > id ) ;
2009-03-15 09:06:41 -04:00
cfgerr + + ;
}
2006-06-25 20:48:02 -04:00
# ifdef WE_DONT_SUPPORT_SERVERLESS_LISTENERS
2009-03-15 09:06:41 -04:00
else if ( curproxy - > srv = = NULL ) {
2021-06-04 12:22:08 -04:00
ha_alert ( " %s '%s' needs at least 1 server in balance mode. \n " ,
2017-11-24 10:50:31 -05:00
proxy_type_str ( curproxy ) , curproxy - > id ) ;
2009-03-15 09:06:41 -04:00
cfgerr + + ;
}
2006-06-25 20:48:02 -04:00
# endif
2011-08-06 11:05:02 -04:00
else if ( curproxy - > options & PR_O_DISPATCH ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " dispatch address of %s '%s' will be ignored in balance mode. \n " ,
2017-11-24 10:50:31 -05:00
proxy_type_str ( curproxy ) , curproxy - > id ) ;
2009-07-23 07:36:36 -04:00
err_code | = ERR_WARN ;
2009-03-15 09:06:41 -04:00
}
}
2021-07-18 13:18:56 -04:00
else if ( ! ( curproxy - > options & ( PR_O_TRANSP | PR_O_DISPATCH ) ) ) {
2009-03-15 09:06:41 -04:00
/* If no LB algo is set in a backend, and we're not in
* transparent mode , dispatch mode nor proxy mode , we
* want to use balance roundrobin by default .
*/
curproxy - > lbprm . algo & = ~ BE_LB_ALGO ;
curproxy - > lbprm . algo | = BE_LB_ALGO_RR ;
2006-06-25 20:48:02 -04:00
}
}
2007-09-17 04:17:23 -04:00
2011-08-06 11:05:02 -04:00
if ( curproxy - > options & PR_O_DISPATCH )
2021-07-18 13:18:56 -04:00
curproxy - > options & = ~ PR_O_TRANSP ;
2011-08-06 11:05:02 -04:00
else if ( curproxy - > options & PR_O_TRANSP )
2021-07-18 13:18:56 -04:00
curproxy - > options & = ~ PR_O_DISPATCH ;
2006-06-25 20:48:02 -04:00
2020-04-15 05:32:03 -04:00
if ( ( curproxy - > tcpcheck_rules . flags & TCPCHK_RULES_UNUSED_HTTP_RS ) ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " %s '%s' uses http-check rules without 'option httpchk', so the rules are ignored. \n " ,
2020-04-15 05:32:03 -04:00
proxy_type_str ( curproxy ) , curproxy - > id ) ;
err_code | = ERR_WARN ;
}
if ( ( curproxy - > options2 & PR_O2_CHK_ANY ) = = PR_O2_TCPCHK_CHK & &
2020-06-03 13:00:42 -04:00
( curproxy - > tcpcheck_rules . flags & TCPCHK_RULES_PROTO_CHK ) ! = TCPCHK_RULES_HTTP_CHK ) {
2011-08-06 11:05:02 -04:00
if ( curproxy - > options & PR_O_DISABLE404 ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " '%s' will be ignored for %s '%s' (requires 'option httpchk'). \n " ,
2017-11-24 10:50:31 -05:00
" disable-on-404 " , proxy_type_str ( curproxy ) , curproxy - > id ) ;
2011-08-06 11:05:02 -04:00
err_code | = ERR_WARN ;
curproxy - > options & = ~ PR_O_DISABLE404 ;
}
if ( curproxy - > options2 & PR_O2_CHK_SNDST ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " '%s' will be ignored for %s '%s' (requires 'option httpchk'). \n " ,
2017-11-24 10:50:31 -05:00
" send-state " , proxy_type_str ( curproxy ) , curproxy - > id ) ;
2011-08-06 11:05:02 -04:00
err_code | = ERR_WARN ;
2024-05-31 12:30:16 -04:00
curproxy - > options2 & = ~ PR_O2_CHK_SNDST ;
2011-08-06 11:05:02 -04:00
}
2010-01-27 05:53:01 -05:00
}
2014-06-19 23:30:16 -04:00
if ( ( curproxy - > options2 & PR_O2_CHK_ANY ) = = PR_O2_EXT_CHK ) {
if ( ! global . external_check ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " Proxy '%s' : '%s' unable to find required 'global.external-check'. \n " ,
curproxy - > id , " option external-check " ) ;
2014-06-19 23:30:16 -04:00
cfgerr + + ;
}
if ( ! curproxy - > check_command ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " Proxy '%s' : '%s' unable to find required 'external-check command'. \n " ,
curproxy - > id , " option external-check " ) ;
2014-06-19 23:30:16 -04:00
cfgerr + + ;
}
MEDIUM: init: prevent process and thread creation at runtime
Some concerns are regularly raised about the risk to inherit some Lua
files which make use of a fork (e.g. via os.execute()) as well as
whether or not some of bugs we fix might or not be exploitable to run
some code. Given that haproxy is event-driven, any foreground activity
completely stops processing and is easy to detect, but background
activity is a different story. A Lua script could very well discretely
fork a sub-process connecting to a remote location and taking commands,
and some injected code could also try to hide its activity by creating
a process or a thread without blocking the rest of the processing. While
such activities should be extremely limited when run in an empty chroot
without any permission, it would be better to get a higher assurance
they cannot happen.
This patch introduces something very simple: it limits the number of
processes and threads to zero in the workers after the last thread was
created. By doing so, it effectively instructs the system to fail on
any fork() or clone() syscall. Thus any undesired activity has to happen
in the foreground and is way easier to detect.
This will obviously break external checks (whose concept is already
totally insecure), and for this reason a new option
"insecure-fork-wanted" was added to disable this protection, and it
is suggested in the fork() error report from the checks. It is
obviously recommended not to use it and to reconsider the reasons
leading to it being enabled in the first place.
If for any reason we fail to disable forks, we still start because it
could be imaginable that some operating systems refuse to set this
limit to zero, but in this case we emit a warning, that may or may not
be reported since we're after the fork point. Ideally over the long
term it should be conditioned by strict-limits and cause a hard fail .
2019-12-03 01:07:36 -05:00
if ( ! ( global . tune . options & GTUNE_INSECURE_FORK ) ) {
ha_warning ( " Proxy '%s' : 'insecure-fork-wanted' not enabled in the global section, '%s' will likely fail. \n " ,
curproxy - > id , " option external-check " ) ;
err_code | = ERR_WARN ;
}
2014-06-19 23:30:16 -04:00
}
2024-06-17 12:53:48 -04:00
if ( curproxy - > email_alert . flags & PR_EMAIL_ALERT_SET ) {
2015-01-29 21:23:00 -05:00
if ( ! ( curproxy - > email_alert . mailers . name & & curproxy - > email_alert . from & & curproxy - > email_alert . to ) ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " 'email-alert' will be ignored for %s '%s' (the presence any of "
2017-11-24 10:50:31 -05:00
" 'email-alert from', 'email-alert level' 'email-alert mailers', "
" 'email-alert myhostname', or 'email-alert to' "
" requires each of 'email-alert from', 'email-alert mailers' and 'email-alert to' "
" to be present). \n " ,
proxy_type_str ( curproxy ) , curproxy - > id ) ;
2015-01-29 21:23:00 -05:00
err_code | = ERR_WARN ;
free_email_alert ( curproxy ) ;
}
if ( ! curproxy - > email_alert . myhostname )
2015-12-03 21:07:07 -05:00
curproxy - > email_alert . myhostname = strdup ( hostname ) ;
2015-01-29 21:22:59 -05:00
}
2014-06-19 23:30:16 -04:00
if ( curproxy - > check_command ) {
int clear = 0 ;
if ( ( curproxy - > options2 & PR_O2_CHK_ANY ) ! = PR_O2_EXT_CHK ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " '%s' will be ignored for %s '%s' (requires 'option external-check'). \n " ,
2017-11-24 10:50:31 -05:00
" external-check command " , proxy_type_str ( curproxy ) , curproxy - > id ) ;
2014-06-19 23:30:16 -04:00
err_code | = ERR_WARN ;
clear = 1 ;
}
if ( curproxy - > check_command [ 0 ] ! = ' / ' & & ! curproxy - > check_path ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " Proxy '%s': '%s' does not have a leading '/' and 'external-check path' is not set. \n " ,
curproxy - > id , " external-check command " ) ;
2014-06-19 23:30:16 -04:00
cfgerr + + ;
}
if ( clear ) {
2021-02-20 04:46:51 -05:00
ha_free ( & curproxy - > check_command ) ;
2014-06-19 23:30:16 -04:00
}
}
if ( curproxy - > check_path ) {
if ( ( curproxy - > options2 & PR_O2_CHK_ANY ) ! = PR_O2_EXT_CHK ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " '%s' will be ignored for %s '%s' (requires 'option external-check'). \n " ,
2017-11-24 10:50:31 -05:00
" external-check path " , proxy_type_str ( curproxy ) , curproxy - > id ) ;
2014-06-19 23:30:16 -04:00
err_code | = ERR_WARN ;
2021-02-20 04:46:51 -05:00
ha_free ( & curproxy - > check_path ) ;
2014-06-19 23:30:16 -04:00
}
}
2007-01-01 17:11:07 -05:00
/* if a default backend was specified, let's find it */
if ( curproxy - > defbe . name ) {
struct proxy * target ;
2015-05-26 06:04:09 -04:00
target = proxy_be_by_name ( curproxy - > defbe . name ) ;
2007-11-03 18:41:58 -04:00
if ( ! target ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " Proxy '%s': unable to find required default_backend: '%s'. \n " ,
curproxy - > id , curproxy - > defbe . name ) ;
2007-01-01 17:11:07 -05:00
cfgerr + + ;
} else if ( target = = curproxy ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " Proxy '%s': loop detected for default_backend: '%s'. \n " ,
curproxy - > id , curproxy - > defbe . name ) ;
2009-07-23 07:36:36 -04:00
cfgerr + + ;
2015-05-26 06:04:09 -04:00
} else if ( target - > mode ! = curproxy - > mode & &
! ( curproxy - > mode = = PR_MODE_TCP & & target - > mode = = PR_MODE_HTTP ) ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " %s %s '%s' (%s:%d) tries to use incompatible %s %s '%s' (%s:%d) as its default backend (see 'mode'). \n " ,
proxy_mode_str ( curproxy - > mode ) , proxy_type_str ( curproxy ) , curproxy - > id ,
curproxy - > conf . file , curproxy - > conf . line ,
proxy_mode_str ( target - > mode ) , proxy_type_str ( target ) , target - > id ,
target - > conf . file , target - > conf . line ) ;
2015-05-26 06:04:09 -04:00
cfgerr + + ;
2007-01-01 17:11:07 -05:00
} else {
free ( curproxy - > defbe . name ) ;
curproxy - > defbe . be = target ;
2012-02-13 08:32:34 -05:00
/* Emit a warning if this proxy also has some servers */
if ( curproxy - > srv ) {
2017-11-24 10:50:31 -05:00
ha_warning ( " In proxy '%s', the 'default_backend' rule always has precedence over the servers, which will never be used. \n " ,
curproxy - > id ) ;
2012-02-13 08:32:34 -05:00
err_code | = ERR_WARN ;
}
MEDIUM: proxy: set PR_O_HTTP_UPG on implicit upgrades
When a TCP frontend uses an HTTP backend, the stream is automatically
upgraded and it results in a similar behavior as if a switch-mode http
rule was evaluated since stream_set_http_mode() gets called in both
situations and minimal HTTP analyzers are set.
In the current implementation, some postparsing checks are generating
errors or warnings when the frontend is in TCP mode with some HTTP options
set and no upgrade is expected (no switch-rule http). But as you can guess,
unfortunately this leads to issues when such "HTTP" only options are used
in a frontend that has implicit switching rules (that is, when the
frontend uses an HTTP backend for example), because in this case the
PR_O_HTTP_UPG will not be set, so the postparsing checks will consider
that some options are not relevant and will raise some warnings.
Consider the following example:
backend back
mode http
server s1 git.haproxy.org:80
frontend front
mode tcp
bind localhost:8080
http-request set-var(txn.test) str(TRUE),debug(WORKING,stderr)
use_backend back
By starting an haproxy instance with the above example conf, we end up
having this warning:
[WARNING] (400280) : config : 'http-request' rules ignored for frontend 'front' as they require HTTP mode.
However, by making a request on the frontend, we notice that the request
rules are still executed, and that's because the stream is effectively
upgraded as a result of an implicit upgrade:
[debug] WORKING: type=str <TRUE>
So this confirms the previous description: since implicit and explicit
upgrades result in approximately the same behavior on the frontend side,
we should consider them both when doing postparsing checks.
This is what we try to address in the following commit: PR_O_HTTP_UPG
flag is now more generic in the sense that it refers to either implicit
(through default_backend or use_backend rules) or explicit (switch-mode
rules) upgrades. Indeed, every time an HTTP or dynamic backend (where the
mode cannot be assumed during parsing) is encountered in default_backend
directive or use_backend rules, we explicitly position the upgrade flag
so that further checks that depend on the proxy being in HTTP context
don't report false warnings.
2023-12-05 09:58:49 -05:00
if ( target - > mode = = PR_MODE_HTTP ) {
/* at least one of the used backends will provoke an
* HTTP upgrade
*/
curproxy - > options | = PR_O_HTTP_UPG ;
}
2007-01-01 17:11:07 -05:00
}
}
2007-06-17 13:56:27 -04:00
/* find the target proxy for 'use_backend' rules */
list_for_each_entry ( rule , & curproxy - > switching_rules , list ) {
struct proxy * target ;
2013-11-19 05:43:06 -05:00
struct logformat_node * node ;
char * pxname ;
/* Try to parse the string as a log format expression. If the result
* of the parsing is only one entry containing a simple string , then
* it ' s a standard string corresponding to a static rule , thus the
* parsing is cancelled and be . name is restored to be resolved .
*/
pxname = rule - > be . name ;
2024-02-23 09:57:21 -05:00
lf_expr_init ( & rule - > be . expr ) ;
2016-11-24 17:57:54 -05:00
curproxy - > conf . args . ctx = ARGC_UBK ;
curproxy - > conf . args . file = rule - > file ;
curproxy - > conf . args . line = rule - > line ;
2016-11-22 18:41:28 -05:00
err = NULL ;
if ( ! parse_logformat_string ( pxname , curproxy , & rule - > be . expr , 0 , SMP_VAL_FE_HRQ_HDR , & err ) ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " Parsing [%s:%d]: failed to parse use_backend rule '%s' : %s. \n " ,
rule - > file , rule - > line , pxname , err ) ;
2016-11-22 18:41:28 -05:00
free ( err ) ;
2016-11-22 17:50:02 -05:00
cfgerr + + ;
continue ;
}
2024-03-25 06:29:58 -04:00
node = LIST_NEXT ( & rule - > be . expr . nodes . list , struct logformat_node * , list ) ;
2013-11-19 05:43:06 -05:00
2024-02-23 09:57:21 -05:00
if ( ! lf_expr_isempty ( & rule - > be . expr ) ) {
2024-03-25 06:29:58 -04:00
if ( node - > type ! = LOG_FMT_TEXT | | node - > list . n ! = & rule - > be . expr . nodes . list ) {
2013-11-19 05:43:06 -05:00
rule - > dynamic = 1 ;
free ( pxname ) ;
MEDIUM: proxy: set PR_O_HTTP_UPG on implicit upgrades
When a TCP frontend uses an HTTP backend, the stream is automatically
upgraded and it results in a similar behavior as if a switch-mode http
rule was evaluated since stream_set_http_mode() gets called in both
situations and minimal HTTP analyzers are set.
In the current implementation, some postparsing checks are generating
errors or warnings when the frontend is in TCP mode with some HTTP options
set and no upgrade is expected (no switch-rule http). But as you can guess,
unfortunately this leads to issues when such "HTTP" only options are used
in a frontend that has implicit switching rules (that is, when the
frontend uses an HTTP backend for example), because in this case the
PR_O_HTTP_UPG will not be set, so the postparsing checks will consider
that some options are not relevant and will raise some warnings.
Consider the following example:
backend back
mode http
server s1 git.haproxy.org:80
frontend front
mode tcp
bind localhost:8080
http-request set-var(txn.test) str(TRUE),debug(WORKING,stderr)
use_backend back
By starting an haproxy instance with the above example conf, we end up
having this warning:
[WARNING] (400280) : config : 'http-request' rules ignored for frontend 'front' as they require HTTP mode.
However, by making a request on the frontend, we notice that the request
rules are still executed, and that's because the stream is effectively
upgraded as a result of an implicit upgrade:
[debug] WORKING: type=str <TRUE>
So this confirms the previous description: since implicit and explicit
upgrades result in approximately the same behavior on the frontend side,
we should consider them both when doing postparsing checks.
This is what we try to address in the following commit: PR_O_HTTP_UPG
flag is now more generic in the sense that it refers to either implicit
(through default_backend or use_backend rules) or explicit (switch-mode
rules) upgrades. Indeed, every time an HTTP or dynamic backend (where the
mode cannot be assumed during parsing) is encountered in default_backend
directive or use_backend rules, we explicitly position the upgrade flag
so that further checks that depend on the proxy being in HTTP context
don't report false warnings.
2023-12-05 09:58:49 -05:00
/* backend is not yet known so we cannot assume its type,
* thus we should consider that at least one of the used
* backends may provoke HTTP upgrade
*/
curproxy - > options | = PR_O_HTTP_UPG ;
2013-11-19 05:43:06 -05:00
continue ;
}
2020-05-07 09:59:33 -04:00
/* Only one element in the list, a simple string: free the expression and
* fall back to static rule
*/
2024-02-23 09:57:21 -05:00
lf_expr_deinit ( & rule - > be . expr ) ;
2013-11-19 05:43:06 -05:00
}
rule - > dynamic = 0 ;
rule - > be . name = pxname ;
2007-06-17 13:56:27 -04:00
2015-05-26 06:04:09 -04:00
target = proxy_be_by_name ( rule - > be . name ) ;
2007-11-03 18:41:58 -04:00
if ( ! target ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " Proxy '%s': unable to find required use_backend: '%s'. \n " ,
curproxy - > id , rule - > be . name ) ;
2007-06-17 13:56:27 -04:00
cfgerr + + ;
} else if ( target = = curproxy ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " Proxy '%s': loop detected for use_backend: '%s'. \n " ,
curproxy - > id , rule - > be . name ) ;
2007-06-17 13:56:27 -04:00
cfgerr + + ;
2015-05-26 06:04:09 -04:00
} else if ( target - > mode ! = curproxy - > mode & &
! ( curproxy - > mode = = PR_MODE_TCP & & target - > mode = = PR_MODE_HTTP ) ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " %s %s '%s' (%s:%d) tries to use incompatible %s %s '%s' (%s:%d) in a 'use_backend' rule (see 'mode'). \n " ,
proxy_mode_str ( curproxy - > mode ) , proxy_type_str ( curproxy ) , curproxy - > id ,
curproxy - > conf . file , curproxy - > conf . line ,
proxy_mode_str ( target - > mode ) , proxy_type_str ( target ) , target - > id ,
target - > conf . file , target - > conf . line ) ;
2015-05-26 06:04:09 -04:00
cfgerr + + ;
2007-06-17 13:56:27 -04:00
} else {
2021-02-26 14:51:47 -05:00
ha_free ( & rule - > be . name ) ;
2007-06-17 13:56:27 -04:00
rule - > be . backend = target ;
MEDIUM: proxy: set PR_O_HTTP_UPG on implicit upgrades
When a TCP frontend uses an HTTP backend, the stream is automatically
upgraded and it results in a similar behavior as if a switch-mode http
rule was evaluated since stream_set_http_mode() gets called in both
situations and minimal HTTP analyzers are set.
In the current implementation, some postparsing checks are generating
errors or warnings when the frontend is in TCP mode with some HTTP options
set and no upgrade is expected (no switch-rule http). But as you can guess,
unfortunately this leads to issues when such "HTTP" only options are used
in a frontend that has implicit switching rules (that is, when the
frontend uses an HTTP backend for example), because in this case the
PR_O_HTTP_UPG will not be set, so the postparsing checks will consider
that some options are not relevant and will raise some warnings.
Consider the following example:
backend back
mode http
server s1 git.haproxy.org:80
frontend front
mode tcp
bind localhost:8080
http-request set-var(txn.test) str(TRUE),debug(WORKING,stderr)
use_backend back
By starting an haproxy instance with the above example conf, we end up
having this warning:
[WARNING] (400280) : config : 'http-request' rules ignored for frontend 'front' as they require HTTP mode.
However, by making a request on the frontend, we notice that the request
rules are still executed, and that's because the stream is effectively
upgraded as a result of an implicit upgrade:
[debug] WORKING: type=str <TRUE>
So this confirms the previous description: since implicit and explicit
upgrades result in approximately the same behavior on the frontend side,
we should consider them both when doing postparsing checks.
This is what we try to address in the following commit: PR_O_HTTP_UPG
flag is now more generic in the sense that it refers to either implicit
(through default_backend or use_backend rules) or explicit (switch-mode
rules) upgrades. Indeed, every time an HTTP or dynamic backend (where the
mode cannot be assumed during parsing) is encountered in default_backend
directive or use_backend rules, we explicitly position the upgrade flag
so that further checks that depend on the proxy being in HTTP context
don't report false warnings.
2023-12-05 09:58:49 -05:00
if ( target - > mode = = PR_MODE_HTTP ) {
/* at least one of the used backends will provoke an
* HTTP upgrade
*/
curproxy - > options | = PR_O_HTTP_UPG ;
}
2007-06-17 13:56:27 -04:00
}
2021-03-26 05:02:46 -04:00
err_code | = warnif_tcp_http_cond ( curproxy , rule - > cond ) ;
2007-06-17 13:56:27 -04:00
}
2014-09-16 06:17:36 -04:00
/* find the target server for 'use_server' rules */
2012-04-05 15:09:48 -04:00
list_for_each_entry ( srule , & curproxy - > server_rules , list ) {
2020-03-29 03:37:12 -04:00
struct server * target ;
struct logformat_node * node ;
char * server_name ;
/* We try to parse the string as a log format expression. If the result of the parsing
* is only one entry containing a single string , then it ' s a standard string corresponding
* to a static rule , thus the parsing is cancelled and we fall back to setting srv . ptr .
*/
server_name = srule - > srv . name ;
2024-02-23 09:57:21 -05:00
lf_expr_init ( & srule - > expr ) ;
2020-03-29 03:37:12 -04:00
curproxy - > conf . args . ctx = ARGC_USRV ;
err = NULL ;
if ( ! parse_logformat_string ( server_name , curproxy , & srule - > expr , 0 , SMP_VAL_FE_HRQ_HDR , & err ) ) {
ha_alert ( " Parsing [%s:%d]; use-server rule failed to parse log-format '%s' : %s. \n " ,
srule - > file , srule - > line , server_name , err ) ;
free ( err ) ;
cfgerr + + ;
continue ;
}
2024-03-25 06:29:58 -04:00
node = LIST_NEXT ( & srule - > expr . nodes . list , struct logformat_node * , list ) ;
2020-03-29 03:37:12 -04:00
2024-02-23 09:57:21 -05:00
if ( ! lf_expr_isempty ( & srule - > expr ) ) {
2024-03-25 06:29:58 -04:00
if ( node - > type ! = LOG_FMT_TEXT | | node - > list . n ! = & srule - > expr . nodes . list ) {
2020-03-29 03:37:12 -04:00
srule - > dynamic = 1 ;
free ( server_name ) ;
continue ;
}
2020-05-07 09:59:33 -04:00
/* Only one element in the list, a simple string: free the expression and
* fall back to static rule
*/
2024-02-23 09:57:21 -05:00
lf_expr_deinit ( & srule - > expr ) ;
2020-03-29 03:37:12 -04:00
}
srule - > dynamic = 0 ;
srule - > srv . name = server_name ;
target = findserver ( curproxy , srule - > srv . name ) ;
2021-03-26 05:02:46 -04:00
err_code | = warnif_tcp_http_cond ( curproxy , srule - > cond ) ;
2012-04-05 15:09:48 -04:00
if ( ! target ) {
2021-06-04 12:22:08 -04:00
ha_alert ( " %s '%s' : unable to find server '%s' referenced in a 'use-server' rule. \n " ,
2017-11-24 10:50:31 -05:00
proxy_type_str ( curproxy ) , curproxy - > id , srule - > srv . name ) ;
2012-04-05 15:09:48 -04:00
cfgerr + + ;
continue ;
}
2021-02-26 14:51:47 -05:00
ha_free ( & srule - > srv . name ) ;
2012-04-05 15:09:48 -04:00
srule - > srv . ptr = target ;
2021-08-23 08:05:07 -04:00
target - > flags | = SRV_F_NON_PURGEABLE ;
2012-04-05 15:09:48 -04:00
}
2010-01-04 09:45:53 -05:00
/* find the target table for 'stick' rules */
list_for_each_entry ( mrule , & curproxy - > sticking_rules , list ) {
2010-01-04 09:47:17 -05:00
curproxy - > be_req_ana | = AN_REQ_STICKING_RULES ;
if ( mrule - > flags & STK_IS_STORE )
curproxy - > be_rsp_ana | = AN_RES_STORE_RULES ;
2023-08-08 05:37:59 -04:00
if ( ! resolve_stick_rule ( curproxy , mrule ) )
2010-01-04 09:45:53 -05:00
cfgerr + + ;
2023-08-08 05:37:59 -04:00
2021-03-26 05:02:46 -04:00
err_code | = warnif_tcp_http_cond ( curproxy , mrule - > cond ) ;
2010-01-04 09:45:53 -05:00
}
/* find the target table for 'store response' rules */
list_for_each_entry ( mrule , & curproxy - > storersp_rules , list ) {
2010-01-04 09:47:17 -05:00
curproxy - > be_rsp_ana | = AN_RES_STORE_RULES ;
2023-08-08 05:37:59 -04:00
if ( ! resolve_stick_rule ( curproxy , mrule ) )
2010-01-04 09:45:53 -05:00
cfgerr + + ;
}
2021-03-25 12:19:04 -04:00
/* check validity for 'tcp-request' layer 4/5/6/7 rules */
cfgerr + = check_action_rules ( & curproxy - > tcp_req . l4_rules , curproxy , & err_code ) ;
cfgerr + = check_action_rules ( & curproxy - > tcp_req . l5_rules , curproxy , & err_code ) ;
cfgerr + = check_action_rules ( & curproxy - > tcp_req . inspect_rules , curproxy , & err_code ) ;
cfgerr + = check_action_rules ( & curproxy - > tcp_rep . inspect_rules , curproxy , & err_code ) ;
cfgerr + = check_action_rules ( & curproxy - > http_req_rules , curproxy , & err_code ) ;
cfgerr + = check_action_rules ( & curproxy - > http_res_rules , curproxy , & err_code ) ;
cfgerr + = check_action_rules ( & curproxy - > http_after_res_rules , curproxy , & err_code ) ;
2020-01-22 03:26:35 -05:00
2021-03-31 11:13:49 -04:00
/* Warn is a switch-mode http is used on a TCP listener with servers but no backend */
if ( ! curproxy - > defbe . name & & LIST_ISEMPTY ( & curproxy - > switching_rules ) & & curproxy - > srv ) {
if ( ( curproxy - > options & PR_O_HTTP_UPG ) & & curproxy - > mode = = PR_MODE_TCP )
ha_warning ( " Proxy '%s' : 'switch-mode http' configured for a %s %s with no backend. "
" Incoming connections upgraded to HTTP cannot be routed to TCP servers \n " ,
curproxy - > id , proxy_mode_str ( curproxy - > mode ) , proxy_type_str ( curproxy ) ) ;
}
2019-03-14 02:07:41 -04:00
if ( curproxy - > table & & curproxy - > table - > peers . name ) {
2017-07-13 03:07:09 -04:00
struct peers * curpeers ;
2010-09-23 12:39:19 -04:00
2017-07-13 03:07:09 -04:00
for ( curpeers = cfg_peers ; curpeers ; curpeers = curpeers - > next ) {
2019-03-14 02:07:41 -04:00
if ( strcmp ( curpeers - > id , curproxy - > table - > peers . name ) = = 0 ) {
2021-02-26 14:51:47 -05:00
ha_free ( & curproxy - > table - > peers . name ) ;
2019-03-14 02:07:41 -04:00
curproxy - > table - > peers . p = curpeers ;
2010-09-23 12:39:19 -04:00
break ;
}
}
if ( ! curpeers ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " Proxy '%s': unable to find sync peers '%s'. \n " ,
2019-03-14 02:07:41 -04:00
curproxy - > id , curproxy - > table - > peers . name ) ;
2021-02-26 14:51:47 -05:00
ha_free ( & curproxy - > table - > peers . name ) ;
2019-03-14 02:07:41 -04:00
curproxy - > table - > peers . p = NULL ;
2010-09-23 12:39:19 -04:00
cfgerr + + ;
}
2020-09-24 02:48:08 -04:00
else if ( curpeers - > disabled ) {
2015-05-01 14:02:17 -04:00
/* silently disable this peers section */
2019-03-14 02:07:41 -04:00
curproxy - > table - > peers . p = NULL ;
2015-05-01 14:02:17 -04:00
}
2010-09-23 12:39:19 -04:00
else if ( ! curpeers - > peers_fe ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " Proxy '%s': unable to find local peer '%s' in peers section '%s'. \n " ,
curproxy - > id , localpeer , curpeers - > id ) ;
2019-03-14 02:07:41 -04:00
curproxy - > table - > peers . p = NULL ;
2010-09-23 12:39:19 -04:00
cfgerr + + ;
}
}
2015-01-29 21:22:59 -05:00
if ( curproxy - > email_alert . mailers . name ) {
struct mailers * curmailers = mailers ;
for ( curmailers = mailers ; curmailers ; curmailers = curmailers - > next ) {
CLEANUP: Compare the return value of `XXXcmp()` functions with zero
According to coding-style.txt it is recommended to use:
`strcmp(a, b) == 0` instead of `!strcmp(a, b)`
So let's do this.
The change was performed by running the following (very long) coccinelle patch
on src/:
@@
statement S;
expression E;
expression F;
@@
if (
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
(
S
|
{ ... }
)
@@
statement S;
expression E;
expression F;
@@
if (
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
(
S
|
{ ... }
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) != 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
G &&
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
G ||
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
&& G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
|| G
)
@@
expression E;
expression F;
expression G;
@@
(
- !
(
dns_hostname_cmp
|
eb_memcmp
|
memcmp
|
strcasecmp
|
strcmp
|
strncasecmp
|
strncmp
)
- (E, F)
+ (E, F) == 0
)
2021-01-02 16:31:53 -05:00
if ( strcmp ( curmailers - > id , curproxy - > email_alert . mailers . name ) = = 0 )
2015-01-29 21:22:59 -05:00
break ;
}
if ( ! curmailers ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " Proxy '%s': unable to find mailers '%s'. \n " ,
curproxy - > id , curproxy - > email_alert . mailers . name ) ;
2015-01-29 21:22:59 -05:00
free_email_alert ( curproxy ) ;
cfgerr + + ;
}
2017-10-20 15:34:32 -04:00
else {
err = NULL ;
if ( init_email_alert ( curmailers , curproxy , & err ) ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " Proxy '%s': %s. \n " , curproxy - > id , err ) ;
2017-10-20 15:34:32 -04:00
free ( err ) ;
cfgerr + + ;
}
}
2015-01-29 21:22:59 -05:00
}
2024-04-22 08:42:09 -04:00
if ( curproxy - > uri_auth & & ! ( curproxy - > uri_auth - > flags & STAT_F_CONVDONE ) & &
2011-01-06 11:51:27 -05:00
! LIST_ISEMPTY ( & curproxy - > uri_auth - > http_req_rules ) & &
2010-01-29 13:29:32 -05:00
( curproxy - > uri_auth - > userlist | | curproxy - > uri_auth - > auth_realm ) ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " %s '%s': stats 'auth'/'realm' and 'http-request' can't be used at the same time. \n " ,
" proxy " , curproxy - > id ) ;
2010-01-29 13:29:32 -05:00
cfgerr + + ;
goto out_uri_auth_compat ;
}
2019-10-09 03:59:22 -04:00
if ( curproxy - > uri_auth & & curproxy - > uri_auth - > userlist & &
2024-04-22 08:42:09 -04:00
( ! ( curproxy - > uri_auth - > flags & STAT_F_CONVDONE ) | |
2019-10-09 03:59:22 -04:00
LIST_ISEMPTY ( & curproxy - > uri_auth - > http_req_rules ) ) ) {
2010-02-01 07:05:50 -05:00
const char * uri_auth_compat_req [ 10 ] ;
2015-08-04 13:35:46 -04:00
struct act_rule * rule ;
2021-03-08 11:31:39 -05:00
i = 0 ;
2010-02-01 07:05:50 -05:00
/* build the ACL condition from scratch. We're relying on anonymous ACLs for that */
uri_auth_compat_req [ i + + ] = " auth " ;
2010-01-29 13:29:32 -05:00
if ( curproxy - > uri_auth - > auth_realm ) {
2010-02-01 07:05:50 -05:00
uri_auth_compat_req [ i + + ] = " realm " ;
uri_auth_compat_req [ i + + ] = curproxy - > uri_auth - > auth_realm ;
}
2010-01-29 13:29:32 -05:00
2010-02-01 07:05:50 -05:00
uri_auth_compat_req [ i + + ] = " unless " ;
uri_auth_compat_req [ i + + ] = " { " ;
uri_auth_compat_req [ i + + ] = " http_auth(.internal-stats-userlist) " ;
uri_auth_compat_req [ i + + ] = " } " ;
uri_auth_compat_req [ i + + ] = " " ;
2010-01-29 13:29:32 -05:00
2011-01-06 11:51:27 -05:00
rule = parse_http_req_cond ( uri_auth_compat_req , " internal-stats-auth-compat " , 0 , curproxy ) ;
if ( ! rule ) {
2010-02-01 07:05:50 -05:00
cfgerr + + ;
break ;
2010-01-29 13:29:32 -05:00
}
2021-04-21 01:32:39 -04:00
LIST_APPEND ( & curproxy - > uri_auth - > http_req_rules , & rule - > list ) ;
2010-02-01 07:05:50 -05:00
2010-01-29 13:29:32 -05:00
if ( curproxy - > uri_auth - > auth_realm ) {
2021-02-20 04:46:51 -05:00
ha_free ( & curproxy - > uri_auth - > auth_realm ) ;
2010-01-29 13:29:32 -05:00
}
2024-04-22 08:42:09 -04:00
curproxy - > uri_auth - > flags | = STAT_F_CONVDONE ;
2010-01-29 13:29:32 -05:00
}
out_uri_auth_compat :
MEDIUM: tree-wide: logsrv struct becomes logger
When 'log' directive was implemented, the internal representation was
named 'struct logsrv', because the 'log' directive would directly point
to the log target, which used to be a (UDP) log server exclusively at
that time, hence the name.
But things have become more complex, since today 'log' directive can point
to ring targets (implicit, or named) for example.
Indeed, a 'log' directive does no longer reference the "final" server to
which the log will be sent, but instead it describes which log API and
parameters to use for transporting the log messages to the proper log
destination.
So now the term 'logsrv' is rather confusing and prevents us from
introducing a new level of abstraction because they would be mixed
with logsrv.
So in order to better designate this 'log' directive, and make it more
generic, we chose the word 'logger' which now replaces logsrv everywhere
it was used in the code (including related comments).
This is internal rewording, so no functional change should be expected
on user-side.
2023-09-11 09:06:53 -04:00
/* check whether we have a logger that uses RFC5424 log format */
list_for_each_entry ( tmplogger , & curproxy - > loggers , list ) {
if ( tmplogger - > format = = LOG_FORMAT_RFC5424 ) {
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
if ( ! curproxy - > logformat_sd . str ) {
2015-10-01 07:18:13 -04:00
/* set the default logformat_sd_string */
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
curproxy - > logformat_sd . str = default_rfc5424_sd_log_format ;
2015-10-01 07:18:13 -04:00
}
2015-09-22 10:05:32 -04:00
break ;
}
}
2015-09-19 16:35:44 -04:00
MAJOR: sample: maintain a per-proxy list of the fetch args to resolve
While ACL args were resolved after all the config was parsed, it was not the
case with sample fetch args because they're almost everywhere now.
The issue is that ACLs now solely rely on sample fetches, so their args
resolving doesn't work anymore. And many fetches involving a server, a
proxy or a userlist don't work at all.
The real issue is that at the bottom layers we have no information about
proxies, line numbers, even ACLs in order to report understandable errors,
and that at the top layers we have no visibility over the locations where
fetches are referenced (think log node).
After failing multiple unsatisfying solutions attempts, we now have a new
concept of args list. The principle is that every proxy has a list head
which contains a number of indications such as the config keyword, the
context where it's used, the file and line number, etc... and a list of
arguments. This list head is of the same type as the elements, so it
serves as a template for adding new elements. This way, it is filled from
top to bottom by the callers with the information they have (eg: line
numbers, ACL name, ...) and the lower layers just have to duplicate it and
add an element when they face an argument they cannot resolve yet.
Then at the end of the configuration parsing, a loop passes over each
proxy's list and resolves all the args in sequence. And this way there is
all necessary information to report verbose errors.
The first immediate benefit is that for the first time we got very precise
location of issues (arg number in a keyword in its context, ...). Second,
in order to do this we had to parse log-format and unique-id-format a bit
earlier, so that was a great opportunity for doing so when the directives
are encountered (unless it's a default section). This way, the recorded
line numbers for these args are the ones of the place where the log format
is declared, not the end of the file.
Userlists report slightly more information now. They're the only remaining
ones in the ACL resolving function.
2013-04-02 10:34:32 -04:00
/* compile the log format */
if ( ! ( curproxy - > cap & PR_CAP_FE ) ) {
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
lf_expr_deinit ( & curproxy - > logformat ) ;
lf_expr_deinit ( & curproxy - > logformat_sd ) ;
}
if ( curproxy - > logformat . str ) {
2013-04-12 12:13:46 -04:00
curproxy - > conf . args . ctx = ARGC_LOG ;
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
curproxy - > conf . args . file = curproxy - > logformat . conf . file ;
curproxy - > conf . args . line = curproxy - > logformat . conf . line ;
2016-11-22 18:41:28 -05:00
err = NULL ;
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
if ( ! lf_expr_compile ( & curproxy - > logformat , & curproxy - > conf . args ,
2020-06-23 12:16:44 -04:00
LOG_OPT_MANDATORY | LOG_OPT_MERGE_SPACES ,
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
SMP_VAL_FE_LOG_END , & err ) | |
! lf_expr_postcheck ( & curproxy - > logformat , curproxy , & err ) ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " Parsing [%s:%d]: failed to parse log-format : %s. \n " ,
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
curproxy - > logformat . conf . file , curproxy - > logformat . conf . line , err ) ;
2016-11-22 18:41:28 -05:00
free ( err ) ;
2016-11-22 17:50:02 -05:00
cfgerr + + ;
}
2013-04-12 12:13:46 -04:00
curproxy - > conf . args . file = NULL ;
curproxy - > conf . args . line = 0 ;
}
MAJOR: sample: maintain a per-proxy list of the fetch args to resolve
While ACL args were resolved after all the config was parsed, it was not the
case with sample fetch args because they're almost everywhere now.
The issue is that ACLs now solely rely on sample fetches, so their args
resolving doesn't work anymore. And many fetches involving a server, a
proxy or a userlist don't work at all.
The real issue is that at the bottom layers we have no information about
proxies, line numbers, even ACLs in order to report understandable errors,
and that at the top layers we have no visibility over the locations where
fetches are referenced (think log node).
After failing multiple unsatisfying solutions attempts, we now have a new
concept of args list. The principle is that every proxy has a list head
which contains a number of indications such as the config keyword, the
context where it's used, the file and line number, etc... and a list of
arguments. This list head is of the same type as the elements, so it
serves as a template for adding new elements. This way, it is filled from
top to bottom by the callers with the information they have (eg: line
numbers, ACL name, ...) and the lower layers just have to duplicate it and
add an element when they face an argument they cannot resolve yet.
Then at the end of the configuration parsing, a loop passes over each
proxy's list and resolves all the args in sequence. And this way there is
all necessary information to report verbose errors.
The first immediate benefit is that for the first time we got very precise
location of issues (arg number in a keyword in its context, ...). Second,
in order to do this we had to parse log-format and unique-id-format a bit
earlier, so that was a great opportunity for doing so when the directives
are encountered (unless it's a default section). This way, the recorded
line numbers for these args are the ones of the place where the log format
is declared, not the end of the file.
Userlists report slightly more information now. They're the only remaining
ones in the ACL resolving function.
2013-04-02 10:34:32 -04:00
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
if ( curproxy - > logformat_sd . str ) {
2015-09-25 13:17:44 -04:00
curproxy - > conf . args . ctx = ARGC_LOGSD ;
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
curproxy - > conf . args . file = curproxy - > logformat_sd . conf . file ;
curproxy - > conf . args . line = curproxy - > logformat_sd . conf . line ;
2016-11-22 18:41:28 -05:00
err = NULL ;
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
if ( ! lf_expr_compile ( & curproxy - > logformat_sd , & curproxy - > conf . args ,
2020-06-23 12:16:44 -04:00
LOG_OPT_MANDATORY | LOG_OPT_MERGE_SPACES ,
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously.)
2024-03-05 09:44:43 -05:00
SMP_VAL_FE_LOG_END , & err ) | |
! add_to_logformat_list ( NULL , NULL , LF_SEPARATOR , & curproxy - > logformat_sd , & err ) | |
! lf_expr_postcheck ( & curproxy - > logformat_sd , curproxy , & err ) ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " Parsing [%s:%d]: failed to parse log-format-sd : %s. \n " ,
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helper funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously.)
2024-03-05 09:44:43 -05:00
curproxy - > logformat_sd . conf . file , curproxy - > logformat_sd . conf . line , err ) ;
2016-11-22 18:41:28 -05:00
free ( err ) ;
2016-11-22 17:50:02 -05:00
cfgerr + + ;
}
2015-09-25 13:17:44 -04:00
curproxy - > conf . args . file = NULL ;
curproxy - > conf . args . line = 0 ;
}
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
if ( curproxy - > format_unique_id . str ) {
2021-10-13 11:22:17 -04:00
int where = 0 ;
2013-04-12 12:13:46 -04:00
curproxy - > conf . args . ctx = ARGC_UIF ;
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
curproxy - > conf . args . file = curproxy - > format_unique_id . conf . file ;
curproxy - > conf . args . line = curproxy - > format_unique_id . conf . line ;
2016-11-22 18:41:28 -05:00
err = NULL ;
2021-10-13 11:22:17 -04:00
if ( curproxy - > cap & PR_CAP_FE )
where | = SMP_VAL_FE_HRQ_HDR ;
if ( curproxy - > cap & PR_CAP_BE )
where | = SMP_VAL_BE_HRQ_HDR ;
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
if ( ! lf_expr_compile ( & curproxy - > format_unique_id , & curproxy - > conf . args ,
LOG_OPT_HTTP | LOG_OPT_MERGE_SPACES , where , & err ) | |
! lf_expr_postcheck ( & curproxy - > format_unique_id , curproxy , & err ) ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " Parsing [%s:%d]: failed to parse unique-id : %s. \n " ,
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
curproxy - > format_unique_id . conf . file , curproxy - > format_unique_id . conf . line , err ) ;
2016-11-22 18:41:28 -05:00
free ( err ) ;
2016-11-22 17:50:02 -05:00
cfgerr + + ;
}
2013-04-12 12:13:46 -04:00
curproxy - > conf . args . file = NULL ;
curproxy - > conf . args . line = 0 ;
}
MAJOR: sample: maintain a per-proxy list of the fetch args to resolve
While ACL args were resolved after all the config was parsed, it was not the
case with sample fetch args because they're almost everywhere now.
The issue is that ACLs now solely rely on sample fetches, so their args
resolving doesn't work anymore. And many fetches involving a server, a
proxy or a userlist don't work at all.
The real issue is that at the bottom layers we have no information about
proxies, line numbers, even ACLs in order to report understandable errors,
and that at the top layers we have no visibility over the locations where
fetches are referenced (think log node).
After multiple failed and unsatisfying solution attempts, we now have a new
concept of args list. The principle is that every proxy has a list head
which contains a number of indications such as the config keyword, the
context where it's used, the file and line number, etc... and a list of
arguments. This list head is of the same type as the elements, so it
serves as a template for adding new elements. This way, it is filled from
top to bottom by the callers with the information they have (eg: line
numbers, ACL name, ...) and the lower layers just have to duplicate it and
add an element when they face an argument they cannot resolve yet.
Then at the end of the configuration parsing, a loop passes over each
proxy's list and resolves all the args in sequence. And this way there is
all necessary information to report verbose errors.
The first immediate benefit is that for the first time we got very precise
location of issues (arg number in a keyword in its context, ...). Second,
in order to do this we had to parse log-format and unique-id-format a bit
earlier, so that was a great opportunity for doing so when the directives
are encountered (unless it's a default section). This way, the recorded
line numbers for these args are the ones of the place where the log format
is declared, not the end of the file.
Userlists report slightly more information now. They're the only remaining
ones in the ACL resolving function.
2013-04-02 10:34:32 -04:00
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
if ( curproxy - > logformat_error . str ) {
2021-08-31 06:08:52 -04:00
curproxy - > conf . args . ctx = ARGC_LOG ;
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
curproxy - > conf . args . file = curproxy - > logformat_error . conf . file ;
curproxy - > conf . args . line = curproxy - > logformat_error . conf . line ;
2021-08-31 06:08:52 -04:00
err = NULL ;
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
if ( ! lf_expr_compile ( & curproxy - > logformat_error , & curproxy - > conf . args ,
2021-08-31 06:08:52 -04:00
LOG_OPT_MANDATORY | LOG_OPT_MERGE_SPACES ,
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
SMP_VAL_FE_LOG_END , & err ) | |
! lf_expr_postcheck ( & curproxy - > logformat_error , curproxy , & err ) ) {
2021-08-31 06:08:52 -04:00
ha_alert ( " Parsing [%s:%d]: failed to parse error-log-format : %s. \n " ,
MEDIUM: proxy/log: leverage lf_expr API for logformat preparsing
Currently, the way proxy-oriented logformat directives are handled is way
too complicated. Indeed, "log-format", "log-format-error", "log-format-sd"
and "unique-id-format" all rely on preparsing hints stored inside
proxy->conf member struct. Those preparsing hints include the original
string that should be compiled once the proxy parameters are known plus
the config file and line number where the string was found to generate
precise error messages in case of failure during the compiling process
that happens within check_config_validity().
Now that lf_expr API permits to compile a lf_expr struct that was
previously prepared (with original string and config hints), let's
leverage lf_expr_compile() from check_config_validity() and instead
of relying on individual proxy->conf hints for each logformat expression,
store string and config hints in the lf_expr struct directly and use
lf_expr helpers funcs to handle them when relevant (ie: original
logformat string freeing is now done at a central place inside
lf_expr_deinit(), which allows for some simplifications)
Doing so allows us to greatly simplify the preparsing logic for those 4
proxy directives, and to finally save some space in the proxy struct.
Also, since httpclient proxy has its "logformat" automatically compiled
in check_config_validity(), we now use the file hint from the logformat
expression struct to set an explicit name that will be reported in case
of error ("parsing [httpclient:0] : ...") and remove the extraneous check
in httpclient_precheck() (logformat was parsed twice previously..)
2024-03-05 09:44:43 -05:00
curproxy - > logformat_error . conf . file , curproxy - > logformat_error . conf . line , err ) ;
2021-08-31 06:08:52 -04:00
free ( err ) ;
cfgerr + + ;
}
curproxy - > conf . args . file = NULL ;
curproxy - > conf . args . line = 0 ;
}
2023-09-19 04:51:53 -04:00
/* "balance hash" needs to compile its expression
* ( log backends will handle this in proxy log postcheck )
*/
if ( curproxy - > mode ! = PR_MODE_SYSLOG & &
( curproxy - > lbprm . algo & BE_LB_ALGO ) = = BE_LB_ALGO_SMP ) {
2022-04-25 04:25:34 -04:00
int idx = 0 ;
const char * args [ ] = {
curproxy - > lbprm . arg_str ,
NULL ,
} ;
err = NULL ;
curproxy - > conf . args . ctx = ARGC_USRV ; // same context as use_server.
curproxy - > lbprm . expr =
sample_parse_expr ( ( char * * ) args , & idx ,
curproxy - > conf . file , curproxy - > conf . line ,
& err , & curproxy - > conf . args , NULL ) ;
if ( ! curproxy - > lbprm . expr ) {
ha_alert ( " %s '%s' [%s:%d]: failed to parse 'balance hash' expression '%s' in : %s. \n " ,
proxy_type_str ( curproxy ) , curproxy - > id ,
curproxy - > conf . file , curproxy - > conf . line ,
curproxy - > lbprm . arg_str , err ) ;
ha_free ( & err ) ;
cfgerr + + ;
}
else if ( ! ( curproxy - > lbprm . expr - > fetch - > val & SMP_VAL_BE_SET_SRV ) ) {
ha_alert ( " %s '%s' [%s:%d]: error detected while parsing 'balance hash' expression '%s' "
" which requires information from %s, which is not available here. \n " ,
proxy_type_str ( curproxy ) , curproxy - > id ,
curproxy - > conf . file , curproxy - > conf . line ,
curproxy - > lbprm . arg_str , sample_src_names ( curproxy - > lbprm . expr - > fetch - > use ) ) ;
cfgerr + + ;
}
else if ( curproxy - > mode = = PR_MODE_HTTP & & ( curproxy - > lbprm . expr - > fetch - > use & SMP_USE_L6REQ ) ) {
ha_warning ( " %s '%s' [%s:%d]: L6 sample fetch <%s> will be ignored in 'balance hash' expression in HTTP mode. \n " ,
proxy_type_str ( curproxy ) , curproxy - > id ,
curproxy - > conf . file , curproxy - > conf . line ,
curproxy - > lbprm . arg_str ) ;
}
else
curproxy - > http_needed | = ! ! ( curproxy - > lbprm . expr - > fetch - > use & SMP_USE_HTTP_ANY ) ;
}
2023-01-09 05:09:03 -05:00
2014-01-22 12:38:02 -05:00
/* only now we can check if some args remain unresolved.
* This must be done after the users and groups resolution .
*/
2021-03-26 11:11:55 -04:00
err = NULL ;
i = smp_resolve_args ( curproxy , & err ) ;
cfgerr + = i ;
if ( i ) {
indent_msg ( & err , 8 ) ;
ha_alert ( " %s%s \n " , i > 1 ? " multiple argument resolution errors: " : " " , err ) ;
ha_free ( & err ) ;
} else
MAJOR: sample: maintain a per-proxy list of the fetch args to resolve
While ACL args were resolved after all the config was parsed, it was not the
case with sample fetch args because they're almost everywhere now.
The issue is that ACLs now solely rely on sample fetches, so their args
resolving doesn't work anymore. And many fetches involving a server, a
proxy or a userlist don't work at all.
The real issue is that at the bottom layers we have no information about
proxies, line numbers, even ACLs in order to report understandable errors,
and that at the top layers we have no visibility over the locations where
fetches are referenced (think log node).
After failing multiple unsatisfying solutions attempts, we now have a new
concept of args list. The principle is that every proxy has a list head
which contains a number of indications such as the config keyword, the
context where it's used, the file and line number, etc... and a list of
arguments. This list head is of the same type as the elements, so it
serves as a template for adding new elements. This way, it is filled from
top to bottom by the callers with the information they have (eg: line
numbers, ACL name, ...) and the lower layers just have to duplicate it and
add an element when they face an argument they cannot resolve yet.
Then at the end of the configuration parsing, a loop passes over each
proxy's list and resolves all the args in sequence. And this way there is
all necessary information to report verbose errors.
The first immediate benefit is that for the first time we got very precise
location of issues (arg number in a keyword in its context, ...). Second,
in order to do this we had to parse log-format and unique-id-format a bit
earlier, so that was a great opportunity for doing so when the directives
are encountered (unless it's a default section). This way, the recorded
line numbers for these args are the ones of the place where the log format
is declared, not the end of the file.
Userlists report slightly more information now. They're the only remaining
ones in the ACL resolving function.
2013-04-02 10:34:32 -04:00
cfgerr + = acl_find_targets ( curproxy ) ;
2010-01-29 13:26:18 -05:00
2021-08-13 09:21:12 -04:00
if ( ! ( curproxy - > cap & PR_CAP_INT ) & & ( curproxy - > mode = = PR_MODE_TCP | | curproxy - > mode = = PR_MODE_HTTP ) & &
2008-07-06 18:09:58 -04:00
( ( ( curproxy - > cap & PR_CAP_FE ) & & ! curproxy - > timeout . client ) | |
2007-05-12 16:35:00 -04:00
( ( curproxy - > cap & PR_CAP_BE ) & & ( curproxy - > srv ) & &
2012-05-12 06:50:00 -04:00
( ! curproxy - > timeout . connect | |
( ! curproxy - > timeout . server & & ( curproxy - > mode = = PR_MODE_HTTP | | ! curproxy - > timeout . tunnel ) ) ) ) ) ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " missing timeouts for %s '%s'. \n "
2017-11-24 10:50:31 -05:00
" | While not properly invalid, you will certainly encounter various problems \n "
" | with such a configuration. To fix this, please ensure that all following \n "
" | timeouts are set to a non-zero value: 'client', 'connect', 'server'. \n " ,
proxy_type_str ( curproxy ) , curproxy - > id ) ;
2009-07-23 07:36:36 -04:00
err_code | = ERR_WARN ;
2006-07-08 11:28:09 -04:00
}
2006-07-09 10:42:34 -04:00
2007-12-02 18:36:16 -05:00
/* Historically, the tarpit and queue timeouts were inherited from contimeout.
* We must still support older configurations , so let ' s find out whether those
* parameters have been set or must be copied from contimeouts .
*/
2021-02-12 05:14:35 -05:00
if ( ! curproxy - > timeout . tarpit )
curproxy - > timeout . tarpit = curproxy - > timeout . connect ;
if ( ( curproxy - > cap & PR_CAP_BE ) & & ! curproxy - > timeout . queue )
curproxy - > timeout . queue = curproxy - > timeout . connect ;
2007-12-02 18:36:16 -05:00
2020-04-15 05:32:03 -04:00
if ( ( curproxy - > tcpcheck_rules . flags & TCPCHK_RULES_UNUSED_TCP_RS ) ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " %s '%s' uses tcp-check rules without 'option tcp-check', so the rules are ignored. \n " ,
2017-11-24 10:50:31 -05:00
proxy_type_str ( curproxy ) , curproxy - > id ) ;
2014-06-13 12:30:23 -04:00
err_code | = ERR_WARN ;
}
2012-11-21 18:17:38 -05:00
/* ensure that cookie capture length is not too large */
if ( curproxy - > capture_len > = global . tune . cookie_len ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " truncating capture length to %d bytes for %s '%s'. \n " ,
2017-11-24 10:50:31 -05:00
global . tune . cookie_len - 1 , proxy_type_str ( curproxy ) , curproxy - > id ) ;
2012-11-21 18:17:38 -05:00
err_code | = ERR_WARN ;
curproxy - > capture_len = global . tune . cookie_len - 1 ;
}
2007-05-13 16:46:04 -04:00
/* The small pools required for the capture lists */
2012-03-24 03:33:05 -04:00
if ( curproxy - > nb_req_cap ) {
2014-06-13 06:23:06 -04:00
curproxy - > req_cap_pool = create_pool ( " ptrcap " ,
curproxy - > nb_req_cap * sizeof ( char * ) ,
MEM_F_SHARED ) ;
2012-03-24 03:33:05 -04:00
}
if ( curproxy - > nb_rsp_cap ) {
2014-06-13 06:23:06 -04:00
curproxy - > rsp_cap_pool = create_pool ( " ptrcap " ,
curproxy - > nb_rsp_cap * sizeof ( char * ) ,
MEM_F_SHARED ) ;
2012-03-24 03:33:05 -04:00
}
2007-05-13 16:46:04 -04:00
2015-08-19 10:44:03 -04:00
switch ( curproxy - > load_server_state_from_file ) {
case PR_SRV_STATE_FILE_UNSPEC :
curproxy - > load_server_state_from_file = PR_SRV_STATE_FILE_NONE ;
break ;
case PR_SRV_STATE_FILE_GLOBAL :
if ( ! global . server_state_file ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " backend '%s' configured to load server state file from global section 'server-state-file' directive. Unfortunately, 'server-state-file' is not set! \n " ,
2017-11-24 10:50:31 -05:00
curproxy - > id ) ;
2015-08-19 10:44:03 -04:00
err_code | = ERR_WARN ;
}
break ;
}
2006-06-25 20:48:02 -04:00
/* first, we will invert the servers list order */
newsrv = NULL ;
while ( curproxy - > srv ) {
struct server * next ;
next = curproxy - > srv - > next ;
curproxy - > srv - > next = newsrv ;
newsrv = curproxy - > srv ;
if ( ! next )
break ;
curproxy - > srv = next ;
}
2014-01-03 06:14:34 -05:00
/* Check that no server name conflicts. This causes trouble in the stats.
* We only emit a warning for the first conflict affecting each server ,
* in order to avoid combinatory explosion if all servers have the same
* name . We do that only for servers which do not have an explicit ID ,
* because these IDs were made also for distinguishing them and we don ' t
2024-09-20 11:12:04 -04:00
* want to annoy people who correctly manage them . Since servers names
* are stored in a tree before landing here , we simply have to check for
* the current server ' s duplicates to spot conflicts .
2014-01-03 06:14:34 -05:00
*/
for ( newsrv = curproxy - > srv ; newsrv ; newsrv = newsrv - > next ) {
struct server * other_srv ;
2024-09-20 11:12:04 -04:00
/* Note: internal servers are not always registered and
* they do not conflict .
*/
if ( ! newsrv - > conf . name . node . leaf_p )
2014-01-03 06:14:34 -05:00
continue ;
2024-09-20 11:12:04 -04:00
for ( other_srv = newsrv ;
( other_srv = container_of_safe ( ebpt_prev_dup ( & other_srv - > conf . name ) ,
struct server , conf . name ) ) ; ) {
if ( ! newsrv - > puid & & ! other_srv - > puid ) {
2019-05-27 13:31:06 -04:00
ha_alert ( " parsing [%s:%d] : %s '%s', another server named '%s' was already defined at line %d, please use distinct names. \n " ,
2017-11-24 10:50:31 -05:00
newsrv - > conf . file , newsrv - > conf . line ,
proxy_type_str ( curproxy ) , curproxy - > id ,
newsrv - > id , other_srv - > conf . line ) ;
2019-05-27 13:31:06 -04:00
cfgerr + + ;
2014-01-03 06:14:34 -05:00
break ;
}
2024-09-20 11:15:11 -04:00
ha_warning ( " parsing [%s:%d] : %s '%s', another server named '%s' was already defined at line %d. This is dangerous and will not be supported anymore in version 3.3. Please use distinct names. \n " ,
newsrv - > conf . file , newsrv - > conf . line ,
proxy_type_str ( curproxy ) , curproxy - > id ,
newsrv - > id , other_srv - > conf . line ) ;
2014-01-03 06:14:34 -05:00
}
}
2010-05-25 17:03:02 -04:00
/* assign automatic UIDs to servers which don't have one yet */
next_id = 1 ;
newsrv = curproxy - > srv ;
while ( newsrv ! = NULL ) {
if ( ! newsrv - > puid ) {
/* server ID not set, use automatic numbering with first
* spare entry starting with next_svid .
*/
next_id = get_next_id ( & curproxy - > conf . used_server_id , next_id ) ;
newsrv - > conf . id . key = newsrv - > puid = next_id ;
eb32_insert ( & curproxy - > conf . used_server_id , & newsrv - > conf . id ) ;
}
2021-06-14 11:04:25 -04:00
2010-05-25 17:03:02 -04:00
next_id + + ;
newsrv = newsrv - > next ;
}
2007-11-15 17:26:18 -05:00
curproxy - > lbprm . wmult = 1 ; /* default weight multiplier */
2007-11-19 13:10:18 -05:00
curproxy - > lbprm . wdiv = 1 ; /* default weight divider */
2007-11-15 17:26:18 -05:00
2012-01-20 07:12:32 -05:00
/*
* If this server supports a maxconn parameter , it needs a dedicated
* tasks to fill the emptied slots when a connection leaves .
* Also , resolve deferred tracking dependency if needed .
*/
newsrv = curproxy - > srv ;
while ( newsrv ! = NULL ) {
2021-05-28 04:34:01 -04:00
set_usermsgs_ctx ( newsrv - > conf . file , newsrv - > conf . line , & newsrv - > obj_type ) ;
2023-02-08 05:49:02 -05:00
srv_minmax_conn_apply ( newsrv ) ;
2012-01-20 07:12:32 -05:00
2020-11-14 13:25:33 -05:00
/* this will also properly set the transport layer for
* prod and checks
* if default - server have use_ssl , prerare ssl init
* without activating it */
if ( newsrv - > use_ssl = = 1 | | newsrv - > check . use_ssl = = 1 | |
2021-12-01 03:50:41 -05:00
( newsrv - > proxy - > options & PR_O_TCPCHK_SSL ) | |
( ( newsrv - > flags & SRV_F_DEFSRV_USE_SSL ) & & newsrv - > use_ssl ! = 1 ) ) {
2016-12-22 15:16:08 -05:00
if ( xprt_get ( XPRT_SSL ) & & xprt_get ( XPRT_SSL ) - > prepare_srv )
cfgerr + = xprt_get ( XPRT_SSL ) - > prepare_srv ( newsrv ) ;
}
2012-10-11 08:00:19 -04:00
2017-01-23 17:36:45 -05:00
if ( ( newsrv - > flags & SRV_F_FASTOPEN ) & &
( ( curproxy - > retry_type & ( PR_RE_DISCONNECTED | PR_RE_TIMEOUT ) ) ! =
( PR_RE_DISCONNECTED | PR_RE_TIMEOUT ) ) )
2021-05-28 04:34:01 -04:00
ha_warning ( " server has tfo activated, the backend should be configured with at least 'conn-failure', 'empty-response' and 'response-timeout' or we wouldn't be able to retry the connection on failure. \n " ) ;
2017-01-23 17:36:45 -05:00
2012-01-20 07:12:32 -05:00
if ( newsrv - > trackit ) {
2021-07-13 04:35:23 -04:00
if ( srv_apply_track ( newsrv , curproxy ) ) {
+ + cfgerr ;
2012-01-20 07:12:32 -05:00
goto next_srv ;
}
}
2015-04-13 19:15:08 -04:00
2012-01-20 07:12:32 -05:00
next_srv :
2021-05-28 04:34:01 -04:00
reset_usermsgs_ctx ( ) ;
2012-01-20 07:12:32 -05:00
newsrv = newsrv - > next ;
}
MINOR: server: Add dynamic session cookies.
This adds a new "dynamic" keyword for the cookie option. If set, a cookie
will be generated for each server (assuming one isn't already provided on
the "server" line), from the IP of the server, the TCP port, and a secret
key provided. To provide the secret key, a new keyword has been added,
"dynamic-cookie-key", for backends.
Example :
backend bk_web
balance roundrobin
dynamic-cookie-key "bla"
cookie WEBSRV insert dynamic
server s1 127.0.0.1:80 check
server s2 192.168.56.1:80 check
This is a first step to be able to dynamically add and remove servers,
without modifying the configuration file, and still have all the load
balancers redirect the traffic to the right server.
Provide a way to generate session cookies, based on the IP address of the
server, the TCP port, and a secret key provided.
2017-03-14 15:01:29 -04:00
/*
* Try to generate dynamic cookies for servers now .
* It couldn ' t be done earlier , since at the time we parsed
* the server line , we may not have known yet that we
* should use dynamic cookies , or the secret key may not
* have been provided yet .
*/
if ( curproxy - > ck_opts & PR_CK_DYNAMIC ) {
newsrv = curproxy - > srv ;
while ( newsrv ! = NULL ) {
srv_set_dyncookie ( newsrv ) ;
newsrv = newsrv - > next ;
}
}
2009-10-03 06:21:20 -04:00
/* We have to initialize the server lookup mechanism depending
2018-11-15 17:04:19 -05:00
* on what LB algorithm was chosen .
2009-10-03 06:21:20 -04:00
*/
curproxy - > lbprm . algo & = ~ ( BE_LB_LKUP | BE_LB_PROP_DYN ) ;
switch ( curproxy - > lbprm . algo & BE_LB_KIND ) {
case BE_LB_KIND_RR :
2009-10-03 06:56:50 -04:00
if ( ( curproxy - > lbprm . algo & BE_LB_PARM ) = = BE_LB_RR_STATIC ) {
curproxy - > lbprm . algo | = BE_LB_LKUP_MAP ;
init_server_map ( curproxy ) ;
2018-05-03 01:20:40 -04:00
} else if ( ( curproxy - > lbprm . algo & BE_LB_PARM ) = = BE_LB_RR_RANDOM ) {
curproxy - > lbprm . algo | = BE_LB_LKUP_CHTREE | BE_LB_PROP_DYN ;
2021-05-19 10:40:28 -04:00
if ( chash_init_server_tree ( curproxy ) < 0 ) {
cfgerr + + ;
}
2009-10-03 06:56:50 -04:00
} else {
curproxy - > lbprm . algo | = BE_LB_LKUP_RRTREE | BE_LB_PROP_DYN ;
fwrr_init_server_groups ( curproxy ) ;
}
2009-10-03 06:21:20 -04:00
break ;
[MEDIUM] backend: implement consistent hashing variation
Consistent hashing provides some interesting advantages over common
hashing. It avoids full redistribution in case of a server failure,
or when expanding the farm. This has a cost however, the hashing is
far from being perfect, as we associate a server to a request by
searching the server with the closest key in a tree. Since servers
appear multiple times based on their weights, it is recommended to
use weights larger than approximately 10-20 in order to smoothen
the distribution a bit.
In some cases, playing with weights will be the only solution to
make a server appear more often and increase chances of being picked,
so stats are very important with consistent hashing.
In order to indicate the type of hashing, use :
hash-type map-based (default, old one)
hash-type consistent (new one)
Consistent hashing can make sense in a cache farm, in order not
to redistribute everyone when a cache changes state. It could also
probably be used for long sessions such as terminal sessions, though
that has not been attempted yet.
More details on this method of hashing here :
http://www.spiteful.com/2008/03/17/programmers-toolbox-part-3-consistent-hashing/
2009-10-01 01:52:15 -04:00
2012-02-13 10:57:44 -05:00
case BE_LB_KIND_CB :
2012-02-13 11:12:08 -05:00
if ( ( curproxy - > lbprm . algo & BE_LB_PARM ) = = BE_LB_CB_LC ) {
curproxy - > lbprm . algo | = BE_LB_LKUP_LCTREE | BE_LB_PROP_DYN ;
fwlc_init_server_tree ( curproxy ) ;
} else {
curproxy - > lbprm . algo | = BE_LB_LKUP_FSTREE | BE_LB_PROP_DYN ;
fas_init_server_tree ( curproxy ) ;
}
2009-10-03 06:21:20 -04:00
break ;
[MEDIUM] backend: implement consistent hashing variation
Consistent hashing provides some interesting advantages over common
hashing. It avoids full redistribution in case of a server failure,
or when expanding the farm. This has a cost however, the hashing is
far from being perfect, as we associate a server to a request by
searching the server with the closest key in a tree. Since servers
appear multiple times based on their weights, it is recommended to
use weights larger than approximately 10-20 in order to smoothen
the distribution a bit.
In some cases, playing with weights will be the only solution to
make a server appear more often and increase chances of being picked,
so stats are very important with consistent hashing.
In order to indicate the type of hashing, use :
hash-type map-based (default, old one)
hash-type consistent (new one)
Consistent hashing can make sense in a cache farm, in order not
to redistribute everyone when a cache changes state. It could also
probably be used for long sessions such as terminal sessions, though
that has not been attempted yet.
More details on this method of hashing here :
http://www.spiteful.com/2008/03/17/programmers-toolbox-part-3-consistent-hashing/
2009-10-01 01:52:15 -04:00
2009-10-03 06:21:20 -04:00
case BE_LB_KIND_HI :
[MEDIUM] backend: implement consistent hashing variation
Consistent hashing provides some interesting advantages over common
hashing. It avoids full redistribution in case of a server failure,
or when expanding the farm. This has a cost however, the hashing is
far from being perfect, as we associate a server to a request by
searching the server with the closest key in a tree. Since servers
appear multiple times based on their weights, it is recommended to
use weights larger than approximately 10-20 in order to smoothen
the distribution a bit.
In some cases, playing with weights will be the only solution to
make a server appear more often and increase chances of being picked,
so stats are very important with consistent hashing.
In order to indicate the type of hashing, use :
hash-type map-based (default, old one)
hash-type consistent (new one)
Consistent hashing can make sense in a cache farm, in order not
to redistribute everyone when a cache changes state. It could also
probably be used for long sessions such as terminal sessions, though
that has not been attempted yet.
More details on this method of hashing here :
http://www.spiteful.com/2008/03/17/programmers-toolbox-part-3-consistent-hashing/
2009-10-01 01:52:15 -04:00
if ( ( curproxy - > lbprm . algo & BE_LB_HASH_TYPE ) = = BE_LB_HASH_CONS ) {
curproxy - > lbprm . algo | = BE_LB_LKUP_CHTREE | BE_LB_PROP_DYN ;
2021-05-19 10:40:28 -04:00
if ( chash_init_server_tree ( curproxy ) < 0 ) {
cfgerr + + ;
}
[MEDIUM] backend: implement consistent hashing variation
Consistent hashing provides some interesting advantages over common
hashing. It avoids full redistribution in case of a server failure,
or when expanding the farm. This has a cost however, the hashing is
far from being perfect, as we associate a server to a request by
searching the server with the closest key in a tree. Since servers
appear multiple times based on their weights, it is recommended to
use weights larger than approximately 10-20 in order to smoothen
the distribution a bit.
In some cases, playing with weights will be the only solution to
make a server appear more often and increase chances of being picked,
so stats are very important with consistent hashing.
In order to indicate the type of hashing, use :
hash-type map-based (default, old one)
hash-type consistent (new one)
Consistent hashing can make sense in a cache farm, in order not
to redistribute everyone when a cache changes state. It could also
probably be used for long sessions such as terminal sessions, though
that has not be attempted yet.
More details on this method of hashing here :
http://www.spiteful.com/2008/03/17/programmers-toolbox-part-3-consistent-hashing/
2009-10-01 01:52:15 -04:00
} else {
curproxy - > lbprm . algo | = BE_LB_LKUP_MAP ;
init_server_map ( curproxy ) ;
}
2009-10-03 06:21:20 -04:00
break ;
2024-03-28 12:24:53 -04:00
case BE_LB_KIND_SA :
if ( ( curproxy - > lbprm . algo & BE_LB_PARM ) = = BE_LB_SA_SS ) {
curproxy - > lbprm . algo | = BE_LB_PROP_DYN ;
init_server_ss ( curproxy ) ;
}
break ;
2009-10-03 06:21:20 -04:00
}
2020-10-17 12:48:47 -04:00
HA_RWLOCK_INIT ( & curproxy - > lbprm . lock ) ;
2006-06-25 20:48:02 -04:00
if ( curproxy - > options & PR_O_LOGASAP )
curproxy - > to_log & = ~ LW_BYTES ;
2021-08-13 09:21:12 -04:00
if ( ! ( curproxy - > cap & PR_CAP_INT ) & & ( curproxy - > mode = = PR_MODE_TCP | | curproxy - > mode = = PR_MODE_HTTP ) & &
MEDIUM: tree-wide: logsrv struct becomes logger
When 'log' directive was implemented, the internal representation was
named 'struct logsrv', because the 'log' directive would directly point
to the log target, which used to be a (UDP) log server exclusively at
that time, hence the name.
But things have become more complex, since today 'log' directive can point
to ring targets (implicit, or named) for example.
Indeed, a 'log' directive does no longer reference the "final" server to
which the log will be sent, but instead it describes which log API and
parameters to use for transporting the log messages to the proper log
destination.
So now the term 'logsrv' is rather confusing and prevents us from
introducing a new level of abstraction because they would be mixed
with logsrv.
So in order to better designate this 'log' directive, and make it more
generic, we chose the word 'logger' which now replaces logsrv everywhere
it was used in the code (including related comments).
This is internal rewording, so no functional change should be expected
on user-side.
2023-09-11 09:06:53 -04:00
( curproxy - > cap & PR_CAP_FE ) & & LIST_ISEMPTY ( & curproxy - > loggers ) & &
2024-02-23 09:57:21 -05:00
( ! lf_expr_isempty ( & curproxy - > logformat ) | | ! lf_expr_isempty ( & curproxy - > logformat_sd ) ) ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " log format ignored for %s '%s' since it has no log address. \n " ,
2017-11-24 10:50:31 -05:00
proxy_type_str ( curproxy ) , curproxy - > id ) ;
2009-08-09 04:11:45 -04:00
err_code | = ERR_WARN ;
}
2021-03-15 10:10:38 -04:00
if ( curproxy - > mode ! = PR_MODE_HTTP & & ! ( curproxy - > options & PR_O_HTTP_UPG ) ) {
2010-03-25 02:22:56 -04:00
int optnum ;
if ( curproxy - > uri_auth ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " 'stats' statement ignored for %s '%s' as it requires HTTP mode. \n " ,
2017-11-24 10:50:31 -05:00
proxy_type_str ( curproxy ) , curproxy - > id ) ;
2010-03-25 02:22:56 -04:00
err_code | = ERR_WARN ;
2024-11-13 13:54:32 -05:00
stats_uri_auth_drop ( curproxy - > uri_auth ) ;
2010-03-25 02:22:56 -04:00
curproxy - > uri_auth = NULL ;
}
2017-03-10 05:49:21 -05:00
if ( curproxy - > capture_name ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " 'capture' statement ignored for %s '%s' as it requires HTTP mode. \n " ,
2017-11-24 10:50:31 -05:00
proxy_type_str ( curproxy ) , curproxy - > id ) ;
2017-03-10 05:49:21 -05:00
err_code | = ERR_WARN ;
}
2023-12-06 05:01:01 -05:00
if ( isttest ( curproxy - > monitor_uri ) ) {
ha_warning ( " 'monitor-uri' statement ignored for %s '%s' as it requires HTTP mode. \n " ,
proxy_type_str ( curproxy ) , curproxy - > id ) ;
err_code | = ERR_WARN ;
}
2017-03-10 05:49:21 -05:00
if ( ! LIST_ISEMPTY ( & curproxy - > http_req_rules ) ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " 'http-request' rules ignored for %s '%s' as they require HTTP mode. \n " ,
2017-11-24 10:50:31 -05:00
proxy_type_str ( curproxy ) , curproxy - > id ) ;
2017-03-10 05:49:21 -05:00
err_code | = ERR_WARN ;
}
if ( ! LIST_ISEMPTY ( & curproxy - > http_res_rules ) ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " 'http-response' rules ignored for %s '%s' as they require HTTP mode. \n " ,
2017-11-24 10:50:31 -05:00
proxy_type_str ( curproxy ) , curproxy - > id ) ;
2017-03-10 05:49:21 -05:00
err_code | = ERR_WARN ;
}
2021-03-15 10:09:21 -04:00
if ( ! LIST_ISEMPTY ( & curproxy - > http_after_res_rules ) ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " 'http-after-response' rules ignored for %s '%s' as they require HTTP mode. \n " ,
2021-03-15 10:09:21 -04:00
proxy_type_str ( curproxy ) , curproxy - > id ) ;
err_code | = ERR_WARN ;
}
2017-03-10 05:49:21 -05:00
if ( ! LIST_ISEMPTY ( & curproxy - > redirect_rules ) ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " 'redirect' rules ignored for %s '%s' as they require HTTP mode. \n " ,
2017-11-24 10:50:31 -05:00
proxy_type_str ( curproxy ) , curproxy - > id ) ;
2017-03-10 05:49:21 -05:00
err_code | = ERR_WARN ;
}
2010-03-25 02:22:56 -04:00
for ( optnum = 0 ; cfg_opts [ optnum ] . name ; optnum + + ) {
if ( cfg_opts [ optnum ] . mode = = PR_MODE_HTTP & &
( curproxy - > cap & cfg_opts [ optnum ] . cap ) & &
( curproxy - > options & cfg_opts [ optnum ] . val ) ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " 'option %s' ignored for %s '%s' as it requires HTTP mode. \n " ,
2017-11-24 10:50:31 -05:00
cfg_opts [ optnum ] . name , proxy_type_str ( curproxy ) , curproxy - > id ) ;
2010-03-25 02:22:56 -04:00
err_code | = ERR_WARN ;
curproxy - > options & = ~ cfg_opts [ optnum ] . val ;
}
}
for ( optnum = 0 ; cfg_opts2 [ optnum ] . name ; optnum + + ) {
if ( cfg_opts2 [ optnum ] . mode = = PR_MODE_HTTP & &
( curproxy - > cap & cfg_opts2 [ optnum ] . cap ) & &
( curproxy - > options2 & cfg_opts2 [ optnum ] . val ) ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " 'option %s' ignored for %s '%s' as it requires HTTP mode. \n " ,
2017-11-24 10:50:31 -05:00
cfg_opts2 [ optnum ] . name , proxy_type_str ( curproxy ) , curproxy - > id ) ;
2010-03-25 02:22:56 -04:00
err_code | = ERR_WARN ;
curproxy - > options2 & = ~ cfg_opts2 [ optnum ] . val ;
}
}
2009-09-07 05:51:47 -04:00
2015-08-20 13:35:14 -04:00
# if defined(CONFIG_HAP_TRANSPARENT)
2012-12-08 16:29:20 -05:00
if ( curproxy - > conn_src . bind_hdr_occ ) {
curproxy - > conn_src . bind_hdr_occ = 0 ;
2021-06-04 12:22:08 -04:00
ha_warning ( " %s '%s' : ignoring use of header %s as source IP in non-HTTP mode. \n " ,
2017-11-24 10:50:31 -05:00
proxy_type_str ( curproxy ) , curproxy - > id , curproxy - > conn_src . bind_hdr_name ) ;
2009-09-07 05:51:47 -04:00
err_code | = ERR_WARN ;
}
2010-03-30 14:13:29 -04:00
# endif
2010-03-25 02:22:56 -04:00
}
2008-02-14 14:25:24 -05:00
/*
* ensure that we ' re not cross - dressing a TCP server into HTTP .
*/
newsrv = curproxy - > srv ;
while ( newsrv ! = NULL ) {
2011-10-31 08:49:26 -04:00
if ( ( curproxy - > mode ! = PR_MODE_HTTP ) & & newsrv - > rdr_len ) {
2021-06-04 12:22:08 -04:00
ha_alert ( " %s '%s' : server cannot have cookie or redirect prefix in non-HTTP mode. \n " ,
2017-11-24 10:50:31 -05:00
proxy_type_str ( curproxy ) , curproxy - > id ) ;
2009-07-23 07:36:36 -04:00
cfgerr + + ;
2009-09-07 05:51:47 -04:00
}
2011-10-31 08:49:26 -04:00
if ( ( curproxy - > mode ! = PR_MODE_HTTP ) & & newsrv - > cklen ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " %s '%s' : ignoring cookie for server '%s' as HTTP mode is disabled. \n " ,
2017-11-24 10:50:31 -05:00
proxy_type_str ( curproxy ) , curproxy - > id , newsrv - > id ) ;
2011-10-31 08:49:26 -04:00
err_code | = ERR_WARN ;
2013-08-13 11:19:08 -04:00
}
2014-05-13 09:54:22 -04:00
if ( ( newsrv - > flags & SRV_F_MAPPORTS ) & & ( curproxy - > options2 & PR_O2_RDPC_PRST ) ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " %s '%s' : RDP cookie persistence will not work for server '%s' because it lacks an explicit port number. \n " ,
2017-11-24 10:50:31 -05:00
proxy_type_str ( curproxy ) , curproxy - > id , newsrv - > id ) ;
2013-08-13 11:19:08 -04:00
err_code | = ERR_WARN ;
2011-10-31 08:49:26 -04:00
}
2015-08-20 13:35:14 -04:00
# if defined(CONFIG_HAP_TRANSPARENT)
2012-12-08 16:29:20 -05:00
if ( curproxy - > mode ! = PR_MODE_HTTP & & newsrv - > conn_src . bind_hdr_occ ) {
newsrv - > conn_src . bind_hdr_occ = 0 ;
2021-06-04 12:22:08 -04:00
ha_warning ( " %s '%s' : server %s cannot use header %s as source IP in non-HTTP mode. \n " ,
2017-11-24 10:50:31 -05:00
proxy_type_str ( curproxy ) , curproxy - > id , newsrv - > id , newsrv - > conn_src . bind_hdr_name ) ;
2009-09-07 05:51:47 -04:00
err_code | = ERR_WARN ;
2008-02-14 14:25:24 -05:00
}
2010-03-30 14:13:29 -04:00
# endif
2017-01-06 06:21:38 -05:00
2018-04-28 01:18:15 -04:00
if ( ( curproxy - > mode ! = PR_MODE_HTTP ) & & ( curproxy - > options & PR_O_REUSE_MASK ) ! = PR_O_REUSE_NEVR )
curproxy - > options & = ~ PR_O_REUSE_MASK ;
2024-07-08 13:14:35 -04:00
if ( curproxy - > mode = = PR_MODE_SPOP )
curproxy - > options | = PR_O_REUSE_ALWS ;
2018-04-28 01:18:15 -04:00
2023-11-21 05:10:34 -05:00
if ( ( curproxy - > mode ! = PR_MODE_HTTP ) & & newsrv - > flags & SRV_F_RHTTP ) {
ha_alert ( " %s '%s' : server %s uses reverse HTTP addressing which can only be used with HTTP mode. \n " ,
2023-07-25 09:16:55 -04:00
proxy_type_str ( curproxy ) , curproxy - > id , newsrv - > id ) ;
cfgerr + + ;
err_code | = ERR_FATAL | ERR_ALERT ;
goto out ;
}
2008-02-14 14:25:24 -05:00
newsrv = newsrv - > next ;
}
MAJOR: filters: Add filters support
This patch adds the support of filters in HAProxy. The main idea is to have a
way to "easily" extend HAProxy by adding some "modules", called filters, that
will be able to change HAProxy behavior in a programmatic way.
To do so, many entry points have been added in the code to let filters hook up to
different steps of the processing. A filter must define a flt_ops structure
(see include/types/filters.h for details). This structure contains all available
callbacks that a filter can define:
struct flt_ops {
/*
* Callbacks to manage the filter lifecycle
*/
int (*init) (struct proxy *p);
void (*deinit)(struct proxy *p);
int (*check) (struct proxy *p);
/*
* Stream callbacks
*/
void (*stream_start) (struct stream *s);
void (*stream_accept) (struct stream *s);
void (*session_establish)(struct stream *s);
void (*stream_stop) (struct stream *s);
/*
* HTTP callbacks
*/
int (*http_start) (struct stream *s, struct http_msg *msg);
int (*http_start_body) (struct stream *s, struct http_msg *msg);
int (*http_start_chunk) (struct stream *s, struct http_msg *msg);
int (*http_data) (struct stream *s, struct http_msg *msg);
int (*http_last_chunk) (struct stream *s, struct http_msg *msg);
int (*http_end_chunk) (struct stream *s, struct http_msg *msg);
int (*http_chunk_trailers)(struct stream *s, struct http_msg *msg);
int (*http_end_body) (struct stream *s, struct http_msg *msg);
void (*http_end) (struct stream *s, struct http_msg *msg);
void (*http_reset) (struct stream *s, struct http_msg *msg);
int (*http_pre_process) (struct stream *s, struct http_msg *msg);
int (*http_post_process) (struct stream *s, struct http_msg *msg);
void (*http_reply) (struct stream *s, short status,
const struct chunk *msg);
};
To declare and use a filter, in the configuration, the "filter" keyword must be
used in a listener/frontend section:
frontend test
...
filter <FILTER-NAME> [OPTIONS...]
The filter referenced by the <FILTER-NAME> must declare a configuration parser
on its own name to fill flt_ops and filter_conf field in the proxy's
structure. An example will be provided later to make it perfectly clear.
For now, filters cannot be used in backend section. But this is only a matter of
time. Documentation will also be added later. This is the first commit of a long
list about filters.
It is possible to have several filters on the same listener/frontend. These
filters are stored in an array of at most MAX_FILTERS elements (defined in
include/types/filters.h). Again, this will be replaced later by a list of
filters.
The filter API has been highly refactored. Main changes are:
* Now, HA supports an infinite number of filters per proxy. To do so, filters
are stored in list.
* Because filters are stored in list, filters state has been moved from the
channel structure to the filter structure. This is cleaner because there is no
more info about filters in channel structure.
* It is possible to define filters on backends only. For such filters,
stream_start/stream_stop callbacks are not called. Of course, it is possible
to mix frontend and backend filters.
* Now, TCP streams are also filtered. All callbacks without the 'http_' prefix
are called for all kind of streams. In addition, 2 new callbacks were added to
filter data exchanged through a TCP stream:
- tcp_data: it is called when new data are available or when old unprocessed
data are still waiting.
- tcp_forward_data: it is called when some data can be consumed.
* New callbacks attached to channel were added:
- channel_start_analyze: it is called when a filter is ready to process data
exchanged through a channel. 2 new analyzers (a frontend and a backend)
are attached to channels to call this callback. For a frontend filter, it
is called before any other analyzer. For a backend filter, it is called
when a backend is attached to a stream. So some processing cannot be
filtered in that case.
- channel_analyze: it is called before each analyzer attached to a channel,
except analyzers responsible for data sending.
- channel_end_analyze: it is called when all other analyzers have finished
their processing. A new analyzers is attached to channels to call this
callback. For a TCP stream, this is always the last one called. For a HTTP
one, the callback is called when a request/response ends, so it is called
one time for each request/response.
* 'session_established' callback has been removed. Everything that is done in
this callback can be handled by 'channel_start_analyze' on the response
channel.
* 'http_pre_process' and 'http_post_process' callbacks have been replaced by
'channel_analyze'.
* 'http_start' callback has been replaced by 'http_headers'. This new one is
called just before headers sending and parsing of the body.
* 'http_end' callback has been replaced by 'channel_end_analyze'.
* It is possible to set a forwarder for TCP channels. It was already possible to
do it for HTTP ones.
* Forwarders can partially consumed forwardable data. For this reason a new
HTTP message state was added before HTTP_MSG_DONE : HTTP_MSG_ENDING.
Now all filters can define corresponding callbacks (http_forward_data
and tcp_forward_data). Each filter owns 2 offsets relative to buf->p, next and
forward, to track, respectively, input data already parsed but not forwarded yet
by the filter and parsed data considered as forwarded by the filter. At any time,
we have the guarantee that a filter cannot parse or forward more input than
previous ones. And, of course, it cannot forward more input than it has
parsed. 2 macros have been added to retrieve these offsets: FLT_NXT and FLT_FWD.
In addition, 2 functions have been added to change the 'next size' and the
'forward size' of a filter. When a filter parses input data, it can alter these
data, so the size of these data can vary. This action has an effect on all
previous filters that must be handled. To do so, the function
'filter_change_next_size' must be called, passing the size variation. In the
same spirit, if a filter alter forwarded data, it must call the function
'filter_change_forward_size'. 'filter_change_next_size' can be called in
'http_data' and 'tcp_data' callbacks and only these ones. And
'filter_change_forward_size' can be called in 'http_forward_data' and
'tcp_forward_data' callbacks and only these ones. The data changes are the
filter responsibility, but with some limitation. It must not change already
parsed/forwarded data or data that previous filters have not parsed/forwarded
yet.
Because filters can be used on backends, when the backend is set for a
stream, we add filters defined for this backend in the filter list of the
stream. But we must only do that when the backend and the frontend of the stream
are not the same. Else same filters are added a second time leading to undefined
behavior.
The HTTP compression code had to be moved.
So it simplifies http_response_forward_body function. To do so, the way the data
are forwarded has changed. Now, a filter (and only one) can forward data. In a
commit to come, this limitation will be removed to let all filters take part to
data forwarding. There are 2 new functions that filters should use to deal with
this feature:
* flt_set_http_data_forwarder: This function sets the filter (using its id)
that will forward data for the specified HTTP message. It is possible if it
was not already set by another filter _AND_ if no data was yet forwarded
(msg->msg_state <= HTTP_MSG_BODY). It returns -1 if an error occurs.
* flt_http_data_forwarder: This function returns the filter id that will
forward data for the specified HTTP message. If there is no forwarder set, it
returns -1.
When an HTTP data forwarder is set for the response, the HTTP compression is
disabled. Of course, this is not definitive.
2015-04-30 05:48:27 -04:00
/* Check filter configuration, if any */
cfgerr + = flt_check ( curproxy ) ;
2009-08-16 16:37:44 -04:00
if ( curproxy - > cap & PR_CAP_FE ) {
2012-10-04 02:47:34 -04:00
if ( ! curproxy - > accept )
curproxy - > accept = frontend_accept ;
2010-06-01 11:45:26 -04:00
2021-10-13 09:40:15 -04:00
if ( ! LIST_ISEMPTY ( & curproxy - > tcp_req . inspect_rules ) | |
( curproxy - > defpx & & ! LIST_ISEMPTY ( & curproxy - > defpx - > tcp_req . inspect_rules ) ) )
2010-08-03 08:02:05 -04:00
curproxy - > fe_req_ana | = AN_REQ_INSPECT_FE ;
2009-08-16 16:37:44 -04:00
2009-08-16 16:57:50 -04:00
if ( curproxy - > mode = = PR_MODE_HTTP ) {
2009-08-16 16:37:44 -04:00
curproxy - > fe_req_ana | = AN_REQ_WAIT_HTTP | AN_REQ_HTTP_PROCESS_FE ;
2009-10-18 16:53:08 -04:00
curproxy - > fe_rsp_ana | = AN_RES_WAIT_HTTP | AN_RES_HTTP_PROCESS_FE ;
2009-08-16 16:57:50 -04:00
}
2009-08-16 16:37:44 -04:00
2018-10-26 08:47:40 -04:00
if ( curproxy - > mode = = PR_MODE_CLI ) {
curproxy - > fe_req_ana | = AN_REQ_WAIT_CLI ;
curproxy - > fe_rsp_ana | = AN_RES_WAIT_CLI ;
}
2009-08-16 16:37:44 -04:00
/* both TCP and HTTP must check switching rules */
curproxy - > fe_req_ana | = AN_REQ_SWITCHING_RULES ;
MAJOR: filters: Add filters support
This patch adds the support of filters in HAProxy. The main idea is to have a
way to "easely" extend HAProxy by adding some "modules", called filters, that
will be able to change HAProxy behavior in a programmatic way.
To do so, many entry points has been added in code to let filters to hook up to
different steps of the processing. A filter must define a flt_ops sutrctures
(see include/types/filters.h for details). This structure contains all available
callbacks that a filter can define:
struct flt_ops {
/*
* Callbacks to manage the filter lifecycle
*/
int (*init) (struct proxy *p);
void (*deinit)(struct proxy *p);
int (*check) (struct proxy *p);
/*
* Stream callbacks
*/
void (*stream_start) (struct stream *s);
void (*stream_accept) (struct stream *s);
void (*session_establish)(struct stream *s);
void (*stream_stop) (struct stream *s);
/*
* HTTP callbacks
*/
int (*http_start) (struct stream *s, struct http_msg *msg);
int (*http_start_body) (struct stream *s, struct http_msg *msg);
int (*http_start_chunk) (struct stream *s, struct http_msg *msg);
int (*http_data) (struct stream *s, struct http_msg *msg);
int (*http_last_chunk) (struct stream *s, struct http_msg *msg);
int (*http_end_chunk) (struct stream *s, struct http_msg *msg);
int (*http_chunk_trailers)(struct stream *s, struct http_msg *msg);
int (*http_end_body) (struct stream *s, struct http_msg *msg);
void (*http_end) (struct stream *s, struct http_msg *msg);
void (*http_reset) (struct stream *s, struct http_msg *msg);
int (*http_pre_process) (struct stream *s, struct http_msg *msg);
int (*http_post_process) (struct stream *s, struct http_msg *msg);
void (*http_reply) (struct stream *s, short status,
const struct chunk *msg);
};
To declare and use a filter, in the configuration, the "filter" keyword must be
used in a listener/frontend section:
frontend test
...
filter <FILTER-NAME> [OPTIONS...]
The filter referenced by the <FILTER-NAME> must declare a configuration parser
on its own name to fill flt_ops and filter_conf field in the proxy's
structure. An example will be provided later to make it perfectly clear.
For now, filters cannot be used in backend section. But this is only a matter of
time. Documentation will also be added later. This is the first commit of a long
list about filters.
It is possible to have several filters on the same listener/frontend. These
filters are stored in an array of at most MAX_FILTERS elements (define in
include/types/filters.h). Again, this will be replaced later by a list of
filters.
The filter API has been highly refactored. Main changes are:
* Now, HA supports an infinite number of filters per proxy. To do so, filters
are stored in list.
* Because filters are stored in list, filters state has been moved from the
channel structure to the filter structure. This is cleaner because there is no
more info about filters in channel structure.
* It is possible to defined filters on backends only. For such filters,
stream_start/stream_stop callbacks are not called. Of course, it is possible
to mix frontend and backend filters.
* Now, TCP streams are also filtered. All callbacks without the 'http_' prefix
are called for all kind of streams. In addition, 2 new callbacks were added to
filter data exchanged through a TCP stream:
- tcp_data: it is called when new data are available or when old unprocessed
data are still waiting.
- tcp_forward_data: it is called when some data can be consumed.
* New callbacks attached to channel were added:
- channel_start_analyze: it is called when a filter is ready to process data
exchanged through a channel. 2 new analyzers (a frontend and a backend)
are attached to channels to call this callback. For a frontend filter, it
is called before any other analyzer. For a backend filter, it is called
when a backend is attached to a stream. So some processing cannot be
filtered in that case.
- channel_analyze: it is called before each analyzer attached to a channel,
expects analyzers responsible for data sending.
- channel_end_analyze: it is called when all other analyzers have finished
their processing. A new analyzers is attached to channels to call this
callback. For a TCP stream, this is always the last one called. For a HTTP
one, the callback is called when a request/response ends, so it is called
one time for each request/response.
* 'session_established' callback has been removed. Everything that is done in
this callback can be handled by 'channel_start_analyze' on the response
channel.
* 'http_pre_process' and 'http_post_process' callbacks have been replaced by
'channel_analyze'.
* 'http_start' callback has been replaced by 'http_headers'. This new one is
called just before headers sending and parsing of the body.
* 'http_end' callback has been replaced by 'channel_end_analyze'.
* It is possible to set a forwarder for TCP channels. It was already possible to
do it for HTTP ones.
* Forwarders can partially consumed forwardable data. For this reason a new
HTTP message state was added before HTTP_MSG_DONE : HTTP_MSG_ENDING.
Now all filters can define corresponding callbacks (http_forward_data
and tcp_forward_data). Each filter owns 2 offsets relative to buf->p, next and
forward, to track, respectively, input data already parsed but not forwarded yet
by the filter and parsed data considered as forwarded by the filter. At any time,
we have the guarantee that a filter cannot parse or forward more input than
previous ones. And, of course, it cannot forward more input than it has
parsed. 2 macros have been added to retrieve these offsets: FLT_NXT and FLT_FWD.
In addition, 2 functions have been added to change the 'next size' and the
'forward size' of a filter. When a filter parses input data, it can alter these
data, so the size of these data can vary. This action has an effect on all
previous filters that must be handled. To do so, the function
'filter_change_next_size' must be called, passing the size variation. In the
same spirit, if a filter alter forwarded data, it must call the function
'filter_change_forward_size'. 'filter_change_next_size' can be called in
'http_data' and 'tcp_data' callbacks and only these ones. And
'filter_change_forward_size' can be called in 'http_forward_data' and
'tcp_forward_data' callbacks and only these ones. The data changes are the
filter responsibility, but with some limitation. It must not change already
parsed/forwarded data or data that previous filters have not parsed/forwarded
yet.
Because filters can be used on backends, when the backend is set for a
stream, we add filters defined for this backend in the filter list of the
stream. But we must only do that when the backend and the frontend of the stream
are not the same. Else same filters are added a second time leading to undefined
behavior.
The HTTP compression code had to be moved.
So it simplifies http_response_forward_body function. To do so, the way the data
are forwarded has changed. Now, a filter (and only one) can forward data. In a
commit to come, this limitation will be removed to let all filters take part to
data forwarding. There are 2 new functions that filters should use to deal with
this feature:
* flt_set_http_data_forwarder: This function sets the filter (using its id)
that will forward data for the specified HTTP message. It is possible if it
was not already set by another filter _AND_ if no data was yet forwarded
(msg->msg_state <= HTTP_MSG_BODY). It returns -1 if an error occurs.
* flt_http_data_forwarder: This function returns the filter id that will
forward data for the specified HTTP message. If there is no forwarder set, it
returns -1.
When an HTTP data forwarder is set for the response, the HTTP compression is
disabled. Of course, this is not definitive.
2015-04-30 05:48:27 -04:00
/* Add filters analyzers if needed */
2016-02-04 07:40:26 -05:00
if ( ! LIST_ISEMPTY ( & curproxy - > filter_configs ) ) {
BUG/MAJOR: channel: Fix the definition order of channel analyzers
It is important to define analyzers (AN_REQ_* and AN_RES_*) in the same order
they are evaluated in process_stream. This order is really important because
during analyzers evaluation, we run them in the order of the lower bit to the
higher one. This way, when an analyzer adds/removes another one during its
evaluation, we know if it is located before or after it. So, when it adds an
analyzer which is located before it, we can switch to it immediately, even if it
has already been called once but removed since.
With the time, and introduction of new analyzers, this order was broken up. the
main problems come from the filter analyzers. We used values not related with
their evaluation order. Furthermore, we used same values for request and response
analyzers.
So, to fix the bug, filter analyzers have been split in 2 distinct lists to
have different analyzers for the request channel than those for the response
channel. And of course, we have moved them to the right place.
Some other analyzers have been reordered to respect the evaluation order:
* AN_REQ_HTTP_TARPIT has been moved just before AN_REQ_SRV_RULES
* AN_REQ_PRST_RDP_COOKIE has been moved just before AN_REQ_STICKING_RULES
* AN_RES_STORE_RULES has been moved just after AN_RES_WAIT_HTTP
Note today we have 29 analyzers, all stored into a 32 bits bitfield. So we can
still add 4 more analyzers before having a problem. A good way to fend off the
problem for a while could be to have a different bitfield for request and
response analyzers.
[wt: all of this must be backported to 1.7, and part of it must be backported
to 1.6 and 1.5]
2017-01-05 08:06:34 -05:00
curproxy - > fe_req_ana | = AN_REQ_FLT_START_FE | AN_REQ_FLT_XFER_DATA | AN_REQ_FLT_END ;
curproxy - > fe_rsp_ana | = AN_RES_FLT_START_FE | AN_RES_FLT_XFER_DATA | AN_RES_FLT_END ;
MAJOR: filters: Add filters support
This patch adds the support of filters in HAProxy. The main idea is to have a
way to "easily" extend HAProxy by adding some "modules", called filters, that
will be able to change HAProxy behavior in a programmatic way.
To do so, many entry points have been added in code to let filters hook up to
different steps of the processing. A filter must define a flt_ops structure
(see include/types/filters.h for details). This structure contains all available
callbacks that a filter can define:
struct flt_ops {
/*
* Callbacks to manage the filter lifecycle
*/
int (*init) (struct proxy *p);
void (*deinit)(struct proxy *p);
int (*check) (struct proxy *p);
/*
* Stream callbacks
*/
void (*stream_start) (struct stream *s);
void (*stream_accept) (struct stream *s);
void (*session_establish)(struct stream *s);
void (*stream_stop) (struct stream *s);
/*
* HTTP callbacks
*/
int (*http_start) (struct stream *s, struct http_msg *msg);
int (*http_start_body) (struct stream *s, struct http_msg *msg);
int (*http_start_chunk) (struct stream *s, struct http_msg *msg);
int (*http_data) (struct stream *s, struct http_msg *msg);
int (*http_last_chunk) (struct stream *s, struct http_msg *msg);
int (*http_end_chunk) (struct stream *s, struct http_msg *msg);
int (*http_chunk_trailers)(struct stream *s, struct http_msg *msg);
int (*http_end_body) (struct stream *s, struct http_msg *msg);
void (*http_end) (struct stream *s, struct http_msg *msg);
void (*http_reset) (struct stream *s, struct http_msg *msg);
int (*http_pre_process) (struct stream *s, struct http_msg *msg);
int (*http_post_process) (struct stream *s, struct http_msg *msg);
void (*http_reply) (struct stream *s, short status,
const struct chunk *msg);
};
To declare and use a filter, in the configuration, the "filter" keyword must be
used in a listener/frontend section:
frontend test
...
filter <FILTER-NAME> [OPTIONS...]
The filter referenced by the <FILTER-NAME> must declare a configuration parser
on its own name to fill flt_ops and filter_conf field in the proxy's
structure. An example will be provided later to make it perfectly clear.
For now, filters cannot be used in backend section. But this is only a matter of
time. Documentation will also be added later. This is the first commit of a long
list about filters.
It is possible to have several filters on the same listener/frontend. These
filters are stored in an array of at most MAX_FILTERS elements (define in
include/types/filters.h). Again, this will be replaced later by a list of
filters.
The filter API has been highly refactored. Main changes are:
* Now, HA supports an infinite number of filters per proxy. To do so, filters
are stored in list.
* Because filters are stored in list, filters state has been moved from the
channel structure to the filter structure. This is cleaner because there is no
more info about filters in channel structure.
* It is possible to defined filters on backends only. For such filters,
stream_start/stream_stop callbacks are not called. Of course, it is possible
to mix frontend and backend filters.
* Now, TCP streams are also filtered. All callbacks without the 'http_' prefix
are called for all kind of streams. In addition, 2 new callbacks were added to
filter data exchanged through a TCP stream:
- tcp_data: it is called when new data are available or when old unprocessed
data are still waiting.
- tcp_forward_data: it is called when some data can be consumed.
* New callbacks attached to channel were added:
- channel_start_analyze: it is called when a filter is ready to process data
exchanged through a channel. 2 new analyzers (a frontend and a backend)
are attached to channels to call this callback. For a frontend filter, it
is called before any other analyzer. For a backend filter, it is called
when a backend is attached to a stream. So some processing cannot be
filtered in that case.
- channel_analyze: it is called before each analyzer attached to a channel,
expects analyzers responsible for data sending.
- channel_end_analyze: it is called when all other analyzers have finished
their processing. A new analyzers is attached to channels to call this
callback. For a TCP stream, this is always the last one called. For a HTTP
one, the callback is called when a request/response ends, so it is called
one time for each request/response.
* 'session_established' callback has been removed. Everything that is done in
this callback can be handled by 'channel_start_analyze' on the response
channel.
* 'http_pre_process' and 'http_post_process' callbacks have been replaced by
'channel_analyze'.
* 'http_start' callback has been replaced by 'http_headers'. This new one is
called just before headers sending and parsing of the body.
* 'http_end' callback has been replaced by 'channel_end_analyze'.
* It is possible to set a forwarder for TCP channels. It was already possible to
do it for HTTP ones.
* Forwarders can partially consumed forwardable data. For this reason a new
HTTP message state was added before HTTP_MSG_DONE : HTTP_MSG_ENDING.
Now all filters can define corresponding callbacks (http_forward_data
and tcp_forward_data). Each filter owns 2 offsets relative to buf->p, next and
forward, to track, respectively, input data already parsed but not forwarded yet
by the filter and parsed data considered as forwarded by the filter. At any time,
we have the guarantee that a filter cannot parse or forward more input than
previous ones. And, of course, it cannot forward more input than it has
parsed. 2 macros have been added to retrieve these offsets: FLT_NXT and FLT_FWD.
In addition, 2 functions have been added to change the 'next size' and the
'forward size' of a filter. When a filter parses input data, it can alter these
data, so the size of these data can vary. This action has an effect on all
previous filters that must be handled. To do so, the function
'filter_change_next_size' must be called, passing the size variation. In the
same spirit, if a filter alter forwarded data, it must call the function
'filter_change_forward_size'. 'filter_change_next_size' can be called in
'http_data' and 'tcp_data' callbacks and only these ones. And
'filter_change_forward_size' can be called in 'http_forward_data' and
'tcp_forward_data' callbacks and only these ones. The data changes are the
filter responsibility, but with some limitation. It must not change already
parsed/forwarded data or data that previous filters have not parsed/forwarded
yet.
Because filters can be used on backends, when the backend is set for a
stream, we add filters defined for this backend in the filter list of the
stream. But we must only do that when the backend and the frontend of the stream
are not the same. Else same filters are added a second time leading to undefined
behavior.
The HTTP compression code had to be moved.
So it simplifies http_response_forward_body function. To do so, the way the data
are forwarded has changed. Now, a filter (and only one) can forward data. In a
commit to come, this limitation will be removed to let all filters take part to
data forwarding. There are 2 new functions that filters should use to deal with
this feature:
* flt_set_http_data_forwarder: This function sets the filter (using its id)
that will forward data for the specified HTTP message. It is possible if it
was not already set by another filter _AND_ if no data was yet forwarded
(msg->msg_state <= HTTP_MSG_BODY). It returns -1 if an error occurs.
* flt_http_data_forwarder: This function returns the filter id that will
forward data for the specified HTTP message. If there is no forwarder set, it
returns -1.
When an HTTP data forwarder is set for the response, the HTTP compression is
disabled. Of course, this is not definitive.
2015-04-30 05:48:27 -04:00
}
2009-08-16 16:37:44 -04:00
}
if ( curproxy - > cap & PR_CAP_BE ) {
2021-10-13 09:40:15 -04:00
if ( ! LIST_ISEMPTY ( & curproxy - > tcp_req . inspect_rules ) | |
( curproxy - > defpx & & ! LIST_ISEMPTY ( & curproxy - > defpx - > tcp_req . inspect_rules ) ) )
2010-08-03 08:02:05 -04:00
curproxy - > be_req_ana | = AN_REQ_INSPECT_BE ;
2021-10-13 09:40:15 -04:00
if ( ! LIST_ISEMPTY ( & curproxy - > tcp_rep . inspect_rules ) | |
( curproxy - > defpx & & ! LIST_ISEMPTY ( & curproxy - > defpx - > tcp_rep . inspect_rules ) ) )
2010-09-23 11:56:44 -04:00
curproxy - > be_rsp_ana | = AN_RES_INSPECT ;
2009-08-16 16:57:50 -04:00
if ( curproxy - > mode = = PR_MODE_HTTP ) {
2009-08-16 16:37:44 -04:00
curproxy - > be_req_ana | = AN_REQ_WAIT_HTTP | AN_REQ_HTTP_INNER | AN_REQ_HTTP_PROCESS_BE ;
2009-10-18 16:53:08 -04:00
curproxy - > be_rsp_ana | = AN_RES_WAIT_HTTP | AN_RES_HTTP_PROCESS_BE ;
2009-08-16 16:57:50 -04:00
}
2009-08-16 16:37:44 -04:00
/* If the backend does requires RDP cookie persistence, we have to
* enable the corresponding analyser .
*/
if ( curproxy - > options2 & PR_O2_RDPC_PRST )
curproxy - > be_req_ana | = AN_REQ_PRST_RDP_COOKIE ;
MAJOR: filters: Add filters support
This patch adds the support of filters in HAProxy. The main idea is to have a
way to "easily" extend HAProxy by adding some "modules", called filters, that
will be able to change HAProxy behavior in a programmatic way.
To do so, many entry points have been added in code to let filters hook up to
different steps of the processing. A filter must define a flt_ops structure
(see include/types/filters.h for details). This structure contains all available
callbacks that a filter can define:
struct flt_ops {
/*
* Callbacks to manage the filter lifecycle
*/
int (*init) (struct proxy *p);
void (*deinit)(struct proxy *p);
int (*check) (struct proxy *p);
/*
* Stream callbacks
*/
void (*stream_start) (struct stream *s);
void (*stream_accept) (struct stream *s);
void (*session_establish)(struct stream *s);
void (*stream_stop) (struct stream *s);
/*
* HTTP callbacks
*/
int (*http_start) (struct stream *s, struct http_msg *msg);
int (*http_start_body) (struct stream *s, struct http_msg *msg);
int (*http_start_chunk) (struct stream *s, struct http_msg *msg);
int (*http_data) (struct stream *s, struct http_msg *msg);
int (*http_last_chunk) (struct stream *s, struct http_msg *msg);
int (*http_end_chunk) (struct stream *s, struct http_msg *msg);
int (*http_chunk_trailers)(struct stream *s, struct http_msg *msg);
int (*http_end_body) (struct stream *s, struct http_msg *msg);
void (*http_end) (struct stream *s, struct http_msg *msg);
void (*http_reset) (struct stream *s, struct http_msg *msg);
int (*http_pre_process) (struct stream *s, struct http_msg *msg);
int (*http_post_process) (struct stream *s, struct http_msg *msg);
void (*http_reply) (struct stream *s, short status,
const struct chunk *msg);
};
To declare and use a filter, in the configuration, the "filter" keyword must be
used in a listener/frontend section:
frontend test
...
filter <FILTER-NAME> [OPTIONS...]
The filter referenced by the <FILTER-NAME> must declare a configuration parser
on its own name to fill flt_ops and filter_conf field in the proxy's
structure. An example will be provided later to make it perfectly clear.
For now, filters cannot be used in backend section. But this is only a matter of
time. Documentation will also be added later. This is the first commit of a long
list about filters.
It is possible to have several filters on the same listener/frontend. These
filters are stored in an array of at most MAX_FILTERS elements (define in
include/types/filters.h). Again, this will be replaced later by a list of
filters.
The filter API has been highly refactored. Main changes are:
* Now, HA supports an infinite number of filters per proxy. To do so, filters
are stored in list.
* Because filters are stored in list, filters state has been moved from the
channel structure to the filter structure. This is cleaner because there is no
more info about filters in channel structure.
* It is possible to defined filters on backends only. For such filters,
stream_start/stream_stop callbacks are not called. Of course, it is possible
to mix frontend and backend filters.
* Now, TCP streams are also filtered. All callbacks without the 'http_' prefix
are called for all kind of streams. In addition, 2 new callbacks were added to
filter data exchanged through a TCP stream:
- tcp_data: it is called when new data are available or when old unprocessed
data are still waiting.
- tcp_forward_data: it is called when some data can be consumed.
* New callbacks attached to channel were added:
- channel_start_analyze: it is called when a filter is ready to process data
exchanged through a channel. 2 new analyzers (a frontend and a backend)
are attached to channels to call this callback. For a frontend filter, it
is called before any other analyzer. For a backend filter, it is called
when a backend is attached to a stream. So some processing cannot be
filtered in that case.
- channel_analyze: it is called before each analyzer attached to a channel,
expects analyzers responsible for data sending.
- channel_end_analyze: it is called when all other analyzers have finished
their processing. A new analyzers is attached to channels to call this
callback. For a TCP stream, this is always the last one called. For a HTTP
one, the callback is called when a request/response ends, so it is called
one time for each request/response.
* 'session_established' callback has been removed. Everything that is done in
this callback can be handled by 'channel_start_analyze' on the response
channel.
* 'http_pre_process' and 'http_post_process' callbacks have been replaced by
'channel_analyze'.
* 'http_start' callback has been replaced by 'http_headers'. This new one is
called just before headers sending and parsing of the body.
* 'http_end' callback has been replaced by 'channel_end_analyze'.
* It is possible to set a forwarder for TCP channels. It was already possible to
do it for HTTP ones.
* Forwarders can partially consumed forwardable data. For this reason a new
HTTP message state was added before HTTP_MSG_DONE : HTTP_MSG_ENDING.
Now all filters can define corresponding callbacks (http_forward_data
and tcp_forward_data). Each filter owns 2 offsets relative to buf->p, next and
forward, to track, respectively, input data already parsed but not forwarded yet
by the filter and parsed data considered as forwarded by the filter. At any time,
we have the guarantee that a filter cannot parse or forward more input than
previous ones. And, of course, it cannot forward more input than it has
parsed. 2 macros have been added to retrieve these offsets: FLT_NXT and FLT_FWD.
In addition, 2 functions have been added to change the 'next size' and the
'forward size' of a filter. When a filter parses input data, it can alter these
data, so the size of these data can vary. This action has an effect on all
previous filters that must be handled. To do so, the function
'filter_change_next_size' must be called, passing the size variation. In the
same spirit, if a filter alter forwarded data, it must call the function
'filter_change_forward_size'. 'filter_change_next_size' can be called in
'http_data' and 'tcp_data' callbacks and only these ones. And
'filter_change_forward_size' can be called in 'http_forward_data' and
'tcp_forward_data' callbacks and only these ones. The data changes are the
filter responsibility, but with some limitation. It must not change already
parsed/forwarded data or data that previous filters have not parsed/forwarded
yet.
Because filters can be used on backends, when the backend is set for a
stream, we add filters defined for this backend in the filter list of the
stream. But we must only do that when the backend and the frontend of the stream
are not the same. Else same filters are added a second time leading to undefined
behavior.
The HTTP compression code had to be moved.
So it simplifies http_response_forward_body function. To do so, the way the data
are forwarded has changed. Now, a filter (and only one) can forward data. In a
commit to come, this limitation will be removed to let all filters take part to
data forwarding. There are 2 new functions that filters should use to deal with
this feature:
* flt_set_http_data_forwarder: This function sets the filter (using its id)
that will forward data for the specified HTTP message. It is possible if it
was not already set by another filter _AND_ if no data was yet forwarded
(msg->msg_state <= HTTP_MSG_BODY). It returns -1 if an error occurs.
* flt_http_data_forwarder: This function returns the filter id that will
forward data for the specified HTTP message. If there is no forwarder set, it
returns -1.
When an HTTP data forwarder is set for the response, the HTTP compression is
disabled. Of course, this is not definitive.
2015-04-30 05:48:27 -04:00
/* Add filters analyzers if needed */
2016-02-04 07:40:26 -05:00
if ( ! LIST_ISEMPTY ( & curproxy - > filter_configs ) ) {
BUG/MAJOR: channel: Fix the definition order of channel analyzers
It is important to define analyzers (AN_REQ_* and AN_RES_*) in the same order
they are evaluated in process_stream. This order is really important because
during analyzers evaluation, we run them in the order of the lower bit to the
higher one. This way, when an analyzer adds/removes another one during its
evaluation, we know if it is located before or after it. So, when it adds an
analyzer which is located before it, we can switch to it immediately, even if it
has already been called once but removed since.
With the time, and introduction of new analyzers, this order was broken up. the
main problems come from the filter analyzers. We used values not related with
their evaluation order. Furthermore, we used same values for request and response
analyzers.
So, to fix the bug, filter analyzers have been split in 2 distinct lists to
have different analyzers for the request channel than those for the response
channel. And of course, we have moved them to the right place.
Some other analyzers have been reordered to respect the evaluation order:
* AN_REQ_HTTP_TARPIT has been moved just before AN_REQ_SRV_RULES
* AN_REQ_PRST_RDP_COOKIE has been moved just before AN_REQ_STICKING_RULES
* AN_RES_STORE_RULES has been moved just after AN_RES_WAIT_HTTP
Note today we have 29 analyzers, all stored into a 32 bits bitfield. So we can
still add 4 more analyzers before having a problem. A good way to fend off the
problem for a while could be to have a different bitfield for request and
response analyzers.
[wt: all of this must be backported to 1.7, and part of it must be backported
to 1.6 and 1.5]
2017-01-05 08:06:34 -05:00
curproxy - > be_req_ana | = AN_REQ_FLT_START_BE | AN_REQ_FLT_XFER_DATA | AN_REQ_FLT_END ;
curproxy - > be_rsp_ana | = AN_RES_FLT_START_BE | AN_RES_FLT_XFER_DATA | AN_RES_FLT_END ;
MAJOR: filters: Add filters support
This patch adds the support of filters in HAProxy. The main idea is to have a
way to "easily" extend HAProxy by adding some "modules", called filters, that
will be able to change HAProxy behavior in a programmatic way.
To do so, many entry points have been added in code to let filters hook up to
different steps of the processing. A filter must define a flt_ops structure
(see include/types/filters.h for details). This structure contains all available
callbacks that a filter can define:
struct flt_ops {
/*
* Callbacks to manage the filter lifecycle
*/
int (*init) (struct proxy *p);
void (*deinit)(struct proxy *p);
int (*check) (struct proxy *p);
/*
* Stream callbacks
*/
void (*stream_start) (struct stream *s);
void (*stream_accept) (struct stream *s);
void (*session_establish)(struct stream *s);
void (*stream_stop) (struct stream *s);
/*
* HTTP callbacks
*/
int (*http_start) (struct stream *s, struct http_msg *msg);
int (*http_start_body) (struct stream *s, struct http_msg *msg);
int (*http_start_chunk) (struct stream *s, struct http_msg *msg);
int (*http_data) (struct stream *s, struct http_msg *msg);
int (*http_last_chunk) (struct stream *s, struct http_msg *msg);
int (*http_end_chunk) (struct stream *s, struct http_msg *msg);
int (*http_chunk_trailers)(struct stream *s, struct http_msg *msg);
int (*http_end_body) (struct stream *s, struct http_msg *msg);
void (*http_end) (struct stream *s, struct http_msg *msg);
void (*http_reset) (struct stream *s, struct http_msg *msg);
int (*http_pre_process) (struct stream *s, struct http_msg *msg);
int (*http_post_process) (struct stream *s, struct http_msg *msg);
void (*http_reply) (struct stream *s, short status,
const struct chunk *msg);
};
To declare and use a filter, in the configuration, the "filter" keyword must be
used in a listener/frontend section:
frontend test
...
filter <FILTER-NAME> [OPTIONS...]
The filter referenced by the <FILTER-NAME> must declare a configuration parser
on its own name to fill flt_ops and filter_conf field in the proxy's
structure. An example will be provided later to make it perfectly clear.
For now, filters cannot be used in backend section. But this is only a matter of
time. Documentation will also be added later. This is the first commit of a long
list about filters.
It is possible to have several filters on the same listener/frontend. These
filters are stored in an array of at most MAX_FILTERS elements (define in
include/types/filters.h). Again, this will be replaced later by a list of
filters.
The filter API has been highly refactored. Main changes are:
* Now, HA supports an infinite number of filters per proxy. To do so, filters
are stored in list.
* Because filters are stored in list, filters state has been moved from the
channel structure to the filter structure. This is cleaner because there is no
more info about filters in channel structure.
* It is possible to defined filters on backends only. For such filters,
stream_start/stream_stop callbacks are not called. Of course, it is possible
to mix frontend and backend filters.
* Now, TCP streams are also filtered. All callbacks without the 'http_' prefix
are called for all kind of streams. In addition, 2 new callbacks were added to
filter data exchanged through a TCP stream:
- tcp_data: it is called when new data are available or when old unprocessed
data are still waiting.
- tcp_forward_data: it is called when some data can be consumed.
* New callbacks attached to channel were added:
- channel_start_analyze: it is called when a filter is ready to process data
exchanged through a channel. 2 new analyzers (a frontend and a backend)
are attached to channels to call this callback. For a frontend filter, it
is called before any other analyzer. For a backend filter, it is called
when a backend is attached to a stream. So some processing cannot be
filtered in that case.
- channel_analyze: it is called before each analyzer attached to a channel,
expects analyzers responsible for data sending.
- channel_end_analyze: it is called when all other analyzers have finished
their processing. A new analyzers is attached to channels to call this
callback. For a TCP stream, this is always the last one called. For a HTTP
one, the callback is called when a request/response ends, so it is called
one time for each request/response.
* 'session_established' callback has been removed. Everything that is done in
this callback can be handled by 'channel_start_analyze' on the response
channel.
* 'http_pre_process' and 'http_post_process' callbacks have been replaced by
'channel_analyze'.
* 'http_start' callback has been replaced by 'http_headers'. This new one is
called just before headers sending and parsing of the body.
* 'http_end' callback has been replaced by 'channel_end_analyze'.
* It is possible to set a forwarder for TCP channels. It was already possible to
do it for HTTP ones.
* Forwarders can partially consumed forwardable data. For this reason a new
HTTP message state was added before HTTP_MSG_DONE : HTTP_MSG_ENDING.
Now all filters can define corresponding callbacks (http_forward_data
and tcp_forward_data). Each filter owns 2 offsets relative to buf->p, next and
forward, to track, respectively, input data already parsed but not forwarded yet
by the filter and parsed data considered as forwarded by the filter. At any time,
we have the guarantee that a filter cannot parse or forward more input than
previous ones. And, of course, it cannot forward more input than it has
parsed. 2 macros have been added to retrieve these offsets: FLT_NXT and FLT_FWD.
In addition, 2 functions have been added to change the 'next size' and the
'forward size' of a filter. When a filter parses input data, it can alter these
data, so the size of these data can vary. This action has an effect on all
previous filters that must be handled. To do so, the function
'filter_change_next_size' must be called, passing the size variation. In the
same spirit, if a filter alter forwarded data, it must call the function
'filter_change_forward_size'. 'filter_change_next_size' can be called in
'http_data' and 'tcp_data' callbacks and only these ones. And
'filter_change_forward_size' can be called in 'http_forward_data' and
'tcp_forward_data' callbacks and only these ones. The data changes are the
filter responsibility, but with some limitation. It must not change already
parsed/forwarded data or data that previous filters have not parsed/forwarded
yet.
Because filters can be used on backends, when the backend is set for a
stream, we add filters defined for this backend in the filter list of the
stream. But we must only do that when the backend and the frontend of the stream
are not the same. Else same filters are added a second time leading to undefined
behavior.
The HTTP compression code had to be moved.
So it simplifies http_response_forward_body function. To do so, the way the data
are forwarded has changed. Now, a filter (and only one) can forward data. In a
commit to come, this limitation will be removed to let all filters take part to
data forwarding. There are 2 new functions that filters should use to deal with
this feature:
* flt_set_http_data_forwarder: This function sets the filter (using its id)
that will forward data for the specified HTTP message. It is possible if it
was not already set by another filter _AND_ if no data was yet forwarded
(msg->msg_state <= HTTP_MSG_BODY). It returns -1 if an error occurs.
* flt_http_data_forwarder: This function returns the filter id that will
forward data for the specified HTTP message. If there is no forwarder set, it
returns -1.
When an HTTP data forwarder is set for the response, the HTTP compression is
disabled. Of course, this is not definitive.
2015-04-30 05:48:27 -04:00
}
2009-08-16 16:37:44 -04:00
}
2018-04-10 08:43:00 -04:00
2018-04-10 08:45:45 -04:00
/* Check the mux protocols, if any, for each listener and server
2018-04-10 08:43:00 -04:00
* attached to the current proxy */
list_for_each_entry ( bind_conf , & curproxy - > conf . bind , by_fe ) {
2023-10-19 10:06:03 -04:00
int mode = conn_pr_mode_to_proto_mode ( curproxy - > mode ) ;
2018-12-02 07:09:09 -05:00
const struct mux_proto_list * mux_ent ;
2018-04-10 08:43:00 -04:00
2024-02-29 08:27:45 -05:00
if ( bind_conf - > xprt & & bind_conf - > xprt = = xprt_get ( XPRT_QUIC ) ) {
if ( ! bind_conf - > mux_proto ) {
/* No protocol was specified. If we're using QUIC at the transport
* layer , we ' ll instantiate it as a mux as well . If QUIC is not
* compiled in , this will remain NULL .
*/
2022-05-20 12:07:06 -04:00
bind_conf - > mux_proto = get_mux_proto ( ist ( " quic " ) ) ;
2024-02-29 08:27:45 -05:00
}
if ( bind_conf - > options & BC_O_ACC_PROXY ) {
ha_alert ( " Binding [%s:%d] for %s %s: QUIC protocol does not support PROXY protocol yet. "
" 'accept-proxy' option cannot be used with a QUIC listener. \n " ,
bind_conf - > file , bind_conf - > line ,
proxy_type_str ( curproxy ) , curproxy - > id ) ;
cfgerr + + ;
}
2022-05-20 12:07:06 -04:00
}
2018-04-10 08:43:00 -04:00
if ( ! bind_conf - > mux_proto )
continue ;
2018-12-02 07:09:09 -05:00
/* it is possible that an incorrect mux was referenced
* due to the proxy ' s mode not being taken into account
* on first pass . Let ' s adjust it now .
*/
mux_ent = conn_get_best_mux_entry ( bind_conf - > mux_proto - > token , PROTO_SIDE_FE , mode ) ;
if ( ! mux_ent | | ! isteq ( mux_ent - > token , bind_conf - > mux_proto - > token ) ) {
2021-06-04 12:22:08 -04:00
ha_alert ( " %s '%s' : MUX protocol '%.*s' is not usable for 'bind %s' at [%s:%d]. \n " ,
2018-04-10 08:43:00 -04:00
proxy_type_str ( curproxy ) , curproxy - > id ,
( int ) bind_conf - > mux_proto - > token . len ,
bind_conf - > mux_proto - > token . ptr ,
bind_conf - > arg , bind_conf - > file , bind_conf - > line ) ;
cfgerr + + ;
2022-05-20 11:53:32 -04:00
} else {
if ( ( mux_ent - > mux - > flags & MX_FL_FRAMED ) & & ! ( bind_conf - > options & BC_O_USE_SOCK_DGRAM ) ) {
ha_alert ( " %s '%s' : frame-based MUX protocol '%.*s' is incompatible with stream transport of 'bind %s' at [%s:%d]. \n " ,
proxy_type_str ( curproxy ) , curproxy - > id ,
( int ) bind_conf - > mux_proto - > token . len ,
bind_conf - > mux_proto - > token . ptr ,
bind_conf - > arg , bind_conf - > file , bind_conf - > line ) ;
cfgerr + + ;
}
else if ( ! ( mux_ent - > mux - > flags & MX_FL_FRAMED ) & & ! ( bind_conf - > options & BC_O_USE_SOCK_STREAM ) ) {
ha_alert ( " %s '%s' : stream-based MUX protocol '%.*s' is incompatible with framed transport of 'bind %s' at [%s:%d]. \n " ,
proxy_type_str ( curproxy ) , curproxy - > id ,
( int ) bind_conf - > mux_proto - > token . len ,
bind_conf - > mux_proto - > token . ptr ,
bind_conf - > arg , bind_conf - > file , bind_conf - > line ) ;
cfgerr + + ;
}
2018-04-10 08:43:00 -04:00
}
2018-12-02 07:09:09 -05:00
/* update the mux */
bind_conf - > mux_proto = mux_ent ;
2018-04-10 08:43:00 -04:00
}
2018-04-10 08:45:45 -04:00
for ( newsrv = curproxy - > srv ; newsrv ; newsrv = newsrv - > next ) {
2023-10-19 10:06:03 -04:00
int mode = conn_pr_mode_to_proto_mode ( curproxy - > mode ) ;
2018-12-02 07:09:09 -05:00
const struct mux_proto_list * mux_ent ;
2018-04-10 08:45:45 -04:00
if ( ! newsrv - > mux_proto )
continue ;
2018-12-02 07:09:09 -05:00
/* it is possible that an incorrect mux was referenced
* due to the proxy ' s mode not being taken into account
* on first pass . Let ' s adjust it now .
*/
mux_ent = conn_get_best_mux_entry ( newsrv - > mux_proto - > token , PROTO_SIDE_BE , mode ) ;
if ( ! mux_ent | | ! isteq ( mux_ent - > token , newsrv - > mux_proto - > token ) ) {
2021-06-04 12:22:08 -04:00
ha_alert ( " %s '%s' : MUX protocol '%.*s' is not usable for server '%s' at [%s:%d]. \n " ,
2018-04-10 08:45:45 -04:00
proxy_type_str ( curproxy ) , curproxy - > id ,
( int ) newsrv - > mux_proto - > token . len ,
newsrv - > mux_proto - > token . ptr ,
newsrv - > id , newsrv - > conf . file , newsrv - > conf . line ) ;
cfgerr + + ;
}
2018-12-02 07:09:09 -05:00
/* update the mux */
newsrv - > mux_proto = mux_ent ;
2018-04-10 08:45:45 -04:00
}
2021-07-23 09:46:46 -04:00
/* Allocate default tcp-check rules for proxies without
* explicit rules .
*/
if ( curproxy - > cap & PR_CAP_BE ) {
if ( ! ( curproxy - > options2 & PR_O2_CHK_ANY ) ) {
struct tcpcheck_ruleset * rs = NULL ;
struct tcpcheck_rules * rules = & curproxy - > tcpcheck_rules ;
curproxy - > options2 | = PR_O2_TCPCHK_CHK ;
rs = find_tcpcheck_ruleset ( " *tcp-check " ) ;
if ( ! rs ) {
rs = create_tcpcheck_ruleset ( " *tcp-check " ) ;
if ( rs = = NULL ) {
ha_alert ( " config: %s '%s': out of memory. \n " ,
proxy_type_str ( curproxy ) , curproxy - > id ) ;
cfgerr + + ;
}
}
free_tcpcheck_vars ( & rules - > preset_vars ) ;
rules - > list = & rs - > rules ;
rules - > flags = 0 ;
}
}
2021-03-05 04:48:42 -05:00
}
2019-02-07 08:46:29 -05:00
2022-08-18 09:53:21 -04:00
/*
* We have just initialized the main proxies list
* we must also configure the log - forward proxies list
*/
if ( init_proxies_list = = proxies_list ) {
init_proxies_list = cfg_log_forward ;
2022-08-22 04:25:11 -04:00
/* check if list is not null to avoid infinite loop */
2022-09-13 10:16:30 -04:00
if ( init_proxies_list )
goto init_proxies_list_stage1 ;
}
if ( init_proxies_list = = cfg_log_forward ) {
init_proxies_list = sink_proxies_list ;
/* check if list is not null to avoid infinite loop */
2022-08-22 04:25:11 -04:00
if ( init_proxies_list )
goto init_proxies_list_stage1 ;
2022-08-18 09:53:21 -04:00
}
2021-03-05 04:48:42 -05:00
/***********************************************************/
/* At this point, target names have already been resolved. */
/***********************************************************/
2019-02-07 08:46:29 -05:00
2021-10-01 12:23:30 -04:00
idle_conn_task = task_new_anywhere ( ) ;
2021-03-08 11:31:39 -05:00
if ( ! idle_conn_task ) {
ha_alert ( " parsing : failed to allocate global idle connection task. \n " ) ;
cfgerr + + ;
}
else {
idle_conn_task - > process = srv_cleanup_idle_conns ;
idle_conn_task - > context = NULL ;
for ( i = 0 ; i < global . nbthread ; i + + ) {
2021-10-01 12:23:30 -04:00
idle_conns [ i ] . cleanup_task = task_new_on ( i ) ;
2021-03-08 11:31:39 -05:00
if ( ! idle_conns [ i ] . cleanup_task ) {
ha_alert ( " parsing : failed to allocate idle connection tasks for thread '%d'. \n " , i ) ;
cfgerr + + ;
break ;
2021-03-05 04:48:42 -05:00
}
2020-02-13 13:12:07 -05:00
2021-03-08 11:31:39 -05:00
idle_conns [ i ] . cleanup_task - > process = srv_cleanup_toremove_conns ;
idle_conns [ i ] . cleanup_task - > context = NULL ;
HA_SPIN_INIT ( & idle_conns [ i ] . idle_conns_lock ) ;
MT_LIST_INIT ( & idle_conns [ i ] . toremove_conns ) ;
2019-02-07 08:46:29 -05:00
}
2014-09-16 07:41:21 -04:00
}
/* perform the final checks before creating tasks */
2022-08-18 09:53:21 -04:00
/* starting to initialize the main proxies list */
init_proxies_list = proxies_list ;
init_proxies_list_stage2 :
for ( curproxy = init_proxies_list ; curproxy ; curproxy = curproxy - > next ) {
2014-09-16 07:41:21 -04:00
struct listener * listener ;
unsigned int next_id ;
2009-08-16 16:37:44 -04:00
2012-09-07 10:58:00 -04:00
/* Configure SSL for each bind line.
* Note : if configuration fails at some point , the - > ctx member
* remains NULL so that listeners can later detach .
*/
2012-09-13 11:54:29 -04:00
list_for_each_entry ( bind_conf , & curproxy - > conf . bind , by_fe ) {
2016-12-21 17:38:39 -05:00
if ( bind_conf - > xprt - > prepare_bind_conf & &
bind_conf - > xprt - > prepare_bind_conf ( bind_conf ) < 0 )
2012-09-07 10:58:00 -04:00
cfgerr + + ;
2023-01-12 12:39:42 -05:00
bind_conf - > analysers | = curproxy - > fe_req_ana ;
2023-01-12 12:52:23 -05:00
if ( ! bind_conf - > maxaccept )
bind_conf - > maxaccept = global . tune . maxaccept ? global . tune . maxaccept : MAX_ACCEPT ;
2023-01-12 13:10:17 -05:00
bind_conf - > accept = session_accept_fd ;
2023-01-12 13:37:07 -05:00
if ( curproxy - > options & PR_O_TCP_NOLING )
bind_conf - > options | = BC_O_NOLINGER ;
2023-01-12 13:40:42 -05:00
/* smart accept mode is automatic in HTTP mode */
if ( ( curproxy - > options2 & PR_O2_SMARTACC ) | |
( ( curproxy - > mode = = PR_MODE_HTTP | | ( bind_conf - > options & BC_O_USE_SSL ) ) & &
! ( curproxy - > no_options2 & PR_O2_SMARTACC ) ) )
bind_conf - > options | = BC_O_NOQUICKACK ;
2012-09-13 11:54:29 -04:00
}
2012-09-07 10:58:00 -04:00
2007-10-28 20:09:36 -04:00
/* adjust this proxy's listeners */
2023-08-17 04:15:09 -04:00
bind_conf = NULL ;
2009-10-04 17:04:08 -04:00
next_id = 1 ;
2012-09-20 10:48:07 -04:00
list_for_each_entry ( listener , & curproxy - > conf . listeners , by_fe ) {
2009-10-04 17:04:08 -04:00
if ( ! listener - > luid ) {
/* listener ID not set, use automatic numbering with first
* spare entry starting with next_luid .
*/
BUG/MINOR: listener: always assign distinct IDs to shards
When sharded listeners were introduced in 2.5 with commit 6dfbef4145
("MEDIUM: listener: add the "shards" bind keyword"), a point was
overlooked regarding how IDs are assigned to listeners: they are just
duplicated! This means that if a "option socket-stats" is set and a
shard is configured, or multiple thread groups are enabled, then a stats
dump will produce several lines with exactly the same socket name and ID.
This patch tries to address this by trying to assign consecutive numbers
to these sockets. The usual algo is maintained, but with a preference for
the next number in a shard. This will help users reserve ranges for each
socket, for example by using multiples of 100 or 1000 on each bind line,
leaving enough room for all shards to be assigned.
The mechanism however is quite tricky, because the configured listener
currently ends up being the last one of the shard. This helps insert them
before the current position without having to revisit them. But here it
causes a difficulty which is that we'd like to restart from the current
ID and assign new ones on top of it. What is done is that the number is
passed between shards and the current one is cleared (and removed from
the tree) so that we instead insert the new one. It's tricky because of
the situation which depends whether it's the listener that was already
assigned on the bind line or not. But overall, always removing the entry,
always adding the new one when the ID is not zero, and passing them from
the reference to the next one does the trick.
This may be backported to all versions till 2.6.
2024-04-09 02:41:06 -04:00
if ( listener - > by_fe . p ! = & curproxy - > conf . listeners ) {
struct listener * prev_li = LIST_PREV ( & listener - > by_fe , typeof ( prev_li ) , by_fe ) ;
if ( prev_li - > luid )
next_id = prev_li - > luid + 1 ;
}
2009-10-04 17:04:08 -04:00
next_id = get_next_id ( & curproxy - > conf . used_listener_id , next_id ) ;
listener - > conf . id . key = listener - > luid = next_id ;
eb32_insert ( & curproxy - > conf . used_listener_id , & listener - > conf . id ) ;
}
2010-02-05 14:58:27 -05:00
next_id + + ;
2009-10-04 17:04:08 -04:00
2009-10-04 09:43:17 -04:00
/* enable separate counters */
if ( curproxy - > options2 & PR_O2_SOCKSTAT ) {
CLEANUP: counters: move from 3 types to 2 types
We used to have 3 types of counters with a huge overlap :
- listener counters : stats collected for each bind line
- proxy counters : union of the frontend and backend counters
- server counters : stats collected per server
It happens that quite a good part was common between listeners and
proxies due to the frontend counters being updated at the two locations,
and that similarly the server and proxy counters were overlapping and
being updated together.
This patch cleans this up to propose only two types of counters :
- fe_counters: used by frontends and listeners, related to
incoming connections activity
- be_counters: used by backends and servers, related to outgoing
connections activity
This allowed to remove some non-sensical counters from both parts. For
frontends, the following entries were removed :
cum_lbconn, last_sess, nbpend_max, failed_conns, failed_resp,
retries, redispatches, q_time, c_time, d_time, t_time
For backends, this ones was removed : intercepted_req.
While doing this it was discovered that we used to incorrectly report
intercepted_req for backends in the HTML stats, which was always zero
since it's never updated.
Also it revealed a few inconsistencies (which were not fixed as they
are harmless). For example, backends count connections (cum_conn)
instead of sessions while servers count sessions and not connections.
Over the long term, some extra cleanups may be performed by having
some counters update functions touching both the server and backend
at the same time, as well as both the frontend and listener, to
ensure that all sides have all their stats properly filled. The stats
dump will also be able to factor the dump functions by counter types.
2016-11-25 08:44:52 -05:00
listener - > counters = calloc ( 1 , sizeof ( * listener - > counters ) ) ;
2025-04-08 12:16:38 -04:00
if ( listener - > counters ) {
MEDIUM: counters: manage shared counters using dedicated helpers
proxies, listeners and server shared counters are now managed via helpers
added in one of the previous commits.
When guid is not set (ie: when not yet assigned), shared counters pointer
is allocated using calloc() (local memory) and a flag is set on the shared
counters struct to know how to manipulate (and free it). Else if guid is
set, then it means that the counters may be shared so while for now we
don't actually use a shared memory location the API is ready for that.
The way it works, for proxies and servers (for which guid is not known
during creation), we first call counters_{fe,be}_shared_get with guid not
set, which results in local pointer being retrieved (as if we just
manually called calloc() to retrieve a pointer). Later (during postparsing)
if guid is set we try to upgrade the pointer from local to shared.
Lastly, since the memory location for some objects (proxies and servers
counters) may change from creation to postparsing, let's update
counters->last_change member directly under counters_{fe,be}_shared_get()
so we don't miss it.
No change of behavior is expected, this is only preparation work.
2025-05-07 17:42:04 -04:00
listener - > counters - > shared = counters_fe_shared_get ( & listener - > guid ) ;
2025-04-08 12:16:38 -04:00
if ( ! listener - > counters - > shared ) {
ha_free ( & listener - > counters ) ;
ha_alert ( " config: %s '%s': out of memory. \n " ,
proxy_type_str ( curproxy ) , curproxy - > id ) ;
}
}
2012-10-29 11:51:55 -04:00
if ( ! listener - > name )
memprintf ( & listener - > name , " sock-%d " , listener - > luid ) ;
2009-10-04 09:43:17 -04:00
}
2012-09-22 13:11:47 -04:00
2022-01-25 11:48:47 -05:00
# ifdef USE_QUIC
2023-01-12 14:20:57 -05:00
if ( listener - > bind_conf - > xprt = = xprt_get ( XPRT_QUIC ) ) {
2023-10-25 04:52:23 -04:00
/* quic_conn are counted against maxconn. */
listener - > bind_conf - > options | = BC_O_XPRT_MAXCONN ;
2023-11-06 10:34:38 -05:00
listener - > rx . quic_curr_handshake = 0 ;
2023-11-08 08:29:31 -05:00
listener - > rx . quic_curr_accept = 0 ;
2023-10-25 04:52:23 -04:00
2023-08-17 04:15:09 -04:00
# ifdef USE_QUIC_OPENSSL_COMPAT
/* store the last checked bind_conf in bind_conf */
if ( ! ( global . tune . options & GTUNE_NO_QUIC ) & &
! ( global . tune . options & GTUNE_LIMITED_QUIC ) & &
listener - > bind_conf ! = bind_conf ) {
bind_conf = listener - > bind_conf ;
ha_alert ( " Binding [%s:%d] for %s %s: this SSL library does not support the "
" QUIC protocol. A limited compatibility layer may be enabled using "
" the \" limited-quic \" global option if desired. \n " ,
listener - > bind_conf - > file , listener - > bind_conf - > line ,
proxy_type_str ( curproxy ) , curproxy - > id ) ;
cfgerr + + ;
}
# endif
2022-09-29 12:31:24 -04:00
2022-01-25 10:21:47 -05:00
li_init_per_thr ( listener ) ;
}
2022-01-25 11:48:47 -05:00
# endif
2007-10-28 20:09:36 -04:00
}
2012-09-13 11:54:29 -04:00
/* Release unused SSL configs */
list_for_each_entry ( bind_conf , & curproxy - > conf . bind , by_fe ) {
2022-05-20 09:56:32 -04:00
if ( ! ( bind_conf - > options & BC_O_USE_SSL ) & & bind_conf - > xprt - > destroy_bind_conf )
2016-12-22 11:30:54 -05:00
bind_conf - > xprt - > destroy_bind_conf ( bind_conf ) ;
2012-09-13 11:54:29 -04:00
}
2012-09-07 10:58:00 -04:00
2011-07-25 10:33:49 -04:00
/* create the task associated with the proxy */
2021-10-01 12:23:30 -04:00
curproxy - > task = task_new_anywhere ( ) ;
2011-07-25 10:33:49 -04:00
if ( curproxy - > task ) {
curproxy - > task - > context = curproxy ;
curproxy - > task - > process = manage_proxy ;
2021-10-13 04:10:09 -04:00
curproxy - > flags | = PR_FL_READY ;
2011-07-25 10:33:49 -04:00
} else {
2017-11-24 10:50:31 -05:00
ha_alert ( " Proxy '%s': no more memory when trying to allocate the management task \n " ,
curproxy - > id ) ;
2011-07-25 10:33:49 -04:00
cfgerr + + ;
}
2014-09-16 07:21:03 -04:00
}
2022-08-18 09:53:21 -04:00
/*
* We have just initialized the main proxies list
* we must also configure the log - forward proxies list
*/
if ( init_proxies_list = = proxies_list ) {
init_proxies_list = cfg_log_forward ;
2022-08-22 04:25:11 -04:00
/* check if list is not null to avoid infinite loop */
if ( init_proxies_list )
goto init_proxies_list_stage2 ;
2022-08-18 09:53:21 -04:00
}
2025-04-17 11:16:44 -04:00
if ( init_proxies_list = = cfg_log_forward ) {
init_proxies_list = sink_proxies_list ;
/* check if list is not null to avoid infinite loop */
if ( init_proxies_list )
goto init_proxies_list_stage2 ;
}
2007-12-24 20:40:22 -05:00
/*
* Recount currently required checks .
*/
2017-11-24 10:54:05 -05:00
for ( curproxy = proxies_list ; curproxy ; curproxy = curproxy - > next ) {
2007-12-24 20:40:22 -05:00
int optnum ;
2009-01-18 15:44:07 -05:00
for ( optnum = 0 ; cfg_opts [ optnum ] . name ; optnum + + )
if ( curproxy - > options & cfg_opts [ optnum ] . val )
global . last_checks | = cfg_opts [ optnum ] . checks ;
2007-12-24 20:40:22 -05:00
2009-01-18 15:44:07 -05:00
for ( optnum = 0 ; cfg_opts2 [ optnum ] . name ; optnum + + )
if ( curproxy - > options2 & cfg_opts2 [ optnum ] . val )
global . last_checks | = cfg_opts2 [ optnum ] . checks ;
2007-12-24 20:40:22 -05:00
}
2017-07-13 03:07:09 -04:00
if ( cfg_peers ) {
struct peers * curpeers = cfg_peers , * * last ;
2011-09-07 15:24:49 -04:00
struct peer * p , * pb ;
2015-05-01 13:15:17 -04:00
/* Remove all peers sections which don't have a valid listener,
* which are not used by any table , or which are bound to more
* than one process .
2011-09-07 15:24:49 -04:00
*/
2017-07-13 03:07:09 -04:00
last = & cfg_peers ;
2011-09-07 15:24:49 -04:00
while ( * last ) {
2022-10-17 08:58:19 -04:00
struct peer * peer ;
2020-03-24 15:08:30 -04:00
struct stktable * t ;
2011-09-07 15:24:49 -04:00
curpeers = * last ;
2015-05-01 14:02:17 -04:00
2020-09-24 02:48:08 -04:00
if ( curpeers - > disabled ) {
2015-05-01 14:02:17 -04:00
/* the "disabled" keyword was present */
if ( curpeers - > peers_fe )
stop_proxy ( curpeers - > peers_fe ) ;
curpeers - > peers_fe = NULL ;
}
2019-10-04 02:30:04 -04:00
else if ( ! curpeers - > peers_fe | | ! curpeers - > peers_fe - > id ) {
2017-11-24 10:50:31 -05:00
ha_warning ( " Removing incomplete section 'peers %s' (no peer named '%s'). \n " ,
curpeers - > id , localpeer ) ;
2020-03-24 15:08:30 -04:00
if ( curpeers - > peers_fe )
stop_proxy ( curpeers - > peers_fe ) ;
curpeers - > peers_fe = NULL ;
2015-05-01 14:02:17 -04:00
}
else {
2019-02-12 13:12:32 -05:00
/* Initializes the transport layer of the server part of all the peers belonging to
* < curpeers > section if required .
* Note that - > srv is used by the local peer of a new process to connect to the local peer
* of an old process .
*/
2021-10-13 04:10:09 -04:00
curpeers - > peers_fe - > flags | = PR_FL_READY ;
2018-04-26 08:35:21 -04:00
p = curpeers - > remote ;
while ( p ) {
BUG/MINOR: peers: Improve detection of config errors in peers sections
There are several misuses in peers sections that are not detected during the
configuration parsing and that could lead to undefined behaviors or crashes.
First, only one listener is expected for a peers section. If several bind
lines or local peer definitions are used, an error is triggered. However, if
multiple addresses are set on the same bind line, there is no error while
only the last listener is properly configured. On the 2.8, there is no crash
but side effects are hardly predictable. On older version, HAProxy crashes
if an unconfigured listener is used.
Then, there is no check on remote peers name. It is unexpected to have same
name for several remote peers. There is now a test, performed during the
post-parsing, to verify all remote peer names are unique.
Finally, server parsing options for the peers sections are changed to be
sure a port is always defined, and not a port range or a port offset.
This patch fixes the issue #2066. It could be backported to all stable
versions.
2023-06-02 08:10:36 -04:00
struct peer * other_peer ;
for ( other_peer = curpeers - > remote ; other_peer & & other_peer ! = p ; other_peer = other_peer - > next ) {
if ( strcmp ( other_peer - > id , p - > id ) = = 0 ) {
ha_alert ( " Peer section '%s' [%s:%d]: another peer named '%s' was already defined at line %s:%d, please use distinct names. \n " ,
curpeers - > peers_fe - > id ,
p - > conf . file , p - > conf . line ,
other_peer - > id , other_peer - > conf . file , other_peer - > conf . line ) ;
cfgerr + + ;
break ;
}
}
2019-01-11 08:06:12 -05:00
if ( p - > srv ) {
2020-03-27 13:55:49 -04:00
if ( p - > srv - > use_ssl = = 1 & & xprt_get ( XPRT_SSL ) & & xprt_get ( XPRT_SSL ) - > prepare_srv )
2019-01-11 08:06:12 -05:00
cfgerr + = xprt_get ( XPRT_SSL ) - > prepare_srv ( p - > srv ) ;
}
2018-04-26 08:35:21 -04:00
p = p - > next ;
}
2019-02-12 13:12:32 -05:00
/* Configure the SSL bindings of the local peer if required. */
if ( ! LIST_ISEMPTY ( & curpeers - > peers_fe - > conf . bind ) ) {
struct list * l ;
struct bind_conf * bind_conf ;
2023-04-22 17:52:17 -04:00
int ret ;
2019-02-12 13:12:32 -05:00
l = & curpeers - > peers_fe - > conf . bind ;
bind_conf = LIST_ELEM ( l - > n , typeof ( bind_conf ) , by_fe ) ;
2022-07-05 10:00:56 -04:00
2022-07-26 13:03:51 -04:00
if ( curpeers - > local - > srv ) {
if ( curpeers - > local - > srv - > use_ssl = = 1 & & ! ( bind_conf - > options & BC_O_USE_SSL ) ) {
ha_warning ( " Peers section '%s': local peer have a non-SSL listener and a SSL server configured at line %s:%d. \n " ,
curpeers - > peers_fe - > id , curpeers - > local - > conf . file , curpeers - > local - > conf . line ) ;
}
else if ( curpeers - > local - > srv - > use_ssl ! = 1 & & ( bind_conf - > options & BC_O_USE_SSL ) ) {
ha_warning ( " Peers section '%s': local peer have a SSL listener and a non-SSL server configured at line %s:%d. \n " ,
curpeers - > peers_fe - > id , curpeers - > local - > conf . file , curpeers - > local - > conf . line ) ;
}
}
2023-04-22 17:52:17 -04:00
/* finish the bind setup */
ret = bind_complete_thread_setup ( bind_conf , & err_code ) ;
if ( ret ! = 0 ) {
cfgerr + = ret ;
if ( err_code & ERR_FATAL )
goto out ;
2022-07-05 10:00:56 -04:00
}
2019-02-12 13:12:32 -05:00
if ( bind_conf - > xprt - > prepare_bind_conf & &
bind_conf - > xprt - > prepare_bind_conf ( bind_conf ) < 0 )
cfgerr + + ;
}
2019-05-20 12:22:52 -04:00
if ( ! peers_init_sync ( curpeers ) | | ! peers_alloc_dcache ( curpeers ) ) {
2018-10-15 05:18:03 -04:00
ha_alert ( " Peers section '%s': out of memory, giving up on peers. \n " ,
curpeers - > id ) ;
cfgerr + + ;
break ;
}
2011-09-07 15:24:49 -04:00
last = & curpeers - > next ;
2022-10-17 08:58:19 -04:00
/* Ignore the peer shard greater than the number of peer shard for this section.
* Also ignore the peer shard of the local peer .
*/
for ( peer = curpeers - > remote ; peer ; peer = peer - > next ) {
if ( peer = = curpeers - > local ) {
if ( peer - > srv - > shard ) {
ha_warning ( " Peers section '%s': shard ignored for '%s' local peer \n " ,
curpeers - > id , peer - > id ) ;
peer - > srv - > shard = 0 ;
}
}
else if ( peer - > srv - > shard > curpeers - > nb_shards ) {
ha_warning ( " Peers section '%s': shard ignored for '%s' local peer because "
" %d shard value is greater than the section number of shards (%d) \n " ,
curpeers - > id , peer - > id , peer - > srv - > shard , curpeers - > nb_shards ) ;
peer - > srv - > shard = 0 ;
}
}
2011-09-07 15:24:49 -04:00
continue ;
}
2015-05-01 14:02:17 -04:00
/* clean what has been detected above */
2011-09-07 15:24:49 -04:00
p = curpeers - > remote ;
while ( p ) {
pb = p - > next ;
free ( p - > id ) ;
free ( p ) ;
p = pb ;
}
/* Destroy and unlink this curpeers section.
* Note : curpeers is backed up into * last .
*/
free ( curpeers - > id ) ;
curpeers = curpeers - > next ;
2020-03-24 15:08:30 -04:00
/* Reset any reference to this peers section in the list of stick-tables */
for ( t = stktables_list ; t ; t = t - > next ) {
if ( t - > peers . p & & t - > peers . p = = * last )
t - > peers . p = NULL ;
}
2011-09-07 15:24:49 -04:00
free ( * last ) ;
* last = curpeers ;
}
}
2019-03-14 02:07:41 -04:00
for ( t = stktables_list ; t ; t = t - > next ) {
if ( t - > proxy )
continue ;
2023-11-02 13:34:51 -04:00
err = NULL ;
if ( ! stktable_init ( t , & err ) ) {
ha_alert ( " Parsing [%s:%d]: failed to initialize '%s' stick-table: %s. \n " , t - > conf . file , t - > conf . line , t - > id , err ) ;
ha_free ( & err ) ;
2019-03-14 02:07:41 -04:00
cfgerr + + ;
}
}
2015-05-01 13:09:08 -04:00
/* initialize stick-tables on backend capable proxies. This must not
* be done earlier because the data size may be discovered while parsing
* other proxies .
*/
2017-11-24 10:54:05 -05:00
for ( curproxy = proxies_list ; curproxy ; curproxy = curproxy - > next ) {
2021-10-06 08:24:19 -04:00
if ( ( curproxy - > flags & PR_FL_DISABLED ) | | ! curproxy - > table )
2015-05-01 13:09:08 -04:00
continue ;
2023-11-02 13:34:51 -04:00
err = NULL ;
if ( ! stktable_init ( curproxy - > table , & err ) ) {
ha_alert ( " Proxy '%s': failed to initialize stick-table: %s. \n " , curproxy - > id , err ) ;
ha_free ( & err ) ;
2015-05-01 13:09:08 -04:00
cfgerr + + ;
}
}
2015-01-29 21:22:58 -05:00
if ( mailers ) {
struct mailers * curmailers = mailers , * * last ;
struct mailer * m , * mb ;
/* Remove all mailers sections which don't have a valid listener.
* This can happen when a mailers section is never referenced .
*/
last = & mailers ;
while ( * last ) {
curmailers = * last ;
if ( curmailers - > users ) {
last = & curmailers - > next ;
continue ;
}
2017-11-24 10:50:31 -05:00
ha_warning ( " Removing incomplete section 'mailers %s'. \n " ,
curmailers - > id ) ;
2015-01-29 21:22:58 -05:00
m = curmailers - > mailer_list ;
while ( m ) {
mb = m - > next ;
free ( m - > id ) ;
free ( m ) ;
m = mb ;
}
/* Destroy and unlink this curmailers section.
* Note : curmailers is backed up into * last .
*/
free ( curmailers - > id ) ;
curmailers = curmailers - > next ;
free ( * last ) ;
* last = curmailers ;
}
}
2015-08-19 10:44:03 -04:00
/* Update server_state_file_name to backend name if backend is supposed to use
* a server - state file locally defined and none has been provided */
2017-11-24 10:54:05 -05:00
for ( curproxy = proxies_list ; curproxy ; curproxy = curproxy - > next ) {
2015-08-19 10:44:03 -04:00
if ( curproxy - > load_server_state_from_file = = PR_SRV_STATE_FILE_LOCAL & &
curproxy - > server_state_file_name = = NULL )
curproxy - > server_state_file_name = strdup ( curproxy - > id ) ;
}
2020-12-23 10:51:12 -05:00
list_for_each_entry ( curr_resolvers , & sec_resolvers , list ) {
2018-04-13 17:43:04 -04:00
if ( LIST_ISEMPTY ( & curr_resolvers - > nameservers ) ) {
2021-06-04 12:22:08 -04:00
ha_warning ( " resolvers '%s' [%s:%d] has no nameservers configured! \n " ,
2018-04-13 17:43:04 -04:00
curr_resolvers - > id , curr_resolvers - > conf . file ,
curr_resolvers - > conf . line ) ;
err_code | = ERR_WARN ;
}
}
2017-10-23 08:36:34 -04:00
list_for_each_entry ( postparser , & postparsers , list ) {
if ( postparser - > func )
cfgerr + = postparser - > func ( ) ;
}
2009-07-23 07:36:36 -04:00
if ( cfgerr > 0 )
err_code | = ERR_ALERT | ERR_FATAL ;
out :
return err_code ;
2006-06-25 20:48:02 -04:00
}
[MEDIUM] add support for configuration keyword registration
Any module which needs configuration keywords may now dynamically
register a keyword in a given section, and associate it with a
configuration parsing function using cfg_register_keywords() from
a constructor function. This makes the configuration parser more
modular because it is not required anymore to touch cfg_parse.c.
Example :
static int parse_global_blah(char **args, int section_type, struct proxy *curpx,
struct proxy *defpx, char *err, int errlen)
{
printf("parsing blah in global section\n");
return 0;
}
static int parse_listen_blah(char **args, int section_type, struct proxy *curpx,
struct proxy *defpx, char *err, int errlen)
{
printf("parsing blah in listen section\n");
if (*args[1]) {
snprintf(err, errlen, "missing arg for listen_blah!!!");
return -1;
}
return 0;
}
static struct cfg_kw_list cfg_kws = {{ },{
{ CFG_GLOBAL, "blah", parse_global_blah },
{ CFG_LISTEN, "blah", parse_listen_blah },
{ 0, NULL, NULL },
}};
__attribute__((constructor))
static void __module_init(void)
{
cfg_register_keywords(&cfg_kws);
}
2008-07-09 13:39:06 -04:00
/*
* Registers the CFG keyword list < kwl > as a list of valid keywords for next
* parsing sessions .
*/
void cfg_register_keywords ( struct cfg_kw_list * kwl )
{
2021-04-21 01:32:39 -04:00
LIST_APPEND ( & cfg_keywords . list , & kwl - > list ) ;
[MEDIUM] add support for configuration keyword registration
Any module which needs configuration keywords may now dynamically
register a keyword in a given section, and associate it with a
configuration parsing function using cfg_register_keywords() from
a constructor function. This makes the configuration parser more
modular because it is not required anymore to touch cfg_parse.c.
Example :
static int parse_global_blah(char **args, int section_type, struct proxy *curpx,
struct proxy *defpx, char *err, int errlen)
{
printf("parsing blah in global section\n");
return 0;
}
static int parse_listen_blah(char **args, int section_type, struct proxy *curpx,
struct proxy *defpx, char *err, int errlen)
{
printf("parsing blah in listen section\n");
if (*args[1]) {
snprintf(err, errlen, "missing arg for listen_blah!!!");
return -1;
}
return 0;
}
static struct cfg_kw_list cfg_kws = {{ },{
{ CFG_GLOBAL, "blah", parse_global_blah },
{ CFG_LISTEN, "blah", parse_listen_blah },
{ 0, NULL, NULL },
}};
__attribute__((constructor))
static void __module_init(void)
{
cfg_register_keywords(&cfg_kws);
}
2008-07-09 13:39:06 -04:00
}
2006-06-25 20:48:02 -04:00
[MEDIUM] add support for configuration keyword registration
Any module which needs configuration keywords may now dynamically
register a keyword in a given section, and associate it with a
configuration parsing function using cfg_register_keywords() from
a constructor function. This makes the configuration parser more
modular because it is not required anymore to touch cfg_parse.c.
Example :
static int parse_global_blah(char **args, int section_type, struct proxy *curpx,
struct proxy *defpx, char *err, int errlen)
{
printf("parsing blah in global section\n");
return 0;
}
static int parse_listen_blah(char **args, int section_type, struct proxy *curpx,
struct proxy *defpx, char *err, int errlen)
{
printf("parsing blah in listen section\n");
if (*args[1]) {
snprintf(err, errlen, "missing arg for listen_blah!!!");
return -1;
}
return 0;
}
static struct cfg_kw_list cfg_kws = {{ },{
{ CFG_GLOBAL, "blah", parse_global_blah },
{ CFG_LISTEN, "blah", parse_listen_blah },
{ 0, NULL, NULL },
}};
__attribute__((constructor))
static void __module_init(void)
{
cfg_register_keywords(&cfg_kws);
}
2008-07-09 13:39:06 -04:00
/*
* Unregisters the CFG keyword list < kwl > from the list of valid keywords .
*/
void cfg_unregister_keywords ( struct cfg_kw_list * kwl )
{
2021-04-21 01:32:39 -04:00
LIST_DELETE ( & kwl - > list ) ;
[MEDIUM] add support for configuration keyword registration
Any module which needs configuration keywords may now dynamically
register a keyword in a given section, and associate it with a
configuration parsing function using cfg_register_keywords() from
a constructor function. This makes the configuration parser more
modular because it is not required anymore to touch cfg_parse.c.
Example :
static int parse_global_blah(char **args, int section_type, struct proxy *curpx,
struct proxy *defpx, char *err, int errlen)
{
printf("parsing blah in global section\n");
return 0;
}
static int parse_listen_blah(char **args, int section_type, struct proxy *curpx,
struct proxy *defpx, char *err, int errlen)
{
printf("parsing blah in listen section\n");
if (*args[1]) {
snprintf(err, errlen, "missing arg for listen_blah!!!");
return -1;
}
return 0;
}
static struct cfg_kw_list cfg_kws = {{ },{
{ CFG_GLOBAL, "blah", parse_global_blah },
{ CFG_LISTEN, "blah", parse_listen_blah },
{ 0, NULL, NULL },
}};
__attribute__((constructor))
static void __module_init(void)
{
cfg_register_keywords(&cfg_kws);
}
2008-07-09 13:39:06 -04:00
LIST_INIT ( & kwl - > list ) ;
}
2006-06-25 20:48:02 -04:00
2014-03-18 08:54:18 -04:00
/* this function register new section in the haproxy configuration file.
* < section_name > is the name of this new section and < section_parser >
* is the called parser . If two section declaration have the same name ,
* only the first declared is used .
*/
int cfg_register_section ( char * section_name ,
2017-10-16 05:06:50 -04:00
int ( * section_parser ) ( const char * , int , char * * , int ) ,
int ( * post_section_parser ) ( ) )
2014-03-18 08:54:18 -04:00
{
struct cfg_section * cs ;
2025-02-10 09:07:05 -05:00
if ( section_parser ) {
/* only checks if we register a section parser, not a post section callback */
list_for_each_entry ( cs , & sections , list ) {
if ( strcmp ( cs - > section_name , section_name ) = = 0 & & cs - > section_parser ) {
ha_alert ( " register section '%s': already registered. \n " , section_name ) ;
return 0 ;
}
2016-05-17 10:16:09 -04:00
}
}
2014-03-18 08:54:18 -04:00
cs = calloc ( 1 , sizeof ( * cs ) ) ;
if ( ! cs ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " register section '%s': out of memory. \n " , section_name ) ;
2014-03-18 08:54:18 -04:00
return 0 ;
}
cs - > section_name = section_name ;
cs - > section_parser = section_parser ;
2017-10-16 05:06:50 -04:00
cs - > post_section_parser = post_section_parser ;
2014-03-18 08:54:18 -04:00
2021-04-21 01:32:39 -04:00
LIST_APPEND ( & sections , & cs - > list ) ;
2014-03-18 08:54:18 -04:00
return 1 ;
}
2017-10-23 08:36:34 -04:00
/* this function register a new function which will be called once the haproxy
* configuration file has been parsed . It ' s useful to check dependencies
* between sections or to resolve items once everything is parsed .
*/
int cfg_register_postparser ( char * name , int ( * func ) ( ) )
{
struct cfg_postparser * cp ;
cp = calloc ( 1 , sizeof ( * cp ) ) ;
if ( ! cp ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " register postparser '%s': out of memory. \n " , name ) ;
2017-10-23 08:36:34 -04:00
return 0 ;
}
cp - > name = name ;
cp - > func = func ;
2021-04-21 01:32:39 -04:00
LIST_APPEND ( & postparsers , & cp - > list ) ;
2017-10-23 08:36:34 -04:00
return 1 ;
}
2015-09-25 06:49:18 -04:00
/*
 * Frees all registered config section entries. Uses the _safe iterator since
 * each node is deleted and freed while walking the list.
 */
void cfg_unregister_sections(void)
{
	struct cfg_section *cs, *ics;

	list_for_each_entry_safe(cs, ics, &sections, list) {
		LIST_DELETE(&cs->list);
		free(cs);
	}
}
2016-10-26 05:09:44 -04:00
/* Moves every registered section from the global <sections> list to
 * <backup_sections>, leaving the global list empty. The entries themselves
 * are not copied, only relinked; cfg_restore_sections() undoes this.
 */
void cfg_backup_sections(struct list *backup_sections)
{
	struct cfg_section *cs, *ics;

	list_for_each_entry_safe(cs, ics, &sections, list) {
		LIST_DELETE(&cs->list);
		LIST_APPEND(backup_sections, &cs->list);
	}
}
/* Moves every section saved in <backup_sections> back onto the global
 * <sections> list. Counterpart of cfg_backup_sections().
 */
void cfg_restore_sections(struct list *backup_sections)
{
	struct cfg_section *cs, *ics;

	list_for_each_entry_safe(cs, ics, backup_sections, list) {
		LIST_DELETE(&cs->list);
		LIST_APPEND(&sections, &cs->list);
	}
}
2022-03-29 09:02:44 -04:00
/* dumps all registered keywords by section on stdout */
void cfg_dump_registered_keywords ( )
{
2025-05-23 04:49:33 -04:00
/* CFG_GLOBAL, CFG_LISTEN, CFG_USERLIST, CFG_PEERS, CFG_CRTLIST, CFG_CRTSTORE, CFG_TRACES, CFG_ACME */
const char * sect_names [ ] = { " " , " global " , " listen " , " userlist " , " peers " , " crt-list " , " crt-store " , " traces " , " acme " , 0 } ;
2022-03-29 09:02:44 -04:00
int section ;
int index ;
for ( section = 1 ; sect_names [ section ] ; section + + ) {
struct cfg_kw_list * kwl ;
2022-03-30 05:21:32 -04:00
const struct cfg_keyword * kwp , * kwn ;
2022-03-29 09:02:44 -04:00
printf ( " %s \n " , sect_names [ section ] ) ;
2022-03-30 05:21:32 -04:00
for ( kwn = kwp = NULL ; ; kwp = kwn ) {
list_for_each_entry ( kwl , & cfg_keywords . list , list ) {
for ( index = 0 ; kwl - > kw [ index ] . kw ! = NULL ; index + + )
if ( kwl - > kw [ index ] . section = = section & &
strordered ( kwp ? kwp - > kw : NULL , kwl - > kw [ index ] . kw , kwn ! = kwp ? kwn - > kw : NULL ) )
kwn = & kwl - > kw [ index ] ;
}
if ( kwn = = kwp )
break ;
printf ( " \t %s \n " , kwn - > kw ) ;
2022-03-29 09:02:44 -04:00
}
if ( section = = CFG_LISTEN ) {
/* there are plenty of other keywords there */
extern struct list tcp_req_conn_keywords , tcp_req_sess_keywords ,
tcp_req_cont_keywords , tcp_res_cont_keywords ;
extern struct bind_kw_list bind_keywords ;
extern struct srv_kw_list srv_keywords ;
struct bind_kw_list * bkwl ;
struct srv_kw_list * skwl ;
2022-03-30 05:21:32 -04:00
const struct bind_kw * bkwp , * bkwn ;
const struct srv_kw * skwp , * skwn ;
const struct cfg_opt * coptp , * coptn ;
2023-02-13 09:24:01 -05:00
/* display the non-ssl keywords */
2022-03-30 05:21:32 -04:00
for ( bkwn = bkwp = NULL ; ; bkwp = bkwn ) {
list_for_each_entry ( bkwl , & bind_keywords . list , list ) {
2023-02-13 09:24:01 -05:00
if ( strcmp ( bkwl - > scope , " SSL " ) = = 0 ) /* skip SSL keywords */
continue ;
for ( index = 0 ; bkwl - > kw [ index ] . kw ! = NULL ; index + + ) {
2022-03-30 05:21:32 -04:00
if ( strordered ( bkwp ? bkwp - > kw : NULL ,
bkwl - > kw [ index ] . kw ,
bkwn ! = bkwp ? bkwn - > kw : NULL ) )
bkwn = & bkwl - > kw [ index ] ;
2023-02-13 09:24:01 -05:00
}
2022-03-29 09:02:44 -04:00
}
2022-03-30 05:21:32 -04:00
if ( bkwn = = bkwp )
break ;
if ( ! bkwn - > skip )
printf ( " \t bind <addr> %s \n " , bkwn - > kw ) ;
else
printf ( " \t bind <addr> %s +%d \n " , bkwn - > kw , bkwn - > skip ) ;
2022-03-29 09:02:44 -04:00
}
# if defined(USE_OPENSSL)
2023-02-13 09:24:01 -05:00
/* displays the "ssl" keywords */
for ( bkwn = bkwp = NULL ; ; bkwp = bkwn ) {
list_for_each_entry ( bkwl , & bind_keywords . list , list ) {
if ( strcmp ( bkwl - > scope , " SSL " ) ! = 0 ) /* skip non-SSL keywords */
continue ;
for ( index = 0 ; bkwl - > kw [ index ] . kw ! = NULL ; index + + ) {
if ( strordered ( bkwp ? bkwp - > kw : NULL ,
bkwl - > kw [ index ] . kw ,
bkwn ! = bkwp ? bkwn - > kw : NULL ) )
bkwn = & bkwl - > kw [ index ] ;
}
2022-03-30 05:21:32 -04:00
}
2023-02-13 09:24:01 -05:00
if ( bkwn = = bkwp )
2022-03-30 05:21:32 -04:00
break ;
2023-02-13 09:24:01 -05:00
if ( strcmp ( bkwn - > kw , " ssl " ) = = 0 ) /* skip "bind <addr> ssl ssl" */
continue ;
if ( ! bkwn - > skip )
printf ( " \t bind <addr> ssl %s \n " , bkwn - > kw ) ;
2022-03-29 09:02:44 -04:00
else
2023-02-13 09:24:01 -05:00
printf ( " \t bind <addr> ssl %s +%d \n " , bkwn - > kw , bkwn - > skip ) ;
2022-03-29 09:02:44 -04:00
}
# endif
2022-03-30 05:21:32 -04:00
for ( skwn = skwp = NULL ; ; skwp = skwn ) {
list_for_each_entry ( skwl , & srv_keywords . list , list ) {
for ( index = 0 ; skwl - > kw [ index ] . kw ! = NULL ; index + + )
if ( strordered ( skwp ? skwp - > kw : NULL ,
skwl - > kw [ index ] . kw ,
skwn ! = skwp ? skwn - > kw : NULL ) )
skwn = & skwl - > kw [ index ] ;
2022-03-29 09:02:44 -04:00
}
2022-03-30 05:21:32 -04:00
if ( skwn = = skwp )
break ;
2022-03-29 09:02:44 -04:00
2022-03-30 05:21:32 -04:00
if ( ! skwn - > skip )
printf ( " \t server <name> <addr> %s \n " , skwn - > kw ) ;
else
printf ( " \t server <name> <addr> %s +%d \n " , skwn - > kw , skwn - > skip ) ;
}
for ( coptn = coptp = NULL ; ; coptp = coptn ) {
for ( index = 0 ; cfg_opts [ index ] . name ; index + + )
if ( strordered ( coptp ? coptp - > name : NULL ,
cfg_opts [ index ] . name ,
coptn ! = coptp ? coptn - > name : NULL ) )
coptn = & cfg_opts [ index ] ;
for ( index = 0 ; cfg_opts2 [ index ] . name ; index + + )
if ( strordered ( coptp ? coptp - > name : NULL ,
cfg_opts2 [ index ] . name ,
coptn ! = coptp ? coptn - > name : NULL ) )
coptn = & cfg_opts2 [ index ] ;
if ( coptn = = coptp )
break ;
2022-03-29 09:02:44 -04:00
2022-03-30 05:21:32 -04:00
printf ( " \t option %s [ " , coptn - > name ) ;
if ( coptn - > cap & PR_CAP_FE )
2022-03-29 09:02:44 -04:00
printf ( " FE " ) ;
2022-03-30 05:21:32 -04:00
if ( coptn - > cap & PR_CAP_BE )
2022-03-29 09:02:44 -04:00
printf ( " BE " ) ;
2022-03-30 05:21:32 -04:00
if ( coptn - > mode = = PR_MODE_HTTP )
2022-03-29 09:02:44 -04:00
printf ( " HTTP " ) ;
printf ( " ] \n " ) ;
}
2022-03-30 05:21:32 -04:00
dump_act_rules ( & tcp_req_conn_keywords , " \t tcp-request connection " ) ;
dump_act_rules ( & tcp_req_sess_keywords , " \t tcp-request session " ) ;
dump_act_rules ( & tcp_req_cont_keywords , " \t tcp-request content " ) ;
dump_act_rules ( & tcp_res_cont_keywords , " \t tcp-response content " ) ;
dump_act_rules ( & http_req_keywords . list , " \t http-request " ) ;
dump_act_rules ( & http_res_keywords . list , " \t http-response " ) ;
dump_act_rules ( & http_after_res_keywords . list , " \t http-after-response " ) ;
2022-03-29 09:02:44 -04:00
}
2023-06-26 14:43:48 -04:00
if ( section = = CFG_PEERS ) {
struct peers_kw_list * pkwl ;
const struct peers_keyword * pkwp , * pkwn ;
for ( pkwn = pkwp = NULL ; ; pkwp = pkwn ) {
list_for_each_entry ( pkwl , & peers_keywords . list , list ) {
for ( index = 0 ; pkwl - > kw [ index ] . kw ! = NULL ; index + + ) {
if ( strordered ( pkwp ? pkwp - > kw : NULL ,
pkwl - > kw [ index ] . kw ,
pkwn ! = pkwp ? pkwn - > kw : NULL ) )
pkwn = & pkwl - > kw [ index ] ;
}
}
if ( pkwn = = pkwp )
break ;
printf ( " \t %s \n " , pkwn - > kw ) ;
}
}
2023-02-13 09:24:01 -05:00
if ( section = = CFG_CRTLIST ) {
/* displays the keyword available for the crt-lists */
extern struct ssl_crtlist_kw ssl_crtlist_kws [ ] __maybe_unused ;
const struct ssl_crtlist_kw * sbkwp __maybe_unused , * sbkwn __maybe_unused ;
# if defined(USE_OPENSSL)
for ( sbkwn = sbkwp = NULL ; ; sbkwp = sbkwn ) {
for ( index = 0 ; ssl_crtlist_kws [ index ] . kw ! = NULL ; index + + ) {
if ( strordered ( sbkwp ? sbkwp - > kw : NULL ,
ssl_crtlist_kws [ index ] . kw ,
sbkwn ! = sbkwp ? sbkwn - > kw : NULL ) )
sbkwn = & ssl_crtlist_kws [ index ] ;
}
if ( sbkwn = = sbkwp )
break ;
if ( ! sbkwn - > skip )
printf ( " \t %s \n " , sbkwn - > kw ) ;
else
printf ( " \t %s +%d \n " , sbkwn - > kw , sbkwn - > skip ) ;
}
# endif
}
2022-03-29 09:02:44 -04:00
}
}
2018-11-26 05:33:13 -05:00
/* these are the config sections handled by default */
REGISTER_CONFIG_SECTION ( " listen " , cfg_parse_listen , NULL ) ;
REGISTER_CONFIG_SECTION ( " frontend " , cfg_parse_listen , NULL ) ;
REGISTER_CONFIG_SECTION ( " backend " , cfg_parse_listen , NULL ) ;
REGISTER_CONFIG_SECTION ( " defaults " , cfg_parse_listen , NULL ) ;
REGISTER_CONFIG_SECTION ( " global " , cfg_parse_global , NULL ) ;
REGISTER_CONFIG_SECTION ( " userlist " , cfg_parse_users , NULL ) ;
REGISTER_CONFIG_SECTION ( " peers " , cfg_parse_peers , NULL ) ;
REGISTER_CONFIG_SECTION ( " mailers " , cfg_parse_mailers , NULL ) ;
REGISTER_CONFIG_SECTION ( " namespace_list " , cfg_parse_netns , NULL ) ;
2024-10-01 02:48:38 -04:00
REGISTER_CONFIG_SECTION ( " traces " , cfg_parse_traces , NULL ) ;
2016-05-26 11:55:28 -04:00
2021-04-27 14:29:11 -04:00
static struct cfg_kw_list cfg_kws = { { } , {
{ CFG_GLOBAL , " default-path " , cfg_parse_global_def_path } ,
{ /* END */ }
} } ;
INITCALL1 ( STG_REGISTER , cfg_register_keywords , & cfg_kws ) ;
2006-06-25 20:48:02 -04:00
/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */