/*
 * Server management functions.
 *
 * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
 * Copyright 2007-2008 Krzysztof Piotr Oledzki <ole@ans.pl>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <ctype.h>
#include <errno.h>

#include <import/xxhash.h>

#include <haproxy/applet-t.h>
#include <haproxy/backend.h>
#include <haproxy/api.h>
#include <haproxy/cfgparse.h>
#include <haproxy/check.h>
#include <haproxy/cli.h>
#include <haproxy/connection.h>
#include <haproxy/dict-t.h>
#include <haproxy/dns.h>
#include <haproxy/errors.h>
#include <haproxy/global.h>
#include <haproxy/mailers.h>
#include <haproxy/namespace.h>
#include <haproxy/queue.h>
#include <haproxy/sample.h>
#include <haproxy/server.h>
#include <haproxy/stats-t.h>
#include <haproxy/stream.h>
#include <haproxy/stream_interface.h>
#include <haproxy/task.h>
#include <haproxy/time.h>
#include <haproxy/port_range.h>
#include <haproxy/protocol.h>

#include <netinet/tcp.h>

#include <import/ebsttree.h>

static void srv_update_status(struct server *s);
static void srv_update_state(struct server *srv, int version, char **params);
static int srv_apply_lastaddr(struct server *srv, int *err_code);
static int srv_set_fqdn(struct server *srv, const char *fqdn, int dns_locked);
static void srv_state_parse_line(char *buf, const int version, char **params, char **srv_params);
static int srv_state_get_version(FILE *f);
static void srv_cleanup_connections(struct server *srv);

/* List head of all known server keywords */
static struct srv_kw_list srv_keywords = {
	.list = LIST_HEAD_INIT(srv_keywords.list)
};
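
/* Bookkeeping for idle server connections: the tree of servers whose idle
 * connections are scheduled for purging, the tasks performing the cleanup,
 * and the per-thread lists of connections to remove.
 */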
__decl_thread(HA_SPINLOCK_T idle_conn_srv_lock);
struct eb_root idle_conn_srv = EB_ROOT;
struct task *idle_conn_task = NULL;
struct task *idle_conn_cleanup[MAX_THREADS] = { NULL };
struct mt_list toremove_connections[MAX_THREADS];
__decl_thread(HA_SPINLOCK_T toremove_lock[MAX_THREADS]);

/* The server names dictionary */
struct dict server_name_dict = {
	.name = "server names",
	.values = EB_ROOT_UNIQUE,
};

/* tree where global state_file is loaded */
struct eb_root state_file = EB_ROOT_UNIQUE;
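
/* Returns the total downtime of server <s> in seconds, adding the time spent
 * in the current DOWN period when the server is still stopped.
 */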
int srv_downtime(const struct server *s)
{
	if ((s->cur_state != SRV_ST_STOPPED) || s->last_change >= now.tv_sec)	// ignore negative time
		return s->down_time;

	return now.tv_sec - s->last_change + s->down_time;
}
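
/* Returns the number of seconds since the last session was assigned to server
 * <s>, or -1 if no session has been seen yet.
 */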
int srv_lastsession(const struct server *s)
{
	if (s->counters.last_sess)
		return now.tv_sec - s->counters.last_sess;

	return -1;
}
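
/* Returns the check interval to apply to <check>: the nominal interval while
 * the server is fully up, downinter once it is down, and fastinter during
 * state transitions, falling back to inter when the specific value is unset.
 */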
int srv_getinter(const struct check *check)
{
	const struct server *s = check->server;

	if ((check->state & CHK_ST_CONFIGURED) && (check->health == check->rise + check->fall - 1))
		return check->inter;

	if ((s->next_state == SRV_ST_STOPPED) && check->health == 0)
		return (check->downinter)?(check->downinter):(check->inter);

	return (check->fastinter)?(check->fastinter):(check->inter);
}

/*
 * Check that we did not get a hash collision.
 * Unlikely, but it can happen.
 */
static inline void srv_check_for_dup_dyncookie(struct server *s)
{
	struct proxy *p = s->proxy;
	struct server *tmpserv;

	for (tmpserv = p->srv; tmpserv != NULL;
	     tmpserv = tmpserv->next) {
		if (tmpserv == s)
			continue;
		if (tmpserv->next_admin & SRV_ADMF_FMAINT)
			continue;
		if (tmpserv->cookie &&
		    strcmp(tmpserv->cookie, s->cookie) == 0) {
			ha_warning("We generated two equal cookies for two different servers.\n"
			           "Please change the secret key for '%s'.\n",
			           s->proxy->id);
		}
	}
}

/*
 * Must be called with the server lock held, and will grab the proxy lock.
 */
void srv_set_dyncookie(struct server *s)
{
	struct proxy *p = s->proxy;
	char *tmpbuf;
	unsigned long long hash_value;
	size_t key_len;
	size_t buffer_len;
	int addr_len;
	int port;

	HA_SPIN_LOCK(PROXY_LOCK, &p->lock);
	if ((s->flags & SRV_F_COOKIESET) ||
	    !(s->proxy->ck_opts & PR_CK_DYNAMIC) ||
	    s->proxy->dyncookie_key == NULL)
		goto out;

	key_len = strlen(p->dyncookie_key);
	if (s->addr.ss_family != AF_INET &&
	    s->addr.ss_family != AF_INET6)
		goto out;

	/*
	 * Buffer to calculate the cookie value.
	 * The buffer contains the secret key + the server IP address
	 * + the TCP port.
	 */
	addr_len = (s->addr.ss_family == AF_INET) ? 4 : 16;
	/*
	 * The TCP port should use only 2 bytes, but is stored in
	 * an unsigned int in struct server, so let's use 4, to be
	 * on the safe side.
	 */
	buffer_len = key_len + addr_len + 4;

	tmpbuf = trash.area;
	memcpy(tmpbuf, p->dyncookie_key, key_len);
	memcpy(&(tmpbuf[key_len]),
	       s->addr.ss_family == AF_INET ?
	       (void *)&((struct sockaddr_in *)&s->addr)->sin_addr.s_addr :
	       (void *)&(((struct sockaddr_in6 *)&s->addr)->sin6_addr.s6_addr),
	       addr_len);
	/*
	 * Make sure it's the same across all the load balancers,
	 * no matter their endianness.
	 */
	port = htonl(s->svc_port);
	memcpy(&tmpbuf[key_len + addr_len], &port, 4);
	hash_value = XXH64(tmpbuf, buffer_len, 0);
	memprintf(&s->cookie, "%016llx", hash_value);
	if (!s->cookie)
		goto out;

	s->cklen = 16;

	/* Don't bother checking if the dyncookie is duplicated if
	 * the server is marked as "disabled", maybe it doesn't have
	 * its real IP yet, but just a place holder.
	 */
	if (!(s->next_admin & SRV_ADMF_FMAINT))
		srv_check_for_dup_dyncookie(s);

 out:
	HA_SPIN_UNLOCK(PROXY_LOCK, &p->lock);
}

/*
 * Registers the server keyword list <kwl> as a list of valid keywords for next
 * parsing sessions.
 */
void srv_register_keywords(struct srv_kw_list *kwl)
{
	LIST_ADDQ(&srv_keywords.list, &kwl->list);
}

/* Return a pointer to the server keyword <kw>, or NULL if not found. If the
 * keyword is found with a NULL ->parse() function, then an attempt is made to
 * find one with a valid ->parse() function. This way it is possible to declare
 * platform-dependent, known keywords as NULL, then only declare them as valid
 * if some options are met. Note that if the requested keyword contains an
 * opening parenthesis, everything from this point is ignored.
 */
struct srv_kw *srv_find_kw(const char *kw)
{
	int index;
	const char *kwend;
	struct srv_kw_list *kwl;
	struct srv_kw *ret = NULL;

	kwend = strchr(kw, '(');
	if (!kwend)
		kwend = kw + strlen(kw);

	list_for_each_entry(kwl, &srv_keywords.list, list) {
		for (index = 0; kwl->kw[index].kw != NULL; index++) {
			if ((strncmp(kwl->kw[index].kw, kw, kwend - kw) == 0) &&
			    kwl->kw[index].kw[kwend - kw] == 0) {
				if (kwl->kw[index].parse)
					return &kwl->kw[index]; /* found it !*/
				else
					ret = &kwl->kw[index];  /* may be OK */
			}
		}
	}
	return ret;
}

/* Dumps all registered "server" keywords to the <out> string pointer. The
 * unsupported keywords are only dumped if their supported form was not
 * found.
 */
void srv_dump_kws(char **out)
{
	struct srv_kw_list *kwl;
	int index;

	if (!out)
		return;

	*out = NULL;
	list_for_each_entry(kwl, &srv_keywords.list, list) {
		for (index = 0; kwl->kw[index].kw != NULL; index++) {
			if (kwl->kw[index].parse ||
			    srv_find_kw(kwl->kw[index].kw) == &kwl->kw[index]) {
				memprintf(out, "%s[%4s] %s%s%s%s\n", *out ? *out : "",
				          kwl->scope,
				          kwl->kw[index].kw,
				          kwl->kw[index].skip ? " <arg>" : "",
				          kwl->kw[index].default_ok ? " [dflt_ok]" : "",
				          kwl->kw[index].parse ? "" : " (not supported)");
			}
		}
	}
}

/* Parse the "backup" server keyword */
static int srv_parse_backup(char **args, int *cur_arg,
                            struct proxy *curproxy, struct server *newsrv, char **err)
{
	newsrv->flags |= SRV_F_BACKUP;
	return 0;
}

/* Parse the "cookie" server keyword */
static int srv_parse_cookie ( char * * args , int * cur_arg ,
struct proxy * curproxy , struct server * newsrv , char * * err )
{
char * arg ;
arg = args [ * cur_arg + 1 ] ;
if ( ! * arg ) {
memprintf ( err , " '%s' expects <value> as argument. \n " , args [ * cur_arg ] ) ;
return ERR_ALERT | ERR_FATAL ;
}
free ( newsrv - > cookie ) ;
newsrv - > cookie = strdup ( arg ) ;
newsrv - > cklen = strlen ( arg ) ;
newsrv - > flags | = SRV_F_COOKIESET ;
return 0 ;
}
2017-03-21 06:53:54 -04:00
/* Parse the "disabled" server keyword */
static int srv_parse_disabled ( char * * args , int * cur_arg ,
struct proxy * curproxy , struct server * newsrv , char * * err )
{
2017-08-31 08:41:55 -04:00
newsrv - > next_admin | = SRV_ADMF_CMAINT | SRV_ADMF_FMAINT ;
newsrv - > next_state = SRV_ST_STOPPED ;
2017-03-21 06:53:54 -04:00
newsrv - > check . state | = CHK_ST_PAUSED ;
newsrv - > check . health = 0 ;
return 0 ;
}
/* Parse the "enabled" server keyword */
static int srv_parse_enabled ( char * * args , int * cur_arg ,
struct proxy * curproxy , struct server * newsrv , char * * err )
{
2017-08-31 08:41:55 -04:00
newsrv - > next_admin & = ~ SRV_ADMF_CMAINT & ~ SRV_ADMF_FMAINT ;
newsrv - > next_state = SRV_ST_RUNNING ;
2017-03-21 06:53:54 -04:00
newsrv - > check . state & = ~ CHK_ST_PAUSED ;
newsrv - > check . health = newsrv - > check . rise ;
return 0 ;
}
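
/* Parse the "max-reuse" server keyword */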
static int srv_parse_max_reuse(char **args, int *cur_arg, struct proxy *curproxy, struct server *newsrv, char **err)
{
	char *arg;

	arg = args[*cur_arg + 1];
	if (!*arg) {
		memprintf(err, "'%s' expects <value> as argument.\n", args[*cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}

	newsrv->max_reuse = atoi(arg);

	return 0;
}
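
/* Parse the "pool-purge-delay" server keyword */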
static int srv_parse_pool_purge_delay(char **args, int *cur_arg, struct proxy *curproxy, struct server *newsrv, char **err)
{
	const char *res;
	char *arg;
	unsigned int time;

	arg = args[*cur_arg + 1];
	if (!*arg) {
		memprintf(err, "'%s' expects <value> as argument.\n", args[*cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}
	res = parse_time_err(arg, &time, TIME_UNIT_MS);
	if (res == PARSE_TIME_OVER) {
		memprintf(err, "timer overflow in argument '%s' to '%s' (maximum value is 2147483647 ms or ~24.8 days)",
		          args[*cur_arg + 1], args[*cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}
	else if (res == PARSE_TIME_UNDER) {
		memprintf(err, "timer underflow in argument '%s' to '%s' (minimum non-null value is 1 ms)",
		          args[*cur_arg + 1], args[*cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}
	else if (res) {
		memprintf(err, "unexpected character '%c' in argument to <%s>.\n",
		          *res, args[*cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}
	newsrv->pool_purge_delay = time;

	return 0;
}
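
/* Parse the "pool-max-conn" server keyword */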
static int srv_parse_pool_max_conn(char **args, int *cur_arg, struct proxy *curproxy, struct server *newsrv, char **err)
{
	char *arg;

	arg = args[*cur_arg + 1];
	if (!*arg) {
		memprintf(err, "'%s' expects <value> as argument.\n", args[*cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}

	newsrv->max_idle_conns = atoi(arg);
	if ((int)newsrv->max_idle_conns < -1) {
		memprintf(err, "'%s' must be >= -1", args[*cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}

	return 0;
}

/* parse the "id" server keyword */
static int srv_parse_id(char **args, int *cur_arg, struct proxy *curproxy, struct server *newsrv, char **err)
{
	struct eb32_node *node;

	if (!*args[*cur_arg + 1]) {
		memprintf(err, "'%s' : expects an integer argument", args[*cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}

	newsrv->puid = atol(args[*cur_arg + 1]);
	newsrv->conf.id.key = newsrv->puid;

	if (newsrv->puid <= 0) {
		memprintf(err, "'%s' : custom id has to be > 0", args[*cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}

	node = eb32_lookup(&curproxy->conf.used_server_id, newsrv->puid);
	if (node) {
		struct server *target = container_of(node, struct server, conf.id);
		memprintf(err, "'%s' : custom id %d already used at %s:%d ('server %s')",
		          args[*cur_arg], newsrv->puid, target->conf.file, target->conf.line,
		          target->id);
		return ERR_ALERT | ERR_FATAL;
	}

	eb32_insert(&curproxy->conf.used_server_id, &newsrv->conf.id);
	newsrv->flags |= SRV_F_FORCED_ID;
	return 0;
}

/* Parse the "namespace" server keyword */
static int srv_parse_namespace(char **args, int *cur_arg,
                               struct proxy *curproxy, struct server *newsrv, char **err)
{
#ifdef USE_NS
	char *arg;

	arg = args[*cur_arg + 1];
	if (!*arg) {
		memprintf(err, "'%s' : expects <name> as argument", args[*cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}

	if (!strcmp(arg, "*")) {
		/* Use the namespace associated with the connection (if present). */
		newsrv->flags |= SRV_F_USE_NS_FROM_PP;
		return 0;
	}

	/*
	 * As this parser may be called several times for the same 'default-server'
	 * object, or for a new 'server' instance deriving from a 'default-server'
	 * one with SRV_F_USE_NS_FROM_PP flag enabled, let's reset it.
	 */
	newsrv->flags &= ~SRV_F_USE_NS_FROM_PP;

	newsrv->netns = netns_store_lookup(arg, strlen(arg));
	if (!newsrv->netns)
		newsrv->netns = netns_store_insert(arg);

	if (!newsrv->netns) {
		memprintf(err, "Cannot open namespace '%s'", arg);
		return ERR_ALERT | ERR_FATAL;
	}

	return 0;
#else
	memprintf(err, "'%s': '%s' option not implemented", args[0], args[*cur_arg]);
	return ERR_ALERT | ERR_FATAL;
#endif
}

/* Parse the "no-backup" server keyword */
static int srv_parse_no_backup(char **args, int *cur_arg,
                               struct proxy *curproxy, struct server *newsrv, char **err)
{
	newsrv->flags &= ~SRV_F_BACKUP;
	return 0;
}

/* Disable server PROXY protocol flags. */
static inline int srv_disable_pp_flags(struct server *srv, unsigned int flags)
{
	srv->pp_opts &= ~flags;
	return 0;
}

/* Parse the "no-send-proxy" server keyword */
static int srv_parse_no_send_proxy(char **args, int *cur_arg,
                                   struct proxy *curproxy, struct server *newsrv, char **err)
{
	return srv_disable_pp_flags(newsrv, SRV_PP_V1);
}

/* Parse the "no-send-proxy-v2" server keyword */
static int srv_parse_no_send_proxy_v2(char **args, int *cur_arg,
                                      struct proxy *curproxy, struct server *newsrv, char **err)
{
	return srv_disable_pp_flags(newsrv, SRV_PP_V2);
}

/* Parse the "no-tfo" server keyword */
static int srv_parse_no_tfo(char **args, int *cur_arg,
                            struct proxy *curproxy, struct server *newsrv, char **err)
{
	newsrv->flags &= ~SRV_F_FASTOPEN;
	return 0;
}

/* Parse the "non-stick" server keyword */
static int srv_parse_non_stick ( char * * args , int * cur_arg ,
struct proxy * curproxy , struct server * newsrv , char * * err )
{
newsrv - > flags | = SRV_F_NON_STICK ;
return 0 ;
}
2017-03-10 10:40:00 -05:00
/* Enable server PROXY protocol flags. */
2019-04-15 15:25:03 -04:00
static inline int srv_enable_pp_flags ( struct server * srv , unsigned int flags )
2017-03-10 10:40:00 -05:00
{
srv - > pp_opts | = flags ;
return 0 ;
}
/* parse the "proto" server keyword */
static int srv_parse_proto(char **args, int *cur_arg,
                           struct proxy *px, struct server *newsrv, char **err)
{
	struct ist proto;

	if (!*args[*cur_arg + 1]) {
		memprintf(err, "'%s' : missing value", args[*cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}
	proto = ist2(args[*cur_arg + 1], strlen(args[*cur_arg + 1]));
	newsrv->mux_proto = get_mux_proto(proto);
	if (!newsrv->mux_proto) {
		memprintf(err, "'%s' : unknown MUX protocol '%s'", args[*cur_arg], args[*cur_arg + 1]);
		return ERR_ALERT | ERR_FATAL;
	}
	return 0;
}

/* parse the "proxy-v2-options" */
static int srv_parse_proxy_v2_options(char **args, int *cur_arg,
                                      struct proxy *px, struct server *newsrv, char **err)
{
	char *p, *n;

	for (p = args[*cur_arg + 1]; p; p = n) {
		n = strchr(p, ',');
		if (n)
			*n++ = '\0';
		if (!strcmp(p, "ssl")) {
			newsrv->pp_opts |= SRV_PP_V2_SSL;
		} else if (!strcmp(p, "cert-cn")) {
			newsrv->pp_opts |= SRV_PP_V2_SSL;
			newsrv->pp_opts |= SRV_PP_V2_SSL_CN;
		} else if (!strcmp(p, "cert-key")) {
			newsrv->pp_opts |= SRV_PP_V2_SSL;
			newsrv->pp_opts |= SRV_PP_V2_SSL_KEY_ALG;
		} else if (!strcmp(p, "cert-sig")) {
			newsrv->pp_opts |= SRV_PP_V2_SSL;
			newsrv->pp_opts |= SRV_PP_V2_SSL_SIG_ALG;
		} else if (!strcmp(p, "ssl-cipher")) {
			newsrv->pp_opts |= SRV_PP_V2_SSL;
			newsrv->pp_opts |= SRV_PP_V2_SSL_CIPHER;
		} else if (!strcmp(p, "authority")) {
			newsrv->pp_opts |= SRV_PP_V2_AUTHORITY;
		} else if (!strcmp(p, "crc32c")) {
			newsrv->pp_opts |= SRV_PP_V2_CRC32C;
		} else if (!strcmp(p, "unique-id")) {
			newsrv->pp_opts |= SRV_PP_V2_UNIQUE_ID;
		} else
			goto fail;
	}
	return 0;
 fail:
	if (err)
		memprintf(err, "'%s' : proxy v2 option not implemented", p);
	return ERR_ALERT | ERR_FATAL;
}

/* Parse the "observe" server keyword */
static int srv_parse_observe(char **args, int *cur_arg,
                             struct proxy *curproxy, struct server *newsrv, char **err)
{
	char *arg;

	arg = args[*cur_arg + 1];
	if (!*arg) {
		memprintf(err, "'%s' expects <mode> as argument.\n", args[*cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}

	if (!strcmp(arg, "none")) {
		newsrv->observe = HANA_OBS_NONE;
	}
	else if (!strcmp(arg, "layer4")) {
		newsrv->observe = HANA_OBS_LAYER4;
	}
	else if (!strcmp(arg, "layer7")) {
		if (curproxy->mode != PR_MODE_HTTP) {
			memprintf(err, "'%s' can only be used in http proxies.\n", arg);
			return ERR_ALERT;
		}
		newsrv->observe = HANA_OBS_LAYER7;
	}
	else {
		memprintf(err, "'%s' expects one of 'none', 'layer4', 'layer7' "
		               "but got '%s'\n", args[*cur_arg], arg);
		return ERR_ALERT | ERR_FATAL;
	}

	return 0;
}

/* Parse the "redir" server keyword */
static int srv_parse_redir(char **args, int *cur_arg,
                           struct proxy *curproxy, struct server *newsrv, char **err)
{
	char *arg;

	arg = args[*cur_arg + 1];
	if (!*arg) {
		memprintf(err, "'%s' expects <prefix> as argument.\n", args[*cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}

	free(newsrv->rdr_pfx);
	newsrv->rdr_pfx = strdup(arg);
	newsrv->rdr_len = strlen(arg);

	return 0;
}

/* Parse the "send-proxy" server keyword */
static int srv_parse_send_proxy(char **args, int *cur_arg,
                                struct proxy *curproxy, struct server *newsrv, char **err)
{
	return srv_enable_pp_flags(newsrv, SRV_PP_V1);
}

/* Parse the "send-proxy-v2" server keyword */
static int srv_parse_send_proxy_v2(char **args, int *cur_arg,
                                   struct proxy *curproxy, struct server *newsrv, char **err)
{
	return srv_enable_pp_flags(newsrv, SRV_PP_V2);
}

/* Parse the "source" server keyword */
static int srv_parse_source(char **args, int *cur_arg,
                            struct proxy *curproxy, struct server *newsrv, char **err)
{
	char *errmsg;
	int port_low, port_high;
	struct sockaddr_storage *sk;
	struct protocol *proto;

	errmsg = NULL;
	if (!*args[*cur_arg + 1]) {
		memprintf(err, "'%s' expects <addr>[:<port>[-<port>]], and optionally '%s' <addr>, "
		               "and '%s' <name> as argument.\n", args[*cur_arg], "usesrc", "interface");
		goto err;
	}

	/* 'sk' is statically allocated (no need to be freed). */
	sk = str2sa_range(args[*cur_arg + 1], NULL, &port_low, &port_high, &errmsg, NULL, NULL, 1);
	if (!sk) {
		memprintf(err, "'%s %s' : %s\n", args[*cur_arg], args[*cur_arg + 1], errmsg);
		goto err;
	}

	proto = protocol_by_family(sk->ss_family);
	if (!proto || !proto->connect) {
		ha_alert("'%s %s' : connect() not supported for this address family.\n",
		         args[*cur_arg], args[*cur_arg + 1]);
		goto err;
	}

	newsrv->conn_src.opts |= CO_SRC_BIND;
	newsrv->conn_src.source_addr = *sk;

	if (port_low != port_high) {
		int i;

		if (!port_low || !port_high) {
			ha_alert("'%s' does not support port offsets (found '%s').\n",
			         args[*cur_arg], args[*cur_arg + 1]);
			goto err;
		}

		if (port_low <= 0 || port_low > 65535 ||
		    port_high <= 0 || port_high > 65535 ||
		    port_low > port_high) {
			ha_alert("'%s': invalid source port range %d-%d.\n", args[*cur_arg], port_low, port_high);
			goto err;
		}
		newsrv->conn_src.sport_range = port_range_alloc_range(port_high - port_low + 1);
		for (i = 0; i < newsrv->conn_src.sport_range->size; i++)
			newsrv->conn_src.sport_range->ports[i] = port_low + i;
	}

	*cur_arg += 2;
	while (*(args[*cur_arg])) {
		if (!strcmp(args[*cur_arg], "usesrc")) {  /* address to use outside */
#if defined(CONFIG_HAP_TRANSPARENT)
			if (!*args[*cur_arg + 1]) {
				ha_alert("'usesrc' expects <addr>[:<port>], 'client', 'clientip', "
				         "or 'hdr_ip(name,#)' as argument.\n");
				goto err;
			}

			if (!strcmp(args[*cur_arg + 1], "client")) {
				newsrv->conn_src.opts &= ~CO_SRC_TPROXY_MASK;
				newsrv->conn_src.opts |= CO_SRC_TPROXY_CLI;
			}
			else if (!strcmp(args[*cur_arg + 1], "clientip")) {
				newsrv->conn_src.opts &= ~CO_SRC_TPROXY_MASK;
				newsrv->conn_src.opts |= CO_SRC_TPROXY_CIP;
			}
			else if (!strncmp(args[*cur_arg + 1], "hdr_ip(", 7)) {
				char *name, *end;

				name = args[*cur_arg + 1] + 7;
				while (isspace((unsigned char)*name))
					name++;

				end = name;
				while (*end && !isspace((unsigned char)*end) && *end != ',' && *end != ')')
					end++;

				newsrv->conn_src.opts &= ~CO_SRC_TPROXY_MASK;
				newsrv->conn_src.opts |= CO_SRC_TPROXY_DYN;
				free(newsrv->conn_src.bind_hdr_name);
				newsrv->conn_src.bind_hdr_name = calloc(1, end - name + 1);
				newsrv->conn_src.bind_hdr_len = end - name;
				memcpy(newsrv->conn_src.bind_hdr_name, name, end - name);
				newsrv->conn_src.bind_hdr_name[end - name] = '\0';
				newsrv->conn_src.bind_hdr_occ = -1;

				/* now look for an occurrence number */
				while (isspace((unsigned char)*end))
					end++;
				if (*end == ',') {
					end++;
					name = end;
					if (*end == '-')
						end++;
					while (isdigit((unsigned char)*end))
						end++;
					newsrv->conn_src.bind_hdr_occ = strl2ic(name, end - name);
				}

				if (newsrv->conn_src.bind_hdr_occ < -MAX_HDR_HISTORY) {
					ha_alert("usesrc hdr_ip(name,num) does not support negative "
					         "occurrences values smaller than %d.\n", MAX_HDR_HISTORY);
					goto err;
				}
			}
			else {
				struct sockaddr_storage *sk;
				int port1, port2;

				/* 'sk' is statically allocated (no need to be freed). */
				sk = str2sa_range(args[*cur_arg + 1], NULL, &port1, &port2, &errmsg, NULL, NULL, 1);
				if (!sk) {
					ha_alert("'%s %s' : %s\n", args[*cur_arg], args[*cur_arg + 1], errmsg);
					goto err;
				}

				proto = protocol_by_family(sk->ss_family);
				if (!proto || !proto->connect) {
					ha_alert("'%s %s' : connect() not supported for this address family.\n",
					         args[*cur_arg], args[*cur_arg + 1]);
					goto err;
				}

				if (port1 != port2) {
					ha_alert("'%s' : port ranges and offsets are not allowed in '%s'\n",
					         args[*cur_arg], args[*cur_arg + 1]);
					goto err;
				}
				newsrv->conn_src.tproxy_addr = *sk;
				newsrv->conn_src.opts |= CO_SRC_TPROXY_ADDR;
			}
			global.last_checks |= LSTCHK_NETADM;
			*cur_arg += 2;
			continue;
#else	/* no TPROXY support */
			ha_alert("'usesrc' not allowed here because support for TPROXY was not compiled in.\n");
			goto err;
#endif /* defined(CONFIG_HAP_TRANSPARENT) */
		} /* "usesrc" */

		if (!strcmp(args[*cur_arg], "interface")) { /* specifically bind to this interface */
#ifdef SO_BINDTODEVICE
			if (!*args[*cur_arg + 1]) {
				ha_alert("'%s' : missing interface name.\n", args[0]);
				goto err;
			}
			free(newsrv->conn_src.iface_name);
			newsrv->conn_src.iface_name = strdup(args[*cur_arg + 1]);
			newsrv->conn_src.iface_len = strlen(newsrv->conn_src.iface_name);
			global.last_checks |= LSTCHK_NETADM;
#else
			ha_alert("'%s' : '%s' option not implemented.\n", args[0], args[*cur_arg]);
			goto err;
#endif
			*cur_arg += 2;
			continue;
		}
		/* this keyword is not an option of "source" */
		break;
	} /* while */

	return 0;

 err:
	free(errmsg);
	return ERR_ALERT | ERR_FATAL;
}

/* Parse the "stick" server keyword */
static int srv_parse_stick(char **args, int *cur_arg,
                           struct proxy *curproxy, struct server *newsrv, char **err)
{
	newsrv->flags &= ~SRV_F_NON_STICK;
	return 0;
}

/* Parse the "track" server keyword */
static int srv_parse_track(char **args, int *cur_arg,
                           struct proxy *curproxy, struct server *newsrv, char **err)
{
	char *arg;

	arg = args[*cur_arg + 1];
	if (!*arg) {
		memprintf(err, "'track' expects [<proxy>/]<server> as argument.\n");
		return ERR_ALERT | ERR_FATAL;
	}

	free(newsrv->trackit);
	newsrv->trackit = strdup(arg);

	return 0;
}

/* Parse the "socks4" server keyword */
static int srv_parse_socks4(char **args, int *cur_arg,
                            struct proxy *curproxy, struct server *newsrv, char **err)
{
	char *errmsg;
	int port_low, port_high;
	struct sockaddr_storage *sk;
	struct protocol *proto;

	errmsg = NULL;

	if (!*args[*cur_arg + 1]) {
		memprintf(err, "'%s' expects <addr>:<port> as argument.\n", args[*cur_arg]);
		goto err;
	}

	/* 'sk' is statically allocated (no need to be freed). */
	sk = str2sa_range(args[*cur_arg + 1], NULL, &port_low, &port_high, &errmsg, NULL, NULL, 1);
	if (!sk) {
		memprintf(err, "'%s %s' : %s\n", args[*cur_arg], args[*cur_arg + 1], errmsg);
		goto err;
	}

	proto = protocol_by_family(sk->ss_family);
	if (!proto || !proto->connect) {
		ha_alert("'%s %s' : connect() not supported for this address family.\n", args[*cur_arg], args[*cur_arg + 1]);
		goto err;
	}

	newsrv->flags |= SRV_F_SOCKS4_PROXY;
	newsrv->socks4_addr = *sk;

	if (port_low != port_high) {
		ha_alert("'%s' does not support port offsets (found '%s').\n", args[*cur_arg], args[*cur_arg + 1]);
		goto err;
	}

	if (!port_low) {
		ha_alert("'%s': invalid port range %d-%d.\n", args[*cur_arg], port_low, port_high);
		goto err;
	}

	return 0;

 err:
	free(errmsg);
	return ERR_ALERT | ERR_FATAL;
}

/* parse the "tfo" server keyword */
static int srv_parse_tfo(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
{
	newsrv->flags |= SRV_F_FASTOPEN;
	return 0;
}

/* Shutdown all connections of a server. The caller must pass a termination
 * code in <why>, which must be one of SF_ERR_* indicating the reason for the
 * shutdown.
 *
 * Must be called with the server lock held.
 */
void srv_shutdown_streams(struct server *srv, int why)
{
	struct stream *stream, *stream_bck;

	list_for_each_entry_safe(stream, stream_bck, &srv->actconns, by_srv)
		if (stream->srv_conn == srv)
			stream_shutdown(stream, why);
}

/* Shutdown all connections of all backup servers of a proxy. The caller must
 * pass a termination code in <why>, which must be one of SF_ERR_* indicating
 * the reason for the shutdown.
 *
 * Must be called with the server lock held.
 */
void srv_shutdown_backup_streams(struct proxy *px, int why)
{
	struct server *srv;

	for (srv = px->srv; srv != NULL; srv = srv->next)
		if (srv->flags & SRV_F_BACKUP)
			srv_shutdown_streams(srv, why);
}

/* Appends some information to a message string related to a server going UP or
 * DOWN. If both <forced> and <reason> are null and the server tracks another
 * one, a " via " information will be provided to know where the status came from.
2017-10-19 08:42:30 -04:00
 * If <check> is non-null, an entire string describing the check result will be
 * appended after a comma and a space (eg: to report some information from the
 * check that changed the state). In the other case, the string will be built
 * using the check results stored into the struct server if present.
 * If <xferred> is non-negative, some information about requeued sessions is
2014-05-20 15:55:30 -04:00
 * provided.
2018-08-21 05:54:26 -04:00
 *
 * Must be called with the server lock held.
2014-05-16 05:25:16 -04:00
 */
2018-07-13 05:56:34 -04:00
void srv_append_status(struct buffer *msg, struct server *s,
                       struct check *check, int xferred, int forced)
2014-05-16 05:25:16 -04:00
{
2017-10-19 08:42:30 -04:00
	short status = s->op_st_chg.status;
	short code = s->op_st_chg.code;
	long duration = s->op_st_chg.duration;
	char *desc = s->op_st_chg.reason;

	if (check) {
		status = check->status;
		code = check->code;
		duration = check->duration;
		desc = check->desc;
	}

	if (status != -1) {
		chunk_appendf(msg, ", reason: %s", get_check_status_description(status));

		if (status >= HCHK_STATUS_L57DATA)
			chunk_appendf(msg, ", code: %d", code);

		if (desc && *desc) {
2018-07-13 05:56:34 -04:00
			struct buffer src;

2017-10-19 08:42:30 -04:00
			chunk_appendf(msg, ", info: \"");
			chunk_initlen(&src, desc, 0, strlen(desc));
			chunk_asciiencode(msg, &src, '"');
			chunk_appendf(msg, "\"");
		}

		if (duration >= 0)
			chunk_appendf(msg, ", check duration: %ldms", duration);
	}
	else if (desc && *desc) {
		chunk_appendf(msg, ", %s", desc);
	}
	else if (!forced && s->track) {
2014-05-20 15:55:30 -04:00
		chunk_appendf(msg, " via %s/%s", s->track->proxy->id, s->track->id);
2017-10-19 08:42:30 -04:00
	}

2014-05-16 05:25:16 -04:00
	if (xferred >= 0) {
2017-08-31 08:41:55 -04:00
		if (s->next_state == SRV_ST_STOPPED)
2014-05-16 05:25:16 -04:00
			chunk_appendf(msg, ". %d active and %d backup servers left.%s"
				" %d sessions active, %d requeued, %d remaining in queue",
				s->proxy->srv_act, s->proxy->srv_bck,
				(s->proxy->srv_bck && !s->proxy->srv_act) ? " Running on backup." : "",
				s->cur_sess, xferred, s->nbpend);
		else
			chunk_appendf(msg, ". %d active and %d backup servers online.%s"
				" %d sessions requeued, %d total in queue",
				s->proxy->srv_act, s->proxy->srv_bck,
				(s->proxy->srv_bck && !s->proxy->srv_act) ? " Running on backup." : "",
				xferred, s->nbpend);
	}
}
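For illustration, here is a hedged sketch of how a state-change message is typically assembled around this helper; the surrounding calls (get_trash_chunk(), chunk_printf(), send_log()) are assumptions based on the rest of HAProxy and are not part of this function:

/* Sketch: build a log line for a server going DOWN, then let srv_append_status()
 * add the reason and the queue counters. The caller is assumed to hold the
 * server lock, as required above.
 */
static void example_log_srv_down(struct server *s, int xferred)
{
	struct buffer *tmptrash = get_trash_chunk();

	chunk_printf(tmptrash, "Server %s/%s is DOWN", s->proxy->id, s->id);
	srv_append_status(tmptrash, s, NULL, xferred, 0); /* no check result, not forced */
	send_log(s->proxy, LOG_ALERT, "%s.\n", tmptrash->area);
}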
2017-10-19 08:42:30 -04:00
/* Marks server <s> down, regardless of its checks' statuses. The server is
 * registered in a list to postpone the counting of the remaining servers on
 * the proxy and transfers queued streams whenever possible to other servers at
 * a sync point. Maintenance servers are ignored. It stores the <reason> if
 * non-null as the reason for going down or the available data from the check
 * struct to recompute this reason later.
2018-08-21 05:54:26 -04:00
 *
 * Must be called with the server lock held.
2014-05-20 16:25:12 -04:00
 */
2017-10-19 08:42:30 -04:00
void srv_set_stopped(struct server *s, const char *reason, struct check *check)
2014-05-20 16:25:12 -04:00
{
	struct server *srv;

2017-10-03 08:46:45 -04:00
	if ((s->cur_admin & SRV_ADMF_MAINT) || s->next_state == SRV_ST_STOPPED)
2014-05-20 16:25:12 -04:00
		return;

2017-08-31 08:41:55 -04:00
	s->next_state = SRV_ST_STOPPED;
2017-10-19 08:42:30 -04:00
	*s->op_st_chg.reason = 0;
	s->op_st_chg.status = -1;
	if (reason) {
		strlcpy2(s->op_st_chg.reason, reason, sizeof(s->op_st_chg.reason));
	}
	else if (check) {
2017-11-20 15:33:21 -05:00
		strlcpy2(s->op_st_chg.reason, check->desc, sizeof(s->op_st_chg.reason));
2017-10-19 08:42:30 -04:00
		s->op_st_chg.code = check->code;
		s->op_st_chg.status = check->status;
		s->op_st_chg.duration = check->duration;
	}
2014-05-20 16:25:12 -04:00

2018-08-21 02:22:26 -04:00
	/* propagate changes */
	srv_update_status(s);

2017-10-23 08:39:51 -04:00
	for (srv = s->trackers; srv; srv = srv->tracknext) {
2017-11-07 04:42:54 -05:00
		HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
2017-10-19 08:42:30 -04:00
		srv_set_stopped(srv, NULL, NULL);
2017-11-07 04:42:54 -05:00
		HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
2017-10-23 08:39:51 -04:00
	}
2014-05-20 16:25:12 -04:00
}
2014-05-20 16:46:35 -04:00
/* Marks server <s> up regardless of its checks' statuses and provided it isn't
2017-10-19 08:42:30 -04:00
 * in maintenance. The server is registered in a list to postpone the counting
 * of the remaining servers on the proxy and tries to grab requests from the
 * proxy at a sync point. Maintenance servers are ignored. It stores the
 * <reason> if non-null as the reason for going up or the available data
 * from the check struct to recompute this reason later.
2018-08-21 05:54:26 -04:00
 *
 * Must be called with the server lock held.
2014-05-20 16:46:35 -04:00
 */
2017-10-19 08:42:30 -04:00
void srv_set_running(struct server *s, const char *reason, struct check *check)
2014-05-20 16:46:35 -04:00
{
	struct server *srv;

2017-10-03 08:46:45 -04:00
	if (s->cur_admin & SRV_ADMF_MAINT)
2014-05-20 16:46:35 -04:00
		return;

2017-08-31 08:41:55 -04:00
	if (s->next_state == SRV_ST_STARTING || s->next_state == SRV_ST_RUNNING)
2014-05-20 16:46:35 -04:00
		return;

2017-08-31 08:41:55 -04:00
	s->next_state = SRV_ST_STARTING;
2017-10-19 08:42:30 -04:00
	*s->op_st_chg.reason = 0;
	s->op_st_chg.status = -1;
	if (reason) {
		strlcpy2(s->op_st_chg.reason, reason, sizeof(s->op_st_chg.reason));
	}
	else if (check) {
2017-11-20 15:33:21 -05:00
		strlcpy2(s->op_st_chg.reason, check->desc, sizeof(s->op_st_chg.reason));
2017-10-19 08:42:30 -04:00
		s->op_st_chg.code = check->code;
		s->op_st_chg.status = check->status;
		s->op_st_chg.duration = check->duration;
	}
2014-05-20 16:46:35 -04:00

2017-10-03 08:46:45 -04:00
	if (s->slowstart <= 0)
		s->next_state = SRV_ST_RUNNING;
2014-05-20 16:46:35 -04:00

2018-08-21 02:22:26 -04:00
	/* propagate changes */
	srv_update_status(s);

2017-10-23 08:39:51 -04:00
	for (srv = s->trackers; srv; srv = srv->tracknext) {
2017-11-07 04:42:54 -05:00
		HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
2017-10-19 08:42:30 -04:00
		srv_set_running(srv, NULL, NULL);
2017-11-07 04:42:54 -05:00
		HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
2017-10-23 08:39:51 -04:00
	}
2014-05-20 16:46:35 -04:00
}
2014-05-21 07:54:57 -04:00
/* Marks server <s> stopping regardless of its checks' statuses and provided it
2017-10-19 08:42:30 -04:00
 * isn't in maintenance. The server is registered in a list to postpone the
 * counting of the remaining servers on the proxy and tries to grab requests
 * from the proxy. Maintenance servers are ignored. It stores the
 * <reason> if non-null as the reason for going down or the available data
 * from the check struct to recompute this reason later.
2014-05-21 07:54:57 -04:00
 * Note that it makes use of the trash to build the log strings, so <reason>
 * must not be placed there.
2018-08-21 05:54:26 -04:00
 *
 * Must be called with the server lock held.
2014-05-21 07:54:57 -04:00
 */
2017-10-19 08:42:30 -04:00
void srv_set_stopping(struct server *s, const char *reason, struct check *check)
2014-05-21 07:54:57 -04:00
{
	struct server *srv;

2017-10-03 08:46:45 -04:00
	if (s->cur_admin & SRV_ADMF_MAINT)
2014-05-21 07:54:57 -04:00
		return;

2017-08-31 08:41:55 -04:00
	if (s->next_state == SRV_ST_STOPPING)
2014-05-21 07:54:57 -04:00
		return;

2017-08-31 08:41:55 -04:00
	s->next_state = SRV_ST_STOPPING;
2017-10-19 08:42:30 -04:00
	*s->op_st_chg.reason = 0;
	s->op_st_chg.status = -1;
	if (reason) {
		strlcpy2(s->op_st_chg.reason, reason, sizeof(s->op_st_chg.reason));
	}
	else if (check) {
2017-11-20 15:33:21 -05:00
		strlcpy2(s->op_st_chg.reason, check->desc, sizeof(s->op_st_chg.reason));
2017-10-19 08:42:30 -04:00
		s->op_st_chg.code = check->code;
		s->op_st_chg.status = check->status;
		s->op_st_chg.duration = check->duration;
	}
2014-05-21 07:54:57 -04:00

2018-08-21 02:22:26 -04:00
	/* propagate changes */
	srv_update_status(s);

2017-10-23 08:39:51 -04:00
	for (srv = s->trackers; srv; srv = srv->tracknext) {
2017-11-07 04:42:54 -05:00
		HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
2017-10-19 08:42:30 -04:00
		srv_set_stopping(srv, NULL, NULL);
2018-01-24 15:49:41 -05:00
		HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
2017-10-23 08:39:51 -04:00
	}
2014-05-21 07:54:57 -04:00
}
2014-05-20 16:46:35 -04:00
2014-05-22 10:14:34 -04:00
/* Enables admin flag <mode> (among SRV_ADMF_*) on server <s>. This is used to
 * enforce either maint mode or drain mode. It is not allowed to set more than
 * one flag at once. The equivalent "inherited" flag is propagated to all
 * tracking servers. Maintenance mode disables health checks (but not agent
 * checks). When either the flag is already set or no flag is passed, nothing
2016-11-07 09:53:43 -05:00
 * is done. If <cause> is non-null, it will be displayed at the end of the log
 * lines to justify the state change.
2018-08-21 05:54:26 -04:00
 *
 * Must be called with the server lock held.
2014-05-16 05:25:16 -04:00
 */
2016-11-07 09:53:43 -05:00
void srv_set_admin_flag(struct server *s, enum srv_admin mode, const char *cause)
2014-05-16 05:25:16 -04:00
{
	struct server *srv;

	if (!mode)
		return;

	/* stop going down as soon as we meet a server already in the same state */
2017-08-31 08:41:55 -04:00
	if (s->next_admin & mode)
2014-05-16 05:25:16 -04:00
		return;

2017-08-31 08:41:55 -04:00
	s->next_admin |= mode;
2017-10-03 08:46:45 -04:00
	if (cause)
		strlcpy2(s->adm_st_chg_cause, cause, sizeof(s->adm_st_chg_cause));

2018-08-21 02:22:26 -04:00
	/* propagate changes */
	srv_update_status(s);

2014-05-22 10:14:34 -04:00
	/* stop going down if the equivalent flag was already present (forced or inherited) */
2017-08-31 08:41:55 -04:00
	if (((mode & SRV_ADMF_MAINT) && (s->next_admin & ~mode & SRV_ADMF_MAINT)) ||
	    ((mode & SRV_ADMF_DRAIN) && (s->next_admin & ~mode & SRV_ADMF_DRAIN)))
2018-08-21 02:22:26 -04:00
		return;

2014-05-22 10:14:34 -04:00
	/* compute the inherited flag to propagate */
	if (mode & SRV_ADMF_MAINT)
		mode = SRV_ADMF_IMAINT;
	else if (mode & SRV_ADMF_DRAIN)
		mode = SRV_ADMF_IDRAIN;

2017-10-23 08:39:51 -04:00
	for (srv = s->trackers; srv; srv = srv->tracknext) {
2017-11-07 04:42:54 -05:00
		HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
2016-11-07 09:53:43 -05:00
		srv_set_admin_flag(srv, mode, cause);
2018-01-24 15:49:41 -05:00
		HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
2017-10-23 08:39:51 -04:00
	}
2014-05-16 05:25:16 -04:00
}
2014-05-22 10:14:34 -04:00
/* Disables admin flag <mode> (among SRV_ADMF_*) on server <s>. This is used to
 * stop enforcing either maint mode or drain mode. It is not allowed to set more
 * than one flag at once. The equivalent "inherited" flag is propagated to all
 * tracking servers. Leaving maintenance mode re-enables health checks. When
 * either the flag is already cleared or no flag is passed, nothing is done.
2018-08-21 05:54:26 -04:00
 *
 * Must be called with the server lock held.
2014-05-16 05:25:16 -04:00
 */
2014-05-22 10:14:34 -04:00
void srv_clr_admin_flag(struct server *s, enum srv_admin mode)
2014-05-16 05:25:16 -04:00
{
	struct server *srv;

	if (!mode)
		return;

	/* stop going down as soon as we see the flag is not there anymore */
2017-08-31 08:41:55 -04:00
	if (!(s->next_admin & mode))
2014-05-16 05:25:16 -04:00
		return;

2017-08-31 08:41:55 -04:00
	s->next_admin &= ~mode;
2014-05-16 05:25:16 -04:00

2018-08-21 02:22:26 -04:00
	/* propagate changes */
	srv_update_status(s);

2014-05-22 10:14:34 -04:00
	/* stop going down if the equivalent flag is still present (forced or inherited) */
2017-08-31 08:41:55 -04:00
	if (((mode & SRV_ADMF_MAINT) && (s->next_admin & SRV_ADMF_MAINT)) ||
	    ((mode & SRV_ADMF_DRAIN) && (s->next_admin & SRV_ADMF_DRAIN)))
2018-08-21 02:22:26 -04:00
		return;

2014-05-16 05:25:16 -04:00
2014-05-22 10:14:34 -04:00
	if (mode & SRV_ADMF_MAINT)
		mode = SRV_ADMF_IMAINT;
	else if (mode & SRV_ADMF_DRAIN)
		mode = SRV_ADMF_IDRAIN;
2014-05-16 05:25:16 -04:00

2017-10-23 08:39:51 -04:00
	for (srv = s->trackers; srv; srv = srv->tracknext) {
2017-11-07 04:42:54 -05:00
		HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
2014-05-22 10:14:34 -04:00
		srv_clr_admin_flag(srv, mode);
2017-11-07 04:42:54 -05:00
		HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
2017-10-23 08:39:51 -04:00
	}
2014-05-16 05:25:16 -04:00
}
BUG/MEDIUM: servers: properly propagate the maintenance states during startup
Right now there is an issue with the way the maintenance flags are
propagated upon startup. They are not propagated, just copied from the
tracked server. This implies that depending on the server's order, some
tracking servers may not be marked down. For example this configuration
does not work as expected:
server s1 1.1.1.1:8000 track s2
server s2 1.1.1.1:8000 track s3
server s3 1.1.1.1:8000 track s4
server s4 wtap:8000 check inter 1s disabled
It results in s1/s2 being up, and s3/s4 being down, while all of them
should be down.
The only clean way to process this is to run through all "root" servers
(those not tracking any other server), and to propagate their state down
to all their trackers. This is the same algorithm used to propagate the
state changes. It has to be done both to compute the IDRAIN flag and the
IMAINT flag. However, doing so requires that tracking servers are not
marked as inherited maintenance anymore while parsing the configuration
(and given that it is wrong, better drop it).
This fix also addresses another side effect of the bug above which is
that the IDRAIN/IMAINT flags are stored in the state files, and if
restored while the tracked server doesn't have the equivalent flag,
the servers may end up in a situation where it's impossible to remove
these flags. For example in the configuration above, after removing
"disabled" on server s4, the other servers would have remained down,
which is no longer the case with this fix. Similarly, the combination of IMAINT
or IDRAIN with their respective forced modes was not accepted on
reload, which is wrong as well.
This bug has been present at least since 1.5, maybe even 1.4 (it came
with tracking support). The fix needs to be backported there, though
the srv-state parts are irrelevant.
This commit relies on previous patch to silence warnings on startup.
2016-11-03 14:22:19 -04:00
/* principle: propagate maint and drain to tracking servers. This is useful
 * upon startup so that inherited states are correct.
 */
static void srv_propagate_admin_state(struct server *srv)
{
	struct server *srv2;

	if (!srv->trackers)
		return;

	for (srv2 = srv->trackers; srv2; srv2 = srv2->tracknext) {
2017-11-07 04:42:54 -05:00
		HA_SPIN_LOCK(SERVER_LOCK, &srv2->lock);
2017-08-31 08:41:55 -04:00
		if (srv->next_admin & (SRV_ADMF_MAINT | SRV_ADMF_CMAINT))
2016-11-07 09:53:43 -05:00
			srv_set_admin_flag(srv2, SRV_ADMF_IMAINT, NULL);
2016-11-03 14:22:19 -04:00
2017-08-31 08:41:55 -04:00
		if (srv->next_admin & SRV_ADMF_DRAIN)
2016-11-07 09:53:43 -05:00
			srv_set_admin_flag(srv2, SRV_ADMF_IDRAIN, NULL);
2017-11-07 04:42:54 -05:00
		HA_SPIN_UNLOCK(SERVER_LOCK, &srv2->lock);
2016-11-03 14:22:19 -04:00
	}
}
/* Compute and propagate the admin states for all servers in proxy <px>.
 * Only servers *not* tracking another one are considered, because other
 * ones will be handled when the server they track is visited.
 */
void srv_compute_all_admin_states(struct proxy *px)
{
	struct server *srv;

	for (srv = px->srv; srv; srv = srv->next) {
		if (srv->track)
			continue;
		srv_propagate_admin_state(srv);
	}
}
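A short usage sketch for the startup path follows; the proxies_list walk is an assumption here (only srv_compute_all_admin_states() itself is defined above):

/* Sketch: once configuration parsing is finished, visit every proxy. Only
 * servers that do not track another one are handled directly; their trackers
 * are reached recursively through srv_propagate_admin_state().
 */
static void example_propagate_all_admin_states(void)
{
	struct proxy *px;

	for (px = proxies_list; px; px = px->next)
		srv_compute_all_admin_states(px);
}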
2012-10-10 11:51:05 -04:00
/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted, doing so helps
 * all code contributors.
 * Optional keywords are also declared with a NULL ->parse() function so that
 * the config parser can report an appropriate error when a known keyword was
 * not enabled.
2017-04-16 11:14:14 -04:00
 * Note: -1 as ->skip value means that the number of arguments is variable.
2012-10-10 11:51:05 -04:00
 */
static struct srv_kw_list srv_kws = { "ALL", { }, {
2017-03-10 09:36:14 -05:00
	{ "backup",            srv_parse_backup,            0, 1 }, /* Flag as backup server */
2017-03-15 04:13:33 -04:00
	{ "cookie",            srv_parse_cookie,            1, 1 }, /* Assign a cookie to the server */
2017-03-21 06:53:54 -04:00
	{ "disabled",          srv_parse_disabled,          0, 1 }, /* Start the server in 'disabled' state */
	{ "enabled",           srv_parse_enabled,           0, 1 }, /* Start the server in 'enabled' state */
2017-03-10 09:36:14 -05:00
	{ "id",                srv_parse_id,                1, 0 }, /* set id# of server */
2019-01-23 04:21:49 -05:00
	{ "max-reuse",         srv_parse_max_reuse,         1, 1 }, /* Set the max number of requests on a connection, -1 means unlimited */
2017-03-16 12:17:36 -04:00
	{ "namespace",         srv_parse_namespace,         1, 1 }, /* Namespace the server socket belongs to (if supported) */
2017-03-10 09:36:14 -05:00
	{ "no-backup",         srv_parse_no_backup,         0, 1 }, /* Flag as non-backup server */
2017-03-10 10:40:00 -05:00
	{ "no-send-proxy",     srv_parse_no_send_proxy,     0, 1 }, /* Disable use of PROXY V1 protocol */
	{ "no-send-proxy-v2",  srv_parse_no_send_proxy_v2,  0, 1 }, /* Disable use of PROXY V2 protocol */
2019-07-04 08:19:06 -04:00
	{ "no-tfo",            srv_parse_no_tfo,            0, 1 }, /* Disable use of TCP Fast Open */
2017-03-10 09:50:49 -05:00
	{ "non-stick",         srv_parse_non_stick,         0, 1 }, /* Disable stick-table persistence */
2017-03-15 03:55:39 -04:00
	{ "observe",           srv_parse_observe,           1, 1 }, /* Enables health adjusting based on observing communication with the server */
2018-12-10 12:30:32 -05:00
	{ "pool-max-conn",     srv_parse_pool_max_conn,     1, 1 }, /* Set the max number of orphan idle connections, 0 means unlimited */
2018-12-14 12:15:36 -05:00
	{ "pool-purge-delay",  srv_parse_pool_purge_delay,  1, 1 }, /* Set the time before we destroy orphan idle connections, defaults to 1s */
2018-04-10 08:45:45 -04:00
	{ "proto",             srv_parse_proto,             1, 1 }, /* Set the proto to use for all outgoing connections */
2018-02-01 09:20:32 -05:00
	{ "proxy-v2-options",  srv_parse_proxy_v2_options,  1, 1 }, /* options for send-proxy-v2 */
2017-03-14 11:42:49 -04:00
	{ "redir",             srv_parse_redir,             1, 1 }, /* Enable redirection mode */
2017-03-10 10:40:00 -05:00
	{ "send-proxy",        srv_parse_send_proxy,        0, 1 }, /* Enforce use of PROXY V1 protocol */
	{ "send-proxy-v2",     srv_parse_send_proxy_v2,     0, 1 }, /* Enforce use of PROXY V2 protocol */
2017-04-16 11:14:14 -04:00
	{ "source",            srv_parse_source,           -1, 1 }, /* Set the source address to be used to connect to the server */
2017-03-10 09:50:49 -05:00
	{ "stick",             srv_parse_stick,             0, 1 }, /* Enable stick-table persistence */
2019-07-04 07:34:10 -04:00
	{ "tfo",               srv_parse_tfo,               0, 1 }, /* enable TCP Fast Open of server */
2017-03-14 10:21:31 -04:00
	{ "track",             srv_parse_track,             1, 1 }, /* Set the current state of the server, tracking another one */
2019-05-22 07:44:48 -04:00
	{ "socks4",            srv_parse_socks4,            1, 1 }, /* Set the socks4 proxy of the server */
2012-10-10 11:51:05 -04:00
	{ NULL, NULL, 0 },
}};
2018-11-25 13:14:37 -05:00
INITCALL1(STG_REGISTER, srv_register_keywords, &srv_kws);
2012-10-10 11:51:05 -04:00
2013-11-21 05:22:01 -05:00
/* Recomputes the server's eweight based on its state, uweight, the current time,
2020-05-05 15:53:22 -04:00
 * and the proxy's algorithm. To be used after updating sv->uweight. The warmup
2018-08-02 05:48:52 -04:00
 * state is automatically disabled if the time is elapsed. If <must_update> is
 * not zero, the update will be propagated immediately.
2018-08-21 05:54:26 -04:00
 *
 * Must be called with the server lock held.
2013-11-21 05:22:01 -05:00
 */
2018-08-02 05:48:52 -04:00
void server_recalc_eweight(struct server *sv, int must_update)
2013-11-21 05:22:01 -05:00
{
	struct proxy *px = sv->proxy;
	unsigned w;

	if (now.tv_sec < sv->last_change || now.tv_sec >= sv->last_change + sv->slowstart) {
		/* go to full throttle if the slowstart interval is reached */
2017-08-31 08:41:55 -04:00
		if (sv->next_state == SRV_ST_STARTING)
			sv->next_state = SRV_ST_RUNNING;
2013-11-21 05:22:01 -05:00
	}

	/* We must take care of not pushing the server to full throttle during slow starts.
	 * It must also start immediately, at least at the minimal step when leaving maintenance.
	 */
2017-08-31 08:41:55 -04:00
	if ((sv->next_state == SRV_ST_STARTING) && (px->lbprm.algo & BE_LB_PROP_DYN))
2013-11-21 05:22:01 -05:00
		w = (px->lbprm.wdiv * (now.tv_sec - sv->last_change) + sv->slowstart) / sv->slowstart;
	else
		w = px->lbprm.wdiv;

2017-08-31 08:41:55 -04:00
	sv->next_eweight = (sv->uweight * w + px->lbprm.wmult - 1) / px->lbprm.wmult;
2013-11-21 05:22:01 -05:00

2018-08-02 05:48:52 -04:00
	/* propagate changes only if needed (i.e. not recursively) */
2018-08-21 13:54:09 -04:00
	if (must_update)
2018-08-02 05:48:52 -04:00
		srv_update_status(sv);
2013-11-21 05:22:01 -05:00
}
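As a rough worked example of the slowstart ramp above (all numbers are chosen for illustration only):

/* slowstart = 60s, 15s elapsed since last_change, lbprm.wdiv = 64,
 * lbprm.wmult = 64, uweight = 100:
 *   w            = (64 * 15 + 60) / 60      = 17   (roughly 1/4 of wdiv)
 *   next_eweight = (100 * 17 + 64 - 1) / 64 = 27   (versus 100 once fully warmed)
 */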
2013-02-11 20:45:51 -05:00
/*
 * Parses weight_str and configures sv accordingly.
 * Returns NULL on success, error message string otherwise.
2018-08-21 05:54:26 -04:00
 *
 * Must be called with the server lock held.
2013-02-11 20:45:51 -05:00
 */
const char *server_parse_weight_change_request(struct server *sv,
                                               const char *weight_str)
{
	struct proxy *px;
2013-02-11 20:45:53 -05:00
	long int w;
	char *end;

2013-02-11 20:45:51 -05:00
	px = sv->proxy;

	/* if the weight is terminated with '%', it is set relative to
	 * the initial weight, otherwise it is absolute.
	 */
	if (!*weight_str)
		return "Require <weight> or <weight%>.\n";

2013-02-11 20:45:53 -05:00
	w = strtol(weight_str, &end, 10);
	if (end == weight_str)
		return "Weight string empty or preceded by garbage";
	else if (end[0] == '%' && end[1] == '\0') {
2013-02-11 20:45:52 -05:00
		if (w < 0)
2013-02-11 20:45:51 -05:00
			return "Relative weight must be positive.\n";
2013-02-11 20:45:52 -05:00
		/* Avoid integer overflow */
		if (w > 25600)
			w = 25600;
2013-02-11 20:45:51 -05:00
		w = sv->iweight * w / 100;
2013-02-11 20:45:52 -05:00
		if (w > 256)
			w = 256;
2013-02-11 20:45:51 -05:00
	}
	else if (w < 0 || w > 256)
		return "Absolute weight can only be between 0 and 256 inclusive.\n";
2013-02-11 20:45:53 -05:00
	else if (end[0] != '\0')
		return "Trailing garbage in weight string";
2013-02-11 20:45:51 -05:00
	if (w && w != sv->iweight && !(px->lbprm.algo & BE_LB_PROP_DYN))
		return "Backend is using a static LB algorithm and only accepts weights '0%' and '100%'.\n";

	sv->uweight = w;
2018-08-02 05:48:52 -04:00
	server_recalc_eweight(sv, 1);

2013-02-11 20:45:51 -05:00
	return NULL;
}
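A hedged usage sketch: this is how a CLI-style caller is expected to apply a change, with the server already looked up and its lock held; "50%" is relative to the configured weight, while a bare number would be absolute:

/* Sketch only: apply a runtime weight change and report a possible refusal. */
static void example_set_weight(struct server *sv)
{
	const char *warning = server_parse_weight_change_request(sv, "50%");

	if (warning)
		ha_warning("%s", warning); /* e.g. static LB algorithm refusing the change */
}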
2015-04-13 16:54:33 -04:00
/*
2016-02-24 02:25:39 -05:00
 * Parses <addr_str> and configures <sv> accordingly. <updater> identifies
 * the source of the change in the associated log message.
2015-04-13 16:54:33 -04:00
 * Returns:
 *  - error string on error
 *  - NULL on success
2018-08-21 05:54:26 -04:00
 *
 * Must be called with the server lock held.
2015-04-13 16:54:33 -04:00
 */
const char *server_parse_addr_change_request(struct server *sv,
2016-02-24 02:25:39 -05:00
                                             const char *addr_str, const char *updater)
2015-04-13 16:54:33 -04:00
{
	unsigned char ip[INET6_ADDRSTRLEN];

	if (inet_pton(AF_INET6, addr_str, ip)) {
2016-02-24 02:25:39 -05:00
		update_server_addr(sv, ip, AF_INET6, updater);
2015-04-13 16:54:33 -04:00
		return NULL;
	}
	if (inet_pton(AF_INET, addr_str, ip)) {
2016-02-24 02:25:39 -05:00
		update_server_addr(sv, ip, AF_INET, updater);
2015-04-13 16:54:33 -04:00
		return NULL;
	}
	return "Could not understand IP address format.\n";
}
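A similar hedged sketch for a runtime address change; the updater string only tags the resulting log message, and the literal values used here are examples:

/* Sketch only: change the server address from a management path, lock held. */
static void example_set_addr(struct server *sv)
{
	const char *err = server_parse_addr_change_request(sv, "192.168.0.42", "stats socket command");

	if (err)
		ha_warning("%s", err);
}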
2018-08-21 05:54:26 -04:00
/*
 * Must be called with the server lock held.
 */
2016-04-24 17:10:06 -04:00
const char *server_parse_maxconn_change_request(struct server *sv,
                                                const char *maxconn_str)
{
	long int v;
	char *end;

	if (!*maxconn_str)
		return "Require <maxconn>.\n";

	v = strtol(maxconn_str, &end, 10);
	if (end == maxconn_str)
		return "maxconn string empty or preceded by garbage";
	else if (end[0] != '\0')
		return "Trailing garbage in maxconn string";

	if (sv->maxconn == sv->minconn) { // static maxconn
		sv->maxconn = sv->minconn = v;
	} else { // dynamic maxconn
		sv->maxconn = v;
	}

	if (may_dequeue_tasks(sv, sv->proxy))
		process_srv_queue(sv);

	return NULL;
}
2017-03-20 09:54:41 -04:00
#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
2017-03-30 11:32:36 -04:00
static struct sample_expr *srv_sni_sample_parse_expr(struct server *srv, struct proxy *px,
                                                     const char *file, int linenum, char **err)
2017-03-20 09:54:41 -04:00
{
	int idx;
	const char *args[] = {
2017-03-30 11:32:36 -04:00
		srv->sni_expr,
2017-03-20 09:54:41 -04:00
		NULL,
	};

	idx = 0;
2017-04-20 12:21:17 -04:00
	px->conf.args.ctx = ARGC_SRV;
2017-03-20 09:54:41 -04:00

2020-02-14 10:50:14 -05:00
	return sample_parse_expr((char **)args, &idx, file, linenum, err, &px->conf.args, NULL);
2017-03-30 11:32:36 -04:00
}

static int server_parse_sni_expr(struct server *newsrv, struct proxy *px, char **err)
{
	struct sample_expr *expr;

	expr = srv_sni_sample_parse_expr(newsrv, px, px->conf.file, px->conf.line, err);
2017-03-20 09:54:41 -04:00
	if (!expr) {
		memprintf(err, "error detected while parsing sni expression : %s", *err);
		return ERR_ALERT | ERR_FATAL;
	}

	if (!(expr->fetch->val & SMP_VAL_BE_SRV_CON)) {
		memprintf(err, "error detected while parsing sni expression : "
		               " fetch method '%s' extracts information from '%s', "
		               "none of which is available here.\n",
2017-03-30 11:32:36 -04:00
		          newsrv->sni_expr, sample_src_names(expr->fetch->use));
2017-03-20 09:54:41 -04:00
		return ERR_ALERT | ERR_FATAL;
	}

	px->http_needed |= !!(expr->fetch->use & SMP_USE_HTTP_ANY);
	release_sample_expr(newsrv->ssl_ctx.sni);
	newsrv->ssl_ctx.sni = expr;

	return 0;
}
#endif
2020-02-11 05:42:38 -05:00
static void display_parser_err(const char *file, int linenum, char **args, int cur_arg, int err_code, char **err)
2017-03-20 09:54:41 -04:00
{
2020-02-11 05:42:38 -05:00
	char *msg = "error encountered while processing ";
	char *quote = "'";
	char *token = args[cur_arg];

2017-03-20 09:54:41 -04:00
	if (err && *err) {
		indent_msg(err, 2);
2020-02-11 05:42:38 -05:00
		msg = *err;
		quote = "";
		token = "";
2017-03-20 09:54:41 -04:00
	}

2020-02-11 05:42:38 -05:00
	if (err_code & ERR_WARN && !(err_code & ERR_ALERT))
		ha_warning("parsing [%s:%d] : '%s %s' : %s%s%s%s.\n",
		           file, linenum, args[0], args[1],
		           msg, quote, token, quote);
2017-03-20 09:54:41 -04:00
	else
2020-02-11 05:42:38 -05:00
		ha_alert("parsing [%s:%d] : '%s %s' : %s%s%s%s.\n",
		         file, linenum, args[0], args[1],
		         msg, quote, token, quote);
2017-03-20 09:54:41 -04:00
}
2017-03-30 08:18:30 -04:00
static void srv_conn_src_sport_range_cpy(struct server *srv,
                                         struct server *src)
{
	int range_sz;

	range_sz = src->conn_src.sport_range->size;
	if (range_sz > 0) {
		srv->conn_src.sport_range = port_range_alloc_range(range_sz);
		if (srv->conn_src.sport_range != NULL) {
			int i;

			for (i = 0; i < range_sz; i++) {
				srv->conn_src.sport_range->ports[i] =
					src->conn_src.sport_range->ports[i];
			}
		}
	}
}

/*
 * Copy <src> server connection source settings to <srv> server, allocating
 * everything needed.
 */
static void srv_conn_src_cpy(struct server *srv, struct server *src)
{
	srv->conn_src.opts = src->conn_src.opts;
	srv->conn_src.source_addr = src->conn_src.source_addr;

	/* Source port range copy. */
	if (src->conn_src.sport_range != NULL)
		srv_conn_src_sport_range_cpy(srv, src);

#ifdef CONFIG_HAP_TRANSPARENT
	if (src->conn_src.bind_hdr_name != NULL) {
		srv->conn_src.bind_hdr_name = strdup(src->conn_src.bind_hdr_name);
		srv->conn_src.bind_hdr_len = strlen(src->conn_src.bind_hdr_name);
	}
	srv->conn_src.bind_hdr_occ = src->conn_src.bind_hdr_occ;
	srv->conn_src.tproxy_addr = src->conn_src.tproxy_addr;
#endif
	if (src->conn_src.iface_name != NULL)
		srv->conn_src.iface_name = strdup(src->conn_src.iface_name);
}
/*
 * Copy <src> server SSL settings to <srv> server allocating
 * everything needed.
 */
#if defined(USE_OPENSSL)
static void srv_ssl_settings_cpy(struct server *srv, struct server *src)
{
	if (src->ssl_ctx.ca_file != NULL)
		srv->ssl_ctx.ca_file = strdup(src->ssl_ctx.ca_file);
	if (src->ssl_ctx.crl_file != NULL)
		srv->ssl_ctx.crl_file = strdup(src->ssl_ctx.crl_file);
	if (src->ssl_ctx.client_crt != NULL)
		srv->ssl_ctx.client_crt = strdup(src->ssl_ctx.client_crt);

	srv->ssl_ctx.verify = src->ssl_ctx.verify;

	if (src->ssl_ctx.verify_host != NULL)
		srv->ssl_ctx.verify_host = strdup(src->ssl_ctx.verify_host);
	if (src->ssl_ctx.ciphers != NULL)
		srv->ssl_ctx.ciphers = strdup(src->ssl_ctx.ciphers);
2020-04-22 05:40:18 -04:00
	if (src->ssl_ctx.options)
		srv->ssl_ctx.options = src->ssl_ctx.options;
	if (src->ssl_ctx.methods.flags)
		srv->ssl_ctx.methods.flags = src->ssl_ctx.methods.flags;
	if (src->ssl_ctx.methods.min)
		srv->ssl_ctx.methods.min = src->ssl_ctx.methods.min;
	if (src->ssl_ctx.methods.max)
		srv->ssl_ctx.methods.max = src->ssl_ctx.methods.max;

2019-05-09 08:13:35 -04:00
#if (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L && !defined OPENSSL_IS_BORINGSSL)
2018-09-14 05:14:21 -04:00
	if (src->ssl_ctx.ciphersuites != NULL)
		srv->ssl_ctx.ciphersuites = strdup(src->ssl_ctx.ciphersuites);
#endif
2017-03-30 08:18:30 -04:00
	if (src->sni_expr != NULL)
		srv->sni_expr = strdup(src->sni_expr);

2018-11-20 17:33:50 -05:00
#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
	if (src->ssl_ctx.alpn_str) {
		srv->ssl_ctx.alpn_str = malloc(src->ssl_ctx.alpn_len);
		if (srv->ssl_ctx.alpn_str) {
			memcpy(srv->ssl_ctx.alpn_str, src->ssl_ctx.alpn_str,
			       src->ssl_ctx.alpn_len);
			srv->ssl_ctx.alpn_len = src->ssl_ctx.alpn_len;
		}
	}
#endif
#ifdef OPENSSL_NPN_NEGOTIATED
	if (src->ssl_ctx.npn_str) {
		srv->ssl_ctx.npn_str = malloc(src->ssl_ctx.npn_len);
		if (srv->ssl_ctx.npn_str) {
			memcpy(srv->ssl_ctx.npn_str, src->ssl_ctx.npn_str,
			       src->ssl_ctx.npn_len);
			srv->ssl_ctx.npn_len = src->ssl_ctx.npn_len;
		}
	}
#endif
2017-03-30 08:18:30 -04:00
}
#endif
2017-04-14 07:28:00 -04:00
/*
MAJOR/REORG: dns: DNS resolution task and requester queues
This patch is a major upgrade of the internal run-time DNS resolver in
HAProxy and it brings the following main changes:
1. DNS resolution task
Up to now, DNS resolution was triggered by the health check task.
From now, DNS resolution task is autonomous. It is started by HAProxy
right after the scheduler is available and it is woken either when a
network IO occurs for one of its nameserver or when a timeout is
matched.
From now, this means we can enable DNS resolution for a server without
enabling health checking.
2. Introduction of a dns_requester structure
Up to now, DNS resolution was purposely made for resolving server
hostnames.
The idea, is to ensure that any HAProxy internal object should be able
to trigger a DNS resolution. For this purpose, two things have to be done:
- clean up the DNS code from the server structure (this was already
quite clean actually) and clean up the server's callbacks from
manipulating too much DNS resolution
- create an agnostic structure which allows linking a DNS resolution
and a requester of any type (using obj_type enum)
3. Manage requesters through queues
Up to now, there was a unique relationship between a resolution and its
owner (aka the requester now). It's a shame, because in some cases,
multiple objects may share the same hostname and may benefit from a
resolution being performed by a third party.
This patch introduces the notion of queues, which are basically lists of
either currently running resolution or waiting ones.
The resolutions are now available as a pool, which belongs to the resolvers.
The pool has a default size of 64 resolutions per resolver and is
allocated at configuration parsing.
2017-05-22 09:17:15 -04:00
 * Prepare <srv> for hostname resolution.
2017-04-14 07:28:00 -04:00
 * May be safely called with a default server as <src> argument (without hostname).
2017-04-26 05:24:02 -04:00
 * Returns -1 in case of any allocation failure, 0 if not.
2017-04-14 07:28:00 -04:00
 */
2017-05-22 09:17:15 -04:00
static int srv_prepare_for_resolution(struct server *srv, const char *hostname)
2017-04-14 07:28:00 -04:00
{
MAJOR: dns: Refactor the DNS code
This is a huge patch with many changes, all about the DNS. Initially, the idea
was to update the DNS part to ease the threads support integration. But quickly,
I started to refactor some parts. And after several iterations, it was
impossible for me to commit the different parts atomically. So, instead of
adding tens of patches, often reworking the same parts, it was easier to merge
all my changes in a single patch. Here are all the changes made to the DNS.
First, the DNS initialization has been refactored. The DNS configuration parsing
remains untouched, in cfgparse.c. But all checks have been moved in a post-check
callback. In the function dns_finalize_config, for each resolvers, the
nameservers configuration is tested and the task used to manage DNS resolutions
is created. The links between the backend's servers and the resolvers are also
created at this step. Here no connections are kept alive. So there is no need
anymore to reopen them after HAProxy fork. Connections used to send DNS queries
will be opened on demand.
Then, the way DNS requesters are linked to a DNS resolution has been
reworked. The resolution used by a requester is now referenced into the
dns_requester structure and the resolution pointers in server and dns_srvrq
structures have been removed. wait and curr list of requesters, for a DNS
resolution, have been replaced by a single list. And finally, the way a requester
is removed from a DNS resolution has been simplified. Now everything is done in
dns_unlink_resolution.
srv_set_fqdn function has been simplified. Now, there is only 1 way to set the
server's FQDN, independently it is done by the CLI or when a SRV record is
resolved.
The static DNS resolutions pool has been replaced by a dynamic pool. This part
has been modified by Baptiste Assmann.
The way the DNS resolutions are triggered by the task or by a health-check has
been totally refactored. Now, all timeouts are respected. Especially
hold.valid. The default frequency to wake up a resolvers is now configurable
using "timeout resolve" parameter.
Now, as documented, as long as invalid responses are received, we really wait
all name servers responses before retrying.
As far as possible, resources allocated during DNS configuration parsing are
released when HAProxy is shut down.
Beside all these changes, the code has been cleaned to ease code review and the
doc has been updated.
2017-09-27 05:00:59 -04:00
	char *hostname_dn;
	int hostname_len, hostname_dn_len;

2017-04-14 07:28:00 -04:00
	if (!hostname)
2017-04-26 05:24:02 -04:00
		return 0;
2017-04-14 07:28:00 -04:00
2017-09-27 05:00:59 -04:00
	hostname_len = strlen(hostname);
2018-07-13 04:54:26 -04:00
	hostname_dn = trash.area;
2017-09-27 05:00:59 -04:00
	hostname_dn_len = dns_str_to_dn_label(hostname, hostname_len + 1,
	                                      hostname_dn, trash.size);
	if (hostname_dn_len == -1)
		goto err;
2017-05-03 04:11:44 -04:00
2017-04-14 07:28:00 -04:00
2017-09-27 05:00:59 -04:00
	free(srv->hostname);
	free(srv->hostname_dn);
	srv->hostname        = strdup(hostname);
	srv->hostname_dn     = strdup(hostname_dn);
	srv->hostname_dn_len = hostname_dn_len;
2017-05-22 09:17:15 -04:00
	if (!srv->hostname || !srv->hostname_dn)
2017-04-14 07:28:00 -04:00
		goto err;

2017-04-26 05:24:02 -04:00
	return 0;

2017-04-14 07:28:00 -04:00
 err:
2017-09-27 05:00:59 -04:00
	free(srv->hostname);    srv->hostname    = NULL;
	free(srv->hostname_dn); srv->hostname_dn = NULL;
2017-04-26 05:24:02 -04:00
	return -1;
}
2017-03-30 08:18:30 -04:00
/*
 * Copy <src> server settings to <srv> server allocating
 * everything needed.
2017-04-14 07:28:00 -04:00
 * This function is not meant to be called at just any time, but only
 * during server settings parsing or during server allocations from
 * a server template, and just after having calloc()'ed a new server.
 * So, <src> may only be a default server (when parsing server settings)
 * or a server template (during server allocations from a server template).
 * <srv_tmpl> distinguishes these two cases (must be 1 if <srv> is a template,
 * 0 if not).
2017-03-30 08:18:30 -04:00
 */
2017-04-14 07:28:00 -04:00
static void srv_settings_cpy(struct server *srv, struct server *src, int srv_tmpl)
2017-03-30 08:18:30 -04:00
{
	/* Connection source settings copy */
	srv_conn_src_cpy(srv, src);

2017-04-14 07:28:00 -04:00
	if (srv_tmpl) {
		srv->addr = src->addr;
		srv->svc_port = src->svc_port;
	}

2017-03-30 08:18:30 -04:00
	srv->pp_opts = src->pp_opts;
	if (src->rdr_pfx != NULL) {
		srv->rdr_pfx = strdup(src->rdr_pfx);
		srv->rdr_len = src->rdr_len;
	}
	if (src->cookie != NULL) {
		srv->cookie = strdup(src->cookie);
		srv->cklen = src->cklen;
	}
	srv->use_ssl = src->use_ssl;
2019-12-11 09:43:45 -05:00
	srv->check.addr = src->check.addr;
	srv->agent.addr = src->agent.addr;
2017-03-30 08:18:30 -04:00
	srv->check.use_ssl = src->check.use_ssl;
	srv->check.port = src->check.port;
2018-12-21 13:42:01 -05:00
	srv->check.sni = src->check.sni;
2018-12-21 13:47:01 -05:00
	srv->check.alpn_str = src->check.alpn_str;
2019-04-30 12:21:28 -04:00
	srv->check.alpn_len = src->check.alpn_len;
2017-03-30 08:18:30 -04:00
	/* Note: 'flags' field has potentially been already initialized. */
	srv->flags |= src->flags;
	srv->do_check = src->do_check;
	srv->do_agent = src->do_agent;
	if (srv->check.port)
		srv->flags |= SRV_F_CHECKPORT;
	srv->check.inter = src->check.inter;
	srv->check.fastinter = src->check.fastinter;
	srv->check.downinter = src->check.downinter;
	srv->agent.use_ssl = src->agent.use_ssl;
	srv->agent.port = src->agent.port;

2020-04-06 11:54:24 -04:00
	if (src->agent.tcpcheck_rules) {
		srv->agent.tcpcheck_rules = calloc(1, sizeof(*srv->agent.tcpcheck_rules));
		if (srv->agent.tcpcheck_rules) {
			srv->agent.tcpcheck_rules->flags = src->agent.tcpcheck_rules->flags;
			srv->agent.tcpcheck_rules->list  = src->agent.tcpcheck_rules->list;
			LIST_INIT(&srv->agent.tcpcheck_rules->preset_vars);
			dup_tcpcheck_vars(&srv->agent.tcpcheck_rules->preset_vars,
			                  &src->agent.tcpcheck_rules->preset_vars);
		}
	}

2017-03-30 08:18:30 -04:00
	srv->agent.inter = src->agent.inter;
	srv->agent.fastinter = src->agent.fastinter;
	srv->agent.downinter = src->agent.downinter;
	srv->maxqueue = src->maxqueue;
	srv->minconn = src->minconn;
	srv->maxconn = src->maxconn;
	srv->slowstart = src->slowstart;
	srv->observe = src->observe;
	srv->onerror = src->onerror;
	srv->onmarkeddown = src->onmarkeddown;
	srv->onmarkedup = src->onmarkedup;
	if (src->trackit != NULL)
		srv->trackit = strdup(src->trackit);
	srv->consecutive_errors_limit = src->consecutive_errors_limit;
	srv->uweight = srv->iweight = src->iweight;

	srv->check.send_proxy = src->check.send_proxy;
	/* health: up, but will fall down at first failure */
	srv->check.rise = srv->check.health = src->check.rise;
	srv->check.fall = src->check.fall;

	/* Here we check if 'disabled' is the default server state */
2017-08-31 08:41:55 -04:00
	if (src->next_admin & (SRV_ADMF_CMAINT | SRV_ADMF_FMAINT)) {
		srv->next_admin |= SRV_ADMF_CMAINT | SRV_ADMF_FMAINT;
		srv->next_state = SRV_ST_STOPPED;
2017-03-30 08:18:30 -04:00
		srv->check.state |= CHK_ST_PAUSED;
		srv->check.health = 0;
	}

	/* health: up but will fall down at first failure */
	srv->agent.rise = srv->agent.health = src->agent.rise;
	srv->agent.fall = src->agent.fall;

	if (src->resolvers_id != NULL)
		srv->resolvers_id = strdup(src->resolvers_id);
	srv->dns_opts.family_prio = src->dns_opts.family_prio;
2018-06-22 09:04:43 -04:00
	srv->dns_opts.accept_duplicate_ip = src->dns_opts.accept_duplicate_ip;
2019-11-17 09:48:56 -05:00
	srv->dns_opts.ignore_weight = src->dns_opts.ignore_weight;
2017-03-30 08:18:30 -04:00
	if (srv->dns_opts.family_prio == AF_UNSPEC)
		srv->dns_opts.family_prio = AF_INET6;
	memcpy(srv->dns_opts.pref_net,
	       src->dns_opts.pref_net,
	       sizeof srv->dns_opts.pref_net);
	srv->dns_opts.pref_net_nb = src->dns_opts.pref_net_nb;
	srv->init_addr_methods = src->init_addr_methods;
	srv->init_addr = src->init_addr;
#if defined(USE_OPENSSL)
	srv_ssl_settings_cpy(srv, src);
#endif
#ifdef TCP_USER_TIMEOUT
	srv->tcp_ut = src->tcp_ut;
#endif
2018-04-10 08:45:45 -04:00
	srv->mux_proto = src->mux_proto;
2018-12-14 12:15:36 -05:00
	srv->pool_purge_delay = src->pool_purge_delay;
2018-12-10 12:30:32 -05:00
	srv->max_idle_conns = src->max_idle_conns;
2019-01-23 04:21:49 -05:00
	srv->max_reuse = src->max_reuse;
2018-04-10 08:45:45 -04:00

2017-08-04 12:35:36 -04:00
	if (srv_tmpl)
		srv->srvrq = src->srvrq;

2019-05-22 07:44:48 -04:00
	srv->check.via_socks4 = src->check.via_socks4;
	srv->socks4_addr = src->socks4_addr;
2017-03-30 08:18:30 -04:00
}
2018-10-26 08:47:32 -04:00
struct server * new_server ( struct proxy * proxy )
2017-03-30 08:18:30 -04:00
{
struct server * srv ;
srv = calloc ( 1 , sizeof * srv ) ;
if ( ! srv )
return NULL ;
srv - > obj_type = OBJ_TYPE_SERVER ;
srv - > proxy = proxy ;
LIST_INIT ( & srv - > actconns ) ;
2018-05-11 12:52:31 -04:00
srv - > pendconns = EB_ROOT ;
2017-07-03 09:41:01 -04:00
2017-08-31 08:41:55 -04:00
srv - > next_state = SRV_ST_RUNNING ; /* early server setup */
2017-03-30 08:18:30 -04:00
srv - > last_change = now . tv_sec ;
2020-04-21 05:46:40 -04:00
srv - > check . obj_type = OBJ_TYPE_CHECK ;
2017-03-30 08:18:30 -04:00
srv - > check . status = HCHK_STATUS_INI ;
srv - > check . server = srv ;
2019-01-11 12:17:17 -05:00
srv - > check . proxy = proxy ;
2020-03-30 14:34:34 -04:00
srv - > check . tcpcheck_rules = & proxy - > tcpcheck_rules ;
2017-03-30 08:18:30 -04:00
2020-04-21 05:46:40 -04:00
srv - > agent . obj_type = OBJ_TYPE_CHECK ;
2017-03-30 08:18:30 -04:00
srv - > agent . status = HCHK_STATUS_INI ;
srv - > agent . server = srv ;
2019-01-21 01:48:26 -05:00
srv - > agent . proxy = proxy ;
2017-03-30 08:18:30 -04:00
srv - > xprt = srv - > check . xprt = srv - > agent . xprt = xprt_get ( XPRT_RAW ) ;
2019-06-06 10:25:55 -04:00
/* please don't put default server settings here, they are set in
* init_default_instance ( ) .
*/
2017-03-30 08:18:30 -04:00
return srv ;
}
2017-03-30 11:32:36 -04:00
# ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
static int server_sni_expr_init ( const char * file , int linenum , char * * args , int cur_arg ,
struct server * srv , struct proxy * proxy )
{
int ret ;
char * err = NULL ;
if ( ! srv - > sni_expr )
return 0 ;
ret = server_parse_sni_expr ( srv , proxy , & err ) ;
if ( ! ret )
return 0 ;
2020-02-11 05:42:38 -05:00
display_parser_err ( file , linenum , args , cur_arg , ret , & err ) ;
2017-03-30 11:32:36 -04:00
free ( err ) ;
return ret ;
}
# endif
/*
* Finalize a server's initialization.
* Initialize health check , agent check and SNI expression if enabled .
* Must not be called for a default server instance .
*/
static int server_finalize_init ( const char * file , int linenum , char * * args , int cur_arg ,
struct server * srv , struct proxy * px )
{
2020-04-27 05:17:10 -04:00
# ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
2017-03-30 11:32:36 -04:00
int ret ;
2020-04-27 05:17:10 -04:00
# endif
2017-03-30 11:32:36 -04:00
2020-03-26 14:48:20 -04:00
if ( srv - > do_check & & srv - > trackit ) {
ha_alert ( " parsing [%s:%d]: unable to enable checks and tracking at the same time! \n " ,
file , linenum ) ;
return ERR_ALERT | ERR_FATAL ;
}
if ( srv - > do_agent & & ! srv - > agent . port ) {
ha_alert ( " parsing [%s:%d] : server %s does not have agent port. Agent check has been disabled. \n " ,
file , linenum , srv - > id ) ;
return ERR_ALERT | ERR_FATAL ;
2017-03-30 11:32:36 -04:00
}
# ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
if ( ( ret = server_sni_expr_init ( file , linenum , args , cur_arg , srv , px ) ) ! = 0 )
return ret ;
# endif
if ( srv - > flags & SRV_F_BACKUP )
px - > srv_bck + + ;
else
px - > srv_act + + ;
srv_lb_commit_status ( srv ) ;
return 0 ;
}
2017-04-13 12:24:23 -04:00
/*
* Parse a range string argument of the form: low[-high].
* Set <nb_low> and <nb_high> values so that they may be reused by this loop
* for (int i = nb_low; i <= nb_high; i++)... with nb_low >= 1.
* Fails if 'low' < 0, or if 'high' is present and lower than 'low'.
* Returns 0 on success, -1 otherwise.
*/
static int srv_tmpl_parse_range ( struct server * srv , const char * arg , int * nb_low , int * nb_high )
{
char * nb_high_arg ;
* nb_high = 0 ;
chunk_printf ( & trash , " %s " , arg ) ;
2018-07-13 04:54:26 -04:00
* nb_low = atoi ( trash . area ) ;
2017-04-13 12:24:23 -04:00
2018-07-13 04:54:26 -04:00
if ( ( nb_high_arg = strchr ( trash . area , ' - ' ) ) ) {
2017-04-13 12:24:23 -04:00
* nb_high_arg + + = ' \0 ' ;
* nb_high = atoi ( nb_high_arg ) ;
}
else {
* nb_high + = * nb_low ;
* nb_low = 1 ;
}
if ( * nb_low < 0 | | * nb_high < * nb_low )
return - 1 ;
return 0 ;
}
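/* Illustrative examples (not from the original sources): with the code above,
 * an argument of "1-3" yields *nb_low = 1 and *nb_high = 3, while a bare "3"
 * also yields *nb_low = 1 and *nb_high = 3, so the caller's loop
 * "for (i = nb_low; i <= nb_high; i++)" visits servers 1 to 3 in both cases.
 */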
2017-04-14 07:28:00 -04:00
static inline void srv_set_id_from_prefix ( struct server * srv , const char * prefix , int nb )
{
chunk_printf ( & trash , " %s%d " , prefix , nb ) ;
free ( srv - > id ) ;
2018-07-13 04:54:26 -04:00
srv - > id = strdup ( trash . area ) ;
2017-04-14 07:28:00 -04:00
}
/*
* Initialize as many servers as possible from the <srv> server template.
* Note that a server template is a special server: it has a few parameters
* that differ from a regular server, but it has been parsed mostly the same
* way as one.
2018-11-15 11:57:51 -05:00
* Returns the number of servers successfully allocated ,
2017-04-14 07:28:00 -04:00
* ' srv ' template included .
*/
static int server_template_init ( struct server * srv , struct proxy * px )
{
int i ;
struct server * newsrv ;
for ( i = srv - > tmpl_info . nb_low + 1 ; i < = srv - > tmpl_info . nb_high ; i + + ) {
newsrv = new_server ( px ) ;
if ( ! newsrv )
goto err ;
srv_settings_cpy ( newsrv , srv , 1 ) ;
MAJOR/REORG: dns: DNS resolution task and requester queues
This patch is a major upgrade of the internal run-time DNS resolver in
HAProxy and it brings the following main changes:
1. DNS resolution task
Up to now, DNS resolution was triggered by the health check task.
From now on, the DNS resolution task is autonomous. It is started by HAProxy
right after the scheduler is available and it is woken either when a
network IO occurs for one of its nameservers or when a timeout is
reached.
From now on, this means we can enable DNS resolution for a server without
enabling health checking.
2. Introduction of a dns_requester structure
Up to now, DNS resolution was purposely made for resolving server
hostnames.
The idea is to ensure that any HAProxy internal object is able
to trigger a DNS resolution. For this purpose, two things have to be done:
- clean up the DNS code from the server structure (this was already
quite clean actually) and stop the server's callbacks from
manipulating the DNS resolution too much
- create an agnostic structure which allows linking a DNS resolution
and a requester of any type (using the obj_type enum)
3. Manage requesters through queues
Up to now, there was a unique relationship between a resolution and its
owner (now called the requester). This is a shame, because in some cases,
multiple objects may share the same hostname and may benefit from a
resolution being performed by a third party.
This patch introduces the notion of queues, which are basically lists of
either currently running resolutions or waiting ones.
The resolutions are now available as a pool, which belongs to the resolvers.
The pool has a default size of 64 resolutions per resolvers section and is
allocated at configuration parsing time.
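/* Rough sketch, for illustration only: the "agnostic structure" described
 * above boils down to something of this shape. The struct and field names
 * here are hypothetical and do not claim to match the real dns_requester
 * layout:
 *
 *     struct example_dns_requester {
 *         enum obj_type *owner;   // server, SRV request, ... identified via obj_type
 *         struct list list;       // attachment to a resolution's requester queue
 *         // success/error callbacks invoked when the resolution completes
 *     };
 */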
2017-05-22 09:17:15 -04:00
srv_prepare_for_resolution ( newsrv , srv - > hostname ) ;
2017-04-14 07:28:00 -04:00
# ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
if ( newsrv - > sni_expr ) {
newsrv - > ssl_ctx . sni = srv_sni_sample_parse_expr ( newsrv , px , NULL , 0 , NULL ) ;
if ( ! newsrv - > ssl_ctx . sni )
goto err ;
}
# endif
/* Set this new server ID. */
srv_set_id_from_prefix ( newsrv , srv - > tmpl_info . prefix , i ) ;
/* Linked backwards first. This will be reestablished after parsing. */
newsrv - > next = px - > srv ;
px - > srv = newsrv ;
}
srv_set_id_from_prefix ( srv , srv - > tmpl_info . prefix , srv - > tmpl_info . nb_low ) ;
return i - srv - > tmpl_info . nb_low ;
err :
srv_set_id_from_prefix ( srv , srv - > tmpl_info . prefix , srv - > tmpl_info . nb_low ) ;
if ( newsrv ) {
# ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
release_sample_expr ( newsrv - > ssl_ctx . sni ) ;
# endif
free_check ( & newsrv - > agent ) ;
free_check ( & newsrv - > check ) ;
}
free ( newsrv ) ;
return i - srv - > tmpl_info . nb_low ;
}
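/* Example, for illustration only: a configuration line such as
 *     server-template websrv 1-3 example.com:80 check
 * goes through the function above and produces servers named "websrv1",
 * "websrv2" and "websrv3", since srv_set_id_from_prefix() builds each id as
 * "<prefix><number>". The prefix and address are made up for the example.
 */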
2020-04-03 03:43:47 -04:00
int parse_server ( const char * file , int linenum , char * * args , struct proxy * curproxy , struct proxy * defproxy , int parse_addr , int in_peers_section )
2014-03-31 04:39:59 -04:00
{
struct server * newsrv = NULL ;
2017-04-13 12:24:23 -04:00
const char * err = NULL ;
2014-03-31 04:39:59 -04:00
char * errmsg = NULL ;
int err_code = 0 ;
unsigned val ;
2015-09-08 10:16:35 -04:00
char * fqdn = NULL ;
2014-03-31 04:39:59 -04:00
2017-04-13 12:24:23 -04:00
if ( ! strcmp ( args [ 0 ] , " server " ) | |
2018-04-26 04:06:41 -04:00
! strcmp ( args [ 0 ] , " peer " ) | |
2017-04-13 12:24:23 -04:00
! strcmp ( args [ 0 ] , " default-server " ) | |
! strcmp ( args [ 0 ] , " server-template " ) ) {
2014-03-31 04:39:59 -04:00
int cur_arg ;
2017-03-21 11:39:15 -04:00
int defsrv = ( * args [ 0 ] = = ' d ' ) ;
2018-04-26 04:06:41 -04:00
int srv = ! defsrv & & ( * args [ 0 ] = = ' p ' | | ! strcmp ( args [ 0 ] , " server " ) ) ;
2017-04-13 12:24:23 -04:00
int srv_tmpl = ! defsrv & & ! srv ;
int tmpl_range_low = 0 , tmpl_range_high = 0 ;
2014-03-31 04:39:59 -04:00
if ( ! defsrv & & curproxy = = defproxy ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : '%s' not allowed in 'defaults' section. \n " , file , linenum , args [ 0 ] ) ;
2014-03-31 04:39:59 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
else if ( warnifnotcap ( curproxy , PR_CAP_BE , file , linenum , args [ 0 ] , NULL ) )
2018-07-24 10:48:59 -04:00
err_code | = ERR_WARN ;
2014-03-31 04:39:59 -04:00
2017-04-13 12:24:23 -04:00
/* There are no mandatory first arguments for a default server. */
2019-01-11 08:06:12 -05:00
if ( srv & & parse_addr ) {
2017-04-13 12:24:23 -04:00
if ( ! * args [ 2 ] ) {
2020-04-03 03:43:47 -04:00
if ( in_peers_section ) {
return 0 ;
}
else {
/* Check the number of arguments on the 'server' line. */
ha_alert ( " parsing [%s:%d] : '%s' expects <name> and <addr>[:<port>] as arguments. \n " ,
file , linenum , args [ 0 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
2017-04-13 12:24:23 -04:00
}
err = invalid_char ( args [ 1 ] ) ;
}
else if ( srv_tmpl ) {
if ( ! * args [ 3 ] ) {
/* Check the number of arguments on the 'server-template' line. */
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : '%s' expects <prefix> <nb | range> <addr>[:<port>] as arguments. \n " ,
2017-04-13 12:24:23 -04:00
file , linenum , args [ 0 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
err = invalid_prefix_char ( args [ 1 ] ) ;
2014-03-31 04:39:59 -04:00
}
2017-04-13 12:24:23 -04:00
if ( err ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : character '%c' is not permitted in %s %s '%s'. \n " ,
2017-04-13 12:24:23 -04:00
file , linenum , * err , args [ 0 ] , srv ? " name " : " prefix " , args [ 1 ] ) ;
2014-03-31 04:39:59 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
2017-04-13 12:24:23 -04:00
cur_arg = 2 ;
if ( srv_tmpl ) {
/* Parse server-template <nb | range> arg. */
if ( srv_tmpl_parse_range ( newsrv , args [ cur_arg ] , & tmpl_range_low , & tmpl_range_high ) < 0 ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : Wrong %s number or range arg '%s'. \n " ,
2017-04-13 12:24:23 -04:00
file , linenum , args [ 0 ] , args [ cur_arg ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
cur_arg + + ;
}
2014-03-31 04:39:59 -04:00
if ( ! defsrv ) {
struct sockaddr_storage * sk ;
2017-01-06 12:36:06 -05:00
int port1 , port2 , port ;
2014-03-31 04:39:59 -04:00
struct protocol * proto ;
2017-03-30 08:18:30 -04:00
newsrv = new_server ( curproxy ) ;
if ( ! newsrv ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : out of memory. \n " , file , linenum ) ;
2014-03-31 04:39:59 -04:00
err_code | = ERR_ALERT | ERR_ABORT ;
goto out ;
}
2017-04-13 12:24:23 -04:00
if ( srv_tmpl ) {
newsrv - > tmpl_info . nb_low = tmpl_range_low ;
newsrv - > tmpl_info . nb_high = tmpl_range_high ;
}
2014-03-31 04:39:59 -04:00
/* the servers are linked backwards first */
newsrv - > next = curproxy - > srv ;
curproxy - > srv = newsrv ;
newsrv - > conf . file = strdup ( file ) ;
newsrv - > conf . line = linenum ;
2017-04-13 12:24:23 -04:00
/* Note: for a server template, its id is its prefix.
* This is a temporary id which will be used for server allocations to come
* after parsing .
*/
if ( srv )
newsrv - > id = strdup ( args [ 1 ] ) ;
else
newsrv - > tmpl_info . prefix = strdup ( args [ 1 ] ) ;
2014-03-31 04:39:59 -04:00
/* several ways to check the port component :
* - IP = > port = + 0 , relative ( IPv4 only )
* - IP : = > port = + 0 , relative
* - IP : N = > port = N , absolute
* - IP : + N = > port = + N , relative
* - IP : - N = > port = - N , relative
*/
2019-01-11 08:06:12 -05:00
if ( ! parse_addr )
goto skip_addr ;
2017-04-13 12:24:23 -04:00
sk = str2sa_range ( args [ cur_arg ] , & port , & port1 , & port2 , & errmsg , NULL , & fqdn , 0 ) ;
2014-03-31 04:39:59 -04:00
if ( ! sk ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : '%s %s' : %s \n " , file , linenum , args [ 0 ] , args [ 1 ] , errmsg ) ;
2014-03-31 04:39:59 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
proto = protocol_by_family ( sk - > ss_family ) ;
2017-01-06 12:42:57 -05:00
if ( ! fqdn & & ( ! proto | | ! proto - > connect ) ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : '%s %s' : connect() not supported for this address family. \n " ,
2014-03-31 04:39:59 -04:00
file , linenum , args [ 0 ] , args [ 1 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
if ( ! port1 | | ! port2 ) {
/* no port specified, +offset, -offset */
2014-05-13 09:54:22 -04:00
newsrv - > flags | = SRV_F_MAPPORTS ;
2014-03-31 04:39:59 -04:00
}
else if ( port1 ! = port2 ) {
/* port range */
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : '%s %s' : port ranges are not allowed in '%s' \n " ,
2014-03-31 04:39:59 -04:00
file , linenum , args [ 0 ] , args [ 1 ] , args [ 2 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
2015-04-13 19:15:08 -04:00
/* save hostname and create associated name resolution */
2017-05-03 06:09:54 -04:00
if ( fqdn ) {
MAJOR: dns: Refactor the DNS code
This is a huge patch with many changes, all about the DNS. Initially, the idea
was to update the DNS part to ease the threads support integration. But quickly,
I started to refactor some parts. And after several iterations, it was
impossible for me to commit the different parts atomically. So, instead of
adding tens of patches, often reworking the same parts, it was easier to merge
all my changes in a single patch. Here are all the changes made to the DNS.
First, the DNS initialization has been refactored. The DNS configuration parsing
remains untouched, in cfgparse.c. But all checks have been moved into a post-check
callback. In the function dns_finalize_config, for each resolvers section, the
nameservers configuration is tested and the task used to manage DNS resolutions
is created. The links between the backend's servers and the resolvers are also
created at this step. At this point no connections are kept alive, so there is no
need anymore to reopen them after the HAProxy fork. Connections used to send DNS
queries will be opened on demand.
Then, the way DNS requesters are linked to a DNS resolution has been
reworked. The resolution used by a requester is now referenced into the
dns_requester structure and the resolution pointers in the server and dns_srvrq
structures have been removed. The wait and curr lists of requesters for a DNS
resolution have been replaced by a single list. And finally, the way a requester
is removed from a DNS resolution has been simplified. Now everything is done in
dns_unlink_resolution.
The srv_set_fqdn function has been simplified. Now, there is only one way to set
the server's FQDN, independently of whether it is done by the CLI or when a SRV
record is resolved.
The static DNS resolutions pool has been replaced by a dynamic pool. This part
has been modified by Baptiste Assmann.
The way the DNS resolutions are triggered by the task or by a health-check has
been totally refactored. Now, all timeouts are respected, especially
hold.valid. The default frequency at which a resolvers section wakes up is now
configurable using the "timeout resolve" parameter.
Now, as documented, as long as invalid responses are received, we really wait
for all name servers' responses before retrying.
As far as possible, resources allocated during DNS configuration parsing are
released when HAProxy is shut down.
Besides all these changes, the code has been cleaned up to ease code review and
the doc has been updated.
2017-09-27 05:00:59 -04:00
if ( fqdn [ 0 ] = = ' _ ' ) { /* SRV record */
2017-08-04 12:35:36 -04:00
/* Check if a SRV request already exists, and if not, create it */
MAJOR: dns: Refactor the DNS code
2017-09-27 05:00:59 -04:00
if ( ( newsrv - > srvrq = find_srvrq_by_name ( fqdn , curproxy ) ) = = NULL )
newsrv - > srvrq = new_dns_srvrq ( newsrv , fqdn ) ;
if ( newsrv - > srvrq = = NULL ) {
2017-08-04 12:35:36 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
MAJOR: dns: Refactor the DNS code
2017-09-27 05:00:59 -04:00
}
}
else if ( srv_prepare_for_resolution ( newsrv , fqdn ) = = - 1 ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : Can't create DNS resolution for server '%s' \n " ,
MAJOR: dns: Refactor the DNS code
2017-09-27 05:00:59 -04:00
file , linenum , newsrv - > id ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
2017-05-03 06:09:54 -04:00
}
2015-04-13 19:15:08 -04:00
}
2014-03-31 04:39:59 -04:00
newsrv - > addr = * sk ;
2017-01-06 12:36:06 -05:00
newsrv - > svc_port = port ;
2014-03-31 04:39:59 -04:00
2017-08-04 12:35:36 -04:00
if ( ! newsrv - > srvrq & & ! newsrv - > hostname & & ! protocol_by_family ( newsrv - > addr . ss_family ) ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : Unknown protocol family %d '%s' \n " ,
2017-04-13 12:24:23 -04:00
file , linenum , newsrv - > addr . ss_family , args [ cur_arg ] ) ;
2014-03-31 04:39:59 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
2019-01-11 08:06:12 -05:00
cur_arg + + ;
skip_addr :
2017-03-30 08:18:30 -04:00
/* Copy default server settings to new server settings. */
2017-04-14 07:28:00 -04:00
srv_settings_cpy ( newsrv , & curproxy - > defsrv , 0 ) ;
2017-11-07 04:42:54 -05:00
HA_SPIN_INIT ( & newsrv - > lock ) ;
2014-03-31 04:39:59 -04:00
} else {
newsrv = & curproxy - > defsrv ;
cur_arg = 1 ;
2016-02-17 15:25:09 -05:00
newsrv - > dns_opts . family_prio = AF_INET6 ;
2018-06-22 09:04:43 -04:00
newsrv - > dns_opts . accept_duplicate_ip = 0 ;
2014-03-31 04:39:59 -04:00
}
while ( * args [ cur_arg ] ) {
2020-04-06 08:26:30 -04:00
if ( ! strcmp ( args [ cur_arg ] , " init-addr " ) ) {
2016-09-21 14:26:16 -04:00
char * p , * end ;
int done ;
2016-11-02 10:05:56 -04:00
struct sockaddr_storage sa ;
2016-09-21 14:26:16 -04:00
newsrv - > init_addr_methods = 0 ;
memset ( & newsrv - > init_addr , 0 , sizeof ( newsrv - > init_addr ) ) ;
for ( p = args [ cur_arg + 1 ] ; * p ; p = end ) {
/* cut on next comma */
for ( end = p ; * end & & * end ! = ' , ' ; end + + ) ;
if ( * end )
* ( end + + ) = 0 ;
2016-11-02 10:05:56 -04:00
memset ( & sa , 0 , sizeof ( sa ) ) ;
2016-09-21 14:26:16 -04:00
if ( ! strcmp ( p , " libc " ) ) {
done = srv_append_initaddr ( & newsrv - > init_addr_methods , SRV_IADDR_LIBC ) ;
}
else if ( ! strcmp ( p , " last " ) ) {
done = srv_append_initaddr ( & newsrv - > init_addr_methods , SRV_IADDR_LAST ) ;
}
2016-11-04 10:17:58 -04:00
else if ( ! strcmp ( p , " none " ) ) {
done = srv_append_initaddr ( & newsrv - > init_addr_methods , SRV_IADDR_NONE ) ;
}
2016-11-02 10:05:56 -04:00
else if ( str2ip2 ( p , & sa , 0 ) ) {
if ( is_addr ( & newsrv - > init_addr ) ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: '%s' : initial address already specified, cannot add '%s'. \n " ,
2016-11-02 10:05:56 -04:00
file , linenum , args [ cur_arg ] , p ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
newsrv - > init_addr = sa ;
done = srv_append_initaddr ( & newsrv - > init_addr_methods , SRV_IADDR_IP ) ;
}
2016-09-21 14:26:16 -04:00
else {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: '%s' : unknown init-addr method '%s', supported methods are 'libc', 'last', 'none'. \n " ,
2016-09-21 14:26:16 -04:00
file , linenum , args [ cur_arg ] , p ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
if ( ! done ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: '%s' : too many init-addr methods when trying to add '%s' \n " ,
2016-09-21 14:26:16 -04:00
file , linenum , args [ cur_arg ] , p ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
}
cur_arg + = 2 ;
}
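/* Example, for illustration only: "init-addr last,libc,none" makes the server
 * try the address from the last known state first, then a libc resolution, and
 * finally start with no address at all; a literal address such as
 * "init-addr 192.0.2.10" is also accepted by the str2ip2() branch above. */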
2015-04-13 19:15:08 -04:00
else if ( ! strcmp ( args [ cur_arg ] , " resolvers " ) ) {
2017-04-20 06:17:50 -04:00
free ( newsrv - > resolvers_id ) ;
2015-04-13 19:15:08 -04:00
newsrv - > resolvers_id = strdup ( args [ cur_arg + 1 ] ) ;
cur_arg + = 2 ;
}
2018-06-22 09:04:43 -04:00
else if ( ! strcmp ( args [ cur_arg ] , " resolve-opts " ) ) {
char * p , * end ;
for ( p = args [ cur_arg + 1 ] ; * p ; p = end ) {
/* cut on next comma */
for ( end = p ; * end & & * end ! = ' , ' ; end + + ) ;
if ( * end )
* ( end + + ) = 0 ;
if ( ! strcmp ( p , " allow-dup-ip " ) ) {
newsrv - > dns_opts . accept_duplicate_ip = 1 ;
}
2019-11-17 09:48:56 -05:00
else if ( ! strcmp ( p , " ignore-weight " ) ) {
newsrv - > dns_opts . ignore_weight = 1 ;
}
2018-06-22 09:04:43 -04:00
else if ( ! strcmp ( p , " prevent-dup-ip " ) ) {
newsrv - > dns_opts . accept_duplicate_ip = 0 ;
}
else {
2019-11-17 09:48:56 -05:00
ha_alert ( " parsing [%s:%d]: '%s' : unknown resolve-opts option '%s', supported methods are 'allow-dup-ip', 'ignore-weight', and 'prevent-dup-ip'. \n " ,
2018-06-22 09:04:43 -04:00
file , linenum , args [ cur_arg ] , p ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
}
cur_arg + = 2 ;
}
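/* Example, for illustration only: "resolve-opts allow-dup-ip,ignore-weight"
 * sets accept_duplicate_ip and ignore_weight as parsed above, i.e. an IP
 * already used by another server of the backend may be kept, and weights
 * advertised in SRV records are ignored. */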
2015-04-13 19:15:08 -04:00
else if ( ! strcmp ( args [ cur_arg ] , " resolve-prefer " ) ) {
if ( ! strcmp ( args [ cur_arg + 1 ] , " ipv4 " ) )
2016-02-17 15:25:09 -05:00
newsrv - > dns_opts . family_prio = AF_INET ;
2015-04-13 19:15:08 -04:00
else if ( ! strcmp ( args [ cur_arg + 1 ] , " ipv6 " ) )
2016-02-17 15:25:09 -05:00
newsrv - > dns_opts . family_prio = AF_INET6 ;
2015-04-13 19:15:08 -04:00
else {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: '%s' expects either ipv4 or ipv6 as argument. \n " ,
2015-04-13 19:15:08 -04:00
file , linenum , args [ cur_arg ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
cur_arg + = 2 ;
}
2016-02-17 16:05:30 -05:00
else if ( ! strcmp ( args [ cur_arg ] , " resolve-net " ) ) {
char * p , * e ;
unsigned char mask ;
struct dns_options * opt ;
if ( ! args [ cur_arg + 1 ] | | args [ cur_arg + 1 ] [ 0 ] = = ' \0 ' ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: '%s' expects a list of networks. \n " ,
2016-02-17 16:05:30 -05:00
file , linenum , args [ cur_arg ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
opt = & newsrv - > dns_opts ;
/* Split the argument on commas, and convert each ipv4 or ipv6
* network string into an in_addr or in6_addr.
*/
p = args [ cur_arg + 1 ] ;
e = p ;
while ( * p ! = ' \0 ' ) {
2018-11-15 11:57:51 -05:00
/* If no room available, return error. */
2016-04-08 05:26:44 -04:00
if ( opt - > pref_net_nb > = SRV_MAX_PREF_NET ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: '%s' exceed %d networks. \n " ,
2016-02-17 16:05:30 -05:00
file , linenum , args [ cur_arg ] , SRV_MAX_PREF_NET ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
/* look for end or comma. */
while ( * e ! = ' , ' & & * e ! = ' \0 ' )
e + + ;
if ( * e = = ' , ' ) {
* e = ' \0 ' ;
e + + ;
}
if ( str2net ( p , 0 , & opt - > pref_net [ opt - > pref_net_nb ] . addr . in4 ,
& opt - > pref_net [ opt - > pref_net_nb ] . mask . in4 ) ) {
/* Input string successfully parsed as an IPv4 network. */
opt - > pref_net [ opt - > pref_net_nb ] . family = AF_INET ;
} else if ( str62net ( p , & opt - > pref_net [ opt - > pref_net_nb ] . addr . in6 ,
& mask ) ) {
/* Input string successfully parsed as an IPv6 network. */
len2mask6 ( mask , & opt - > pref_net [ opt - > pref_net_nb ] . mask . in6 ) ;
opt - > pref_net [ opt - > pref_net_nb ] . family = AF_INET6 ;
} else {
2020-05-05 15:53:22 -04:00
/* All network conversions fail, return error. */
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: '%s': invalid network '%s'. \n " ,
2016-02-17 16:05:30 -05:00
file , linenum , args [ cur_arg ] , p ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
opt - > pref_net_nb + + ;
p = e ;
}
cur_arg + = 2 ;
}
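/* Example, for illustration only: "resolve-net 10.0.0.0/8,2001:db8::/32"
 * stores one IPv4 and one IPv6 entry in dns_opts.pref_net[], so that resolved
 * addresses belonging to these networks are preferred. The networks are made
 * up for the example. */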
2014-03-31 04:39:59 -04:00
else if ( ! strcmp ( args [ cur_arg ] , " weight " ) ) {
int w ;
w = atol ( args [ cur_arg + 1 ] ) ;
if ( w < 0 | | w > SRV_UWGHT_MAX ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : weight of server %s is not within 0 and %d (%d). \n " ,
2014-03-31 04:39:59 -04:00
file , linenum , newsrv - > id , SRV_UWGHT_MAX , w ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
newsrv - > uweight = newsrv - > iweight = w ;
cur_arg + = 2 ;
}
2020-05-29 19:42:45 -04:00
else if ( ! strcmp ( args [ cur_arg ] , " log-proto " ) ) {
if ( ! strcmp ( args [ cur_arg + 1 ] , " legacy " ) )
newsrv - > log_proto = SRV_LOG_PROTO_LEGACY ;
else if ( ! strcmp ( args [ cur_arg + 1 ] , " octet-count " ) )
newsrv - > log_proto = SRV_LOG_PROTO_OCTET_COUNTING ;
else {
ha_alert ( " parsing [%s:%d]: '%s' expects one of 'legacy' or "
" 'octet-count' but got '%s' \n " ,
file , linenum , args [ cur_arg ] , args [ cur_arg + 1 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
cur_arg + = 2 ;
}
2014-03-31 04:39:59 -04:00
else if ( ! strcmp ( args [ cur_arg ] , " minconn " ) ) {
newsrv - > minconn = atol ( args [ cur_arg + 1 ] ) ;
cur_arg + = 2 ;
}
else if ( ! strcmp ( args [ cur_arg ] , " maxconn " ) ) {
newsrv - > maxconn = atol ( args [ cur_arg + 1 ] ) ;
cur_arg + = 2 ;
}
else if ( ! strcmp ( args [ cur_arg ] , " maxqueue " ) ) {
newsrv - > maxqueue = atol ( args [ cur_arg + 1 ] ) ;
cur_arg + = 2 ;
}
else if ( ! strcmp ( args [ cur_arg ] , " slowstart " ) ) {
/* slowstart is stored in seconds */
const char * err = parse_time_err ( args [ cur_arg + 1 ] , & val , TIME_UNIT_MS ) ;
2019-06-07 13:00:37 -04:00
if ( err = = PARSE_TIME_OVER ) {
ha_alert ( " parsing [%s:%d]: timer overflow in argument <%s> to <%s> of server %s, maximum value is 2147483647 ms (~24.8 days). \n " ,
file , linenum , args [ cur_arg + 1 ] , args [ cur_arg ] , newsrv - > id ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
else if ( err = = PARSE_TIME_UNDER ) {
ha_alert ( " parsing [%s:%d]: timer underflow in argument <%s> to <%s> of server %s, minimum non-null value is 1 ms. \n " ,
file , linenum , args [ cur_arg + 1 ] , args [ cur_arg ] , newsrv - > id ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
else if ( err ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : unexpected character '%c' in 'slowstart' argument of server %s. \n " ,
2014-03-31 04:39:59 -04:00
file , linenum , * err , newsrv - > id ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
newsrv - > slowstart = ( val + 999 ) / 1000 ;
cur_arg + = 2 ;
}
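/* Example, for illustration only: "slowstart 30s" is parsed as 30000 ms by
 * parse_time_err() and stored as 30 by the rounding above, since slowstart is
 * kept in seconds. */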
else if ( ! strcmp ( args [ cur_arg ] , " on-error " ) ) {
if ( ! strcmp ( args [ cur_arg + 1 ] , " fastinter " ) )
newsrv - > onerror = HANA_ONERR_FASTINTER ;
else if ( ! strcmp ( args [ cur_arg + 1 ] , " fail-check " ) )
newsrv - > onerror = HANA_ONERR_FAILCHK ;
else if ( ! strcmp ( args [ cur_arg + 1 ] , " sudden-death " ) )
newsrv - > onerror = HANA_ONERR_SUDDTH ;
else if ( ! strcmp ( args [ cur_arg + 1 ] , " mark-down " ) )
newsrv - > onerror = HANA_ONERR_MARKDWN ;
else {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: '%s' expects one of 'fastinter', "
2014-03-31 04:39:59 -04:00
" 'fail-check', 'sudden-death' or 'mark-down' but got '%s' \n " ,
file , linenum , args [ cur_arg ] , args [ cur_arg + 1 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
cur_arg + = 2 ;
}
else if ( ! strcmp ( args [ cur_arg ] , " on-marked-down " ) ) {
if ( ! strcmp ( args [ cur_arg + 1 ] , " shutdown-sessions " ) )
newsrv - > onmarkeddown = HANA_ONMARKEDDOWN_SHUTDOWNSESSIONS ;
else {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: '%s' expects 'shutdown-sessions' but got '%s' \n " ,
2014-03-31 04:39:59 -04:00
file , linenum , args [ cur_arg ] , args [ cur_arg + 1 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
cur_arg + = 2 ;
}
else if ( ! strcmp ( args [ cur_arg ] , " on-marked-up " ) ) {
if ( ! strcmp ( args [ cur_arg + 1 ] , " shutdown-backup-sessions " ) )
newsrv - > onmarkedup = HANA_ONMARKEDUP_SHUTDOWNBACKUPSESSIONS ;
else {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: '%s' expects 'shutdown-backup-sessions' but got '%s' \n " ,
2014-03-31 04:39:59 -04:00
file , linenum , args [ cur_arg ] , args [ cur_arg + 1 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
cur_arg + = 2 ;
}
else if ( ! strcmp ( args [ cur_arg ] , " error-limit " ) ) {
if ( ! * args [ cur_arg + 1 ] ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: '%s' expects an integer argument. \n " ,
2014-03-31 04:39:59 -04:00
file , linenum , args [ cur_arg ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
newsrv - > consecutive_errors_limit = atoi ( args [ cur_arg + 1 ] ) ;
if ( newsrv - > consecutive_errors_limit < = 0 ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d]: %s has to be > 0. \n " ,
2014-03-31 04:39:59 -04:00
file , linenum , args [ cur_arg ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
cur_arg + = 2 ;
}
2017-04-14 09:19:56 -04:00
else if ( ! strcmp ( args [ cur_arg ] , " usesrc " ) ) { /* address to use outside: needs "source" first */
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : '%s' only allowed after a '%s' statement. \n " ,
2014-03-31 04:39:59 -04:00
file , linenum , " usesrc " , " source " ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
else {
static int srv_dumped ;
struct srv_kw * kw ;
char * err ;
kw = srv_find_kw ( args [ cur_arg ] ) ;
if ( kw ) {
char * err = NULL ;
int code ;
if ( ! kw - > parse ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : '%s %s' : '%s' option is not implemented in this version (check build options). \n " ,
2014-03-31 04:39:59 -04:00
file , linenum , args [ 0 ] , args [ 1 ] , args [ cur_arg ] ) ;
2017-04-16 11:14:14 -04:00
if ( kw - > skip ! = - 1 )
cur_arg + = 1 + kw - > skip ;
2014-03-31 04:39:59 -04:00
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
if ( defsrv & & ! kw - > default_ok ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : '%s %s' : '%s' option is not accepted in default-server sections. \n " ,
2014-03-31 04:39:59 -04:00
file , linenum , args [ 0 ] , args [ 1 ] , args [ cur_arg ] ) ;
2017-04-16 11:14:14 -04:00
if ( kw - > skip ! = - 1 )
cur_arg + = 1 + kw - > skip ;
2014-03-31 04:39:59 -04:00
err_code | = ERR_ALERT ;
continue ;
}
code = kw - > parse ( args , & cur_arg , curproxy , newsrv , & err ) ;
err_code | = code ;
if ( code ) {
2020-02-11 05:42:38 -05:00
display_parser_err ( file , linenum , args , cur_arg , code , & err ) ;
2014-03-31 04:39:59 -04:00
if ( code & ERR_FATAL ) {
free ( err ) ;
2017-04-16 11:14:14 -04:00
if ( kw - > skip ! = - 1 )
cur_arg + = 1 + kw - > skip ;
2014-03-31 04:39:59 -04:00
goto out ;
}
}
free ( err ) ;
2017-04-16 11:14:14 -04:00
if ( kw - > skip ! = - 1 )
cur_arg + = 1 + kw - > skip ;
2014-03-31 04:39:59 -04:00
continue ;
}
err = NULL ;
if ( ! srv_dumped ) {
srv_dump_kws ( & err ) ;
indent_msg ( & err , 4 ) ;
srv_dumped = 1 ;
}
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : '%s %s' unknown keyword '%s'.%s%s \n " ,
2014-03-31 04:39:59 -04:00
file , linenum , args [ 0 ] , args [ 1 ] , args [ cur_arg ] ,
err ? " Registered keywords : " : " " , err ? err : " " ) ;
free ( err ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
}
2017-03-30 11:32:36 -04:00
if ( ! defsrv )
err_code | = server_finalize_init ( file , linenum , args , cur_arg , newsrv , curproxy ) ;
if ( err_code & ERR_FATAL )
goto out ;
2017-04-14 07:28:00 -04:00
if ( srv_tmpl )
server_template_init ( newsrv , curproxy ) ;
2014-03-31 04:39:59 -04:00
}
2015-09-08 10:16:35 -04:00
free ( fqdn ) ;
2014-03-31 04:39:59 -04:00
return 0 ;
out :
2015-09-08 10:16:35 -04:00
free ( fqdn ) ;
2014-03-31 04:39:59 -04:00
free ( errmsg ) ;
return err_code ;
}
2015-07-08 16:03:56 -04:00
/* Returns a pointer to the first server matching the id <id>.
* NULL is returned if no match is found.
* The lookup is performed in the backend <bk>.
*/
struct server * server_find_by_id ( struct proxy * bk , int id )
{
struct eb32_node * eb32 ;
struct server * curserver ;
if ( ! bk | | ( id = = 0 ) )
return NULL ;
/* <bk> has no backend capabilities, so it can't have a server */
if ( ! ( bk - > cap & PR_CAP_BE ) )
return NULL ;
curserver = NULL ;
eb32 = eb32_lookup ( & bk - > conf . used_server_id , id ) ;
if ( eb32 )
curserver = container_of ( eb32 , struct server , conf . id ) ;
return curserver ;
}
/* Returns a pointer to the first server matching either name <name>, or id
* if < name > starts with a ' # ' . NULL is returned if no match is found .
* the lookup is performed in the backend < bk >
*/
struct server * server_find_by_name ( struct proxy * bk , const char * name )
{
struct server * curserver ;
if ( ! bk | | ! name )
return NULL ;
/* <bk> has no backend capabilities, so it can't have a server */
if ( ! ( bk - > cap & PR_CAP_BE ) )
return NULL ;
curserver = NULL ;
if ( * name = = ' # ' ) {
curserver = server_find_by_id ( bk , atoi ( name + 1 ) ) ;
if ( curserver )
return curserver ;
}
else {
curserver = bk - > srv ;
while ( curserver & & ( strcmp ( curserver - > id , name ) ! = 0 ) )
curserver = curserver - > next ;
if ( curserver )
return curserver ;
}
return NULL ;
}
struct server * server_find_best_match ( struct proxy * bk , char * name , int id , int * diff )
{
struct server * byname ;
struct server * byid ;
if ( ! name & & ! id )
return NULL ;
if ( diff )
* diff = 0 ;
byname = byid = NULL ;
if ( name ) {
byname = server_find_by_name ( bk , name ) ;
if ( byname & & ( ! id | | byname - > puid = = id ) )
return byname ;
}
/* remaining possibilities :
* - name not set
* - name set but not found
* - name found but ID doesn ' t match
*/
if ( id ) {
byid = server_find_by_id ( bk , id ) ;
if ( byid ) {
if ( byname ) {
/* use id only if forced by configuration */
if ( byid - > flags & SRV_F_FORCED_ID ) {
if ( diff )
* diff | = 2 ;
return byid ;
}
else {
if ( diff )
* diff | = 1 ;
return byname ;
}
}
/* remaining possibilities:
* - name not set
* - name set but not found
*/
if ( name & & diff )
* diff | = 2 ;
return byid ;
}
/* id not found */
if ( byname ) {
if ( diff )
* diff | = 1 ;
return byname ;
}
}
return NULL ;
}
2018-08-21 05:54:26 -04:00
/* Update a server state using the parameters available in the params list.
*
* Grabs the server lock during operation .
*/
2015-08-19 10:44:03 -04:00
static void srv_update_state ( struct server * srv , int version , char * * params )
{
char * p ;
2018-07-13 05:56:34 -04:00
struct buffer * msg ;
2015-08-19 10:44:03 -04:00
/* fields since version 1
* and common to all other upcoming versions
*/
enum srv_state srv_op_state ;
enum srv_admin srv_admin_state ;
unsigned srv_uweight , srv_iweight ;
unsigned long srv_last_time_change ;
short srv_check_status ;
enum chk_result srv_check_result ;
int srv_check_health ;
int srv_check_state , srv_agent_state ;
int bk_f_forced_id ;
int srv_f_forced_id ;
2017-04-26 05:24:02 -04:00
int fqdn_set_by_cli ;
const char * fqdn ;
2017-08-01 02:47:19 -04:00
const char * port_str ;
unsigned int port ;
2018-07-02 11:00:54 -04:00
char * srvrecord ;
2015-08-19 10:44:03 -04:00
2017-04-26 05:24:02 -04:00
fqdn = NULL ;
2017-08-01 02:47:19 -04:00
port = 0 ;
2015-09-29 12:38:47 -04:00
msg = get_trash_chunk ( ) ;
2015-08-19 10:44:03 -04:00
switch ( version ) {
case 1 :
/*
* now we can proceed with server ' s state update :
* srv_addr : params [ 0 ]
* srv_op_state : params [ 1 ]
* srv_admin_state : params [ 2 ]
* srv_uweight : params [ 3 ]
* srv_iweight : params [ 4 ]
* srv_last_time_change : params [ 5 ]
* srv_check_status : params [ 6 ]
* srv_check_result : params [ 7 ]
* srv_check_health : params [ 8 ]
* srv_check_state : params [ 9 ]
* srv_agent_state : params [ 10 ]
* bk_f_forced_id : params [ 11 ]
* srv_f_forced_id : params [ 12 ]
2017-04-26 05:24:02 -04:00
* srv_fqdn : params [ 13 ]
2017-08-01 02:47:19 -04:00
* srv_port : params [ 14 ]
2018-07-02 11:00:54 -04:00
* srvrecord : params [ 15 ]
2015-08-19 10:44:03 -04:00
*/
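/* Note (assumption, for illustration): srv_op_state carries the numeric value
 * of enum srv_state as restored from the state file, e.g. 0 for SRV_ST_STOPPED
 * and 2 for SRV_ST_RUNNING, assuming the enum keeps its usual declaration
 * order. */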
/* validating srv_op_state */
p = NULL ;
errno = 0 ;
srv_op_state = strtol ( params [ 1 ] , & p , 10 ) ;
if ( ( p = = params [ 1 ] ) | | errno = = EINVAL | | errno = = ERANGE | |
( srv_op_state ! = SRV_ST_STOPPED & &
srv_op_state ! = SRV_ST_STARTING & &
srv_op_state ! = SRV_ST_RUNNING & &
srv_op_state ! = SRV_ST_STOPPING ) ) {
chunk_appendf ( msg , " , invalid srv_op_state value '%s' " , params [ 1 ] ) ;
}
/* validating srv_admin_state */
p = NULL ;
errno = 0 ;
srv_admin_state = strtol ( params [ 2 ] , & p , 10 ) ;
2017-04-26 05:24:02 -04:00
fqdn_set_by_cli = ! ! ( srv_admin_state & SRV_ADMF_HMAINT ) ;
BUG/MEDIUM: servers: properly propagate the maintenance states during startup
Right now there is an issue with the way the maintenance flags are
propagated upon startup. They are not propagated, just copied from the
tracked server. This implies that depending on the server's order, some
tracking servers may not be marked down. For example this configuration
does not work as expected :
server s1 1.1.1.1:8000 track s2
server s2 1.1.1.1:8000 track s3
server s3 1.1.1.1:8000 track s4
server s4 wtap:8000 check inter 1s disabled
It results in s1/s2 being up, and s3/s4 being down, while all of them
should be down.
The only clean way to process this is to run through all "root" servers
(those not tracking any other server), and to propagate their state down
to all their trackers. This is the same algorithm used to propagate the
state changes. It has to be done both to compute the IDRAIN flag and the
IMAINT flag. However, doing so requires that tracking servers are not
marked as inherited maintenance anymore while parsing the configuration
(and given that it is wrong, better drop it).
This fix also addresses another side effect of the bug above which is
that the IDRAIN/IMAINT flags are stored in the state files, and if
restored while the tracked server doesn't have the equivalent flag,
the servers may end up in a situation where it's impossible to remove
these flags. For example in the configuration above, after removing
"disabled" on server s4, the other servers would have remained down;
with this fix they no longer do. Similarly, the combination of IMAINT
or IDRAIN with their respective forced modes was not accepted on
reload, which is wrong as well.
This bug has been present at least since 1.5, maybe even 1.4 (it came
with tracking support). The fix needs to be backported there, though
the srv-state parts are irrelevant.
This commit relies on previous patch to silence warnings on startup.
2016-11-03 14:22:19 -04:00
2017-04-26 05:24:02 -04:00
/* inherited statuses will be recomputed later.
* Also disable SRV_ADMF_HMAINT flag ( set from stats socket fqdn ) .
*/
srv_admin_state & = ~ SRV_ADMF_IDRAIN & ~ SRV_ADMF_IMAINT & ~ SRV_ADMF_HMAINT ;
BUG/MEDIUM: servers: properly propagate the maintenance states during startup
2016-11-03 14:22:19 -04:00
2015-08-19 10:44:03 -04:00
if ( ( p = = params [ 2 ] ) | | errno = = EINVAL | | errno = = ERANGE | |
( srv_admin_state ! = 0 & &
srv_admin_state ! = SRV_ADMF_FMAINT & &
srv_admin_state ! = SRV_ADMF_CMAINT & &
srv_admin_state ! = ( SRV_ADMF_CMAINT | SRV_ADMF_FMAINT ) & &
2016-11-03 13:33:25 -04:00
srv_admin_state ! = ( SRV_ADMF_CMAINT | SRV_ADMF_FDRAIN ) & &
BUG/MEDIUM: servers: properly propagate the maintenance states during startup
2016-11-03 14:22:19 -04:00
srv_admin_state ! = SRV_ADMF_FDRAIN ) ) {
2015-08-19 10:44:03 -04:00
chunk_appendf ( msg , " , invalid srv_admin_state value '%s' " , params [ 2 ] ) ;
}
/* validating srv_uweight */
p = NULL ;
errno = 0 ;
srv_uweight = strtol ( params [ 3 ] , & p , 10 ) ;
2015-09-29 12:32:57 -04:00
if ( ( p = = params [ 3 ] ) | | errno = = EINVAL | | errno = = ERANGE | | ( srv_uweight > SRV_UWGHT_MAX ) )
2015-08-19 10:44:03 -04:00
chunk_appendf ( msg , " , invalid srv_uweight value '%s' " , params [ 3 ] ) ;
/* validating srv_iweight */
p = NULL ;
errno = 0 ;
srv_iweight = strtol ( params [ 4 ] , & p , 10 ) ;
2015-09-29 12:32:57 -04:00
if ( ( p = = params [ 4 ] ) | | errno = = EINVAL | | errno = = ERANGE | | ( srv_iweight > SRV_UWGHT_MAX ) )
2015-08-19 10:44:03 -04:00
chunk_appendf ( msg , " , invalid srv_iweight value '%s' " , params [ 4 ] ) ;
/* validating srv_last_time_change */
p = NULL ;
errno = 0 ;
srv_last_time_change = strtol ( params [ 5 ] , & p , 10 ) ;
if ( ( p = = params [ 5 ] ) | | errno = = EINVAL | | errno = = ERANGE )
chunk_appendf ( msg , " , invalid srv_last_time_change value '%s' " , params [ 5 ] ) ;
/* validating srv_check_status */
p = NULL ;
errno = 0 ;
srv_check_status = strtol ( params [ 6 ] , & p , 10 ) ;
if ( p = = params [ 6 ] | | errno = = EINVAL | | errno = = ERANGE | |
( srv_check_status > = HCHK_STATUS_SIZE ) )
chunk_appendf ( msg , " , invalid srv_check_status value '%s' " , params [ 6 ] ) ;
/* validating srv_check_result */
p = NULL ;
errno = 0 ;
srv_check_result = strtol ( params [ 7 ] , & p , 10 ) ;
if ( ( p = = params [ 7 ] ) | | errno = = EINVAL | | errno = = ERANGE | |
( srv_check_result ! = CHK_RES_UNKNOWN & &
srv_check_result ! = CHK_RES_NEUTRAL & &
srv_check_result ! = CHK_RES_FAILED & &
srv_check_result ! = CHK_RES_PASSED & &
srv_check_result ! = CHK_RES_CONDPASS ) ) {
chunk_appendf ( msg , " , invalid srv_check_result value '%s' " , params [ 7 ] ) ;
}
/* validating srv_check_health */
p = NULL ;
errno = 0 ;
srv_check_health = strtol ( params [ 8 ] , & p , 10 ) ;
if ( p = = params [ 8 ] | | errno = = EINVAL | | errno = = ERANGE )
chunk_appendf ( msg , " , invalid srv_check_health value '%s' " , params [ 8 ] ) ;
/* validating srv_check_state */
p = NULL ;
errno = 0 ;
srv_check_state = strtol ( params [ 9 ] , & p , 10 ) ;
if ( p = = params [ 9 ] | | errno = = EINVAL | | errno = = ERANGE | |
( srv_check_state & ~ ( CHK_ST_INPROGRESS | CHK_ST_CONFIGURED | CHK_ST_ENABLED | CHK_ST_PAUSED | CHK_ST_AGENT ) ) )
chunk_appendf ( msg , " , invalid srv_check_state value '%s' " , params [ 9 ] ) ;
/* validating srv_agent_state */
p = NULL ;
errno = 0 ;
srv_agent_state = strtol ( params [ 10 ] , & p , 10 ) ;
if ( p = = params [ 10 ] | | errno = = EINVAL | | errno = = ERANGE | |
( srv_agent_state & ~ ( CHK_ST_INPROGRESS | CHK_ST_CONFIGURED | CHK_ST_ENABLED | CHK_ST_PAUSED | CHK_ST_AGENT ) ) )
chunk_appendf ( msg , " , invalid srv_agent_state value '%s' " , params [ 10 ] ) ;
/* validating bk_f_forced_id */
p = NULL ;
errno = 0 ;
bk_f_forced_id = strtol ( params [ 11 ] , & p , 10 ) ;
if ( p = = params [ 11 ] | | errno = = EINVAL | | errno = = ERANGE | | ! ( ( bk_f_forced_id = = 0 ) | | ( bk_f_forced_id = = 1 ) ) )
chunk_appendf ( msg , " , invalid bk_f_forced_id value '%s' " , params [ 11 ] ) ;
/* validating srv_f_forced_id */
p = NULL ;
errno = 0 ;
srv_f_forced_id = strtol ( params [ 12 ] , & p , 10 ) ;
if ( p = = params [ 12 ] | | errno = = EINVAL | | errno = = ERANGE | | ! ( ( srv_f_forced_id = = 0 ) | | ( srv_f_forced_id = = 1 ) ) )
chunk_appendf ( msg , " , invalid srv_f_forced_id value '%s' " , params [ 12 ] ) ;
/* validating srv_fqdn */
fqdn = params [ 13 ] ;
if ( fqdn & & * fqdn = = ' - ' )
fqdn = NULL ;
if ( fqdn & & ( strlen ( fqdn ) > DNS_MAX_NAME_SIZE | | invalid_domainchar ( fqdn ) ) ) {
chunk_appendf ( msg , " , invalid srv_fqdn value '%s' " , params [ 13 ] ) ;
fqdn = NULL ;
}
port_str = params [ 14 ] ;
if ( port_str ) {
port = strl2uic ( port_str , strlen ( port_str ) ) ;
if ( port > USHRT_MAX ) {
chunk_appendf ( msg , " , invalid srv_port value '%s' " , port_str ) ;
port_str = NULL ;
}
}
/* SRV record
* NOTE : in HAProxy , SRV records must start with an underscore ' _ '
*/
srvrecord = params [ 15 ] ;
if ( srvrecord & & * srvrecord ! = ' _ ' )
srvrecord = NULL ;
/* don't apply anything if one error has been detected */
if ( msg - > data )
goto out ;
HA_SPIN_LOCK ( SERVER_LOCK , & srv - > lock ) ;
/* recover operational state and apply it to this server
* and all servers tracking this one */
srv - > check . health = srv_check_health ;
switch ( srv_op_state ) {
case SRV_ST_STOPPED :
srv - > check . health = 0 ;
srv_set_stopped ( srv , " changed from server-state after a reload " , NULL ) ;
break ;
case SRV_ST_STARTING :
/* If rise == 1 there is no STARTING state, let's switch to
* RUNNING
*/
if ( srv - > check . rise = = 1 ) {
srv - > check . health = srv - > check . rise + srv - > check . fall - 1 ;
srv_set_running ( srv , " " , NULL ) ;
break ;
}
if ( srv - > check . health < 1 | | srv - > check . health > = srv - > check . rise )
srv - > check . health = srv - > check . rise - 1 ;
srv - > next_state = srv_op_state ;
break ;
case SRV_ST_STOPPING :
/* If fall == 1 there is no STOPPING state, let's switch to
* STOPPED
*/
if ( srv - > check . fall = = 1 ) {
srv - > check . health = 0 ;
srv_set_stopped ( srv , " changed from server-state after a reload " , NULL ) ;
break ;
}
if ( srv - > check . health < srv - > check . rise | |
srv - > check . health > srv - > check . rise + srv - > check . fall - 2 )
srv - > check . health = srv - > check . rise ;
srv_set_stopping ( srv , " changed from server-state after a reload " , NULL ) ;
break ;
case SRV_ST_RUNNING :
srv - > check . health = srv - > check . rise + srv - > check . fall - 1 ;
srv_set_running ( srv , " " , NULL ) ;
break ;
}
/* When applying server state, the following rules apply:
* - in case of a configuration change , we apply the setting from the new
* configuration , regardless of old running state
* - if no configuration change , we apply old running state only if old running
* state is different from new configuration state
*/
/* configuration has changed */
if ( ( srv_admin_state & SRV_ADMF_CMAINT ) ! = ( srv - > next_admin & SRV_ADMF_CMAINT ) ) {
if ( srv - > next_admin & SRV_ADMF_CMAINT )
srv_adm_set_maint ( srv ) ;
else
srv_adm_set_ready ( srv ) ;
}
/* configuration is the same, let's compare old running state and new conf state */
else {
if ( srv_admin_state & SRV_ADMF_FMAINT & & ! ( srv - > next_admin & SRV_ADMF_CMAINT ) )
srv_adm_set_maint ( srv ) ;
else if ( ! ( srv_admin_state & SRV_ADMF_FMAINT ) & & ( srv - > next_admin & SRV_ADMF_CMAINT ) )
srv_adm_set_ready ( srv ) ;
}
/* apply drain mode if server is currently enabled */
if ( ! ( srv - > next_admin & SRV_ADMF_FMAINT ) & & ( srv_admin_state & SRV_ADMF_FDRAIN ) ) {
/* The SRV_ADMF_FDRAIN flag is inherited when srv->iweight is 0
* ( srv - > iweight is the weight set up in configuration ) .
* There are two possible reasons for FDRAIN to have been present :
* - previous config weight was zero
* - " set server b/s drain " was sent to the CLI
*
* In the first case , we simply want to drop this drain state
* if the new weight is not zero anymore , meaning the administrator
* has intentionally turned the weight back to a positive value to
* enable the server again after an operation . In the second case ,
* the drain state was forced on the CLI regardless of the config ' s
* weight so we don ' t want a change to the config weight to lose this
* status . What this means is :
* - if previous weight was 0 and new one is > 0 , drop the DRAIN state .
* - if the previous weight was > 0 , keep it .
*/
if ( srv_iweight > 0 | | srv - > iweight = = 0 )
srv_adm_set_drain ( srv ) ;
}
srv - > last_change = date . tv_sec - srv_last_time_change ;
srv - > check . status = srv_check_status ;
srv - > check . result = srv_check_result ;
/* Only case we want to apply is removing ENABLED flag which could have been
* done by the " disable health " command over the stats socket
*/
if ( ( srv - > check . state & CHK_ST_CONFIGURED ) & &
( srv_check_state & CHK_ST_CONFIGURED ) & &
! ( srv_check_state & CHK_ST_ENABLED ) )
srv - > check . state & = ~ CHK_ST_ENABLED ;
/* Only case we want to apply is removing ENABLED flag which could have been
* done by the " disable agent " command over the stats socket
*/
if ( ( srv - > agent . state & CHK_ST_CONFIGURED ) & &
( srv_agent_state & CHK_ST_CONFIGURED ) & &
! ( srv_agent_state & CHK_ST_ENABLED ) )
srv - > agent . state & = ~ CHK_ST_ENABLED ;
/* We want to apply the previous 'running' weight (srv_uweight) only if there
* was no change in the configuration: both previous and new iweight are equal
*
* It means that a configuration file change has precedence over a unix socket change
* for the server's weight
*
* by default, HAProxy applies the following weight when parsing the configuration
* srv->uweight = srv->iweight
*/
if ( srv_iweight = = srv - > iweight ) {
2015-08-19 10:44:03 -04:00
srv - > uweight = srv_uweight ;
}
server_recalc_eweight ( srv , 1 ) ;
/* load server IP address */
if ( strcmp ( params [ 0 ] , " - " ) )
srv - > lastaddr = strdup ( params [ 0 ] ) ;
if ( fqdn & & srv - > hostname ) {
if ( ! strcmp ( srv - > hostname , fqdn ) ) {
/* Here we reset the 'set from stats socket FQDN' flag
* to support such transitions :
* Let ' s say initial FQDN value is foo1 ( in configuration file ) .
* - FQDN changed from stats socket , from foo1 to foo2 value ,
* - FQDN changed again from file configuration ( with the same previous value
set from stats socket , from foo1 to foo2 value ) ,
* - reload for any other reason than a FQDN modification ,
* the configuration file FQDN matches the fqdn server state file value .
* So we must reset the ' set from stats socket FQDN ' flag to be consistent with
* any further FQDN modification .
*/
srv - > next_admin & = ~ SRV_ADMF_HMAINT ;
}
else {
/* If the FQDN has been changed from the stats socket,
* apply fqdn state file value ( which is the value set
* from stats socket ) .
* Also ensure the runtime resolver will process this resolution .
*/
if ( fqdn_set_by_cli ) {
srv_set_fqdn ( srv , fqdn , 0 ) ;
srv - > flags & = ~ SRV_F_NO_RESOLUTION ;
srv - > next_admin | = SRV_ADMF_HMAINT ;
}
}
}
/* If all the conditions below are validated, this means
* we ' re evaluating a server managed by SRV resolution
*/
else if ( fqdn & & ! srv - > hostname & & srvrecord ) {
int res ;
/* we can't apply previous state if SRV record has changed */
if ( srv - > srvrq & & strcmp ( srv - > srvrq - > name , srvrecord ) ! = 0 ) {
chunk_appendf(msg, ", SRV record mismatch between configuration ('%s') and state file ('%s') for server '%s'. Previous state not applied", srv->srvrq->name, srvrecord, srv->id);
HA_SPIN_UNLOCK ( SERVER_LOCK , & srv - > lock ) ;
goto out ;
}
/* create or find a SRV resolution for this srv record */
if ( srv - > srvrq = = NULL & & ( srv - > srvrq = find_srvrq_by_name ( srvrecord , srv - > proxy ) ) = = NULL )
srv - > srvrq = new_dns_srvrq ( srv , srvrecord ) ;
if ( srv - > srvrq = = NULL ) {
chunk_appendf ( msg , " , can't create or find SRV resolution '%s' for server '%s' " , srvrecord , srv - > id ) ;
HA_SPIN_UNLOCK ( SERVER_LOCK , & srv - > lock ) ;
goto out ;
}
/* prepare DNS resolution for this server */
res = srv_prepare_for_resolution ( srv , fqdn ) ;
if ( res = = - 1 ) {
chunk_appendf ( msg , " , can't allocate memory for DNS resolution for server '%s' " , srv - > id ) ;
HA_SPIN_UNLOCK ( SERVER_LOCK , & srv - > lock ) ;
goto out ;
}
/* configure check.port accordingly */
if ( ( srv - > check . state & CHK_ST_CONFIGURED ) & &
! ( srv - > flags & SRV_F_CHECKPORT ) )
srv - > check . port = port ;
/* Unset SRV_F_MAPPORTS for SRV records.
* SRV_F_MAPPORTS is unfortunately set by parse_server ( )
* because no ports are provided in the configuration file .
* This is because HAProxy will use the port found in the SRV record.
*/
srv - > flags & = ~ SRV_F_MAPPORTS ;
}
if ( port_str )
srv - > svc_port = port ;
HA_SPIN_UNLOCK ( SERVER_LOCK , & srv - > lock ) ;
break ;
default :
chunk_appendf ( msg , " , version '%d' not supported " , version ) ;
}
out :
if ( msg - > data ) {
chunk_appendf ( msg , " \n " ) ;
ha_warning ( " server-state application failed for server '%s/%s'%s " ,
srv - > proxy - > id , srv - > id , msg - > area ) ;
}
}
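/* Operational sketch (illustrative, not part of the original sources): the lines
* consumed above are typically produced by dumping the "show servers state" CLI
* command into the configured state file before a reload, e.g. with a made-up
* socket path:
*
*   echo "show servers state" | socat stdio /var/run/haproxy.sock > /var/lib/haproxy/global.state
*
* Note that every invalid field above only appends an error to <msg>; the state
* is applied to the server only when no error at all was detected for the line.
*/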
/*
* Read the next line from file <f> and return the server state version if one is found.
* If no version is found, then 0 is returned.
* Note that this should be the first read on <f>.
*/
static int srv_state_get_version ( FILE * f ) {
char buf [ 2 ] ;
int ret ;
/* first character of first line of the file must contain the version of the export */
if ( fgets ( buf , 2 , f ) = = NULL ) {
return 0 ;
}
ret = atoi ( buf ) ;
if ( ( ret < SRV_STATE_FILE_VERSION_MIN ) | |
( ret > SRV_STATE_FILE_VERSION_MAX ) )
return 0 ;
return ret ;
}
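/* Illustrative example (not part of the original sources): the first line of a
* state file is expected to carry nothing but the version number, e.g.:
*
*   1
*
* for which srv_state_get_version() returns 1. An empty file or a version
* outside [SRV_STATE_FILE_VERSION_MIN, SRV_STATE_FILE_VERSION_MAX] yields 0.
*/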
/*
* parses server state line stored in < buf > and supposedly in version < version > .
* Set < params > and < srv_params > accordingly .
* In case of error , params [ 0 ] is set to NULL .
*/
static void srv_state_parse_line ( char * buf , const int version , char * * params , char * * srv_params )
{
int buflen , arg , srv_arg ;
char * cur , * end ;
buflen = strlen ( buf ) ;
cur = buf ;
end = cur + buflen ;
/* we need at least one character */
if ( buflen = = 0 ) {
params [ 0 ] = NULL ;
return ;
}
/* ignore blank characters at the beginning of the line */
while ( isspace ( ( unsigned char ) * cur ) )
+ + cur ;
/* Ignore empty or commented lines */
if ( cur = = end | | * cur = = ' # ' ) {
params [ 0 ] = NULL ;
return ;
}
/* truncated lines */
if ( buf [ buflen - 1 ] ! = ' \n ' ) {
//ha_warning("server-state file '%s': truncated line\n", filepath);
params [ 0 ] = NULL ;
return ;
}
/* Removes trailing '\n' */
buf [ buflen - 1 ] = ' \0 ' ;
/* we're now ready to move the line into *srv_params[] */
params [ 0 ] = cur ;
arg = 1 ;
srv_arg = 0 ;
while ( * cur & & arg < SRV_STATE_FILE_MAX_FIELDS ) {
if ( isspace ( ( unsigned char ) * cur ) ) {
* cur = ' \0 ' ;
+ + cur ;
while ( isspace ( ( unsigned char ) * cur ) )
+ + cur ;
switch ( version ) {
case 1 :
/*
* srv_addr : params [ 4 ] = > srv_params [ 0 ]
* srv_op_state : params [ 5 ] = > srv_params [ 1 ]
* srv_admin_state : params [ 6 ] = > srv_params [ 2 ]
* srv_uweight : params [ 7 ] = > srv_params [ 3 ]
* srv_iweight : params [ 8 ] = > srv_params [ 4 ]
* srv_last_time_change : params [ 9 ] = > srv_params [ 5 ]
* srv_check_status : params [ 10 ] = > srv_params [ 6 ]
* srv_check_result : params [ 11 ] = > srv_params [ 7 ]
* srv_check_health : params [ 12 ] = > srv_params [ 8 ]
* srv_check_state : params [ 13 ] = > srv_params [ 9 ]
* srv_agent_state : params [ 14 ] = > srv_params [ 10 ]
* bk_f_forced_id : params [ 15 ] = > srv_params [ 11 ]
* srv_f_forced_id : params [ 16 ] = > srv_params [ 12 ]
* srv_fqdn : params [ 17 ] = > srv_params [ 13 ]
* srv_port : params [ 18 ] = > srv_params [ 14 ]
* srvrecord : params [ 19 ] = > srv_params [ 15 ]
*/
if ( arg > = 4 ) {
srv_params [ srv_arg ] = cur ;
+ + srv_arg ;
}
break ;
}
params [ arg ] = cur ;
+ + arg ;
}
else {
+ + cur ;
}
}
/* if the line is incomplete, then ignore it.
* otherwise, update useful flags */
switch ( version ) {
case 1 :
if ( arg < SRV_STATE_FILE_NB_FIELDS_VERSION_1 ) {
params [ 0 ] = NULL ;
return ;
}
break ;
}
return ;
}
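/* Illustrative example (not part of the original sources): with the version-1
* field layout documented above, a state line could look as follows, all names
* and values being made up:
*
*   1 bk_web 1 srv1 192.168.0.10 2 0 1 1 120 6 3 4 6 0 0 0 - 80 -
*
* srv_state_parse_line() then yields params[0]="1" (backend id),
* params[1]="bk_web" (backend name), params[3]="srv1" (server name), and
* srv_params[0..15] starting at the server address "192.168.0.10".
*/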
/* This function parses all the proxies and only takes care of the backends (since we're looking for servers).
* For each proxy, it does the following:
*  - opens its server state file (either the global one or a local one)
*  - reads the whole file, line by line
*  - analyses each line to check if it matches our current backend:
*     - backend name matches
*     - backend id matches if id is forced and name doesn't match
*  - if the server pointed to by the line is found, then the state is applied
*
* If the running backend uuid or id differs from the state file, then HAProxy reports
* a warning.
*
* Grabs the server's lock via srv_update_state().
*/
void apply_server_state ( void )
{
char mybuf [ SRV_STATE_LINE_MAXLEN ] ;
char * params [ SRV_STATE_FILE_MAX_FIELDS ] = { 0 } ;
char * srv_params [ SRV_STATE_FILE_MAX_FIELDS ] = { 0 } ;
int version , global_file_version ;
FILE * f ;
char * filepath ;
char globalfilepath [ MAXPATHLEN + 1 ] ;
char localfilepath [ MAXPATHLEN + 1 ] ;
int len , fileopenerr , globalfilepathlen , localfilepathlen ;
struct proxy * curproxy , * bk ;
struct server * srv ;
char * line ;
char * bkname , * srvname ;
struct state_line * st ;
struct ebmb_node * node , * next_node ;
global_file_version = 0 ;
globalfilepathlen = 0 ;
/* create the globalfilepath variable */
if ( global . server_state_file ) {
/* absolute path or no base directory provided */
if ( ( global . server_state_file [ 0 ] = = ' / ' ) | | ( ! global . server_state_base ) ) {
len = strlen ( global . server_state_file ) ;
if ( len > MAXPATHLEN ) {
globalfilepathlen = 0 ;
goto globalfileerror ;
}
memcpy ( globalfilepath , global . server_state_file , len ) ;
globalfilepath [ len ] = ' \0 ' ;
globalfilepathlen = len ;
}
else if ( global . server_state_base ) {
len = strlen ( global . server_state_base ) ;
if ( len > MAXPATHLEN ) {
globalfilepathlen = 0 ;
goto globalfileerror ;
}
memcpy ( globalfilepath , global . server_state_base , len ) ;
globalfilepath [ len ] = 0 ;
globalfilepathlen = len ;
/* append a slash if needed */
if ( ! globalfilepathlen | | globalfilepath [ globalfilepathlen - 1 ] ! = ' / ' ) {
if ( globalfilepathlen + 1 > MAXPATHLEN ) {
globalfilepathlen = 0 ;
goto globalfileerror ;
}
globalfilepath [ globalfilepathlen + + ] = ' / ' ;
}
len = strlen ( global . server_state_file ) ;
if ( globalfilepathlen + len > MAXPATHLEN ) {
globalfilepathlen = 0 ;
goto globalfileerror ;
}
memcpy ( globalfilepath + globalfilepathlen , global . server_state_file , len ) ;
globalfilepathlen + = len ;
globalfilepath [ globalfilepathlen + + ] = 0 ;
}
}
globalfileerror :
if ( globalfilepathlen = = 0 )
globalfilepath [ 0 ] = ' \0 ' ;
/* Load global server state in a tree */
if ( globalfilepathlen > 0 ) {
errno = 0 ;
f = fopen ( globalfilepath , " r " ) ;
if ( errno )
ha_warning ( " Can't open global server state file '%s': %s \n " , globalfilepath , strerror ( errno ) ) ;
if ( ! f )
goto out_load_server_state_in_tree ;
global_file_version = srv_state_get_version ( f ) ;
if ( global_file_version = = 0 )
goto out_load_server_state_in_tree ;
while ( fgets ( mybuf , SRV_STATE_LINE_MAXLEN , f ) ) {
line = NULL ;
line = strdup ( mybuf ) ;
if ( line = = NULL )
continue ;
srv_state_parse_line ( mybuf , global_file_version , params , srv_params ) ;
if ( params [ 0 ] = = NULL )
goto nextline ;
/* bkname */
bkname = params [ 1 ] ;
/* srvname */
srvname = params [ 3 ] ;
/* key */
chunk_printf ( & trash , " %s %s " , bkname , srvname ) ;
/* store line in tree */
st = calloc ( 1 , sizeof ( * st ) + trash . data + 1 ) ;
if ( st = = NULL ) {
goto nextline ;
}
memcpy ( st - > name_name . key , trash . area , trash . data + 1 ) ;
if ( ebst_insert ( & state_file , & st - > name_name ) ! = & st - > name_name ) {
/* this is a duplicate key, probably a hand-crafted file,
* drop it !
*/
goto nextline ;
}
/* save line */
st - > line = line ;
continue ;
nextline :
/* free up memory in case of error during the processing of the line */
free ( line ) ;
}
}
out_load_server_state_in_tree :
/* parse all proxies and load states from the tree (global file) or from a local file */
for ( curproxy = proxies_list ; curproxy ! = NULL ; curproxy = curproxy - > next ) {
/* servers are only in backends */
if ( ! ( curproxy - > cap & PR_CAP_BE ) )
continue ;
fileopenerr = 0 ;
filepath = NULL ;
/* search server state file path and name */
switch ( curproxy - > load_server_state_from_file ) {
/* read servers state from global file */
case PR_SRV_STATE_FILE_GLOBAL :
/* there was an error while generating global server state file path */
if ( globalfilepathlen = = 0 )
continue ;
filepath = globalfilepath ;
fileopenerr = 1 ;
break ;
/* this backend has its own file */
case PR_SRV_STATE_FILE_LOCAL :
localfilepathlen = 0 ;
localfilepath [ 0 ] = ' \0 ' ;
len = 0 ;
/* create the localfilepath variable */
/* absolute path or no base directory provided */
if ( ( curproxy - > server_state_file_name [ 0 ] = = ' / ' ) | | ( ! global . server_state_base ) ) {
len = strlen ( curproxy - > server_state_file_name ) ;
if ( len > MAXPATHLEN ) {
localfilepathlen = 0 ;
goto localfileerror ;
}
memcpy ( localfilepath , curproxy - > server_state_file_name , len ) ;
localfilepath [ len ] = ' \0 ' ;
localfilepathlen = len ;
}
else if ( global . server_state_base ) {
len = strlen ( global . server_state_base ) ;
localfilepathlen + = len ;
if ( localfilepathlen > MAXPATHLEN ) {
localfilepathlen = 0 ;
goto localfileerror ;
}
memcpy ( localfilepath , global . server_state_base , len ) ;
localfilepath [ localfilepathlen ] = 0 ;
/* append a slash if needed */
if ( ! localfilepathlen | | localfilepath [ localfilepathlen - 1 ] ! = ' / ' ) {
if ( localfilepathlen + 1 > MAXPATHLEN ) {
localfilepathlen = 0 ;
goto localfileerror ;
}
localfilepath [ localfilepathlen + + ] = ' / ' ;
}
len = strlen ( curproxy - > server_state_file_name ) ;
if ( localfilepathlen + len > MAXPATHLEN ) {
localfilepathlen = 0 ;
goto localfileerror ;
}
memcpy ( localfilepath + localfilepathlen , curproxy - > server_state_file_name , len ) ;
localfilepathlen + = len ;
localfilepath [ localfilepathlen + + ] = 0 ;
}
filepath = localfilepath ;
localfileerror :
if ( localfilepathlen = = 0 )
localfilepath [ 0 ] = ' \0 ' ;
break ;
case PR_SRV_STATE_FILE_NONE :
default :
continue ;
}
/* when the global file is used, we get data from the tree.
* Note that in such a case we check neither the backend name nor the uuid.
* The backend name can't be wrong since it's used as a key to retrieve the server state
* line from the tree.
*/
if ( curproxy - > load_server_state_from_file = = PR_SRV_STATE_FILE_GLOBAL ) {
struct server * srv = curproxy - > srv ;
while ( srv ) {
struct ebmb_node * node ;
struct state_line * st ;
chunk_printf ( & trash , " %s %s " , curproxy - > id , srv - > id ) ;
node = ebst_lookup ( & state_file , trash . area ) ;
if ( ! node )
goto next ;
st = container_of ( node , struct state_line , name_name ) ;
memcpy ( mybuf , st - > line , strlen ( st - > line ) ) ;
mybuf [ strlen ( st - > line ) ] = 0 ;
srv_state_parse_line ( mybuf , global_file_version , params , srv_params ) ;
if ( params [ 0 ] = = NULL )
goto next ;
srv_update_state ( srv , global_file_version , srv_params ) ;
next :
srv = srv - > next ;
}
continue ; /* next proxy in list */
}
else {
/* load 'local' state file */
errno = 0 ;
f = fopen ( filepath , " r " ) ;
if ( errno & & fileopenerr )
ha_warning ( " Can't open server state file '%s': %s \n " , filepath , strerror ( errno ) ) ;
if ( ! f )
continue ;
mybuf [ 0 ] = ' \0 ' ;
/* first character of first line of the file must contain the version of the export */
version = srv_state_get_version ( f ) ;
if ( version = = 0 ) {
ha_warning ( " Can't get version of the server state file '%s' \n " , filepath ) ;
goto fileclose ;
}
while ( fgets ( mybuf , SRV_STATE_LINE_MAXLEN , f ) ) {
int bk_f_forced_id = 0 ;
int check_id = 0 ;
int check_name = 0 ;
srv_state_parse_line ( mybuf , version , params , srv_params ) ;
if ( params [ 0 ] = = NULL ) {
continue ;
}
/* if the line is incomplete, then ignore it.
* otherwise, update useful flags */
switch ( version ) {
case 1 :
bk_f_forced_id = ( atoi ( params [ 15 ] ) & PR_O_FORCED_ID ) ;
check_id = ( atoi ( params [ 0 ] ) = = curproxy - > uuid ) ;
check_name = ( strcmp ( curproxy - > id , params [ 1 ] ) = = 0 ) ;
break ;
}
bk = curproxy ;
/* if backend can't be found, let's continue */
if ( ! check_id & & ! check_name )
continue ;
else if ( ! check_id & & check_name ) {
ha_warning ( " backend ID mismatch: from server state file: '%s', from running config '%d' \n " , params [ 0 ] , bk - > uuid ) ;
send_log ( bk , LOG_NOTICE , " backend ID mismatch: from server state file: '%s', from running config '%d' \n " , params [ 0 ] , bk - > uuid ) ;
}
else if ( check_id & & ! check_name ) {
ha_warning ( " backend name mismatch: from server state file: '%s', from running config '%s' \n " , params [ 1 ] , bk - > id ) ;
send_log ( bk , LOG_NOTICE , " backend name mismatch: from server state file: '%s', from running config '%s' \n " , params [ 1 ] , bk - > id ) ;
/* if the name doesn't match, we still want to update curproxy if the backend id
* was forced in the previous configuration */
if ( ! bk_f_forced_id )
continue ;
}
/* look for the server by its name: param[3] */
srv = server_find_best_match ( bk , params [ 3 ] , 0 , NULL ) ;
if ( ! srv ) {
/* if no server found, then warning and continue with next line */
ha_warning ( " can't find server '%s' in backend '%s' \n " ,
params [ 3 ] , params [ 1 ] ) ;
send_log ( bk , LOG_NOTICE , " can't find server '%s' in backend '%s' \n " ,
params [ 3 ] , params [ 1 ] ) ;
continue ;
}
/* now we can proceed with server's state update */
srv_update_state ( srv , version , srv_params ) ;
}
}
fileclose :
fclose ( f ) ;
}
/* now free memory allocated for the tree */
for ( node = ebmb_first ( & state_file ) , next_node = node ? ebmb_next ( node ) : NULL ;
node ;
node = next_node , next_node = node ? ebmb_next ( node ) : NULL ) {
st = container_of ( node , struct state_line , name_name ) ;
ebmb_delete ( & st - > name_name ) ;
free ( st - > line ) ;
free ( st ) ;
}
}
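/* Configuration sketch (illustrative, not part of the original sources): the
* logic above is typically driven by a setup along these lines, with made-up
* paths and names:
*
*   global
*       server-state-base /var/lib/haproxy
*       server-state-file global.state
*
*   backend bk_web
*       load-server-state-from-file global
*       server srv1 192.168.0.10:80 check
*
* With "global", each backend looks its servers up in the tree built from the
* single global file; with "local", it opens the file named by its own
* "server-state-file-name" directive instead.
*/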
/*
* Update a server's current IP address.
* ip is a pointer to the new IP address, whose address family is ip_sin_family.
* ip is in network format.
* updater is a string which contains information about the requester of the update.
* updater is used if not NULL.
*
* A log line and a stderr warning message are generated based on the server's backend options.
*
* Must be called with the server lock held.
*/
int update_server_addr ( struct server * s , void * ip , int ip_sin_family , const char * updater )
{
/* generates a log line and a warning on stderr */
if ( 1 ) {
/* book enough space for both IPv4 and IPv6 */
char oldip [ INET6_ADDRSTRLEN ] ;
char newip [ INET6_ADDRSTRLEN ] ;
memset ( oldip , ' \0 ' , INET6_ADDRSTRLEN ) ;
memset ( newip , ' \0 ' , INET6_ADDRSTRLEN ) ;
/* copy old IP address in a string */
switch ( s - > addr . ss_family ) {
case AF_INET :
inet_ntop ( s - > addr . ss_family , & ( ( struct sockaddr_in * ) & s - > addr ) - > sin_addr , oldip , INET_ADDRSTRLEN ) ;
break ;
case AF_INET6 :
inet_ntop ( s - > addr . ss_family , & ( ( struct sockaddr_in6 * ) & s - > addr ) - > sin6_addr , oldip , INET6_ADDRSTRLEN ) ;
break ;
} ;
/* copy new IP address in a string */
switch ( ip_sin_family ) {
case AF_INET :
inet_ntop ( ip_sin_family , ip , newip , INET_ADDRSTRLEN ) ;
break ;
case AF_INET6 :
inet_ntop ( ip_sin_family , ip , newip , INET6_ADDRSTRLEN ) ;
break ;
} ;
/* save log line into a buffer */
chunk_printf ( & trash , " %s/%s changed its IP from %s to %s by %s " ,
s - > proxy - > id , s - > id , oldip , newip , updater ) ;
/* write the buffer on stderr */
ha_warning ( " %s. \n " , trash . area ) ;
/* send a log */
send_log ( s - > proxy , LOG_NOTICE , " %s. \n " , trash . area ) ;
}
/* save the new IP family */
s - > addr . ss_family = ip_sin_family ;
/* save the new IP address */
switch ( ip_sin_family ) {
case AF_INET :
memcpy ( & ( ( struct sockaddr_in * ) & s - > addr ) - > sin_addr . s_addr , ip , 4 ) ;
break ;
case AF_INET6 :
memcpy ( ( ( struct sockaddr_in6 * ) & s - > addr ) - > sin6_addr . s6_addr , ip , 16 ) ;
break ;
} ;
srv_set_dyncookie ( s ) ;
return 0 ;
}
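/* Usage sketch (illustrative, not part of the original sources): a caller
* holding the server lock could push a freshly resolved IPv4 address roughly
* as follows, "new_sin_addr" being a hypothetical struct in_addr in network
* byte order:
*
*   update_server_addr(srv, &new_sin_addr, AF_INET, "DNS resolver answer");
*
* The function first logs the transition, then overwrites srv->addr with the
* new family and address.
*/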
/*
* This function updates a server's addr and port only for AF_INET and AF_INET6 families.
*
* Caller can pass its name through <updater> to get it integrated in the response message
* returned by the function.
*
* The function first does the following, in that order:
*  - validates the new addr and/or port
*  - checks if an update is required (new IP or port is different from current ones)
*  - checks the update is allowed:
*     - don't switch from/to a family other than AF_INET and AF_INET6
*     - allow all changes if no CHECKS are configured
*     - if CHECK is configured:
*        - if switching to port map (SRV_F_MAPPORTS), ensure health checks have their own ports
*  - applies the required changes to both ADDR and PORT if both 'required' and 'allowed'
*    conditions are met
*
* Must be called with the server lock held.
*/
const char * update_server_addr_port ( struct server * s , const char * addr , const char * port , char * updater )
{
struct sockaddr_storage sa ;
int ret , port_change_required ;
char current_addr [ INET6_ADDRSTRLEN ] ;
uint16_t current_port , new_port ;
struct buffer * msg ;
int changed = 0 ;
msg = get_trash_chunk ( ) ;
chunk_reset ( msg ) ;
if ( addr ) {
memset ( & sa , 0 , sizeof ( struct sockaddr_storage ) ) ;
if ( str2ip2 ( addr , & sa , 0 ) = = NULL ) {
chunk_printf ( msg , " Invalid addr '%s' " , addr ) ;
goto out ;
}
/* changes are allowed on AF_INET* families only */
if ( ( sa . ss_family ! = AF_INET ) & & ( sa . ss_family ! = AF_INET6 ) ) {
chunk_printf ( msg , " Update to families other than AF_INET and AF_INET6 supported only through configuration file " ) ;
goto out ;
}
/* collecting data currently setup */
memset ( current_addr , ' \0 ' , sizeof ( current_addr ) ) ;
ret = addr_to_str ( & s - > addr , current_addr , sizeof ( current_addr ) ) ;
/* changes are allowed on AF_INET* families only */
if ( ( ret ! = AF_INET ) & & ( ret ! = AF_INET6 ) ) {
chunk_printf ( msg , " Update for the current server address family is only supported through configuration file " ) ;
goto out ;
}
/* applying ADDR changes if required and allowed
* ipcmp returns 0 when both ADDR are the same
*/
if ( ipcmp ( & s - > addr , & sa ) = = 0 ) {
chunk_appendf ( msg , " no need to change the addr " ) ;
goto port ;
}
ipcpy ( & sa , & s - > addr ) ;
changed = 1 ;
/* we also need to update check's ADDR only if it uses the server's one */
if ( ( s - > check . state & CHK_ST_CONFIGURED ) & & ( s - > flags & SRV_F_CHECKADDR ) ) {
ipcpy ( & sa , & s - > check . addr ) ;
}
/* we also need to update the agent ADDR only if it uses the server's one */
if ( ( s - > agent . state & CHK_ST_CONFIGURED ) & & ( s - > flags & SRV_F_AGENTADDR ) ) {
ipcpy ( & sa , & s - > agent . addr ) ;
}
/* update report for caller */
chunk_printf ( msg , " IP changed from '%s' to '%s' " , current_addr , addr ) ;
}
port :
if ( port ) {
char sign = ' \0 ' ;
char * endptr ;
if ( addr )
chunk_appendf ( msg , " , " ) ;
/* collecting data currently setup */
current_port = s - > svc_port ;
/* check if PORT change is required */
port_change_required = 0 ;
sign = * port ;
errno = 0 ;
new_port = strtol ( port , & endptr , 10 ) ;
if ( ( errno ! = 0 ) | | ( port = = endptr ) ) {
chunk_appendf ( msg , " problem converting port '%s' to an int " , port ) ;
goto out ;
}
/* check if caller triggers a port mapped or offset */
if ( sign = = ' - ' | | ( sign = = ' + ' ) ) {
/* check if server currently uses port map */
if ( ! ( s - > flags & SRV_F_MAPPORTS ) ) {
/* switch from fixed port to port map mandatorily triggers
* a port change */
port_change_required = 1 ;
/* check is configured
* we ' re switching from a fixed port to a SRV_F_MAPPORTS ( mapped ) port
* prevent PORT change if the check doesn't have its dedicated port while switching
* to port mapping */
if ( ( s - > check . state & CHK_ST_CONFIGURED ) & & ! ( s - > flags & SRV_F_CHECKPORT ) ) {
chunk_appendf ( msg , " can't change <port> to port map because it is incompatible with current health check port configuration (use 'port' statement from the 'server' directive. " ) ;
goto out ;
}
}
/* we're already using port maps */
else {
port_change_required = current_port ! = new_port ;
}
}
/* fixed port */
else {
port_change_required = current_port ! = new_port ;
}
/* applying PORT changes if required and update response message */
if ( port_change_required ) {
/* apply new port */
s - > svc_port = new_port ;
changed = 1 ;
/* prepare message */
chunk_appendf ( msg , " port changed from ' " ) ;
if ( s - > flags & SRV_F_MAPPORTS )
chunk_appendf ( msg , " + " ) ;
chunk_appendf ( msg , " %d' to ' " , current_port ) ;
if ( sign = = ' - ' ) {
s - > flags | = SRV_F_MAPPORTS ;
chunk_appendf ( msg , " %c " , sign ) ;
/* just use for result output */
new_port = - new_port ;
}
else if ( sign = = ' + ' ) {
s - > flags | = SRV_F_MAPPORTS ;
chunk_appendf ( msg , " %c " , sign ) ;
}
else {
s - > flags & = ~ SRV_F_MAPPORTS ;
}
chunk_appendf ( msg , " %d' " , new_port ) ;
/* we also need to update health checks port only if it uses server's realport */
if ( ( s - > check . state & CHK_ST_CONFIGURED ) & & ! ( s - > flags & SRV_F_CHECKPORT ) ) {
s - > check . port = new_port ;
}
}
else {
chunk_appendf ( msg , " no need to change the port " ) ;
}
}
out :
if ( changed ) {
/* force connection cleanup on the given server */
srv_cleanup_connections ( s ) ;
srv_set_dyncookie ( s ) ;
}
if ( updater )
chunk_appendf ( msg , " by '%s' " , updater ) ;
chunk_appendf ( msg , " \n " ) ;
return msg - > area ;
}
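/* Usage sketch (illustrative, not part of the original sources): this function
* backs runtime changes such as the CLI's "set server <backend>/<server> addr
* <ip> [port <port>]" command; an equivalent direct call, with made-up values,
* would be:
*
*   const char *reply = update_server_addr_port(srv, "192.168.0.42", "8080", (char *)"cli");
*
* The returned string is the trash-chunk message describing what, if anything,
* was changed, suitable for echoing back to the requester.
*/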
/*
* update server status based on result of name resolution
* returns :
* 0 if server status is updated
* 1 if server status has not changed
*
* Must be called with the server lock held .
*/
int snr_update_srv_status ( struct server * s , int has_no_ip )
{
struct dns_resolvers * resolvers = s - > resolvers ;
struct dns_resolution * resolution = s - > dns_requester - > resolution ;
int exp ;
switch ( resolution - > status ) {
case RSLV_STATUS_NONE :
/* status when HAProxy has just (re)started.
* Nothing to do , since the task is already automatically started */
2015-04-13 19:15:08 -04:00
break ;
case RSLV_STATUS_VALID :
/*
* resume health checks
* server will be turned back on if health check is safe
*/
if ( has_no_ip ) {
if ( s - > next_admin & SRV_ADMF_RMAINT )
return 1 ;
srv_set_admin_flag ( s , SRV_ADMF_RMAINT ,
" No IP for server " ) ;
return 0 ;
}
if ( ! ( s - > next_admin & SRV_ADMF_RMAINT ) )
return 1 ;
srv_clr_admin_flag ( s , SRV_ADMF_RMAINT ) ;
chunk_printf ( & trash , " Server %s/%s administratively READY thanks to valid DNS answer " ,
s - > proxy - > id , s - > id ) ;
ha_warning ( " %s. \n " , trash . area ) ;
send_log ( s - > proxy , LOG_NOTICE , " %s. \n " , trash . area ) ;
return 0 ;
case RSLV_STATUS_NX :
/* stop server if resolution is NX for a long enough period */
exp = tick_add ( resolution - > last_valid , resolvers - > hold . nx ) ;
if ( ! tick_is_expired ( exp , now_ms ) )
break ;
if ( s - > next_admin & SRV_ADMF_RMAINT )
return 1 ;
srv_set_admin_flag ( s , SRV_ADMF_RMAINT , " DNS NX status " ) ;
return 0 ;
case RSLV_STATUS_TIMEOUT :
/* stop server if resolution is TIMEOUT for a long enough period */
exp = tick_add ( resolution - > last_valid , resolvers - > hold . timeout ) ;
if ( ! tick_is_expired ( exp , now_ms ) )
break ;
if ( s - > next_admin & SRV_ADMF_RMAINT )
return 1 ;
srv_set_admin_flag ( s , SRV_ADMF_RMAINT , " DNS timeout status " ) ;
return 0 ;
case RSLV_STATUS_REFUSED :
/* stop server if resolution is REFUSED for a long enough period */
        exp = tick_add(resolution->last_valid, resolvers->hold.refused);
        if (!tick_is_expired(exp, now_ms))
            break;
        if (s->next_admin & SRV_ADMF_RMAINT)
            return 1;
        srv_set_admin_flag(s, SRV_ADMF_RMAINT, "DNS refused status");
        return 0;
2016-11-02 17:58:18 -04:00
2015-04-13 19:15:08 -04:00
    default:
MAJOR: dns: Refactor the DNS code
2017-09-27 05:00:59 -04:00
        /* stop server if resolution failed for a long enough period */
        exp = tick_add(resolution->last_valid, resolvers->hold.other);
        if (!tick_is_expired(exp, now_ms))
            break;
        if (s->next_admin & SRV_ADMF_RMAINT)
            return 1;
        srv_set_admin_flag(s, SRV_ADMF_RMAINT, "unspecified DNS error");
        return 0;
2015-04-13 19:15:08 -04:00
    }
    return 1;
}
/*
* Server Name Resolution valid response callback
* It expects:
*  - <nameserver>: the name server which answered the valid response
*  - <response>: buffer containing a valid DNS response
*  - <response_len>: size of <response>
* It performs the following actions:
*  - ignore response if current ip found and server family not met
*  - update with first new ip found if family is met and current IP is not found
* returns:
*  0 on error
*  1 when no error or safe ignore
2017-11-06 11:30:28 -05:00
*
* Must be called with server lock held
2015-04-13 19:15:08 -04:00
*/
MAJOR/REORG: dns: DNS resolution task and requester queues
This patch is a major upgrade of the internal run-time DNS resolver in
HAProxy and it brings the following main changes:
1. DNS resolution task
Up to now, DNS resolution was triggered by the health check task.
From now on, the DNS resolution task is autonomous. It is started by HAProxy
right after the scheduler is available and it is woken either when a
network IO occurs for one of its nameservers or when a timeout is
reached.
This means we can now enable DNS resolution for a server without
enabling health checking.
2. Introduction of a dns_requester structure
Up to now, DNS resolution was purposely made for resolving server
hostnames.
The idea is to ensure that any HAProxy internal object is able
to trigger a DNS resolution. For this purpose, two things have to be done:
- clean up the DNS code from the server structure (this was already
quite clean actually) and keep the server's callbacks from
manipulating too much of the DNS resolution
- create an agnostic structure which allows linking a DNS resolution
and a requester of any type (using the obj_type enum)
3. Manage requesters through queues
Up to now, there was a unique relationship between a resolution and its
owner (now called the requester). That is a shame, because in some cases
multiple objects may share the same hostname and may benefit from a
resolution being performed by a third party.
This patch introduces the notion of queues, which are basically lists of
either currently running resolutions or waiting ones.
The resolutions are now available as a pool which belongs to the resolvers.
The pool has a default size of 64 resolutions per resolvers section and is
allocated at configuration parsing.
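A rough standalone illustration of such a pool follows, assuming the fixed slot count of 64 mentioned above. The structure and function names are hypothetical and do not reflect HAProxy's actual allocator; the sketch only shows the idea of reserving all resolution slots when the resolvers section is parsed and handing them out on demand.

#include <stdio.h>
#include <stdlib.h>

#define RES_POOL_SIZE 64   /* default of 64 resolutions per resolvers section */

struct res_entry {
    int  in_use;
    char hostname[64];
};

struct res_pool {
    struct res_entry entry[RES_POOL_SIZE];
};

/* Allocated once, when the resolvers section is parsed. */
static struct res_pool *res_pool_create(void)
{
    return calloc(1, sizeof(struct res_pool));
}

/* Pick a free slot; NULL means the pool is exhausted. */
static struct res_entry *res_pool_get(struct res_pool *pool)
{
    int i;

    for (i = 0; i < RES_POOL_SIZE; i++) {
        if (!pool->entry[i].in_use) {
            pool->entry[i].in_use = 1;
            return &pool->entry[i];
        }
    }
    return NULL;
}

int main(void)
{
    struct res_pool *pool = res_pool_create();
    struct res_entry *res = res_pool_get(pool);

    printf("got slot: %s\n", res ? "yes" : "no (pool exhausted)");
    free(pool);
    return 0;
}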
2017-05-22 09:17:15 -04:00
int snr_resolution_cb(struct dns_requester *requester, struct dns_nameserver *nameserver)
2015-04-13 19:15:08 -04:00
{
MAJOR/REORG: dns: DNS resolution task and requester queues
2017-05-22 09:17:15 -04:00
    struct server *s = NULL;
    struct dns_resolution *resolution = NULL;
2015-04-13 19:15:08 -04:00
    void *serverip, *firstip;
    short server_sin_family, firstip_sin_family;
    int ret;
2018-07-13 05:56:34 -04:00
    struct buffer *chk = get_trash_chunk();
2017-07-06 12:46:47 -04:00
    int has_no_ip = 0;
2015-04-13 19:15:08 -04:00
MAJOR: dns: Refactor the DNS code
2017-09-27 05:00:59 -04:00
    s = objt_server(requester->owner);
MAJOR/REORG: dns: DNS resolution task and requester queues
2017-05-22 09:17:15 -04:00
    if (!s)
        return 1;
MAJOR: dns: Refactor the DNS code
2017-09-27 05:00:59 -04:00
    resolution = s->dns_requester->resolution;
MAJOR/REORG: dns: DNS resolution task and requester queues
2017-05-22 09:17:15 -04:00
2015-04-13 19:15:08 -04:00
    /* initializing variables */
    firstip = NULL; /* pointer to the first valid response found */
    /* it will be used as the new IP if a change is required */
    firstip_sin_family = AF_UNSPEC;
    serverip = NULL; /* current server IP address */
    /* initializing server IP pointer */
    server_sin_family = s->addr.ss_family;
    switch (server_sin_family) {
    case AF_INET:
        serverip = &((struct sockaddr_in *)&s->addr)->sin_addr.s_addr;
        break;
    case AF_INET6:
        serverip = &((struct sockaddr_in6 *)&s->addr)->sin6_addr.s6_addr;
        break;
2017-01-06 13:18:32 -05:00
    case AF_UNSPEC:
        break;
2015-04-13 19:15:08 -04:00
    default:
        goto invalid;
    }
2017-05-22 09:13:10 -04:00
    ret = dns_get_ip_from_response(&resolution->response, &s->dns_opts,
2016-02-17 15:25:09 -05:00
                                   serverip, server_sin_family, &firstip,
2017-05-03 09:43:12 -04:00
                                   &firstip_sin_family, s);
2015-04-13 19:15:08 -04:00
    switch (ret) {
    case DNS_UPD_NO:
MAJOR/REORG: dns: DNS resolution task and requester queues
2017-05-22 09:17:15 -04:00
        goto update_status;
2015-04-13 19:15:08 -04:00
    case DNS_UPD_SRVIP_NOT_FOUND:
        goto save_ip;
    case DNS_UPD_CNAME:
        goto invalid;
2015-09-08 18:51:08 -04:00
    case DNS_UPD_NO_IP_FOUND:
2017-07-06 12:46:47 -04:00
        has_no_ip = 1;
MAJOR/REORG: dns: DNS resolution task and requester queues
2017-05-22 09:17:15 -04:00
        goto update_status;
2015-09-08 18:51:08 -04:00
2015-10-27 21:03:32 -04:00
    case DNS_UPD_NAME_ERROR:
        /* update resolution status to OTHER error type */
MAJOR: dns: Refactor the DNS code
2017-09-27 05:00:59 -04:00
        resolution->status = RSLV_STATUS_OTHER;
MAJOR/REORG: dns: DNS resolution task and requester queues
2017-05-22 09:17:15 -04:00
        goto update_status;
2015-10-27 21:03:32 -04:00
2015-04-13 19:15:08 -04:00
    default:
        goto invalid;
    }
 save_ip:
MAJOR: dns: Refactor the DNS code
2017-09-27 05:00:59 -04:00
    if (nameserver) {
        nameserver->counters.update++;
        /* save the first ip we found */
MAJOR/REORG: dns: DNS resolution task and requester queues
2017-05-22 09:17:15 -04:00
        chunk_printf(chk, "%s/%s", nameserver->resolvers->id, nameserver->id);
MAJOR: dns: Refactor the DNS code
2017-09-27 05:00:59 -04:00
}
MAJOR/REORG: dns: DNS resolution task and requester queues
2017-05-22 09:17:15 -04:00
    else
        chunk_printf(chk, "DNS cache");
2018-07-13 04:54:26 -04:00
    update_server_addr(s, firstip, firstip_sin_family, (char *)chk->area);
2015-04-13 19:15:08 -04:00
MAJOR/REORG: dns: DNS resolution task and requester queues
2017-05-22 09:17:15 -04:00
 update_status:
2017-07-06 12:46:47 -04:00
    snr_update_srv_status(s, has_no_ip);
MAJOR/REORG: dns: DNS resolution task and requester queues
2017-05-22 09:17:15 -04:00
    return 1;
2015-04-13 19:15:08 -04:00
 invalid:
2017-09-15 05:55:45 -04:00
    if (nameserver) {
MAJOR: dns: Refactor the DNS code
2017-09-27 05:00:59 -04:00
        nameserver->counters.invalid++;
        goto update_status;
2017-09-15 05:55:45 -04:00
    }
2017-07-06 12:46:47 -04:00
    snr_update_srv_status(s, has_no_ip);
2015-04-13 19:15:08 -04:00
    return 0;
}
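The two rules stated in the callback's description above (ignore the answer when the server's current IP is still advertised for its address family, otherwise switch to the first acceptable IP) can be sketched in isolation. The sketch below is hypothetical and self-contained; it is not the real dns_get_ip_from_response(), and the answer/pick_ip names are invented for illustration only.

#include <stdio.h>
#include <string.h>

struct answer {
    int         family;   /* stand-in for AF_INET / AF_INET6: 4 or 6 */
    const char *ip;
};

/* Keep the current IP if the DNS answer still contains it, otherwise take
 * the first answer of the matching family; NULL means nothing usable.
 */
static const char *pick_ip(const char *cur_ip, int cur_family,
                           const struct answer *ans, int nb_ans)
{
    const struct answer *first = NULL;
    int i;

    for (i = 0; i < nb_ans; i++) {
        if (ans[i].family != cur_family)
            continue;                        /* family not met: ignore */
        if (cur_ip && strcmp(ans[i].ip, cur_ip) == 0)
            return cur_ip;                   /* current IP still advertised: no change */
        if (!first)
            first = &ans[i];                 /* remember first acceptable candidate */
    }
    return first ? first->ip : NULL;
}

int main(void)
{
    const struct answer ans[] = { { 4, "192.0.2.10" }, { 4, "192.0.2.20" } };

    printf("%s\n", pick_ip("192.0.2.20", 4, ans, 2)); /* keeps 192.0.2.20 */
    printf("%s\n", pick_ip("192.0.2.99", 4, ans, 2)); /* switches to 192.0.2.10 */
    return 0;
}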
/*
* Server Name Resolution error management callback
* returns:
*  0 on error
*  1 when no error or safe ignore
2018-08-21 05:54:26 -04:00
*
* Grabs the server's lock.
2015-04-13 19:15:08 -04:00
*/
MAJOR/REORG: dns: DNS resolution task and requester queues
2017-05-22 09:17:15 -04:00
int snr_resolution_error_cb(struct dns_requester *requester, int error_code)
2015-04-13 19:15:08 -04:00
{
MAJOR: dns: Refactor the DNS code
2017-09-27 05:00:59 -04:00
    struct server *s;
2015-04-13 19:15:08 -04:00
MAJOR: dns: Refactor the DNS code
2017-09-27 05:00:59 -04:00
    s = objt_server(requester->owner);
MAJOR/REORG: dns: DNS resolution task and requester queues
2017-05-22 09:17:15 -04:00
    if (!s)
        return 1;
2017-11-07 04:42:54 -05:00
    HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
2017-07-06 12:46:47 -04:00
    snr_update_srv_status(s, 0);
2017-11-07 04:42:54 -05:00
    HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
2015-04-13 19:15:08 -04:00
    return 1;
}
2017-05-03 09:43:12 -04:00
/*
* Function to check if <ip> is already assigned to a server in the backend
2017-07-06 12:46:47 -04:00
* which owns <srv> and is up.
2017-05-03 09:43:12 -04:00
* It returns a pointer to the first server found or NULL if <ip> is not yet
* assigned.
2017-11-06 11:30:28 -05:00
*
* Must be called with server lock held
2017-05-03 09:43:12 -04:00
*/
struct server *snr_check_ip_callback(struct server *srv, void *ip, unsigned char *ip_family)
{
    struct server *tmpsrv;
    struct proxy *be;
    if (!srv)
        return NULL;
    be = srv->proxy;
    for (tmpsrv = be->srv; tmpsrv; tmpsrv = tmpsrv->next) {
2017-11-02 12:20:39 -04:00
        /* we found the current server is the same, ignore it */
        if (srv == tmpsrv)
            continue;
2017-05-03 09:43:12 -04:00
        /* We want to compare the IP in the record with the IP of the servers in the
         * same backend, only if:
         *   * DNS resolution is enabled on the server
         *   * the hostname used for the resolution by our server is the same as the
         *     one used for the server found in the backend
         *   * the server found in the backend is not our current server
         */
2017-11-07 04:42:54 -05:00
        HA_SPIN_LOCK(SERVER_LOCK, &tmpsrv->lock);
MAJOR/REORG: dns: DNS resolution task and requester queues
2017-05-22 09:17:15 -04:00
        if ((tmpsrv->hostname_dn == NULL) ||
            (srv->hostname_dn_len != tmpsrv->hostname_dn_len) ||
            (strcmp(srv->hostname_dn, tmpsrv->hostname_dn) != 0) ||
2017-11-02 12:20:39 -04:00
            (srv->puid == tmpsrv->puid)) {
2017-11-07 04:42:54 -05:00
            HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
2017-05-03 09:43:12 -04:00
            continue;
2017-11-02 12:20:39 -04:00
}
2017-05-03 09:43:12 -04:00
2017-07-06 12:46:47 -04:00
/* If the server has been taken down, don't consider it */
2017-11-02 12:20:39 -04:00
        if (tmpsrv->next_admin & SRV_ADMF_RMAINT) {
2017-11-07 04:42:54 -05:00
            HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
2017-07-06 12:46:47 -04:00
            continue;
2017-11-02 12:20:39 -04:00
}
2017-07-06 12:46:47 -04:00
2017-05-03 09:43:12 -04:00
        /* At this point, we have 2 different servers using the same DNS hostname
         * for their respective resolution.
         */
        if (*ip_family == tmpsrv->addr.ss_family &&
            ((tmpsrv->addr.ss_family == AF_INET &&
              memcmp(ip, &((struct sockaddr_in *)&tmpsrv->addr)->sin_addr, 4) == 0) ||
             (tmpsrv->addr.ss_family == AF_INET6 &&
              memcmp(ip, &((struct sockaddr_in6 *)&tmpsrv->addr)->sin6_addr, 16) == 0))) {
2017-11-07 04:42:54 -05:00
            HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
2017-05-03 09:43:12 -04:00
            return tmpsrv;
        }
2017-11-07 04:42:54 -05:00
        HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
2017-05-03 09:43:12 -04:00
}
2017-11-02 12:20:39 -04:00
2017-05-03 09:43:12 -04:00
    return NULL;
}
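One reason for such a check is to spread the records of a multi-IP answer across the servers of a backend that resolve the same hostname, instead of letting them all converge on the first record. The standalone sketch below illustrates that idea with hypothetical helpers; it is not HAProxy code and the selection it makes is deliberately simplistic.

#include <stdio.h>
#include <string.h>

/* Return 1 if <ip> is already held by one of the <nb_used> sibling servers. */
static int ip_in_use(const char *ip, const char **used, int nb_used)
{
    int i;

    for (i = 0; i < nb_used; i++)
        if (strcmp(ip, used[i]) == 0)
            return 1;
    return 0;
}

/* Pick the first answered IP not already taken by a sibling; if every
 * record is taken, fall back to the first one (assumes nb_ans >= 1).
 */
static const char *pick_unused(const char **answers, int nb_ans,
                               const char **used, int nb_used)
{
    int i;

    for (i = 0; i < nb_ans; i++)
        if (!ip_in_use(answers[i], used, nb_used))
            return answers[i];
    return answers[0];
}

int main(void)
{
    const char *answers[] = { "192.0.2.10", "192.0.2.20", "192.0.2.30" };
    const char *used[]    = { "192.0.2.10" }; /* already held by a sibling */

    printf("chosen: %s\n", pick_unused(answers, 3, used, 1));
    return 0;
}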
2016-11-02 10:34:05 -04:00
/* Sets the server's address (srv->addr) from srv->hostname using the libc's
* resolver. This is suited for initial address configuration. Returns 0 on
* success, otherwise a non-zero error code. In case of error, *err_code, if
* not NULL, is filled up.
*/
int srv_set_addr_via_libc(struct server *srv, int *err_code)
{
    if (str2ip2(srv->hostname, &srv->addr, 1) == NULL) {
        if (err_code)
2016-11-07 13:19:22 -05:00
            *err_code |= ERR_WARN;
2016-11-02 10:34:05 -04:00
        return 1;
    }
    return 0;
}
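For readers unfamiliar with the libc path used here, the standalone example below resolves a hostname through the libc resolver with getaddrinfo() and stores the first result. It only illustrates the mechanism; it is not HAProxy's str2ip2() implementation, which may use a different libc entry point depending on build options.

#include <stdio.h>
#include <string.h>
#include <netdb.h>
#include <sys/socket.h>
#include <arpa/inet.h>

/* Resolve <host> with the libc resolver and store the first result in <out>.
 * Returns 0 on success, non-zero on failure (mirroring the convention above).
 */
static int resolve_via_libc(const char *host, struct sockaddr_storage *out)
{
    struct addrinfo hints, *res;

    memset(&hints, 0, sizeof(hints));
    hints.ai_family   = AF_UNSPEC;     /* accept IPv4 or IPv6 */
    hints.ai_socktype = SOCK_STREAM;

    if (getaddrinfo(host, NULL, &hints, &res) != 0)
        return 1;

    memcpy(out, res->ai_addr, res->ai_addrlen);
    freeaddrinfo(res);
    return 0;
}

int main(void)
{
    struct sockaddr_storage ss;
    char buf[INET6_ADDRSTRLEN];

    if (resolve_via_libc("localhost", &ss) != 0) {
        printf("resolution failed\n");
        return 1;
    }
    if (ss.ss_family == AF_INET)
        inet_ntop(AF_INET, &((struct sockaddr_in *)&ss)->sin_addr, buf, sizeof(buf));
    else
        inet_ntop(AF_INET6, &((struct sockaddr_in6 *)&ss)->sin6_addr, buf, sizeof(buf));
    printf("localhost -> %s\n", buf);
    return 0;
}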
2017-04-26 05:24:02 -04:00
/* Set the server's FQDN (->hostname) from <hostname>.
* Returns -1 on failure, 0 on success.
2018-08-21 05:54:26 -04:00
*
* Must be called with the server lock held.
2017-04-26 05:24:02 -04:00
*/
2017-10-31 10:21:19 -04:00
int srv_set_fqdn(struct server *srv, const char *hostname, int dns_locked)
2017-04-26 05:24:02 -04:00
{
MAJOR/REORG: dns: DNS resolution task and requester queues
2017-05-22 09:17:15 -04:00
struct dns_resolution * resolution ;
MAJOR: dns: Refactor the DNS code
This is a huge patch with many changes, all about the DNS. Initially, the idea
was to update the DNS part to ease the threads support integration. But quickly,
I started to refactor some parts. And after several iterations, it was
impossible for me to commit the different parts atomically. So, instead of
adding tens of patches, often reworking the same parts, it was easier to merge
all my changes into a single patch. Here are all the changes made to the DNS.
First, the DNS initialization has been refactored. The DNS configuration parsing
remains untouched, in cfgparse.c. But all checks have been moved into a post-check
callback. In the function dns_finalize_config, for each resolvers section, the
nameservers configuration is tested and the task used to manage DNS resolutions
is created. The links between the backend's servers and the resolvers are also
created at this step. No connections are kept alive here, so there is no need
anymore to reopen them after the HAProxy fork. Connections used to send DNS queries
will be opened on demand.
Then, the way DNS requesters are linked to a DNS resolution has been
reworked. The resolution used by a requester is now referenced in the
dns_requester structure, and the resolution pointers in the server and dns_srvrq
structures have been removed. The wait and curr lists of requesters for a DNS
resolution have been replaced by a single list. And finally, the way a requester
is removed from a DNS resolution has been simplified. Now everything is done in
dns_unlink_resolution.
The srv_set_fqdn function has been simplified. Now, there is only one way to set the
server's FQDN, whether it is done from the CLI or when a SRV record is
resolved.
The static DNS resolutions pool has been replaced by a dynamic pool. That part
has been modified by Baptiste Assmann.
The way the DNS resolutions are triggered by the task or by a health check has
been totally refactored. Now, all timeouts are respected, especially
hold.valid. The default frequency at which a resolvers section wakes up is now
configurable using the "timeout resolve" parameter.
Now, as documented, as long as invalid responses are received, we really wait for
all name servers' responses before retrying.
As far as possible, resources allocated during DNS configuration parsing are
released when HAProxy is shut down.
Besides all these changes, the code has been cleaned up to ease code review and the
doc has been updated.
2017-09-27 05:00:59 -04:00
char * hostname_dn ;
int hostname_len , hostname_dn_len ;
2017-04-26 05:24:02 -04:00
2018-08-21 09:04:23 -04:00
/* Note that the server lock is already held. */
if ( ! srv - > resolvers )
return - 1 ;
2017-10-31 10:21:19 -04:00
if ( ! dns_locked )
2017-11-07 04:42:54 -05:00
HA_SPIN_LOCK ( DNS_LOCK , & srv - > resolvers - > lock ) ;
2017-05-22 09:17:15 -04:00
/* run-time DNS resolution was not active for this server
 * and we can't enable it at run time for now.
 */
if ( ! srv - > dns_requester )
2017-10-04 10:17:58 -04:00
goto err ;
2017-05-22 09:17:15 -04:00
chunk_reset ( & trash ) ;
2017-09-27 05:00:59 -04:00
hostname_len = strlen ( hostname ) ;
2018-07-13 04:54:26 -04:00
hostname_dn = trash . area ;
2017-09-27 05:00:59 -04:00
hostname_dn_len = dns_str_to_dn_label ( hostname , hostname_len + 1 ,
hostname_dn , trash . size ) ;
if ( hostname_dn_len = = - 1 )
2017-10-04 10:17:58 -04:00
goto err ;
2017-05-22 09:17:15 -04:00
2017-09-27 05:00:59 -04:00
resolution = srv - > dns_requester - > resolution ;
if ( resolution & &
resolution - > hostname_dn & &
! strcmp ( resolution - > hostname_dn , hostname_dn ) )
2017-10-04 10:17:58 -04:00
goto end ;
2017-05-22 09:17:15 -04:00
2017-09-27 05:00:59 -04:00
dns_unlink_resolution ( srv - > dns_requester ) ;
2017-05-22 09:17:15 -04:00
free ( srv - > hostname ) ;
free ( srv - > hostname_dn ) ;
2017-09-27 05:00:59 -04:00
srv - > hostname = strdup ( hostname ) ;
srv - > hostname_dn = strdup ( hostname_dn ) ;
2017-05-22 09:17:15 -04:00
srv - > hostname_dn_len = hostname_dn_len ;
if ( ! srv - > hostname | | ! srv - > hostname_dn )
2017-10-04 10:17:58 -04:00
goto err ;
2017-05-22 09:17:15 -04:00
2019-06-07 03:40:55 -04:00
if ( srv - > flags & SRV_F_NO_RESOLUTION )
goto end ;
2017-11-06 09:15:04 -05:00
if ( dns_link_resolution ( srv , OBJ_TYPE_SERVER , 1 ) = = - 1 )
2017-10-04 10:17:58 -04:00
goto err ;
end :
2017-10-31 10:21:19 -04:00
if ( ! dns_locked )
2017-11-07 04:42:54 -05:00
HA_SPIN_UNLOCK ( DNS_LOCK , & srv - > resolvers - > lock ) ;
2017-05-22 09:17:15 -04:00
return 0 ;
2017-10-04 10:17:58 -04:00
err :
2017-10-31 10:21:19 -04:00
if ( ! dns_locked )
2017-11-07 04:42:54 -05:00
HA_SPIN_UNLOCK ( DNS_LOCK , & srv - > resolvers - > lock ) ;
2017-10-04 10:17:58 -04:00
return - 1 ;
2017-04-26 05:24:02 -04:00
}
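srv_set_fqdn() stores two forms of the name: the plain FQDN (srv->hostname) and its DNS "label" encoding (srv->hostname_dn) produced by dns_str_to_dn_label(), where each dot-separated component is prefixed by its length. A small standalone sketch of that encoding is shown below; it assumes a simple length-prefixed output terminated by a 0 byte, and its checks and return convention are simplified compared to the real function.

#include <string.h>

/* Illustrative sketch: convert "www.example.com" into the DNS label form
 * "\3www\7example\3com\0". Returns the encoded length (including the final
 * 0 byte) or -1 if a label is empty/too long or the output buffer is too small.
 */
static int str_to_dn_label(const char *str, char *out, size_t out_size)
{
	size_t pos = 0;

	while (*str) {
		const char *dot = strchr(str, '.');
		size_t len = dot ? (size_t)(dot - str) : strlen(str);

		if (len == 0 || len > 63 || pos + len + 1 > out_size)
			return -1;

		out[pos++] = (char)len;        /* length prefix */
		memcpy(out + pos, str, len);   /* label bytes */
		pos += len;

		str += len;
		if (*str == '.')
			str++;
	}
	if (pos + 1 > out_size)
		return -1;
	out[pos++] = 0;                        /* root label terminator */
	return (int)pos;
}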
2016-11-02 10:34:05 -04:00
/* Sets the server's address (srv->addr) from srv->lastaddr which was filled
 * from the state file. This is suited for initial address configuration.
 * Returns 0 on success, otherwise a non-zero error code. In case of error,
 * *err_code, if not NULL, is filled up.
 */
static int srv_apply_lastaddr ( struct server * srv , int * err_code )
{
if ( ! str2ip2 ( srv - > lastaddr , & srv - > addr , 0 ) ) {
if ( err_code )
* err_code | = ERR_WARN ;
return 1 ;
}
return 0 ;
}
2016-11-04 10:10:17 -04:00
/* returns 0 if no error, otherwise a combination of ERR_* flags */
static int srv_iterate_initaddr ( struct server * srv )
{
int return_code = 0 ;
int err_code ;
unsigned int methods ;
methods = srv - > init_addr_methods ;
if ( ! methods ) { // default to "last,libc"
srv_append_initaddr ( & methods , SRV_IADDR_LAST ) ;
srv_append_initaddr ( & methods , SRV_IADDR_LIBC ) ;
}
2016-11-07 15:03:16 -05:00
/* "-dr" : always append "none" so that server addresses resolution
* failures are silently ignored , this is convenient to validate some
* configs out of their environment .
*/
if ( global . tune . options & GTUNE_RESOLVE_DONTFAIL )
srv_append_initaddr ( & methods , SRV_IADDR_NONE ) ;
2016-11-04 10:10:17 -04:00
while ( methods ) {
err_code = 0 ;
switch ( srv_get_next_initaddr ( & methods ) ) {
case SRV_IADDR_LAST :
if ( ! srv - > lastaddr )
continue ;
if ( srv_apply_lastaddr ( srv , & err_code ) = = 0 )
MINOR: server: Add dynamic session cookies.
This adds a new "dynamic" keyword for the cookie option. If set, a cookie
will be generated for each server (assuming one isn't already provided on
the "server" line), from the IP of the server, the TCP port, and a secret
key provided. To provide the secret key, a new keyword has been added,
"dynamic-cookie-key", for backends.
Example:
backend bk_web
balance roundrobin
dynamic-cookie-key "bla"
cookie WEBSRV insert dynamic
server s1 127.0.0.1:80 check
server s2 192.168.56.1:80 check
This is a first step towards being able to dynamically add and remove servers,
without modifying the configuration file, and still have all the load
balancers redirect the traffic to the right server.
It provides a way to generate session cookies based on the IP address of the
server, the TCP port, and the secret key provided.
2017-03-14 15:01:29 -04:00
goto out ;
2016-11-04 10:10:17 -04:00
return_code | = err_code ;
break ;
case SRV_IADDR_LIBC :
if ( ! srv - > hostname )
continue ;
if ( srv_set_addr_via_libc ( srv , & err_code ) = = 0 )
2017-03-14 15:01:29 -04:00
goto out ;
2016-11-04 10:10:17 -04:00
return_code | = err_code ;
break ;
2016-11-04 10:17:58 -04:00
case SRV_IADDR_NONE :
srv_set_admin_flag ( srv , SRV_ADMF_RMAINT , NULL ) ;
2016-11-07 13:19:22 -05:00
if ( return_code ) {
2017-11-24 10:50:31 -05:00
ha_warning ( " parsing [%s:%d] : 'server %s' : could not resolve address '%s', disabling server. \n " ,
srv - > conf . file , srv - > conf . line , srv - > id , srv - > hostname ) ;
2016-11-07 13:19:22 -05:00
}
2016-11-04 10:17:58 -04:00
return return_code ;
2016-11-02 10:05:56 -04:00
case SRV_IADDR_IP :
ipcpy ( & srv - > init_addr , & srv - > addr ) ;
if ( return_code ) {
2017-11-24 10:50:31 -05:00
ha_warning ( " parsing [%s:%d] : 'server %s' : could not resolve address '%s', falling back to configured address. \n " ,
srv - > conf . file , srv - > conf . line , srv - > id , srv - > hostname ) ;
2016-11-02 10:05:56 -04:00
}
2017-03-14 15:01:29 -04:00
goto out ;
2016-11-02 10:05:56 -04:00
2016-11-04 10:10:17 -04:00
default : /* unhandled method */
break ;
}
}
if ( ! return_code ) {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : 'server %s' : no method found to resolve address '%s' \n " ,
2016-11-04 10:10:17 -04:00
srv - > conf . file , srv - > conf . line , srv - > id , srv - > hostname ) ;
}
2016-11-07 13:19:22 -05:00
else {
2017-11-24 10:50:31 -05:00
ha_alert ( " parsing [%s:%d] : 'server %s' : could not resolve address '%s'. \n " ,
2016-11-07 13:19:22 -05:00
srv - > conf . file , srv - > conf . line , srv - > id , srv - > hostname ) ;
}
2016-11-04 10:10:17 -04:00
return_code | = ERR_ALERT | ERR_FATAL ;
return return_code ;
2017-03-14 15:01:29 -04:00
out :
srv_set_dyncookie ( srv ) ;
return return_code ;
2016-11-04 10:10:17 -04:00
}
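Once an address is known, srv_set_dyncookie() (called at the "out" label above) can derive a dynamic cookie for the server. The commit message earlier describes the inputs (server IP, TCP port, and the backend's secret key); the exact hashing HAProxy uses is not shown in this excerpt, so the sketch below only illustrates the idea of deriving a stable, printable value from those three inputs with a simple FNV-1a hash. The function name and the hash choice are assumptions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative sketch only: derive a stable hex cookie value from the secret
 * key, the server's address bytes and its port. HAProxy's real implementation
 * may use a different hash and encoding.
 */
static void make_dynamic_cookie(const char *key, const void *addr,
                                size_t addr_len, uint16_t port,
                                char *out, size_t out_size)
{
	uint64_t h = 0xcbf29ce484222325ULL;     /* FNV-1a offset basis */
	const unsigned char *p;
	size_t i;

	for (p = (const unsigned char *)key; *p; p++)
		h = (h ^ *p) * 0x100000001b3ULL;
	for (p = addr, i = 0; i < addr_len; i++)
		h = (h ^ p[i]) * 0x100000001b3ULL;
	h = (h ^ (port & 0xff)) * 0x100000001b3ULL;
	h = (h ^ (port >> 8)) * 0x100000001b3ULL;

	snprintf(out, out_size, "%016llx", (unsigned long long)h);
}

The useful property is determinism: for server s1 at 127.0.0.1:80 with key "bla", every load balancer sharing the same key computes the same cookie value, which is what lets several instances route to the right server without coordination.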
2016-11-02 10:34:05 -04:00
/*
 * This function parses all backends and all servers within each backend
 * and performs the servers' address resolution based on information provided by:
 *   - configuration file
 *   - server-state file (states provided by an 'old' haproxy process)
 *
 * Returns 0 if no error, otherwise a combination of ERR_* flags.
 */
int srv_init_addr ( void )
{
struct proxy * curproxy ;
int return_code = 0 ;
2017-11-24 10:54:05 -05:00
curproxy = proxies_list ;
2016-11-02 10:34:05 -04:00
while ( curproxy ) {
struct server * srv ;
/* servers are in backend only */
if ( ! ( curproxy - > cap & PR_CAP_BE ) )
goto srv_init_addr_next ;
2016-11-04 10:10:17 -04:00
for ( srv = curproxy - > srv ; srv ; srv = srv - > next )
2017-09-06 08:22:45 -04:00
if ( srv - > hostname )
return_code | = srv_iterate_initaddr ( srv ) ;
2016-11-02 10:34:05 -04:00
srv_init_addr_next :
curproxy = curproxy - > next ;
}
return return_code ;
}
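srv_iterate_initaddr() above walks a small ordered list of methods packed into srv->init_addr_methods: "last", then "libc", and optionally "none" or a literal IP. The exact encoding is not visible in this excerpt; the sketch below simply assumes a fixed number of bits per method to show how several method codes can be appended to and popped from a single unsigned int in order, mirroring the srv_append_initaddr()/srv_get_next_initaddr() pattern.

#include <stdio.h>

/* Illustrative assumption: 3 bits per method, so roughly ten methods fit in
 * one 32-bit word. The real bit layout in HAProxy may differ.
 */
enum iaddr_method { IADDR_END = 0, IADDR_LAST, IADDR_LIBC, IADDR_NONE, IADDR_IP };

static void append_method(unsigned int *list, enum iaddr_method m)
{
	int shift = 0;

	while ((*list >> shift) & 7)     /* find the first free 3-bit slot */
		shift += 3;
	*list |= (unsigned int)m << shift;
}

static enum iaddr_method next_method(unsigned int *list)
{
	enum iaddr_method m = (enum iaddr_method)(*list & 7);

	*list >>= 3;                     /* consume the first slot */
	return m;
}

int main(void)
{
	unsigned int methods = 0;
	enum iaddr_method m;

	append_method(&methods, IADDR_LAST);
	append_method(&methods, IADDR_LIBC);
	append_method(&methods, IADDR_NONE);

	while ((m = next_method(&methods)) != IADDR_END)
		printf("method %d\n", m);    /* prints 1, 2, 3 in order */
	return 0;
}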
2018-08-21 05:54:26 -04:00
/*
* Must be called with the server lock held .
*/
2017-10-31 10:21:19 -04:00
const char * update_server_fqdn ( struct server * server , const char * fqdn , const char * updater , int dns_locked )
2017-04-26 05:24:02 -04:00
{
2018-07-13 05:56:34 -04:00
struct buffer * msg ;
2017-04-26 05:24:02 -04:00
msg = get_trash_chunk ( ) ;
chunk_reset ( msg ) ;
2017-08-04 12:35:36 -04:00
if ( server - > hostname & & ! strcmp ( fqdn , server - > hostname ) ) {
2017-04-26 05:24:02 -04:00
chunk_appendf ( msg , " no need to change the FQDN " ) ;
goto out ;
}
if ( strlen ( fqdn ) > DNS_MAX_NAME_SIZE | | invalid_domainchar ( fqdn ) ) {
chunk_appendf ( msg , " invalid fqdn '%s' " , fqdn ) ;
goto out ;
}
chunk_appendf ( msg , " %s/%s changed its FQDN from %s to %s " ,
server - > proxy - > id , server - > id , server - > hostname , fqdn ) ;
2017-10-31 10:21:19 -04:00
if ( srv_set_fqdn ( server , fqdn , dns_locked ) < 0 ) {
2017-04-26 05:24:02 -04:00
chunk_reset ( msg ) ;
chunk_appendf ( msg , " could not update %s/%s FQDN " ,
server - > proxy - > id , server - > id ) ;
goto out ;
}
/* Flag as FQDN set from stats socket. */
2017-08-31 08:41:55 -04:00
server - > next_admin | = SRV_ADMF_HMAINT ;
2017-04-26 05:24:02 -04:00
out :
if ( updater )
chunk_appendf ( msg , " by '%s' " , updater ) ;
chunk_appendf ( msg , " \n " ) ;
2018-07-13 04:54:26 -04:00
return msg - > area ;
2017-04-26 05:24:02 -04:00
}
2016-11-23 11:15:08 -05:00
/* Expects to find a backend and a server in <arg> under the form <backend>/<server>,
 * and returns the pointer to the server. Otherwise, it displays adequate error messages
2016-11-24 09:53:53 -05:00
 * on the CLI, sets the CLI's state to CLI_ST_PRINT and returns NULL. This is only
2016-11-23 11:15:08 -05:00
 * used for CLI commands requiring a server name.
 * Important: the <arg> is modified to remove the '/'.
 */
struct server * cli_find_server ( struct appctx * appctx , char * arg )
{
struct proxy * px ;
struct server * sv ;
char * line ;
/* split "backend/server" and make <line> point to server */
for ( line = arg ; * line ; line + + )
if ( * line = = ' / ' ) {
* line + + = ' \0 ' ;
break ;
}
if ( ! * line | | ! * arg ) {
2019-08-09 05:21:01 -04:00
cli_err ( appctx , " Require 'backend/server'. \n " ) ;
2016-11-23 11:15:08 -05:00
return NULL ;
}
if ( ! get_backend_server ( arg , line , & px , & sv ) ) {
2019-08-09 05:21:01 -04:00
cli_err ( appctx , px ? " No such server. \n " : " No such backend. \n " ) ;
2016-11-23 11:15:08 -05:00
return NULL ;
}
if ( px - > state = = PR_STSTOPPED ) {
2019-08-09 05:21:01 -04:00
cli_err ( appctx , " Proxy is disabled. \n " ) ;
2016-11-23 11:15:08 -05:00
return NULL ;
}
return sv ;
}
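cli_find_server() splits its argument in place at the first '/', so "bk_web/s1" becomes the backend name "bk_web" and the server name "s1". A standalone sketch of just that in-place split is shown below; the surrounding lookup and error reporting are HAProxy-specific and omitted.

#include <stdio.h>

/* Illustrative sketch: split "backend/server" in place. Returns a pointer to
 * the server part, or NULL if either side is empty or no '/' is present.
 */
static char *split_backend_server(char *arg)
{
	char *line;

	for (line = arg; *line; line++) {
		if (*line == '/') {
			*line++ = '\0';          /* terminate the backend name */
			break;
		}
	}
	if (!*arg || !*line)
		return NULL;
	return line;
}

int main(void)
{
	char buf[] = "bk_web/s1";
	char *srv = split_backend_server(buf);

	if (srv)
		printf("backend=%s server=%s\n", buf, srv);  /* backend=bk_web server=s1 */
	return 0;
}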
2016-11-02 10:34:05 -04:00
2018-08-21 05:54:26 -04:00
/* grabs the server lock */
2018-04-18 07:26:46 -04:00
static int cli_parse_set_server ( char * * args , char * payload , struct appctx * appctx , void * private )
2016-11-18 20:00:33 -05:00
{
struct server * sv ;
const char * warning ;
if ( ! cli_has_level ( appctx , ACCESS_LVL_ADMIN ) )
return 1 ;
sv = cli_find_server ( appctx , args [ 2 ] ) ;
if ( ! sv )
return 1 ;
2017-11-07 04:42:54 -05:00
HA_SPIN_LOCK ( SERVER_LOCK , & sv - > lock ) ;
2017-10-23 08:39:51 -04:00
2016-11-18 20:00:33 -05:00
if ( strcmp ( args [ 3 ] , " weight " ) = = 0 ) {
warning = server_parse_weight_change_request ( sv , args [ 4 ] ) ;
2019-08-09 05:21:01 -04:00
if ( warning )
cli_err ( appctx , warning ) ;
2016-11-18 20:00:33 -05:00
}
else if ( strcmp ( args [ 3 ] , " state " ) = = 0 ) {
if ( strcmp ( args [ 4 ] , " ready " ) = = 0 )
srv_adm_set_ready ( sv ) ;
else if ( strcmp ( args [ 4 ] , " drain " ) = = 0 )
srv_adm_set_drain ( sv ) ;
else if ( strcmp ( args [ 4 ] , " maint " ) = = 0 )
srv_adm_set_maint ( sv ) ;
2019-08-09 05:21:01 -04:00
else
cli_err ( appctx , " 'set server <srv> state' expects 'ready', 'drain' and 'maint'. \n " ) ;
2016-11-18 20:00:33 -05:00
}
else if ( strcmp ( args [ 3 ] , " health " ) = = 0 ) {
2019-08-09 05:21:01 -04:00
if ( sv - > track )
cli_err ( appctx , " cannot change health on a tracking server. \n " ) ;
2016-11-18 20:00:33 -05:00
else if ( strcmp ( args [ 4 ] , " up " ) = = 0 ) {
sv - > check . health = sv - > check . rise + sv - > check . fall - 1 ;
2017-10-19 08:42:30 -04:00
srv_set_running ( sv , " changed from CLI " , NULL ) ;
2016-11-18 20:00:33 -05:00
}
else if ( strcmp ( args [ 4 ] , " stopping " ) = = 0 ) {
sv - > check . health = sv - > check . rise + sv - > check . fall - 1 ;
2017-10-19 08:42:30 -04:00
srv_set_stopping ( sv , " changed from CLI " , NULL ) ;
2016-11-18 20:00:33 -05:00
}
else if ( strcmp ( args [ 4 ] , " down " ) = = 0 ) {
sv - > check . health = 0 ;
2017-10-19 08:42:30 -04:00
srv_set_stopped ( sv , " changed from CLI " , NULL ) ;
2016-11-18 20:00:33 -05:00
}
2019-08-09 05:21:01 -04:00
else
cli_err ( appctx , " 'set server <srv> health' expects 'up', 'stopping', or 'down'. \n " ) ;
2016-11-18 20:00:33 -05:00
}
else if ( strcmp ( args [ 3 ] , " agent " ) = = 0 ) {
2019-08-09 05:21:01 -04:00
if ( ! ( sv - > agent . state & CHK_ST_ENABLED ) )
cli_err ( appctx , " agent checks are not enabled on this server. \n " ) ;
2016-11-18 20:00:33 -05:00
else if ( strcmp ( args [ 4 ] , " up " ) = = 0 ) {
sv - > agent . health = sv - > agent . rise + sv - > agent . fall - 1 ;
2017-10-19 08:42:30 -04:00
srv_set_running ( sv , " changed from CLI " , NULL ) ;
2016-11-18 20:00:33 -05:00
}
else if ( strcmp ( args [ 4 ] , " down " ) = = 0 ) {
sv - > agent . health = 0 ;
2017-10-19 08:42:30 -04:00
srv_set_stopped ( sv , " changed from CLI " , NULL ) ;
2016-11-18 20:00:33 -05:00
}
2019-08-09 05:21:01 -04:00
else
cli_err ( appctx , " 'set server <srv> agent' expects 'up' or 'down'. \n " ) ;
2016-11-18 20:00:33 -05:00
}
2017-01-09 03:40:42 -05:00
else if ( strcmp ( args [ 3 ] , " agent-addr " ) = = 0 ) {
2019-08-09 05:21:01 -04:00
if ( ! ( sv - > agent . state & CHK_ST_ENABLED ) )
cli_err ( appctx , " agent checks are not enabled on this server. \n " ) ;
else if ( str2ip ( args [ 4 ] , & sv - > agent . addr ) = = NULL )
cli_err ( appctx , " incorrect addr address given for agent. \n " ) ;
2017-01-09 03:40:42 -05:00
}
else if ( strcmp ( args [ 3 ] , " agent-send " ) = = 0 ) {
2019-08-09 05:21:01 -04:00
if ( ! ( sv - > agent . state & CHK_ST_ENABLED ) )
cli_err ( appctx , " agent checks are not enabled on this server. \n " ) ;
else {
2020-04-06 11:54:24 -04:00
if ( ! set_srv_agent_send ( sv , args [ 4 ] ) )
2019-08-09 05:21:01 -04:00
cli_err ( appctx , " cannot allocate memory for new string. \n " ) ;
2017-01-09 03:40:42 -05:00
}
}
2016-11-18 20:00:33 -05:00
else if ( strcmp ( args [ 3 ] , " check-port " ) = = 0 ) {
int i = 0 ;
if ( strl2irc ( args [ 4 ] , strlen ( args [ 4 ] ) , & i ) ! = 0 ) {
2019-08-09 05:21:01 -04:00
cli_err ( appctx , " 'set server <srv> check-port' expects an integer as argument. \n " ) ;
2017-11-05 04:19:23 -05:00
goto out_unlock ;
2016-11-18 20:00:33 -05:00
}
if ( ( i < 0 ) | | ( i > 65535 ) ) {
2019-08-09 05:21:01 -04:00
cli_err ( appctx , " provided port is not valid. \n " ) ;
2017-11-05 04:19:23 -05:00
goto out_unlock ;
2016-11-18 20:00:33 -05:00
}
/* prevent the update of port to 0 if MAPPORTS are in use */
if ( ( sv - > flags & SRV_F_MAPPORTS ) & & ( i = = 0 ) ) {
2019-08-09 05:21:01 -04:00
cli_err ( appctx , " can't unset 'port' since MAPPORTS is in use. \n " ) ;
2017-11-05 04:19:23 -05:00
goto out_unlock ;
2016-11-18 20:00:33 -05:00
}
sv - > check . port = i ;
2019-08-09 05:21:01 -04:00
cli_msg ( appctx , LOG_NOTICE , " health check port updated. \n " ) ;
2016-11-18 20:00:33 -05:00
}
else if ( strcmp ( args [ 3 ] , " addr " ) = = 0 ) {
char * addr = NULL ;
char * port = NULL ;
if ( strlen ( args [ 4 ] ) = = 0 ) {
2019-08-09 05:21:01 -04:00
cli_err ( appctx , " set server <b>/<s> addr requires an address and optionally a port. \n " ) ;
2017-11-05 04:19:23 -05:00
goto out_unlock ;
2016-11-18 20:00:33 -05:00
}
else {
addr = args [ 4 ] ;
}
if ( strcmp ( args [ 5 ] , " port " ) = = 0 ) {
port = args [ 6 ] ;
}
warning = update_server_addr_port ( sv , addr , port , " stats socket command " ) ;
2019-08-09 05:21:01 -04:00
if ( warning )
cli_msg ( appctx , LOG_WARNING , warning ) ;
2016-11-18 20:00:33 -05:00
srv_clr_admin_flag ( sv , SRV_ADMF_RMAINT ) ;
}
2017-04-26 05:24:02 -04:00
else if ( strcmp ( args [ 3 ] , " fqdn " ) = = 0 ) {
if ( ! * args [ 4 ] ) {
2019-08-09 05:21:01 -04:00
cli_err ( appctx , " set server <b>/<s> fqdn requires a FQDN. \n " ) ;
2017-11-05 04:19:23 -05:00
goto out_unlock ;
2017-04-26 05:24:02 -04:00
}
2019-06-07 03:40:55 -04:00
/* ensure runtime resolver will process this new fqdn */
if ( sv - > flags & SRV_F_NO_RESOLUTION ) {
sv - > flags & = ~ SRV_F_NO_RESOLUTION ;
}
2017-10-31 10:21:19 -04:00
warning = update_server_fqdn ( sv , args [ 4 ] , " stats socket command " , 0 ) ;
2019-08-09 05:21:01 -04:00
if ( warning )
cli_msg ( appctx , LOG_WARNING , warning ) ;
2017-04-26 05:24:02 -04:00
}
2016-11-18 20:00:33 -05:00
else {
2019-08-09 05:21:01 -04:00
cli_err ( appctx ,
" 'set server <srv>' only supports 'agent', 'health', 'state', "
" 'weight', 'addr', 'fqdn' and 'check-port'. \n " ) ;
2016-11-18 20:00:33 -05:00
}
2017-11-05 04:19:23 -05:00
out_unlock :
2017-11-07 04:42:54 -05:00
HA_SPIN_UNLOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-18 20:00:33 -05:00
return 1 ;
}
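These "set server" keywords are reached through the stats socket / CLI. Assuming a UNIX stats socket is configured (the path and command below are example values, not taken from this file), a client only needs to connect, write one command line and read the response, as the minimal C sketch below shows; in practice the same thing is commonly done from a shell with a tool like socat.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

/* Illustrative sketch: send one CLI command to a HAProxy stats socket and
 * print the reply. The socket path and the command are example values.
 */
int main(void)
{
	const char *path = "/var/run/haproxy.sock";     /* assumed stats socket */
	const char *cmd  = "set server bk_web/s1 state drain\n";
	struct sockaddr_un sun;
	char buf[4096];
	ssize_t n;
	int fd;

	fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (fd < 0)
		return 1;

	memset(&sun, 0, sizeof(sun));
	sun.sun_family = AF_UNIX;
	strncpy(sun.sun_path, path, sizeof(sun.sun_path) - 1);

	if (connect(fd, (struct sockaddr *)&sun, sizeof(sun)) < 0 ||
	    write(fd, cmd, strlen(cmd)) < 0)
		return 1;

	while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}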
2018-04-18 07:26:46 -04:00
static int cli_parse_get_weight ( char * * args , char * payload , struct appctx * appctx , void * private )
2016-11-22 06:34:35 -05:00
{
struct stream_interface * si = appctx - > owner ;
struct proxy * px ;
struct server * sv ;
char * line ;
/* split "backend/server" and make <line> point to server */
for ( line = args [ 2 ] ; * line ; line + + )
if ( * line = = ' / ' ) {
* line + + = ' \0 ' ;
break ;
}
2019-08-09 05:21:01 -04:00
if ( ! * line )
return cli_err ( appctx , " Require 'backend/server'. \n " ) ;
2016-11-22 06:34:35 -05:00
2019-08-09 05:21:01 -04:00
if ( ! get_backend_server ( args [ 2 ] , line , & px , & sv ) )
return cli_err ( appctx , px ? " No such server. \n " : " No such backend. \n " ) ;
2016-11-22 06:34:35 -05:00
/* return server's effective weight at the moment */
2018-07-13 04:54:26 -04:00
snprintf ( trash . area , trash . size , " %d (initial %d) \n " , sv - > uweight ,
sv - > iweight ) ;
if ( ci_putstr ( si_ic ( si ) , trash . area ) = = - 1 ) {
2018-11-15 05:08:52 -05:00
si_rx_room_blk ( si ) ;
2016-12-05 08:25:08 -05:00
return 0 ;
}
2016-11-22 06:34:35 -05:00
return 1 ;
}
2018-08-21 09:35:31 -04:00
/* Parse a "set weight" command.
*
* Grabs the server lock .
2018-08-21 05:54:26 -04:00
*/
2018-04-18 07:26:46 -04:00
static int cli_parse_set_weight ( char * * args , char * payload , struct appctx * appctx , void * private )
2016-11-22 06:34:35 -05:00
{
struct server * sv ;
const char * warning ;
if ( ! cli_has_level ( appctx , ACCESS_LVL_ADMIN ) )
return 1 ;
sv = cli_find_server ( appctx , args [ 2 ] ) ;
if ( ! sv )
return 1 ;
2018-08-21 09:35:31 -04:00
HA_SPIN_LOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-22 06:34:35 -05:00
warning = server_parse_weight_change_request ( sv , args [ 3 ] ) ;
2019-08-09 05:21:01 -04:00
if ( warning )
cli_err ( appctx , warning ) ;
2018-08-21 09:35:31 -04:00
HA_SPIN_UNLOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-22 06:34:35 -05:00
return 1 ;
}
2018-08-21 05:54:26 -04:00
/* parse a "set maxconn server" command. It always returns 1.
*
2018-08-21 09:35:31 -04:00
* Grabs the server lock .
2018-08-21 05:54:26 -04:00
*/
2018-04-18 07:26:46 -04:00
static int cli_parse_set_maxconn_server ( char * * args , char * payload , struct appctx * appctx , void * private )
2016-11-23 05:26:56 -05:00
{
struct server * sv ;
const char * warning ;
if ( ! cli_has_level ( appctx , ACCESS_LVL_ADMIN ) )
return 1 ;
sv = cli_find_server ( appctx , args [ 3 ] ) ;
if ( ! sv )
return 1 ;
2018-08-21 09:35:31 -04:00
HA_SPIN_LOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-23 05:26:56 -05:00
warning = server_parse_maxconn_change_request ( sv , args [ 4 ] ) ;
2019-08-09 05:21:01 -04:00
if ( warning )
cli_err ( appctx , warning ) ;
2018-08-21 09:35:31 -04:00
HA_SPIN_UNLOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-23 05:26:56 -05:00
return 1 ;
}
2016-11-22 06:34:35 -05:00
2018-08-21 05:54:26 -04:00
/* parse a "disable agent" command. It always returns 1.
*
2018-08-21 09:35:31 -04:00
* Grabs the server lock .
2018-08-21 05:54:26 -04:00
*/
2018-04-18 07:26:46 -04:00
static int cli_parse_disable_agent ( char * * args , char * payload , struct appctx * appctx , void * private )
2016-11-24 06:56:01 -05:00
{
struct server * sv ;
if ( ! cli_has_level ( appctx , ACCESS_LVL_ADMIN ) )
return 1 ;
sv = cli_find_server ( appctx , args [ 2 ] ) ;
if ( ! sv )
return 1 ;
2018-08-21 09:35:31 -04:00
HA_SPIN_LOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-24 06:56:01 -05:00
sv - > agent . state & = ~ CHK_ST_ENABLED ;
2018-08-21 09:35:31 -04:00
HA_SPIN_UNLOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-24 06:56:01 -05:00
return 1 ;
}
2018-08-21 05:54:26 -04:00
/* parse a "disable health" command. It always returns 1.
*
2018-08-21 09:35:31 -04:00
* Grabs the server lock .
2018-08-21 05:54:26 -04:00
*/
2018-04-18 07:26:46 -04:00
static int cli_parse_disable_health ( char * * args , char * payload , struct appctx * appctx , void * private )
2016-11-24 06:51:04 -05:00
{
struct server * sv ;
if ( ! cli_has_level ( appctx , ACCESS_LVL_ADMIN ) )
return 1 ;
sv = cli_find_server ( appctx , args [ 2 ] ) ;
if ( ! sv )
return 1 ;
2018-08-21 09:35:31 -04:00
HA_SPIN_LOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-24 06:51:04 -05:00
sv - > check . state & = ~ CHK_ST_ENABLED ;
2018-08-21 09:35:31 -04:00
HA_SPIN_UNLOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-24 06:51:04 -05:00
return 1 ;
}
2018-08-21 05:54:26 -04:00
/* parse a "disable server" command. It always returns 1.
*
2018-08-21 09:35:31 -04:00
* Grabs the server lock .
2018-08-21 05:54:26 -04:00
*/
2018-04-18 07:26:46 -04:00
static int cli_parse_disable_server ( char * * args , char * payload , struct appctx * appctx , void * private )
2016-11-24 06:47:00 -05:00
{
struct server * sv ;
if ( ! cli_has_level ( appctx , ACCESS_LVL_ADMIN ) )
return 1 ;
sv = cli_find_server ( appctx , args [ 2 ] ) ;
if ( ! sv )
return 1 ;
2018-08-21 09:35:31 -04:00
HA_SPIN_LOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-24 06:47:00 -05:00
srv_adm_set_maint ( sv ) ;
2018-08-21 09:35:31 -04:00
HA_SPIN_UNLOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-24 06:47:00 -05:00
return 1 ;
}
2018-08-21 05:54:26 -04:00
/* parse a "enable agent" command. It always returns 1.
*
2018-08-21 09:35:31 -04:00
* Grabs the server lock .
2018-08-21 05:54:26 -04:00
*/
2018-04-18 07:26:46 -04:00
static int cli_parse_enable_agent ( char * * args , char * payload , struct appctx * appctx , void * private )
2016-11-24 06:56:01 -05:00
{
struct server * sv ;
if ( ! cli_has_level ( appctx , ACCESS_LVL_ADMIN ) )
return 1 ;
sv = cli_find_server ( appctx , args [ 2 ] ) ;
if ( ! sv )
return 1 ;
2019-08-09 05:21:01 -04:00
if ( ! ( sv - > agent . state & CHK_ST_CONFIGURED ) )
return cli_err ( appctx , " Agent was not configured on this server, cannot enable. \n " ) ;
2016-11-24 06:56:01 -05:00
2018-08-21 09:35:31 -04:00
HA_SPIN_LOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-24 06:56:01 -05:00
sv - > agent . state | = CHK_ST_ENABLED ;
2018-08-21 09:35:31 -04:00
HA_SPIN_UNLOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-24 06:56:01 -05:00
return 1 ;
}
2018-08-21 05:54:26 -04:00
/* parse a "enable health" command. It always returns 1.
*
2018-08-21 09:35:31 -04:00
* Grabs the server lock .
2018-08-21 05:54:26 -04:00
*/
2018-04-18 07:26:46 -04:00
static int cli_parse_enable_health ( char * * args , char * payload , struct appctx * appctx , void * private )
2016-11-24 06:51:04 -05:00
{
struct server * sv ;
if ( ! cli_has_level ( appctx , ACCESS_LVL_ADMIN ) )
return 1 ;
sv = cli_find_server ( appctx , args [ 2 ] ) ;
if ( ! sv )
return 1 ;
2018-08-21 09:35:31 -04:00
HA_SPIN_LOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-24 06:51:04 -05:00
sv - > check . state | = CHK_ST_ENABLED ;
2018-08-21 09:35:31 -04:00
HA_SPIN_UNLOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-24 06:51:04 -05:00
return 1 ;
}
2018-08-21 05:54:26 -04:00
/* parse a "enable server" command. It always returns 1.
*
2018-08-21 09:35:31 -04:00
* Grabs the server lock .
2018-08-21 05:54:26 -04:00
*/
2018-04-18 07:26:46 -04:00
static int cli_parse_enable_server ( char * * args , char * payload , struct appctx * appctx , void * private )
2016-11-24 06:47:00 -05:00
{
struct server * sv ;
if ( ! cli_has_level ( appctx , ACCESS_LVL_ADMIN ) )
return 1 ;
sv = cli_find_server ( appctx , args [ 2 ] ) ;
if ( ! sv )
return 1 ;
2018-08-21 09:35:31 -04:00
HA_SPIN_LOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-24 06:47:00 -05:00
srv_adm_set_ready ( sv ) ;
2018-01-17 11:39:34 -05:00
if ( ! ( sv - > flags & SRV_F_COOKIESET )
& & ( sv - > proxy - > ck_opts & PR_CK_DYNAMIC ) & &
sv - > cookie )
srv_check_for_dup_dyncookie ( sv ) ;
2018-08-21 09:35:31 -04:00
HA_SPIN_UNLOCK ( SERVER_LOCK , & sv - > lock ) ;
2016-11-24 06:47:00 -05:00
return 1 ;
}
2016-11-18 20:00:33 -05:00
/* register cli keywords */
static struct cli_kw_list cli_kws = { { } , {
2016-11-24 06:56:01 -05:00
{ { " disable " , " agent " , NULL } , " disable agent : disable agent checks (use 'set server' instead) " , cli_parse_disable_agent , NULL } ,
2016-11-24 06:51:04 -05:00
{ { " disable " , " health " , NULL } , " disable health : disable health checks (use 'set server' instead) " , cli_parse_disable_health , NULL } ,
2016-11-24 06:47:00 -05:00
{ { " disable " , " server " , NULL } , " disable server : disable a server for maintenance (use 'set server' instead) " , cli_parse_disable_server , NULL } ,
2016-11-24 06:56:01 -05:00
{ { " enable " , " agent " , NULL } , " enable agent : enable agent checks (use 'set server' instead) " , cli_parse_enable_agent , NULL } ,
2016-11-24 06:51:04 -05:00
{ { " enable " , " health " , NULL } , " enable health : enable health checks (use 'set server' instead) " , cli_parse_enable_health , NULL } ,
2016-11-24 06:47:00 -05:00
{ { " enable " , " server " , NULL } , " enable server : enable a disabled server (use 'set server' instead) " , cli_parse_enable_server , NULL } ,
2016-11-23 05:26:56 -05:00
{ { " set " , " maxconn " , " server " , NULL } , " set maxconn server : change a server's maxconn setting " , cli_parse_set_maxconn_server , NULL } ,
2016-11-18 20:00:33 -05:00
{ { " set " , " server " , NULL } , " set server : change a server's state, weight or address " , cli_parse_set_server } ,
2016-11-22 06:34:35 -05:00
{ { " get " , " weight " , NULL } , " get weight : report a server's current weight " , cli_parse_get_weight } ,
{ { " set " , " weight " , NULL } , " set weight : change a server's weight (deprecated) " , cli_parse_set_weight } ,
2016-11-18 20:00:33 -05:00
{ { } , }
} } ;
2018-11-25 13:14:37 -05:00
INITCALL1 ( STG_REGISTER , cli_register_kw , & cli_kws ) ;
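/* Illustrative usage of the keywords registered above, assuming the stats
 * socket is bound at /var/run/haproxy.sock and a backend "bk" with a server
 * "srv1" exists (socket path and names are examples, not taken from this file):
 *
 *   $ echo "disable server bk/srv1" | socat stdio /var/run/haproxy.sock
 *   $ echo "set server bk/srv1 state drain" | socat stdio /var/run/haproxy.sock
 *   $ echo "get weight bk/srv1" | socat stdio /var/run/haproxy.sock
 *   $ echo "enable server bk/srv1" | socat stdio /var/run/haproxy.sock
 *
 * The "disable"/"enable" forms are kept for compatibility; "set server" is the
 * preferred interface, as the help strings above indicate.
 */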
2016-11-18 20:00:33 -05:00
2017-10-03 08:46:45 -04:00
/*
* This function applies the server's status changes. It is
* designed to be called asynchronously.
*
2018-08-21 05:54:26 -04:00
* Must be called with the server lock held .
2017-10-03 08:46:45 -04:00
*/
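/* Minimal usage sketch (illustrative only, not a call site from this file;
 * the lock, fields and constants come from haproxy/server.h and are used
 * elsewhere in this file). The caller queues the desired state into the
 * "next_*" fields while holding the server lock, then commits it here:
 *
 *   HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
 *   sv->next_state = SRV_ST_STOPPED;      (request an operational DOWN)
 *   srv_update_status(sv);                (apply and log the transition)
 *   HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
 */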
2018-08-02 05:48:52 -04:00
static void srv_update_status ( struct server * s )
2017-10-03 08:46:45 -04:00
{
struct check * check = & s - > check ;
int xferred ;
struct proxy * px = s - > proxy ;
int prev_srv_count = s - > proxy - > srv_bck + s - > proxy - > srv_act ;
int srv_was_stopping = ( s - > cur_state = = SRV_ST_STOPPING ) | | ( s - > cur_admin & SRV_ADMF_DRAIN ) ;
int log_level ;
2018-07-13 05:56:34 -04:00
struct buffer * tmptrash = NULL ;
2017-10-03 08:46:45 -04:00
/* If maintenance is not currently set, we try to apply pending state changes */
if ( ! ( s - > cur_admin & SRV_ADMF_MAINT ) ) {
int next_admin ;
/* Backup next admin */
next_admin = s - > next_admin ;
/* restore current admin state */
s - > next_admin = s - > cur_admin ;
if ( ( s - > cur_state ! = SRV_ST_STOPPED ) & & ( s - > next_state = = SRV_ST_STOPPED ) ) {
s - > last_change = now . tv_sec ;
if ( s - > proxy - > lbprm . set_server_status_down )
s - > proxy - > lbprm . set_server_status_down ( s ) ;
if ( s - > onmarkeddown & HANA_ONMARKEDDOWN_SHUTDOWNSESSIONS )
srv_shutdown_streams ( s , SF_ERR_DOWN ) ;
/* we might have streams queued on this server and waiting for
* a connection . Those which are redispatchable will be queued
* to another server or to the proxy itself .
*/
xferred = pendconn_redistribute ( s ) ;
tmptrash = alloc_trash_chunk ( ) ;
if ( tmptrash ) {
chunk_printf ( tmptrash ,
" %sServer %s/%s is DOWN " , s - > flags & SRV_F_BACKUP ? " Backup " : " " ,
s - > proxy - > id , s - > id ) ;
2017-10-19 08:42:30 -04:00
srv_append_status ( tmptrash , s , NULL , xferred , 0 ) ;
2018-07-13 04:54:26 -04:00
ha_warning ( " %s. \n " , tmptrash - > area ) ;
2017-10-03 08:46:45 -04:00
/* we don't send an alert if the server was previously paused */
log_level = srv_was_stopping ? LOG_NOTICE : LOG_ALERT ;
2018-07-13 04:54:26 -04:00
send_log ( s - > proxy , log_level , " %s. \n " ,
tmptrash - > area ) ;
send_email_alert ( s , log_level , " %s " ,
tmptrash - > area ) ;
2017-10-03 08:46:45 -04:00
free_trash_chunk ( tmptrash ) ;
tmptrash = NULL ;
}
if ( prev_srv_count & & s - > proxy - > srv_bck = = 0 & & s - > proxy - > srv_act = = 0 )
set_backend_down ( s - > proxy ) ;
s - > counters . down_trans + + ;
}
else if ( ( s - > cur_state ! = SRV_ST_STOPPING ) & & ( s - > next_state = = SRV_ST_STOPPING ) ) {
s - > last_change = now . tv_sec ;
if ( s - > proxy - > lbprm . set_server_status_down )
s - > proxy - > lbprm . set_server_status_down ( s ) ;
/* we might have streams queued on this server and waiting for
* a connection . Those which are redispatchable will be queued
* to another server or to the proxy itself .
*/
xferred = pendconn_redistribute ( s ) ;
tmptrash = alloc_trash_chunk ( ) ;
if ( tmptrash ) {
chunk_printf ( tmptrash ,
" %sServer %s/%s is stopping " , s - > flags & SRV_F_BACKUP ? " Backup " : " " ,
s - > proxy - > id , s - > id ) ;
2017-10-19 08:42:30 -04:00
srv_append_status ( tmptrash , s , NULL , xferred , 0 ) ;
2017-10-03 08:46:45 -04:00
2018-07-13 04:54:26 -04:00
ha_warning ( " %s. \n " , tmptrash - > area ) ;
send_log ( s - > proxy , LOG_NOTICE , " %s. \n " ,
tmptrash - > area ) ;
2017-10-03 08:46:45 -04:00
free_trash_chunk ( tmptrash ) ;
tmptrash = NULL ;
}
if ( prev_srv_count & & s - > proxy - > srv_bck = = 0 & & s - > proxy - > srv_act = = 0 )
set_backend_down ( s - > proxy ) ;
}
else if ( ( ( s - > cur_state ! = SRV_ST_RUNNING ) & & ( s - > next_state = = SRV_ST_RUNNING ) )
| | ( ( s - > cur_state ! = SRV_ST_STARTING ) & & ( s - > next_state = = SRV_ST_STARTING ) ) ) {
if ( s - > proxy - > srv_bck = = 0 & & s - > proxy - > srv_act = = 0 ) {
if ( s - > proxy - > last_change < now . tv_sec ) // ignore negative times
s - > proxy - > down_time + = now . tv_sec - s - > proxy - > last_change ;
s - > proxy - > last_change = now . tv_sec ;
}
if ( s - > next_state = = SRV_ST_STOPPED & & s - > last_change < now . tv_sec ) // ignore negative times
s - > down_time + = now . tv_sec - s - > last_change ;
s - > last_change = now . tv_sec ;
if ( s - > next_state = = SRV_ST_STARTING )
task_schedule ( s - > warmup , tick_add ( now_ms , MS_TO_TICKS ( MAX ( 1000 , s - > slowstart / 20 ) ) ) ) ;
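/* Note: the warmup task re-runs roughly every slowstart/20 ms (never less
 * than 1s), so with e.g. "slowstart 60s" the effective weight is
 * re-evaluated about every 3 seconds over the ramp-up window (example
 * value, not taken from this file). */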
2018-08-02 05:48:52 -04:00
server_recalc_eweight ( s , 0 ) ;
2017-10-03 08:46:45 -04:00
/* now propagate the status change to any LB algorithms */
if ( px - > lbprm . update_server_eweight )
px - > lbprm . update_server_eweight ( s ) ;
else if ( srv_willbe_usable ( s ) ) {
if ( px - > lbprm . set_server_status_up )
px - > lbprm . set_server_status_up ( s ) ;
}
else {
if ( px - > lbprm . set_server_status_down )
px - > lbprm . set_server_status_down ( s ) ;
}
/* If the server is set with "on-marked-up shutdown-backup-sessions",
* and it ' s not a backup server and its effective weight is > 0 ,
* then it can accept new connections , so we shut down all streams
* on all backup servers .
*/
if ( ( s - > onmarkedup & HANA_ONMARKEDUP_SHUTDOWNBACKUPSESSIONS ) & &
! ( s - > flags & SRV_F_BACKUP ) & & s - > next_eweight )
srv_shutdown_backup_streams ( s - > proxy , SF_ERR_UP ) ;
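/* Illustrative configuration triggering the behaviour above (backend, server
 * names and addresses are examples only):
 *
 *   backend app
 *       server s1 192.0.2.10:80 check on-marked-up shutdown-backup-sessions
 *       server s2 192.0.2.11:80 check backup
 *
 * When s1 comes back UP, streams still attached to the backup server s2 are
 * shut down so traffic returns to the primary.
 */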
/* check if we can handle some connections queued at the proxy. We
* will take as many as we can handle .
*/
xferred = pendconn_grab_from_px ( s ) ;
tmptrash = alloc_trash_chunk ( ) ;
if ( tmptrash ) {
chunk_printf ( tmptrash ,
" %sServer %s/%s is UP " , s - > flags & SRV_F_BACKUP ? " Backup " : " " ,
s - > proxy - > id , s - > id ) ;
2017-10-19 08:42:30 -04:00
srv_append_status ( tmptrash , s , NULL , xferred , 0 ) ;
2018-07-13 04:54:26 -04:00
ha_warning ( " %s. \n " , tmptrash - > area ) ;
send_log ( s - > proxy , LOG_NOTICE , " %s. \n " ,
tmptrash - > area ) ;
send_email_alert ( s , LOG_NOTICE , " %s " ,
tmptrash - > area ) ;
2017-10-03 08:46:45 -04:00
free_trash_chunk ( tmptrash ) ;
tmptrash = NULL ;
}
if ( prev_srv_count & & s - > proxy - > srv_bck = = 0 & & s - > proxy - > srv_act = = 0 )
set_backend_down ( s - > proxy ) ;
}
else if ( s - > cur_eweight ! = s - > next_eweight ) {
/* now propagate the status change to any LB algorithms */
if ( px - > lbprm . update_server_eweight )
px - > lbprm . update_server_eweight ( s ) ;
else if ( srv_willbe_usable ( s ) ) {
if ( px - > lbprm . set_server_status_up )
px - > lbprm . set_server_status_up ( s ) ;
}
else {
if ( px - > lbprm . set_server_status_down )
px - > lbprm . set_server_status_down ( s ) ;
}
if ( prev_srv_count & & s - > proxy - > srv_bck = = 0 & & s - > proxy - > srv_act = = 0 )
set_backend_down ( s - > proxy ) ;
}
s - > next_admin = next_admin ;
}
2017-10-19 08:42:30 -04:00
/* reset operational state change */
* s - > op_st_chg . reason = 0 ;
s - > op_st_chg . status = s - > op_st_chg . code = - 1 ;
s - > op_st_chg . duration = 0 ;
2017-10-03 08:46:45 -04:00
/* Now we try to apply pending admin changes */
/* Maintenance must also disable health checks */
if ( ! ( s - > cur_admin & SRV_ADMF_MAINT ) & & ( s - > next_admin & SRV_ADMF_MAINT ) ) {
if ( s - > check . state & CHK_ST_ENABLED ) {
s - > check . state | = CHK_ST_PAUSED ;
check - > health = 0 ;
}
if ( s - > cur_state = = SRV_ST_STOPPED ) { /* server was already down */
2017-10-24 11:42:47 -04:00
tmptrash = alloc_trash_chunk ( ) ;
if ( tmptrash ) {
chunk_printf ( tmptrash ,
" %sServer %s/%s was DOWN and now enters maintenance%s%s%s " ,
s - > flags & SRV_F_BACKUP ? " Backup " : " " , s - > proxy - > id , s - > id ,
* ( s - > adm_st_chg_cause ) ? " ( " : " " , s - > adm_st_chg_cause , * ( s - > adm_st_chg_cause ) ? " ) " : " " ) ;
2017-10-03 08:46:45 -04:00
2017-10-24 11:42:47 -04:00
srv_append_status ( tmptrash , s , NULL , - 1 , ( s - > next_admin & SRV_ADMF_FMAINT ) ) ;
2017-10-03 08:46:45 -04:00
2017-10-24 11:42:47 -04:00
if ( ! ( global . mode & MODE_STARTING ) ) {
2018-07-13 04:54:26 -04:00
ha_warning ( " %s. \n " , tmptrash - > area ) ;
send_log ( s - > proxy , LOG_NOTICE , " %s. \n " ,
tmptrash - > area ) ;
2017-10-24 11:42:47 -04:00
}
free_trash_chunk ( tmptrash ) ;
tmptrash = NULL ;
2017-10-03 08:46:45 -04:00
}
2017-12-06 10:47:17 -05:00
/* commit new admin status */
s - > cur_admin = s - > next_admin ;
2017-10-03 08:46:45 -04:00
}
else { /* server was still running */
check - > health = 0 ; /* failure */
s - > last_change = now . tv_sec ;
2017-12-21 08:42:26 -05:00
s - > next_state = SRV_ST_STOPPED ;
2017-10-03 08:46:45 -04:00
if ( s - > proxy - > lbprm . set_server_status_down )
s - > proxy - > lbprm . set_server_status_down ( s ) ;
if ( s - > onmarkeddown & HANA_ONMARKEDDOWN_SHUTDOWNSESSIONS )
srv_shutdown_streams ( s , SF_ERR_DOWN ) ;
2020-05-02 15:52:36 -04:00
/* force connection cleanup on the given server */
srv_cleanup_connections ( s ) ;
2017-10-03 08:46:45 -04:00
/* we might have streams queued on this server and waiting for
* a connection . Those which are redispatchable will be queued
* to another server or to the proxy itself .
*/
xferred = pendconn_redistribute ( s ) ;
tmptrash = alloc_trash_chunk ( ) ;
if ( tmptrash ) {
chunk_printf ( tmptrash ,
" %sServer %s/%s is going DOWN for maintenance%s%s%s " ,
s - > flags & SRV_F_BACKUP ? " Backup " : " " ,
s - > proxy - > id , s - > id ,
* ( s - > adm_st_chg_cause ) ? " ( " : " " , s - > adm_st_chg_cause , * ( s - > adm_st_chg_cause ) ? " ) " : " " ) ;
srv_append_status ( tmptrash , s , NULL , xferred , ( s - > next_admin & SRV_ADMF_FMAINT ) ) ;
if ( ! ( global . mode & MODE_STARTING ) ) {
2018-07-13 04:54:26 -04:00
ha_warning ( " %s. \n " , tmptrash - > area ) ;
send_log ( s - > proxy , srv_was_stopping ? LOG_NOTICE : LOG_ALERT , " %s. \n " ,
tmptrash - > area ) ;
2017-10-03 08:46:45 -04:00
}
free_trash_chunk ( tmptrash ) ;
tmptrash = NULL ;
}
if ( prev_srv_count & & s - > proxy - > srv_bck = = 0 & & s - > proxy - > srv_act = = 0 )
set_backend_down ( s - > proxy ) ;
s - > counters . down_trans + + ;
}
}
else if ( ( s - > cur_admin & SRV_ADMF_MAINT ) & & ! ( s - > next_admin & SRV_ADMF_MAINT ) ) {
/* OK here we're leaving maintenance, we have many things to check,
* because the server might possibly be coming back up depending on
* its state . In practice , leaving maintenance means that we should
* immediately turn to UP ( more or less the slowstart ) under the
* following conditions :
* - server is neither checked nor tracked
* - server tracks another server which is not checked
* - server tracks another server which is already up
* Which sums up as something simpler :
* " either the tracking server is up or the server's checks are disabled
* or up " . Otherwise we only re-enable health checks. There's a special
* case associated to the stopping state which can be inherited . Note
* that the server might still be in drain mode , which is naturally dealt
* with by the lower level functions .
*/
if ( s - > check . state & CHK_ST_ENABLED ) {
s - > check . state & = ~ CHK_ST_PAUSED ;
check - > health = check - > rise ; /* start OK but check immediately */
}
if ( ( ! s - > track | | s - > track - > next_state ! = SRV_ST_STOPPED ) & &
( ! ( s - > agent . state & CHK_ST_ENABLED ) | | ( s - > agent . health > = s - > agent . rise ) ) & &
( ! ( s - > check . state & CHK_ST_ENABLED ) | | ( s - > check . health > = s - > check . rise ) ) ) {
if ( s - > track & & s - > track - > next_state = = SRV_ST_STOPPING ) {
s - > next_state = SRV_ST_STOPPING ;
}
else {
s - > next_state = SRV_ST_STARTING ;
if ( s - > slowstart > 0 )
task_schedule ( s - > warmup , tick_add ( now_ms , MS_TO_TICKS ( MAX ( 1000 , s - > slowstart / 20 ) ) ) ) ;
else
s - > next_state = SRV_ST_RUNNING ;
}
}
tmptrash = alloc_trash_chunk ( ) ;
if ( tmptrash ) {
if ( ! ( s - > next_admin & SRV_ADMF_FMAINT ) & & ( s - > cur_admin & SRV_ADMF_FMAINT ) ) {
chunk_printf ( tmptrash ,
" %sServer %s/%s is %s/%s (leaving forced maintenance) " ,
s - > flags & SRV_F_BACKUP ? " Backup " : " " ,
s - > proxy - > id , s - > id ,
( s - > next_state = = SRV_ST_STOPPED ) ? " DOWN " : " UP " ,
( s - > next_admin & SRV_ADMF_DRAIN ) ? " DRAIN " : " READY " ) ;
}
if ( ! ( s - > next_admin & SRV_ADMF_RMAINT ) & & ( s - > cur_admin & SRV_ADMF_RMAINT ) ) {
chunk_printf ( tmptrash ,
" %sServer %s/%s ('%s') is %s/%s (resolves again) " ,
s - > flags & SRV_F_BACKUP ? " Backup " : " " ,
s - > proxy - > id , s - > id , s - > hostname ,
( s - > next_state = = SRV_ST_STOPPED ) ? " DOWN " : " UP " ,
( s - > next_admin & SRV_ADMF_DRAIN ) ? " DRAIN " : " READY " ) ;
}
if ( ! ( s - > next_admin & SRV_ADMF_IMAINT ) & & ( s - > cur_admin & SRV_ADMF_IMAINT ) ) {
chunk_printf ( tmptrash ,
" %sServer %s/%s is %s/%s (leaving maintenance) " ,
s - > flags & SRV_F_BACKUP ? " Backup " : " " ,
s - > proxy - > id , s - > id ,
( s - > next_state = = SRV_ST_STOPPED ) ? " DOWN " : " UP " ,
( s - > next_admin & SRV_ADMF_DRAIN ) ? " DRAIN " : " READY " ) ;
}
2018-07-13 04:54:26 -04:00
ha_warning ( " %s. \n " , tmptrash - > area ) ;
send_log ( s - > proxy , LOG_NOTICE , " %s. \n " ,
tmptrash - > area ) ;
2017-10-03 08:46:45 -04:00
free_trash_chunk ( tmptrash ) ;
tmptrash = NULL ;
}
2018-08-02 05:48:52 -04:00
server_recalc_eweight ( s , 0 ) ;
2017-10-03 08:46:45 -04:00
/* now propagate the status change to any LB algorithms */
if ( px - > lbprm . update_server_eweight )
px - > lbprm . update_server_eweight ( s ) ;
else if ( srv_willbe_usable ( s ) ) {
if ( px - > lbprm . set_server_status_up )
px - > lbprm . set_server_status_up ( s ) ;
}
else {
if ( px - > lbprm . set_server_status_down )
px - > lbprm . set_server_status_down ( s ) ;
}
if ( prev_srv_count & & s - > proxy - > srv_bck = = 0 & & s - > proxy - > srv_act = = 0 )
set_backend_down ( s - > proxy ) ;
2018-08-07 04:14:53 -04:00
/* If the server is set with "on-marked-up shutdown-backup-sessions",
* and it ' s not a backup server and its effective weight is > 0 ,
* then it can accept new connections , so we shut down all streams
* on all backup servers .
*/
if ( ( s - > onmarkedup & HANA_ONMARKEDUP_SHUTDOWNBACKUPSESSIONS ) & &
! ( s - > flags & SRV_F_BACKUP ) & & s - > next_eweight )
srv_shutdown_backup_streams ( s - > proxy , SF_ERR_UP ) ;
/* check if we can handle some connections queued at the proxy. We
* will take as many as we can handle .
*/
xferred = pendconn_grab_from_px ( s ) ;
2017-10-03 08:46:45 -04:00
}
else if ( s - > next_admin & SRV_ADMF_MAINT ) {
/* remaining in maintenance mode, let's inform precisely about the
* situation .
*/
if ( ! ( s - > next_admin & SRV_ADMF_FMAINT ) & & ( s - > cur_admin & SRV_ADMF_FMAINT ) ) {
tmptrash = alloc_trash_chunk ( ) ;
if ( tmptrash ) {
chunk_printf ( tmptrash ,
" %sServer %s/%s is leaving forced maintenance but remains in maintenance " ,
s - > flags & SRV_F_BACKUP ? " Backup " : " " ,
s - > proxy - > id , s - > id ) ;
if ( s - > track ) /* normally it's mandatory here */
chunk_appendf ( tmptrash , " via %s/%s " ,
s - > track - > proxy - > id , s - > track - > id ) ;
2018-07-13 04:54:26 -04:00
ha_warning ( " %s. \n " , tmptrash - > area ) ;
send_log ( s - > proxy , LOG_NOTICE , " %s. \n " ,
tmptrash - > area ) ;
2017-10-03 08:46:45 -04:00
free_trash_chunk ( tmptrash ) ;
tmptrash = NULL ;
}
}
if ( ! ( s - > next_admin & SRV_ADMF_RMAINT ) & & ( s - > cur_admin & SRV_ADMF_RMAINT ) ) {
tmptrash = alloc_trash_chunk ( ) ;
if ( tmptrash ) {
chunk_printf ( tmptrash ,
" %sServer %s/%s ('%s') resolves again but remains in maintenance " ,
s - > flags & SRV_F_BACKUP ? " Backup " : " " ,
s - > proxy - > id , s - > id , s - > hostname ) ;
if ( s - > track ) /* normally it's mandatory here */
chunk_appendf ( tmptrash , " via %s/%s " ,
s - > track - > proxy - > id , s - > track - > id ) ;
2018-07-13 04:54:26 -04:00
ha_warning ( " %s. \n " , tmptrash - > area ) ;
send_log ( s - > proxy , LOG_NOTICE , " %s. \n " ,
tmptrash - > area ) ;
2017-10-03 08:46:45 -04:00
free_trash_chunk ( tmptrash ) ;
tmptrash = NULL ;
}
}
else if ( ! ( s - > next_admin & SRV_ADMF_IMAINT ) & & ( s - > cur_admin & SRV_ADMF_IMAINT ) ) {
tmptrash = alloc_trash_chunk ( ) ;
if ( tmptrash ) {
chunk_printf ( tmptrash ,
" %sServer %s/%s remains in forced maintenance " ,
s - > flags & SRV_F_BACKUP ? " Backup " : " " ,
s - > proxy - > id , s - > id ) ;
2018-07-13 04:54:26 -04:00
ha_warning ( " %s. \n " , tmptrash - > area ) ;
send_log ( s - > proxy , LOG_NOTICE , " %s. \n " ,
tmptrash - > area ) ;
2017-10-03 08:46:45 -04:00
free_trash_chunk ( tmptrash ) ;
tmptrash = NULL ;
}
}
/* don't report anything when leaving drain mode and remaining in maintenance */
s - > cur_admin = s - > next_admin ;
}
if ( ! ( s - > next_admin & SRV_ADMF_MAINT ) ) {
if ( ! ( s - > cur_admin & SRV_ADMF_DRAIN ) & & ( s - > next_admin & SRV_ADMF_DRAIN ) ) {
/* drain state is applied only if not yet in maint */
s - > last_change = now . tv_sec ;
if ( px - > lbprm . set_server_status_down )
px - > lbprm . set_server_status_down ( s ) ;
/* we might have streams queued on this server and waiting for
* a connection . Those which are redispatchable will be queued
* to another server or to the proxy itself .
*/
xferred = pendconn_redistribute ( s ) ;
tmptrash = alloc_trash_chunk ( ) ;
if ( tmptrash ) {
chunk_printf ( tmptrash , " %sServer %s/%s enters drain state%s%s%s " ,
s - > flags & SRV_F_BACKUP ? " Backup " : " " , s - > proxy - > id , s - > id ,
* ( s - > adm_st_chg_cause ) ? " ( " : " " , s - > adm_st_chg_cause , * ( s - > adm_st_chg_cause ) ? " ) " : " " ) ;
srv_append_status ( tmptrash , s , NULL , xferred , ( s - > next_admin & SRV_ADMF_FDRAIN ) ) ;
if ( ! ( global . mode & MODE_STARTING ) ) {
2018-07-13 04:54:26 -04:00
ha_warning ( " %s. \n " , tmptrash - > area ) ;
send_log ( s - > proxy , LOG_NOTICE , " %s. \n " ,
tmptrash - > area ) ;
send_email_alert ( s , LOG_NOTICE , " %s " ,
tmptrash - > area ) ;
2017-10-03 08:46:45 -04:00
}
free_trash_chunk ( tmptrash ) ;
tmptrash = NULL ;
}
if ( prev_srv_count & & s - > proxy - > srv_bck = = 0 & & s - > proxy - > srv_act = = 0 )
set_backend_down ( s - > proxy ) ;
}
else if ( ( s - > cur_admin & SRV_ADMF_DRAIN ) & & ! ( s - > next_admin & SRV_ADMF_DRAIN ) ) {
/* OK completely leaving drain mode */
if ( s - > proxy - > srv_bck = = 0 & & s - > proxy - > srv_act = = 0 ) {
if ( s - > proxy - > last_change < now . tv_sec ) // ignore negative times
s - > proxy - > down_time + = now . tv_sec - s - > proxy - > last_change ;
s - > proxy - > last_change = now . tv_sec ;
}
if ( s - > last_change < now . tv_sec ) // ignore negative times
s - > down_time + = now . tv_sec - s - > last_change ;
s - > last_change = now . tv_sec ;
2018-08-02 05:48:52 -04:00
server_recalc_eweight ( s , 0 ) ;
2017-10-03 08:46:45 -04:00
tmptrash = alloc_trash_chunk ( ) ;
if ( tmptrash ) {
if ( ! ( s - > next_admin & SRV_ADMF_FDRAIN ) ) {
chunk_printf ( tmptrash ,
" %sServer %s/%s is %s (leaving forced drain) " ,
s - > flags & SRV_F_BACKUP ? " Backup " : " " ,
s - > proxy - > id , s - > id ,
( s - > next_state = = SRV_ST_STOPPED ) ? " DOWN " : " UP " ) ;
}
else {
chunk_printf ( tmptrash ,
" %sServer %s/%s is %s (leaving drain) " ,
s - > flags & SRV_F_BACKUP ? " Backup " : " " ,
s - > proxy - > id , s - > id ,
( s - > next_state = = SRV_ST_STOPPED ) ? " DOWN " : " UP " ) ;
if ( s - > track ) /* normally it's mandatory here */
chunk_appendf ( tmptrash , " via %s/%s " ,
s - > track - > proxy - > id , s - > track - > id ) ;
}
2018-07-13 04:54:26 -04:00
ha_warning ( " %s. \n " , tmptrash - > area ) ;
send_log ( s - > proxy , LOG_NOTICE , " %s. \n " ,
tmptrash - > area ) ;
2017-10-03 08:46:45 -04:00
free_trash_chunk ( tmptrash ) ;
tmptrash = NULL ;
}
/* now propagate the status change to any LB algorithms */
if ( px - > lbprm . update_server_eweight )
px - > lbprm . update_server_eweight ( s ) ;
else if ( srv_willbe_usable ( s ) ) {
if ( px - > lbprm . set_server_status_up )
px - > lbprm . set_server_status_up ( s ) ;
}
else {
if ( px - > lbprm . set_server_status_down )
px - > lbprm . set_server_status_down ( s ) ;
}
}
else if ( ( s - > next_admin & SRV_ADMF_DRAIN ) ) {
/* remaining in drain mode after removing one of its flags */
tmptrash = alloc_trash_chunk ( ) ;
if ( tmptrash ) {
if ( ! ( s - > next_admin & SRV_ADMF_FDRAIN ) ) {
chunk_printf ( tmptrash ,
" %sServer %s/%s is leaving forced drain but remains in drain mode " ,
s - > flags & SRV_F_BACKUP ? " Backup " : " " ,
s - > proxy - > id , s - > id ) ;
if ( s - > track ) /* normally it's mandatory here */
chunk_appendf ( tmptrash , " via %s/%s " ,
s - > track - > proxy - > id , s - > track - > id ) ;
}
else {
chunk_printf ( tmptrash ,
" %sServer %s/%s remains in forced drain mode " ,
s - > flags & SRV_F_BACKUP ? " Backup " : " " ,
s - > proxy - > id , s - > id ) ;
}
2018-07-13 04:54:26 -04:00
ha_warning ( " %s. \n " , tmptrash - > area ) ;
send_log ( s - > proxy , LOG_NOTICE , " %s. \n " ,
tmptrash - > area ) ;
2017-10-03 08:46:45 -04:00
free_trash_chunk ( tmptrash ) ;
tmptrash = NULL ;
}
/* commit new admin status */
s - > cur_admin = s - > next_admin ;
}
}
/* Re-set log strings to empty */
* s - > adm_st_chg_cause = 0 ;
}
2019-02-14 12:29:09 -05:00
struct task * srv_cleanup_toremove_connections ( struct task * task , void * context , unsigned short state )
2018-12-02 08:11:41 -05:00
{
2019-02-14 12:29:09 -05:00
struct connection * conn ;
2018-12-14 12:15:36 -05:00
2019-08-08 09:47:21 -04:00
while ( ( conn = MT_LIST_POP ( & toremove_connections [ tid ] ,
2019-02-14 12:29:09 -05:00
struct connection * , list ) ) ! = NULL ) {
2019-04-08 05:23:22 -04:00
conn - > mux - > destroy ( conn - > ctx ) ;
2019-02-14 12:29:09 -05:00
}
2018-12-14 12:15:36 -05:00
2019-02-14 12:29:09 -05:00
return task ;
}
2018-12-02 08:11:41 -05:00
2020-05-02 15:52:36 -04:00
/* Cleanup the connections of a given server.
* This may be useful when the server goes into forced maintenance or when its
* ip/port is changed live.
*/
2020-05-04 07:52:40 -04:00
static void srv_cleanup_connections ( struct server * srv )
2020-05-02 15:52:36 -04:00
{
struct connection * conn ;
int did_remove ;
int i ;
int j ;
HA_SPIN_LOCK ( OTHER_LOCK , & idle_conn_srv_lock ) ;
for ( i = 0 ; i < global . nbthread ; i + + ) {
did_remove = 0 ;
HA_SPIN_LOCK ( OTHER_LOCK , & toremove_lock [ i ] ) ;
for ( j = 0 ; j < srv - > curr_idle_conns ; j + + ) {
conn = MT_LIST_POP ( & srv - > idle_conns [ i ] , struct connection * , list ) ;
if ( ! conn )
conn = MT_LIST_POP ( & srv - > safe_conns [ i ] ,
struct connection * , list ) ;
if ( ! conn )
break ;
did_remove = 1 ;
MT_LIST_ADDQ ( & toremove_connections [ i ] , ( struct mt_list * ) & conn - > list ) ;
}
HA_SPIN_UNLOCK ( OTHER_LOCK , & toremove_lock [ i ] ) ;
if ( did_remove )
task_wakeup ( idle_conn_cleanup [ i ] , TASK_WOKEN_OTHER ) ;
}
HA_SPIN_UNLOCK ( OTHER_LOCK , & idle_conn_srv_lock ) ;
}
2019-02-14 12:29:09 -05:00
struct task * srv_cleanup_idle_connections ( struct task * task , void * context , unsigned short state )
{
struct server * srv ;
struct eb32_node * eb ;
int i ;
unsigned int next_wakeup ;
int need_wakeup = 0 ;
HA_SPIN_LOCK ( OTHER_LOCK , & idle_conn_srv_lock ) ;
while ( 1 ) {
int srv_is_empty = 1 ;
2020-03-29 18:23:57 -04:00
int exceed_conns ;
int to_kill ;
int curr_idle ;
2019-02-14 12:29:09 -05:00
eb = eb32_lookup_ge ( & idle_conn_srv , now_ms - TIMER_LOOK_BACK ) ;
if ( ! eb ) {
/* we might have reached the end of the tree, typically because
* < now_ms > is in the first half and we ' re first scanning the last
* half . Let ' s loop back to the beginning of the tree now .
*/
eb = eb32_first ( & idle_conn_srv ) ;
if ( likely ( ! eb ) )
break ;
}
if ( tick_is_lt ( now_ms , eb - > key ) ) {
/* timer not expired yet, revisit it later */
next_wakeup = eb - > key ;
need_wakeup = 1 ;
2018-12-02 08:11:41 -05:00
break ;
2019-02-14 12:29:09 -05:00
}
srv = eb32_entry ( eb , struct server , idle_node ) ;
2020-03-29 18:23:57 -04:00
/* Calculate how many idle connections we want to kill :
* we want to remove half the difference between the total
* of established connections ( used or idle ) and the max
* number of used connections .
*/
curr_idle = srv - > curr_idle_conns ;
if ( curr_idle = = 0 )
goto remove ;
exceed_conns = srv - > curr_used_conns + curr_idle -
srv - > max_used_conns ;
exceed_conns = to_kill = exceed_conns / 2 + ( exceed_conns & 1 ) ;
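/* Illustrative numbers (not from a real trace): with curr_used_conns=10,
 * curr_idle=20 and max_used_conns=14, exceed_conns = 10+20-14 = 16, so
 * to_kill = 16/2 = 8 idle connections are purged this round, spread across
 * threads proportionally to each thread's idle count below. */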
srv - > max_used_conns = srv - > curr_used_conns ;
for ( i = 0 ; i < global . nbthread & & to_kill > 0 ; i + + ) {
int max_conn ;
2019-02-14 12:29:09 -05:00
int j ;
int did_remove = 0 ;
2020-03-29 18:23:57 -04:00
max_conn = ( exceed_conns * srv - > curr_idle_thr [ i ] ) /
curr_idle + 1 ;
BUG/MEDIUM: servers: Fix a race condition with idle connections.
When we're purging idle connections, there's a race condition when we're
removing a connection from the idle list to add it to the list of
connections to free, while the thread owning the connection tries to free it
at the same time.
To fix this, simply add a per-thread lock that has to be held before
removing the connection from the idle list, and when, in conn_free(), we're
about to remove the connection from every list. That way, we know for sure
the connection will stay valid while we remove it from the idle list to add
it to the list of connections to free.
This should happen rarely enough that it shouldn't have any impact on
performance.
This has not been reported yet, but could provoke random segfaults.
This should be backported to 2.0.
2019-07-11 09:49:00 -04:00
HA_SPIN_LOCK ( OTHER_LOCK , & toremove_lock [ i ] ) ;
2019-02-14 12:29:09 -05:00
for ( j = 0 ; j < max_conn ; j + + ) {
2020-02-13 13:12:07 -05:00
struct connection * conn = MT_LIST_POP ( & srv - > idle_conns [ i ] , struct connection * , list ) ;
if ( ! conn )
conn = MT_LIST_POP ( & srv - > safe_conns [ i ] ,
struct connection * , list ) ;
2019-02-14 12:29:09 -05:00
if ( ! conn )
break ;
did_remove = 1 ;
2019-08-08 09:47:21 -04:00
MT_LIST_ADDQ ( & toremove_connections [ i ] , ( struct mt_list * ) & conn - > list ) ;
2019-02-14 12:29:09 -05:00
}
2019-07-11 09:49:00 -04:00
HA_SPIN_UNLOCK ( OTHER_LOCK , & toremove_lock [ i ] ) ;
2019-02-14 12:29:09 -05:00
if ( did_remove & & max_conn < srv - > curr_idle_thr [ i ] )
srv_is_empty = 0 ;
if ( did_remove )
task_wakeup ( idle_conn_cleanup [ i ] , TASK_WOKEN_OTHER ) ;
}
2020-03-29 18:23:57 -04:00
remove :
2019-02-14 12:29:09 -05:00
eb32_delete ( & srv - > idle_node ) ;
if ( ! srv_is_empty ) {
/* There are still more idle connections, add the
* server back in the tree .
*/
srv - > idle_node . key = tick_add ( srv - > pool_purge_delay ,
now_ms ) ;
eb32_insert ( & idle_conn_srv , & srv - > idle_node ) ;
}
2018-12-02 08:11:41 -05:00
}
2019-02-14 12:29:09 -05:00
HA_SPIN_UNLOCK ( OTHER_LOCK , & idle_conn_srv_lock ) ;
if ( need_wakeup )
task - > expire = next_wakeup ;
2018-12-14 12:15:36 -05:00
else
task - > expire = TICK_ETERNITY ;
2019-02-14 12:29:09 -05:00
2018-12-02 08:11:41 -05:00
return task ;
}
MEDIUM: connections: Add a way to control the number of idling connections.
As by default we add all keepalive connections to the idle pool, if we run
into a pathological case where all clients don't do keepalive but the server
does, and haproxy is configured to only reuse "safe" connections, we will
soon find ourselves with lots of idle connections that are unusable for new
sessions, while we won't have any file descriptors available to create new
connections.
To fix this, add 2 new global settings, "pool_low_ratio" and "pool_high_ratio".
pool-low-fd-ratio is the % of fds we're allowed to use (against the maximum
number of fds available to haproxy) before we stop adding connections to the
idle pool, and destroy them instead. The default is 20. pool-high-fd-ratio is
the % of fds we're allowed to use (against the maximum number of fds available
to haproxy) before we start killing idle connections in the event we have to
create a new outgoing connection, and no reuse is possible. The default is 25.
2019-04-16 13:07:22 -04:00
/* config parser for global "tune.pool-{low,high}-fd-ratio" */
static int cfg_parse_pool_fd_ratio ( char * * args , int section_type , struct proxy * curpx ,
struct proxy * defpx , const char * file , int line ,
char * * err )
{
int arg = - 1 ;
if ( too_many_args ( 1 , args , err , NULL ) )
return - 1 ;
if ( * ( args [ 1 ] ) ! = 0 )
arg = atoi ( args [ 1 ] ) ;
if ( arg < 0 | | arg > 100 ) {
memprintf ( err , " '%s' expects an integer argument between 0 and 100. " , args [ 0 ] ) ;
return - 1 ;
}
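/* args[0] is either "tune.pool-high-fd-ratio" or "tune.pool-low-fd-ratio";
 * the character at offset 10 (just after "tune.pool-") is 'h' or 'l', which
 * is enough to tell the two keywords apart. */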
if ( args [ 0 ] [ 10 ] = = ' h ' )
global . tune . pool_high_ratio = arg ;
else
global . tune . pool_low_ratio = arg ;
return 0 ;
}
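/* Illustrative haproxy.cfg usage of these tunables (the values shown are the
 * documented defaults, 20 and 25):
 *
 *   global
 *       tune.pool-low-fd-ratio  20
 *       tune.pool-high-fd-ratio 25
 */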
/* config keyword parsers */
static struct cfg_kw_list cfg_kws = { ILH , {
{ CFG_GLOBAL , " tune.pool-high-fd-ratio " , cfg_parse_pool_fd_ratio } ,
{ CFG_GLOBAL , " tune.pool-low-fd-ratio " , cfg_parse_pool_fd_ratio } ,
{ 0 , NULL , NULL }
} } ;
INITCALL1 ( STG_REGISTER , cfg_register_keywords , & cfg_kws ) ;
2006-06-25 20:48:02 -04:00
/*
* Local variables :
* c - indent - level : 8
* c - basic - offset : 8
* End :
*/