/*
* include/types/proxy.h
* This file defines everything related to proxies.
*
* Copyright (C) 2000-2011 Willy Tarreau - w@1wt.eu
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation, version 2.1
* exclusively.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _TYPES_PROXY_H
#define _TYPES_PROXY_H
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <common/chunk.h>
#include <common/config.h>
#include <common/mini-clist.h>
#include <common/regex.h>
#include <common/tools.h>
#include <eb32tree.h>
#include <ebistree.h>
#include <types/acl.h>
#include <types/backend.h>
#include <types/counters.h>
MAJOR: filters: Add filters support This patch adds the support of filters in HAProxy. The main idea is to have a way to "easely" extend HAProxy by adding some "modules", called filters, that will be able to change HAProxy behavior in a programmatic way. To do so, many entry points has been added in code to let filters to hook up to different steps of the processing. A filter must define a flt_ops sutrctures (see include/types/filters.h for details). This structure contains all available callbacks that a filter can define: struct flt_ops { /* * Callbacks to manage the filter lifecycle */ int (*init) (struct proxy *p); void (*deinit)(struct proxy *p); int (*check) (struct proxy *p); /* * Stream callbacks */ void (*stream_start) (struct stream *s); void (*stream_accept) (struct stream *s); void (*session_establish)(struct stream *s); void (*stream_stop) (struct stream *s); /* * HTTP callbacks */ int (*http_start) (struct stream *s, struct http_msg *msg); int (*http_start_body) (struct stream *s, struct http_msg *msg); int (*http_start_chunk) (struct stream *s, struct http_msg *msg); int (*http_data) (struct stream *s, struct http_msg *msg); int (*http_last_chunk) (struct stream *s, struct http_msg *msg); int (*http_end_chunk) (struct stream *s, struct http_msg *msg); int (*http_chunk_trailers)(struct stream *s, struct http_msg *msg); int (*http_end_body) (struct stream *s, struct http_msg *msg); void (*http_end) (struct stream *s, struct http_msg *msg); void (*http_reset) (struct stream *s, struct http_msg *msg); int (*http_pre_process) (struct stream *s, struct http_msg *msg); int (*http_post_process) (struct stream *s, struct http_msg *msg); void (*http_reply) (struct stream *s, short status, const struct chunk *msg); }; To declare and use a filter, in the configuration, the "filter" keyword must be used in a listener/frontend section: frontend test ... filter <FILTER-NAME> [OPTIONS...] The filter referenced by the <FILTER-NAME> must declare a configuration parser on its own name to fill flt_ops and filter_conf field in the proxy's structure. An exemple will be provided later to make it perfectly clear. For now, filters cannot be used in backend section. But this is only a matter of time. Documentation will also be added later. This is the first commit of a long list about filters. It is possible to have several filters on the same listener/frontend. These filters are stored in an array of at most MAX_FILTERS elements (define in include/types/filters.h). Again, this will be replaced later by a list of filters. The filter API has been highly refactored. Main changes are: * Now, HA supports an infinite number of filters per proxy. To do so, filters are stored in list. * Because filters are stored in list, filters state has been moved from the channel structure to the filter structure. This is cleaner because there is no more info about filters in channel structure. * It is possible to defined filters on backends only. For such filters, stream_start/stream_stop callbacks are not called. Of course, it is possible to mix frontend and backend filters. * Now, TCP streams are also filtered. All callbacks without the 'http_' prefix are called for all kind of streams. In addition, 2 new callbacks were added to filter data exchanged through a TCP stream: - tcp_data: it is called when new data are available or when old unprocessed data are still waiting. - tcp_forward_data: it is called when some data can be consumed. 
* New callbacks attached to channel were added: - channel_start_analyze: it is called when a filter is ready to process data exchanged through a channel. 2 new analyzers (a frontend and a backend) are attached to channels to call this callback. For a frontend filter, it is called before any other analyzer. For a backend filter, it is called when a backend is attached to a stream. So some processing cannot be filtered in that case. - channel_analyze: it is called before each analyzer attached to a channel, expects analyzers responsible for data sending. - channel_end_analyze: it is called when all other analyzers have finished their processing. A new analyzers is attached to channels to call this callback. For a TCP stream, this is always the last one called. For a HTTP one, the callback is called when a request/response ends, so it is called one time for each request/response. * 'session_established' callback has been removed. Everything that is done in this callback can be handled by 'channel_start_analyze' on the response channel. * 'http_pre_process' and 'http_post_process' callbacks have been replaced by 'channel_analyze'. * 'http_start' callback has been replaced by 'http_headers'. This new one is called just before headers sending and parsing of the body. * 'http_end' callback has been replaced by 'channel_end_analyze'. * It is possible to set a forwarder for TCP channels. It was already possible to do it for HTTP ones. * Forwarders can partially consumed forwardable data. For this reason a new HTTP message state was added before HTTP_MSG_DONE : HTTP_MSG_ENDING. Now all filters can define corresponding callbacks (http_forward_data and tcp_forward_data). Each filter owns 2 offsets relative to buf->p, next and forward, to track, respectively, input data already parsed but not forwarded yet by the filter and parsed data considered as forwarded by the filter. A any time, we have the warranty that a filter cannot parse or forward more input than previous ones. And, of course, it cannot forward more input than it has parsed. 2 macros has been added to retrieve these offets: FLT_NXT and FLT_FWD. In addition, 2 functions has been added to change the 'next size' and the 'forward size' of a filter. When a filter parses input data, it can alter these data, so the size of these data can vary. This action has an effet on all previous filters that must be handled. To do so, the function 'filter_change_next_size' must be called, passing the size variation. In the same spirit, if a filter alter forwarded data, it must call the function 'filter_change_forward_size'. 'filter_change_next_size' can be called in 'http_data' and 'tcp_data' callbacks and only these ones. And 'filter_change_forward_size' can be called in 'http_forward_data' and 'tcp_forward_data' callbacks and only these ones. The data changes are the filter responsability, but with some limitation. It must not change already parsed/forwarded data or data that previous filters have not parsed/forwarded yet. Because filters can be used on backends, when we the backend is set for a stream, we add filters defined for this backend in the filter list of the stream. But we must only do that when the backend and the frontend of the stream are not the same. Else same filters are added a second time leading to undefined behavior. The HTTP compression code had to be moved. So it simplifies http_response_forward_body function. To do so, the way the data are forwarded has changed. Now, a filter (and only one) can forward data. 
In a commit to come, this limitation will be removed to let all filters take part to data forwarding. There are 2 new functions that filters should use to deal with this feature: * flt_set_http_data_forwarder: This function sets the filter (using its id) that will forward data for the specified HTTP message. It is possible if it was not already set by another filter _AND_ if no data was yet forwarded (msg->msg_state <= HTTP_MSG_BODY). It returns -1 if an error occurs. * flt_http_data_forwarder: This function returns the filter id that will forward data for the specified HTTP message. If there is no forwarder set, it returns -1. When an HTTP data forwarder is set for the response, the HTTP compression is disabled. Of course, this is not definitive.
2015-04-30 05:48:27 -04:00
#include <types/filters.h>
#include <types/freq_ctr.h>
#include <types/listener.h>
#include <types/log.h>
#include <types/obj_type.h>
#include <types/proto_http.h>
#include <types/sample.h>
#include <types/server.h>
#include <types/stick_table.h>
/* values for proxy->state */
enum pr_state {
PR_STNEW = 0, /* proxy has not been initialized yet */
PR_STREADY, /* proxy has been initialized and is ready */
PR_STFULL, /* frontend is full (maxconn reached) */
PR_STPAUSED, /* frontend is paused (during hot restart) */
PR_STSTOPPED, /* proxy is stopped (end of a restart) */
PR_STERROR, /* proxy experienced an unrecoverable error */
} __attribute__((packed));
/* values for proxy->mode */
enum pr_mode {
PR_MODE_TCP = 0,
PR_MODE_HTTP,
PR_MODE_HEALTH,
} __attribute__((packed));
enum PR_SRV_STATE_FILE {
PR_SRV_STATE_FILE_UNSPEC = 0,
PR_SRV_STATE_FILE_NONE,
PR_SRV_STATE_FILE_GLOBAL,
PR_SRV_STATE_FILE_LOCAL,
};
/* flag values for proxy->cap. This is a bitmask of capabilities supported by the proxy */
#define PR_CAP_NONE 0x0000
#define PR_CAP_FE 0x0001
#define PR_CAP_BE 0x0002
#define PR_CAP_LISTEN (PR_CAP_FE|PR_CAP_BE)
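/* Example (illustrative sketch, not part of the original header; 'px' is
* any struct proxy pointer): a "listen" section carries both capabilities,
* so a single '&' test matches frontends, backends and listen sections:
*
*     if (px->cap & PR_CAP_FE)
*             ... px accepts incoming connections ...
*     if ((px->cap & PR_CAP_LISTEN) == PR_CAP_LISTEN)
*             ... px is a full "listen" instance ...
*/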
/* bits for proxy->options */
#define PR_O_REDISP 0x00000001 /* allow reconnection to dispatch in case of errors */
#define PR_O_TRANSP 0x00000002 /* transparent mode : use original DEST as dispatch */
/* HTTP server-side reuse */
#define PR_O_REUSE_NEVR 0x00000000 /* never reuse a shared connection */
#define PR_O_REUSE_SAFE 0x00000004 /* only reuse a shared connection when it's safe to do so */
#define PR_O_REUSE_AGGR 0x00000008 /* aggressively reuse a shared connection */
#define PR_O_REUSE_ALWS 0x0000000C /* always reuse a shared connection */
#define PR_O_REUSE_MASK 0x0000000C /* mask to retrieve shared connection preferences */
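/* The reuse policies above form a 2-bit field inside proxy->options, so
* they must be compared after masking rather than tested bit by bit; note
* that PR_O_REUSE_NEVR is zero and can only be detected this way
* (illustrative sketch, 'px' being any struct proxy pointer):
*
*     if ((px->options & PR_O_REUSE_MASK) == PR_O_REUSE_SAFE)
*             ... only reuse server connections proven safe ...
*/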
/* unused: 0x10 */
#define PR_O_PREF_LAST 0x00000020 /* prefer last server */
#define PR_O_DISPATCH 0x00000040 /* use dispatch mode */
#define PR_O_FORCED_ID 0x00000080 /* proxy's ID was forced in the configuration */
#define PR_O_FWDFOR 0x00000100 /* conditionally insert x-forwarded-for with client address */
#define PR_O_IGNORE_PRB 0x00000200 /* ignore empty requests (aborts and timeouts) */
#define PR_O_NULLNOLOG 0x00000400 /* a connect without request will not be logged */
#define PR_O_WREQ_BODY 0x00000800 /* always wait for the HTTP request body */
/* unused: 0x1000 */
#define PR_O_FF_ALWAYS 0x00002000 /* always set x-forwarded-for */
#define PR_O_PERSIST 0x00004000 /* server persistence stays effective even when server is down */
#define PR_O_LOGASAP 0x00008000 /* log as soon as possible, without waiting for the stream to complete */
/* unused: 0x00010000 */
#define PR_O_CHK_CACHE 0x00020000 /* require examination of cacheability of the 'set-cookie' field */
#define PR_O_TCP_CLI_KA 0x00040000 /* enable TCP keep-alive on client-side streams */
#define PR_O_TCP_SRV_KA 0x00080000 /* enable TCP keep-alive on server-side streams */
#define PR_O_USE_ALL_BK 0x00100000 /* load-balance between backup servers */
/* unused: 0x00200000 */
#define PR_O_TCP_NOLING 0x00400000 /* disable lingering on client and server connections */
#define PR_O_ABRT_CLOSE 0x00800000 /* immediately abort request when client closes */
#define PR_O_HTTP_KAL 0x00000000 /* HTTP keep-alive mode (http-keep-alive) */
#define PR_O_HTTP_PCL 0x01000000 /* HTTP passive close mode (httpclose) = tunnel with Connection: close */
#define PR_O_HTTP_FCL 0x02000000 /* HTTP forced close mode (forceclose) */
#define PR_O_HTTP_SCL 0x03000000 /* HTTP server close mode (http-server-close) */
#define PR_O_HTTP_TUN 0x04000000 /* HTTP tunnel mode : no analysis past first request/response */
/* unassigned values : 0x05000000, 0x06000000, 0x07000000 */
#define PR_O_HTTP_MODE 0x07000000 /* MASK to retrieve the HTTP mode */
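/* The HTTP connection modes share the 3-bit field covered by
* PR_O_HTTP_MODE, so dispatching on the effective mode means switching on
* the masked value, and changing the mode means clearing the field first
* (illustrative sketch only):
*
*     switch (px->options & PR_O_HTTP_MODE) {
*     case PR_O_HTTP_KAL: ... end-to-end keep-alive ...; break;
*     case PR_O_HTTP_SCL: ... server-close ...; break;
*     case PR_O_HTTP_TUN: ... tunnel ...; break;
*     default: break;
*     }
*
*     px->options = (px->options & ~PR_O_HTTP_MODE) | PR_O_HTTP_SCL;
*/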
#define PR_O_TCPCHK_SSL 0x08000000 /* at least one TCPCHECK connect rule requires SSL */
#define PR_O_CONTSTATS 0x10000000 /* continuous counters */
#define PR_O_HTTP_PROXY 0x20000000 /* Enable stream to use HTTP proxy operations */
#define PR_O_DISABLE404 0x40000000 /* Disable a server on a 404 response to a health-check */
#define PR_O_ORGTO 0x80000000 /* insert x-original-to with destination address */
/* bits for proxy->options2 */
#define PR_O2_SPLIC_REQ 0x00000001 /* transfer requests using linux kernel's splice() */
#define PR_O2_SPLIC_RTR 0x00000002 /* transfer responses using linux kernel's splice() */
#define PR_O2_SPLIC_AUT 0x00000004 /* automatically use linux kernel's splice() */
#define PR_O2_SPLIC_ANY (PR_O2_SPLIC_REQ|PR_O2_SPLIC_RTR|PR_O2_SPLIC_AUT)
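/* PR_O2_SPLIC_ANY aggregates the three splice bits so a single test tells
* whether kernel splicing may be involved at all (illustrative sketch):
*
*     if (px->options2 & PR_O2_SPLIC_ANY)
*             ... splice() may be used for this proxy's transfers ...
*/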
#define PR_O2_REQBUG_OK 0x00000008 /* let buggy requests pass through */
#define PR_O2_RSPBUG_OK 0x00000010 /* let buggy responses pass through */
#define PR_O2_NOLOGNORM 0x00000020 /* don't log normal traffic, only errors and retries */
#define PR_O2_LOGERRORS 0x00000040 /* log errors and retries at level LOG_ERR */
#define PR_O2_SMARTACC 0x00000080 /* don't immediately ACK request after accept */
#define PR_O2_SMARTCON 0x00000100 /* don't immediately send empty ACK after connect */
#define PR_O2_RDPC_PRST 0x00000200 /* activate the RDP cookie analyser */
#define PR_O2_CLFLOG 0x00000400 /* log into clf format */
#define PR_O2_LOGHCHKS 0x00000800 /* log health checks */
#define PR_O2_INDEPSTR 0x00001000 /* independent streams, don't update rex on write */
#define PR_O2_SOCKSTAT 0x00002000 /* collect & provide separate statistics for sockets */
/* unused: 0x00004000 0x00008000 0x00010000 */
#define PR_O2_NODELAY 0x00020000 /* fully interactive mode, never delay outgoing data */
#define PR_O2_USE_PXHDR 0x00040000 /* use Proxy-Connection for proxy requests */
#define PR_O2_CHK_SNDST 0x00080000 /* send the state of each server along with HTTP health checks */
#define PR_O2_SRC_ADDR 0x00100000 /* get the source ip and port for logs */
#define PR_O2_FAKE_KA 0x00200000 /* pretend we do keep-alive with server even though we close */
/* unused: 0x00400000 */
#define PR_O2_EXP_NONE 0x00000000 /* http-check : no expect rule */
#define PR_O2_EXP_STS 0x00800000 /* http-check expect status */
#define PR_O2_EXP_RSTS 0x01000000 /* http-check expect rstatus */
#define PR_O2_EXP_STR 0x01800000 /* http-check expect string */
#define PR_O2_EXP_RSTR 0x02000000 /* http-check expect rstring */
#define PR_O2_EXP_TYPE 0x03800000 /* mask for http-check expect type */
#define PR_O2_EXP_INV 0x04000000 /* http-check expect !<rule> */
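/* The "http-check expect" kinds are encoded in the multi-bit field covered
* by PR_O2_EXP_TYPE, while the '!' negation is carried by the separate
* PR_O2_EXP_INV flag outside that mask (illustrative decoding sketch):
*
*     int inv = !!(px->options2 & PR_O2_EXP_INV);
*     switch (px->options2 & PR_O2_EXP_TYPE) {
*     case PR_O2_EXP_STS:  ... match the numeric status, negated if inv ...; break;
*     case PR_O2_EXP_RSTR: ... match the regex against the response ...; break;
*     default: break;
*     }
*/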
/* unused: 0x08000000 */
/* server health checks */
#define PR_O2_CHK_NONE 0x00000000 /* no L7 health checks configured (TCP by default) */
#define PR_O2_PGSQL_CHK 0x10000000 /* use PGSQL check for server health */
#define PR_O2_REDIS_CHK 0x20000000 /* use REDIS check for server health */
#define PR_O2_SMTP_CHK 0x30000000 /* use SMTP EHLO check for server health - pvandijk@vision6.com.au */
#define PR_O2_HTTP_CHK 0x40000000 /* use HTTP 'OPTIONS' method to check server health */
#define PR_O2_MYSQL_CHK 0x50000000 /* use MYSQL check for server health */
#define PR_O2_LDAP_CHK 0x60000000 /* use LDAP check for server health */
#define PR_O2_SSL3_CHK 0x70000000 /* use SSLv3 CLIENT_HELLO packets for server health */
#define PR_O2_LB_AGENT_CHK 0x80000000 /* use a TCP connection to obtain a metric of server health */
#define PR_O2_TCPCHK_CHK 0x90000000 /* use TCPCHK check for server health */
#define PR_O2_EXT_CHK 0xA0000000 /* use external command for server health */
#define PR_O2_SPOP_CHK 0xB0000000 /* use SPOP for server health */
/* unused: 0xC0000000 to 0xF0000000, reserved for health checks */
#define PR_O2_CHK_ANY 0xF0000000 /* Mask to cover any check */
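/* The check kind occupies the top nibble of proxy->options2, so exactly one
* of the PR_O2_*_CHK values is active at a time and a checker can be picked
* with a switch on the masked value (illustrative sketch only):
*
*     switch (px->options2 & PR_O2_CHK_ANY) {
*     case PR_O2_HTTP_CHK: ... send an HTTP request ...; break;
*     case PR_O2_SSL3_CHK: ... send an SSLv3 CLIENT_HELLO ...; break;
*     case PR_O2_CHK_NONE: ... plain TCP connect check ...; break;
*     default: break;
*     }
*/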
/* end of proxy->options2 */
/* Cookie settings for pr->ck_opts */
#define PR_CK_RW 0x00000001 /* rewrite all direct cookies with the right serverid */
#define PR_CK_IND 0x00000002 /* keep only indirect cookies */
#define PR_CK_INS 0x00000004 /* insert cookies when not accessing a server directly */
#define PR_CK_PFX 0x00000008 /* rewrite all cookies by prefixing the right serverid */
#define PR_CK_ANY (PR_CK_RW | PR_CK_IND | PR_CK_INS | PR_CK_PFX)
#define PR_CK_NOC 0x00000010 /* add a 'Cache-control' header with the cookie */
#define PR_CK_POST 0x00000020 /* don't insert cookies for requests other than a POST */
#define PR_CK_PSV 0x00000040 /* cookie ... preserve */
#define PR_CK_HTTPONLY 0x00000080 /* emit the "HttpOnly" attribute */
#define PR_CK_SECURE 0x00000100 /* emit the "Secure" attribute */
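/* Cookie settings combine one persistence mode from PR_CK_ANY with
* independent attribute flags. For instance, a "cookie SRV insert indirect
* nocache" line would roughly translate to (illustrative sketch; the exact
* mapping lives in the config parser):
*
*     px->ck_opts = PR_CK_INS | PR_CK_IND | PR_CK_NOC;
*/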
/* bits for sticking rules */
#define STK_IS_MATCH 0x00000001 /* match on request fetch */
#define STK_IS_STORE 0x00000002 /* store on request fetch */
#define STK_ON_RSP 0x00000004 /* store on response fetch */
/* diff bits for proxy_find_best_match */
#define PR_FBM_MISMATCH_ID 0x01
#define PR_FBM_MISMATCH_NAME 0x02
#define PR_FBM_MISMATCH_PROXYTYPE 0x04
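/* These diff bits are OR'ed into a caller-provided mask to explain why a
* proxy lookup only matched partially (illustrative sketch, assuming a
* lookup helper that fills such a mask):
*
*     int diff = 0;
*     ... perform the lookup, filling 'diff' ...
*     if (diff & PR_FBM_MISMATCH_PROXYTYPE)
*             ... a proxy with that name exists but with another capability ...
*/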
struct stream;
struct error_snapshot {
struct timeval when; /* date of this event, (tv_sec == 0) means "never" */
unsigned int len; /* original length of the last invalid request/response */
unsigned int pos; /* position of the first invalid character */
unsigned int sid; /* ID of the faulty stream */
unsigned int ev_id; /* event number (counter incremented for each capture) */
unsigned int state; /* message state before the error (when saved) */
unsigned int b_flags; /* buffer flags */
unsigned int s_flags; /* stream flags */
unsigned int t_flags; /* transaction flags */
unsigned int m_flags; /* message flags */
unsigned int b_out; /* pending output bytes */
unsigned int b_wrap; /* position where the buffer is expected to wrap */
unsigned long long b_tot; /* total bytes transferred via this buffer */
unsigned long long m_clen; /* chunk len for this message */
unsigned long long m_blen; /* body len for this message */
struct server *srv; /* server associated with the error (or NULL) */
struct proxy *oe; /* other end = frontend or backend involved */
struct sockaddr_storage src; /* client's address */
char *buf; /* copy of the beginning of the message (may be NULL) */
};
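/* As the 'when' comment above notes, a zeroed timestamp marks a snapshot
* that was never filled, which gives a cheap emptiness test (illustrative
* sketch, 'es' being a pointer to struct error_snapshot):
*
*     if (!es->when.tv_sec)
*             ... no error was captured yet ...
*/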
struct email_alert {
struct list list;
struct list tcpcheck_rules;
};
struct email_alertq {
struct list email_alerts;
struct check check; /* Email alerts are implemented using existing check
* code even though they are not checks. This structure
* is used as a parameter to the check code.
* Each check corresponds to a mailer */
};
struct proxy {
enum obj_type obj_type; /* object type == OBJ_TYPE_PROXY */
enum pr_state state; /* proxy state, one of PR_* */
enum pr_mode mode; /* mode = PR_MODE_TCP, PR_MODE_HTTP or PR_MODE_HEALTH */
char cap; /* supported capabilities (PR_CAP_*) */
unsigned int maxconn; /* max # of active streams on the frontend */
int options; /* PR_O_REDISP, PR_O_TRANSP, ... */
int options2; /* PR_O2_* */
struct in_addr mon_net, mon_mask; /* don't forward connections from this net (network order) FIXME: should support IPv6 */
unsigned int ck_opts; /* PR_CK_* (cookie options) */
unsigned int fe_req_ana, be_req_ana; /* bitmap of common request protocol analysers for the frontend and backend */
unsigned int fe_rsp_ana, be_rsp_ana; /* bitmap of common response protocol analysers for the frontend and backend */
unsigned int http_needed; /* non-null if HTTP analyser may be used */
union {
struct proxy *be; /* default backend, or NULL if none set */
char *name; /* default backend name during config parse */
} defbe;
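/* The defbe union mirrors the two phases of the default_backend setting:
* while parsing, only the backend's name is known; once all proxies exist,
* the name is resolved into a pointer. A hypothetical resolution step
* (sketch only; find_backend_by_name() is an assumed helper, not a real
* API):
*
*     struct proxy *back = find_backend_by_name(curproxy->defbe.name);
*     free(curproxy->defbe.name);
*     curproxy->defbe.be = back;
*/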
struct list acl; /* ACL declared on this proxy */
struct list http_req_rules; /* HTTP request rules: allow/deny/... */
struct list http_res_rules; /* HTTP response rules: allow/deny/... */
struct list block_rules; /* http-request block rules to be inserted before other ones */
struct list redirect_rules; /* content redirecting rules (chained) */
struct list switching_rules; /* content switching rules (chained) */
struct list persist_rules; /* 'force-persist' and 'ignore-persist' rules (chained) */
struct list sticking_rules; /* content sticking rules (chained) */
struct list storersp_rules; /* content store response rules (chained) */
struct list server_rules; /* server switching rules (chained) */
struct { /* TCP request processing */
unsigned int inspect_delay; /* inspection delay */
struct list inspect_rules; /* inspection rules */
struct list l4_rules; /* layer4 rules */
struct list l5_rules; /* layer5 rules */
} tcp_req;
struct { /* TCP response processing */
unsigned int inspect_delay; /* inspection delay */
struct list inspect_rules; /* inspection rules */
} tcp_rep;
struct server *srv, defsrv; /* known servers; default server configuration */
int srv_act, srv_bck; /* # of servers eligible for LB (UP|!checked) AND (enabled+weight!=0) */
int served; /* # of active sessions currently being served */
struct lbprm lbprm; /* load-balancing parameters */
char *cookie_domain; /* domain used to insert the cookie */
char *cookie_name; /* name of the cookie to look for */
int cookie_len; /* strlen(cookie_name), computed only once */
unsigned int cookie_maxidle; /* max idle time for this cookie */
unsigned int cookie_maxlife; /* max life time for this cookie */
int rdp_cookie_len; /* strlen(rdp_cookie_name), computed only once */
char *rdp_cookie_name; /* name of the RDP cookie to look for */
char *url_param_name; /* name of the URL parameter used for hashing */
int url_param_len; /* strlen(url_param_name), computed only once */
int uri_len_limit; /* character limit for uri balancing algorithm */
int uri_dirs_depth1; /* directories+1 (slashes) limit for uri balancing algorithm */
int uri_whole; /* if != 0, calculates the hash from the whole uri. Still honors the len_limit and dirs_depth1 */
char *hh_name; /* name of the header parameter used for hashing */
int hh_len; /* strlen(hh_name), computed only once */
int hh_match_domain; /* toggle use of special match function */
char *capture_name; /* beginning of the name of the cookie to capture */
int capture_namelen; /* length of the cookie name to match */
int capture_len; /* length of the string to be captured */
struct uri_auth *uri_auth; /* if non-NULL, the (list of) per-URI authentications */
int max_ka_queue; /* 1+maximum requests in queue accepted for reusing a K-A conn (0=none) */
char *monitor_uri; /* a special URI to which we respond with HTTP/200 OK */
int monitor_uri_len; /* length of the string above. 0 if unused */
struct list mon_fail_cond; /* list of conditions to fail monitoring requests (chained) */
struct { /* WARNING! check proxy_reset_timeouts() in proxy.h !!! */
int client; /* client I/O timeout (in ticks) */
int tarpit; /* tarpit timeout, defaults to connect if unspecified */
int queue; /* queue timeout, defaults to connect if unspecified */
int connect; /* connect timeout (in ticks) */
int server; /* server I/O timeout (in ticks) */
int httpreq; /* maximum time for complete HTTP request */
int httpka; /* maximum time for a new HTTP request when using keep-alive */
int check; /* maximum time for complete check */
int tunnel; /* I/O timeout to use in tunnel mode (in ticks) */
int clientfin; /* timeout to apply to client half-closed connections */
int serverfin; /* timeout to apply to server half-closed connections */
} timeout;
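All the "(in ticks)" comments above refer to HAProxy's tick scale, on which the value 0 (TICK_ETERNITY) is reserved to mean "no timeout". Below is a simplified paraphrase of the relevant helpers from include/common/ticks.h (the names are real, the bodies are abridged), showing why unset timeouts like tarpit and queue can fall back to connect at the point of use:

#define TICK_ETERNITY 0		/* 0 means "no timeout set" */

/* Simplified: the real tick_add() also takes care never to return 0. */
static inline int tick_add(int now, int timeout)
{
	return now + timeout;
}

/* Propagates "unset": an absent timeout must stay TICK_ETERNITY. */
static inline int tick_add_ifset(int now, int timeout)
{
	if (!timeout)
		return TICK_ETERNITY;
	return tick_add(now, timeout);
}

/* Typical use site (px is a struct proxy *): pick the tarpit expiry,
 * defaulting to connect when tarpit was left unspecified:
 *
 *	exp = tick_add_ifset(now_ms, px->timeout.tarpit ?
 *			     px->timeout.tarpit : px->timeout.connect);
 */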
char *id, *desc; /* proxy id (name) and description */
struct list pendconns; /* pending connections with no server assigned yet */
int nbpend; /* number of pending connections with no server assigned yet */
int totpend; /* total number of pending connections on this instance (for stats) */
REORG/MAJOR: session: rename the "session" entity to "stream"

With HTTP/2, we'll have to support multiplexed streams. A stream is in fact the largest part of what we currently call a session: it has buffers, logs, etc.

In order to catch any error, this commit removes any reference to the struct session and tries to rename most "session" occurrences in function names to "stream", and "sess" to "strm" when that's related to a session. The files stream.{c,h} were added and session.{c,h} removed.

The session will be reintroduced later, and a few parts of the stream will progressively be moved over there. It will more or less contain only what we need in an embryonic session.

Sample fetch functions and converters will have to change a bit so that they'll use an L5 (session) instead of what's currently called "L4", which is in fact L6 for now.

Once all changes are completed, we should see approximately this:

L7 - http_txn
L6 - stream
L5 - session
L4 - connection | applet

There will be at most one http_txn per stream, and a same session will possibly be referenced by multiple streams. A connection will point to a session and to a stream. The session will hold all the information we need to keep even when we don't yet have a stream.

Some more cleanup is needed because some code was already far from being clean. The server queue management still refers to sessions at many places while comments talk about connections. This will have to be cleaned up once we have a server-side connection pool manager. Stream flags "SN_*" still need to be renamed; it doesn't seem like any of them will need to move to the session.
2015-04-02 18:22:06 -04:00
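A minimal struct sketch of the target layering described in the commit message above. Pointer directions and field names are illustrative only, not HAProxy's real definitions:

struct session;				/* L5: outlives individual streams */
struct http_txn;			/* L7: at most one per stream */

struct stream {				/* L6 */
	struct session *sess;		/* several streams may share one session */
	struct http_txn *txn;		/* NULL for plain TCP streams */
};

struct connection {			/* L4 */
	struct session *sess;		/* a connection points to a session... */
	struct stream *strm;		/* ...and to a stream */
};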
unsigned int feconn, beconn; /* # of active frontend and backend streams */
struct freq_ctr fe_req_per_sec; /* HTTP requests per second on the frontend */
struct freq_ctr fe_conn_per_sec; /* received connections per second on the frontend */
struct freq_ctr fe_sess_per_sec; /* accepted sessions per second on the frontend (after tcp rules) */
struct freq_ctr be_sess_per_sec; /* sessions per second on the backend */
unsigned int fe_sps_lim; /* limit on new sessions per second on the frontend */
unsigned int fullconn; /* #conns on backend above which servers are used at full load */
struct in_addr except_net, except_mask; /* don't x-forward-for for this address. FIXME: should support IPv6 */
struct in_addr except_to; /* don't x-original-to for this address. */
struct in_addr except_mask_to; /* the netmask for except_to. */
char *fwdfor_hdr_name; /* header to use - default: "x-forwarded-for" */
char *orgto_hdr_name; /* header to use - default: "x-original-to" */
int fwdfor_hdr_len; /* length of "x-forwarded-for" header */
int orgto_hdr_len; /* length of "x-original-to" header */
char *server_id_hdr_name; /* the header to use to send the server id (name) */
int server_id_hdr_len; /* strlen(server_id_hdr_name), computed only once */
int conn_retries; /* maximum number of connect retries */
int redispatch_after; /* number of retries before redispatch */
[MEDIUM] stats: report server and backend cumulated downtime

Hello,

This patch implements new statistics for SLA calculation by adding a new field 'Dwntime' with total down time since restart (both HTTP/CSV), and extending the status field (HTTP) or inserting a new one (CSV) with the time showing how long each server/backend has been in its current state. Additionally, down transitions are also calculated and displayed for backends, so it is possible to know how many times a selected backend was down, generating the "No server is available to handle this request." error.

The new information is presented in two different ways:
- for HTTP: a "human readable form", one of "100000d 23h", "23h 59m" or "59m 59s"
- for CSV: seconds

I believe that seconds resolution is enough. As there are more columns in the status page I decided to shrink some names to make more space:
- Weight -> Wght
- Check -> Chk
- Down -> Dwn

Making the described changes I also made some improvements and fixed some small bugs:
- don't increment s->health above 's->rise + s->fall - 1'. Previously it was incremented and then (re)set to 's->rise + s->fall - 1'.
- do not set a server down if it is down already
- do not set a server up if it is up already
- fix colspan in multiple places (mostly introduced by my previous patch)
- add missing "status" header to CSV
- fix order of retries/redispatches in server (CSV)
- s/Tthen/Then/
- s/server/backend/ in DATA_ST_PX_BE (dumpstats.c)

Changes from the previous version:
- deal with negative time intervals
- don't rely on s->state (SRV_RUNNING)
- slightly reworked human_time + compacted format (no spaces). If needed it can be used in the future for other purposes by optionally making "cnt" an argument
- leave set_server_down mostly unchanged
- only slightly reworked "process_chk: 9"
- additional fields in CSV are appended to the right
- fix "SEC" macro
- named arguments (human_time, be_downtime, srv_downtime)

Hope it is OK. If there are only cosmetic changes needed, please feel free to correct them; however, if there are some bigger changes required I would like to discuss them first, or at least to know what exactly was changed, especially since I have already put this patch into my production server. :)

Thank you,

Best regards,

Krzysztof Oledzki
2007-10-22 10:21:10 -04:00
unsigned down_trans; /* up-down transitions */
unsigned down_time; /* total time the proxy was down */
time_t last_change; /* last time the state was changed */
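Given down_time and last_change above, the cumulated downtime reported by the stats is the stored total plus the time spent in the current state when the proxy is currently down. A small self-contained sketch (the is_down flag is a hypothetical stand-in for the real state test):

#include <time.h>

static unsigned total_downtime(unsigned down_time, time_t last_change,
			       int is_down)
{
	time_t now = time(NULL);

	/* down_time only accumulates on up-down transitions, so the
	 * elapsed time since last_change must be added while down */
	if (is_down && now > last_change)
		return down_time + (unsigned)(now - last_change);
	return down_time;
}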
int (*accept)(struct stream *s); /* application layer's accept() */
struct conn_src conn_src; /* connection source settings */
enum obj_type *default_target; /* default target to use for accepted streams or NULL */
struct proxy *next;
unsigned int log_count; /* number of logs produced by the frontend */
struct list logsrvs;
struct list logformat; /* log_format linked list */
struct list logformat_sd; /* log_format linked list for the RFC5424 structured-data part */
struct chunk log_tag; /* override default syslog tag */
char *header_unique_id; /* unique-id header */
struct list format_unique_id; /* unique-id format */
int to_log; /* things to be logged (LW_*) */
int stop_time; /* date to stop listening, when stopping != 0 (in ticks) */
struct hdr_exp *req_exp; /* regular expressions for request headers */
struct hdr_exp *rsp_exp; /* regular expressions for response headers */
int nb_req_cap, nb_rsp_cap; /* # of headers to be captured */
struct cap_hdr *req_cap; /* chained list of request headers to be captured */
struct cap_hdr *rsp_cap; /* chained list of response headers to be captured */
struct pool_head *req_cap_pool, /* pools of pre-allocated char ** used to build the streams */
*rsp_cap_pool;
struct list req_add, rsp_add; /* headers to be added */
CLEANUP: counters: move from 3 types to 2 types

We used to have 3 types of counters with a huge overlap:
- listener counters: stats collected for each bind line
- proxy counters: union of the frontend and backend counters
- server counters: stats collected per server

It happens that quite a good part was common between listeners and proxies, due to the frontend counters being updated at the two locations, and that similarly the server and proxy counters were overlapping and being updated together.

This patch cleans this up to propose only two types of counters:
- fe_counters: used by frontends and listeners, related to incoming connection activity
- be_counters: used by backends and servers, related to outgoing connection activity

This allowed the removal of some non-sensical counters from both parts. For frontends, the following entries were removed: cum_lbconn, last_sess, nbpend_max, failed_conns, failed_resp, retries, redispatches, q_time, c_time, d_time, t_time. For backends, this one was removed: intercepted_req.

While doing this it was discovered that we used to incorrectly report intercepted_req for backends in the HTML stats, which was always zero since it's never updated. It also revealed a few inconsistencies (which were not fixed as they are harmless). For example, backends count connections (cum_conn) instead of sessions, while servers count sessions and not connections.

Over the long term, some extra cleanups may be performed by having some counter update functions touch both the server and backend at the same time, as well as both the frontend and listener, to ensure that all sides have all their stats properly filled. The stats dump will also be able to factor the dump functions by counter type.
2016-11-25 08:44:52 -05:00
struct be_counters be_counters; /* backend statistics counters */
struct fe_counters fe_counters; /* frontend statistics counters */
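Under the two-type split described above, one event updates at most two counter sets of the same type: an accepted connection touches the frontend's and its bind line's fe_counters, while server-side activity touches the server's and the backend's be_counters. A schematic example with a stand-in type (the real struct fe_counters lives in types/counters.h; the cum_conn field name is assumed here):

struct fe_counters_sketch {
	unsigned long long cum_conn;	/* cumulated incoming connections */
};

static inline void count_fe_conn(struct fe_counters_sketch *px,
				 struct fe_counters_sketch *li)
{
	px->cum_conn++;		/* frontend-wide total */
	li->cum_conn++;		/* per-bind-line total */
}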
struct list listener_queue; /* list of the temporarily limited listeners because of lack of a proxy resource */
struct stktable table; /* table for storing sticking streams */
struct task *task; /* the associated task, mandatory to manage rate limiting, stopping and resource shortage, NULL if disabled */
struct list tcpcheck_rules; /* tcp-check send / expect rules */
int grace; /* grace time after stop request */
int check_len; /* Length of the HTTP or SSL3 request */
char *check_req; /* HTTP or SSL request to use for PR_O_HTTP_CHK|PR_O_SSL3_CHK */
char *check_command; /* Command to use for external agent checks */
char *check_path; /* PATH environment to use for external agent checks */
char *expect_str; /* http-check expected content : string or text version of the regex */
struct my_regex *expect_regex; /* http-check expected content */
struct chunk errmsg[HTTP_ERR_SIZE]; /* default or customized error messages for known errors */
int uuid; /* universally unique proxy ID, used for SNMP */
unsigned int backlog; /* force the frontend's listen backlog */
unsigned long bind_proc; /* bitmask of processes using this proxy */
/* warning: these structs are huge, keep them at the bottom */
struct sockaddr_storage dispatch_addr; /* the default address to connect to */
struct error_snapshot invalid_req, invalid_rep; /* captures of last errors */
/* used only during configuration parsing */
int no_options; /* PR_O_REDISP, PR_O_TRANSP, ... */
int no_options2; /* PR_O2_* */
struct {
char *file; /* file where the section appears */
int line; /* line where the section appears */
struct eb32_node id; /* place in the tree of used IDs */
struct eb_root used_listener_id;/* list of listener IDs in use */
struct eb_root used_server_id; /* list of server IDs in use */
struct list bind; /* list of bind settings */
struct list listeners; /* list of listeners belonging to this frontend */
MAJOR: sample: maintain a per-proxy list of the fetch args to resolve

While ACL args were resolved after all the config was parsed, that was not the case with sample fetch args, because they're almost everywhere now. The issue is that ACLs now solely rely on sample fetches, so their args resolving doesn't work anymore, and many fetches involving a server, a proxy or a userlist don't work at all.

The real issue is that at the bottom layers we have no information about proxies, line numbers, or even ACLs in order to report understandable errors, and that at the top layers we have no visibility over the locations where fetches are referenced (think log node).

After several unsatisfying solution attempts, we now have a new concept of an args list. The principle is that every proxy has a list head which contains a number of indications such as the config keyword, the context where it's used, the file and line number, etc., and a list of arguments. This list head is of the same type as the elements, so it serves as a template for adding new elements. This way, it is filled from top to bottom by the callers with the information they have (eg: line numbers, ACL name, ...) and the lower layers just have to duplicate it and add an element when they face an argument they cannot resolve yet.

Then at the end of the configuration parsing, a loop passes over each proxy's list and resolves all the args in sequence. This way, all the necessary information is available to report verbose errors.

The first immediate benefit is that for the first time we get a very precise location of issues (arg number in a keyword in its context, ...). Second, in order to do this we had to parse log-format and unique-id-format a bit earlier, so that was a great opportunity for doing so when the directives are encountered (unless it's a defaults section). This way, the recorded line numbers for these args are the ones of the place where the log format is declared, not the end of the file.

Userlists report slightly more information now. They're the only remaining ones in the ACL resolving function.
2013-04-02 10:34:32 -04:00
struct arg_list args; /* sample arg list that need to be resolved */
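The deferred-resolution scheme described in the commit message above can be sketched as follows: during parsing, an unresolvable argument is recorded together with enough context for a precise error message, and a final pass resolves everything once the whole configuration is known. All types and the lookup callback here are illustrative, not HAProxy's real arg_list API:

#include <stdio.h>

struct unresolved_arg {
	struct unresolved_arg *next;
	const char *keyword;	/* config keyword the arg belongs to */
	const char *name;	/* e.g. a server or proxy name to look up */
	const char *file;	/* where it was seen */
	int line;
};

/* Final pass over one proxy's list; returns the number of errors.
 * lookup() stands in for the real per-type resolvers. */
static int resolve_all(struct unresolved_arg *list,
		       void *(*lookup)(const char *name))
{
	int err = 0;

	for (struct unresolved_arg *a = list; a; a = a->next) {
		if (!lookup(a->name)) {
			fprintf(stderr, "%s:%d: '%s' in '%s' not found\n",
				a->file, a->line, a->name, a->keyword);
			err++;
		}
	}
	return err;
}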
struct ebpt_node by_name; /* proxies are stored sorted by name here */
char *logformat_string; /* log format string */
char *lfs_file; /* file name where the logformat string appears (strdup) */
int lfs_line; /* line number where the logformat string appears */
int uif_line; /* line number where the unique-id-format string appears */
char *uif_file; /* file name where the unique-id-format string appears (strdup) */
char *uniqueid_format_string; /* unique-id format string */
char *logformat_sd_string; /* log format string for the RFC5424 structured-data part */
char *lfsd_file; /* file name where the structured-data logformat string for RFC5424 appears (strdup) */
int lfsd_line; /* line number where the structured-data logformat string for RFC5424 appears */
} conf; /* config information */
void *parent; /* parent of the proxy when applicable */
MEDIUM: HTTP compression (zlib library support)

This commit introduces HTTP compression using the zlib library. http_response_forward_body has been modified to call the compression functions.

This feature includes 3 algorithms: identity, gzip and deflate:

* identity: this is mostly for debugging, and it was useful for developing the compression feature. With Content-Length in input, it makes each chunk with the data available in the current buffer. With chunks in input, it rechunks; the output chunks will be bigger or smaller depending on the size of the input chunk and the size of the buffer. Identity does not apply any change to the data.

* gzip: same as identity, but applying gzip compression. The data are deflated using the Z_NO_FLUSH flag in zlib. When there is no more data in the input buffer, it flushes the data in the output buffer (Z_SYNC_FLUSH). At the end of data, when it receives the last chunk in input, or when there is no more data to read, it writes the end of data with Z_FINISH and the ending chunk.

* deflate: same as gzip, but with the deflate algorithm and zlib format. Note that this algorithm has ambiguous support on many browsers and no support at all from recent ones. It is strongly recommended not to use it for anything else than experimentation.

You can't choose the compression ratio at the moment; it will be set to Z_BEST_SPEED (1), as tests have shown very little benefit in terms of compression ratio when going above for HTML contents, at the cost of a massive CPU impact.

Compression will be activated depending on the Accept-Encoding request header. With identity, it does not take care of that header.

To build HAProxy with zlib support, use USE_ZLIB=1 in the make parameters.

This work was initially started by David Du Colombier at Exceliance.
2012-10-23 04:25:10 -04:00
struct comp *comp; /* http compression */
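The flush discipline described in the commit message above maps directly onto plain zlib calls. Here is a minimal, self-contained sketch compressing one complete buffer to gzip (compile with -lz); in streaming use, the same z_stream would be kept across calls, with Z_NO_FLUSH while input remains, Z_SYNC_FLUSH when the input buffer drains, and Z_FINISH only on the last chunk:

#include <string.h>
#include <zlib.h>

static int gzip_buffer(const unsigned char *in, size_t in_len,
		       unsigned char *out, size_t out_size, size_t *out_len)
{
	z_stream strm;
	int ret;

	memset(&strm, 0, sizeof(strm));
	/* windowBits 15 + 16 selects gzip framing; Z_BEST_SPEED (1) is
	 * the fixed level mentioned above */
	if (deflateInit2(&strm, Z_BEST_SPEED, Z_DEFLATED, 15 + 16, 8,
			 Z_DEFAULT_STRATEGY) != Z_OK)
		return -1;

	strm.next_in   = (unsigned char *)in;
	strm.avail_in  = (uInt)in_len;
	strm.next_out  = out;
	strm.avail_out = (uInt)out_size;

	ret = deflate(&strm, Z_FINISH);	/* single shot: finish immediately */
	*out_len = out_size - strm.avail_out;
	deflateEnd(&strm);

	return ret == Z_STREAM_END ? 0 : -1;
}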
struct {
union {
struct mailers *m; /* Mailer to send email alerts via */
char *name;
} mailers;
char *from; /* Address to send email alerts from */
char *to; /* Address(es) to send email alerts to */
char *myhostname; /* Identity to use in HELO command sent to mailer */
int level; /* Maximum syslog level of messages to send
* email alerts for */
int set; /* True if email_alert settings are present */
struct email_alertq *queues; /* per-mailer alerts queues */
} email_alert;
int load_server_state_from_file; /* location of the file containing server state.
* flag PR_SRV_STATE_FILE_* */
char *server_state_file_name; /* used when load_server_state_from_file is set to
* PR_SRV_STATE_FILE_LOCAL. Give a specific file name for
* this backend. If not specified or void, then the backend
* name is used
*/
struct list filter_configs; /* list of the filters that are declared on this proxy */
};
struct switching_rule {
struct list list; /* list linked to from the proxy */
struct acl_cond *cond; /* acl condition to meet */
MEDIUM: proxy: support use_backend with dynamic names

We have a use case where we look up a customer ID in an HTTP header and direct it to the corresponding server. This can easily be done using ACLs and use_backend rules, but the configuration becomes painful to maintain when the number of customers grows to a few tens or even several hundreds.

We realized it would be nice if we could make use_backend resolve its name at run time instead of config parsing time, and use a similar expression to http-request add-header to decide on the proper backend to use. This permits the use of prefixes or even complex names in backend expressions. If no name matches, then the default backend is used. Doing so allowed us to get rid of all the use_backend rules.

Since there are some config checks on the use_backend rules to see if the referenced backend exists, we want to keep them to detect config errors in a normal config. So this patch does not modify the default behaviour and proceeds this way:
- if the backend name in the use_backend directive parses as a log format rule, it's used as-is and is resolved at run time;
- otherwise it's a static name which must be valid at config time.

There was the possibility of doing this with the use-server directive instead of use_backend, but it seems like use_backend is more suited to this task, as it can be used for other purposes. For example, it becomes easy to serve a customer-specific proxy.pac file based on the customer ID by abusing the errorfile primitive:

use_backend bk_cust_%[hdr(X-Cust-Id)] if { hdr(X-Cust-Id) -m found }
default_backend bk_err_404

backend bk_cust_1
errorfile 200 /etc/haproxy/static/proxy.pac.cust1

Signed-off-by: Bertrand Jacquin <bjacquin@exosec.fr>
2013-11-19 05:43:06 -05:00
int dynamic; /* this is a dynamic rule using the logformat expression */
union {
struct proxy *backend; /* target backend */
char *name; /* target backend name during config parsing */
struct list expr; /* logformat expression to use for dynamic rules */
} be;
char *file;
int line;
};
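At run time, a switching rule with 'dynamic' set is applied roughly as the commit message above describes: the logformat expression is evaluated against the request to produce a name, the backend is looked up by that name, and the default backend is used when nothing matches. A hedged sketch only; find_backend is passed in as a callback because the real lookup is HAProxy-internal, and formatted_name stands in for the evaluated expression:

struct proxy;	/* opaque here */

static struct proxy *pick_backend(int dynamic, struct proxy *static_be,
				  const char *formatted_name,
				  struct proxy *(*find_backend)(const char *),
				  struct proxy *default_be)
{
	if (!dynamic)
		return static_be;	/* name was validated at config time */

	struct proxy *be = find_backend(formatted_name);
	return be ? be : default_be;	/* unknown name: fall back */
}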
struct server_rule {
struct list list; /* list linked to from the proxy */
struct acl_cond *cond; /* acl condition to meet */
union {
struct server *ptr; /* target server */
char *name; /* target server name during config parsing */
} srv;
};
struct persist_rule {
struct list list; /* list linked to from the proxy */
struct acl_cond *cond; /* acl condition to meet */
int type;
};
struct sticking_rule {
struct list list; /* list linked to from the proxy */
struct acl_cond *cond; /* acl condition to meet */
struct sample_expr *expr; /* fetch expr to fetch key */
int flags; /* STK_* */
union {
struct stktable *t; /* target table */
char *name; /* target table name during config parsing */
} table;
};
struct redirect_rule {
struct list list; /* list linked to from the proxy */
struct acl_cond *cond; /* acl condition to meet */
int type;
int rdr_len;
char *rdr_str;
struct list rdr_fmt;
int code;
unsigned int flags;
int cookie_len;
char *cookie_str;
};
#endif /* _TYPES_PROXY_H */
/*
* Local variables:
* c-indent-level: 8
* c-basic-offset: 8
* End:
*/