/*
 * Session management functions.
 *
 * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

#include <common/config.h>
#include <common/buffer.h>
#include <common/debug.h>
#include <common/memory.h>

#include <types/capture.h>
#include <types/global.h>

#include <proto/acl.h>
#include <proto/arg.h>
#include <proto/backend.h>
#include <proto/channel.h>
#include <proto/checks.h>
#include <proto/connection.h>
#include <proto/dumpstats.h>
#include <proto/fd.h>
#include <proto/freq_ctr.h>
#include <proto/frontend.h>
#include <proto/hdr_idx.h>
#include <proto/listener.h>
#include <proto/log.h>
#include <proto/raw_sock.h>
#include <proto/session.h>
#include <proto/pipe.h>
#include <proto/proto_http.h>
#include <proto/proto_tcp.h>
#include <proto/proxy.h>
#include <proto/queue.h>
#include <proto/server.h>
#include <proto/sample.h>
#include <proto/stick_table.h>
#include <proto/stream_interface.h>
#include <proto/task.h>

struct pool_head *pool2_session;
struct list sessions;

/* list of sessions waiting for at least one buffer */
struct list buffer_wq = LIST_HEAD_INIT(buffer_wq);
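
/* Example (for illustration only): a session which cannot allocate its
 * buffers is expected to register itself on buffer_wq and to be woken up
 * once another session releases a buffer. Assuming the generic LIST_*
 * macros and the TASK_WOKEN_RES wake-up reason, the two sides would look
 * roughly like this:
 *
 *      // waiter side: queue up, retry the allocation when woken up
 *      if (LIST_ISEMPTY(&s->buffer_wait))
 *              LIST_ADDQ(&buffer_wq, &s->buffer_wait);
 *
 *      // releasing side: wake up one waiter per released buffer
 *      sess = LIST_ELEM(buffer_wq.n, struct session *, buffer_wait);
 *      LIST_DEL(&sess->buffer_wait);
 *      LIST_INIT(&sess->buffer_wait);
 *      task_wakeup(sess->task, TASK_WOKEN_RES);
 */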

static int conn_session_complete(struct connection *conn);
static int conn_session_update(struct connection *conn);
static struct task *expire_mini_session(struct task *t);
int session_complete(struct session *s);

/* data layer callbacks for an embryonic session */
struct data_cb sess_conn_cb = {
        .recv = NULL,
        .send = NULL,
        .wake = conn_session_update,
        .init = conn_session_complete,
};
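
/* Note on the callbacks above: session_accept() attaches them to the incoming
 * connection with conn_attach(cli_conn, s, &sess_conn_cb). While the session
 * is still embryonic, the transport layer runs the handshake on its own; once
 * it completes, .init (conn_session_complete) upgrades the embryonic session
 * into a full one via session_complete(), and .wake (conn_session_update)
 * kills it if the connection reports an error in the meantime.
 */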

/* This function is called from the protocol layer accept() in order to
 * instantiate a new embryonic session on behalf of a given listener and
 * frontend. It returns a positive value upon success, 0 if the connection
 * can be ignored, or a negative value upon critical failure. The accepted
 * file descriptor is closed if we return <= 0.
 */
int session_accept(struct listener *l, int cfd, struct sockaddr_storage *addr)
{
        struct connection *cli_conn;
        struct proxy *p = l->frontend;
        struct session *s;
        struct task *t;
        int ret;

        ret = -1; /* assume unrecoverable error by default */

        if (unlikely((cli_conn = conn_new()) == NULL))
                goto out_close;

        conn_prepare(cli_conn, l->proto, l->xprt);

        cli_conn->t.sock.fd = cfd;
        cli_conn->addr.from = *addr;
        cli_conn->flags |= CO_FL_ADDR_FROM_SET;
        cli_conn->target = &l->obj_type;
        cli_conn->proxy_netns = l->netns;

        if (unlikely((s = pool_alloc2(pool2_session)) == NULL))
                goto out_free_conn;

        /* minimum session initialization required for an embryonic session is
         * fairly low. We need very little to execute L4 ACLs, then we need a
         * task to make the client-side connection live on its own.
         *  - flags
         *  - stick-entry tracking
         */
        s->flags = 0;
        s->logs.logwait = p->to_log;
        s->logs.level = 0;

        memset(s->stkctr, 0, sizeof(s->stkctr));

        s->listener = l;
        s->fe = p;

        /* On a mini-session, the connection is directly attached to the
         * session's target so that we don't need to initialize the stream
         * interfaces. Another benefit is that it's easy to detect a mini-
         * session in dumps using this : it's the only one which has a
         * connection in s->target.
         */
        s->target = &cli_conn->obj_type;

        s->logs.accept_date = date; /* user-visible date for logging */
        s->logs.tv_accept = now;    /* corrected date for internal use */
        s->uniq_id = global.req_count++;
        p->feconn++;
        /* This session was accepted, count it now */
        if (p->feconn > p->fe_counters.conn_max)
                p->fe_counters.conn_max = p->feconn;

        proxy_inc_fe_conn_ctr(l, p);

        /* Add the minimum callbacks to prepare the connection's control layer.
         * We need this so that we can safely execute the ACLs used by the
         * "tcp-request connection" ruleset. We also carefully attach the
         * connection to the stream interface without initializing the rest,
         * so that ACLs can use si[0]->end.
         */
        si_attach_conn(&s->si[0], cli_conn);
        conn_attach(cli_conn, s, &sess_conn_cb);
        conn_ctrl_init(cli_conn);

        /* now evaluate the tcp-request layer4 rules. Since we expect to be able
         * to abort right here as soon as possible, we check the rules before
         * even initializing the stream interfaces.
         */
        if ((l->options & LI_O_TCP_RULES) && !tcp_exec_req_rules(s)) {
                /* let's do a no-linger now to close with a single RST. */
                setsockopt(cfd, SOL_SOCKET, SO_LINGER, (struct linger *) &nolinger, sizeof(struct linger));
                ret = 0; /* successful termination */
                goto out_free_session;
        }

        /* monitor-net and health mode are processed immediately after TCP
         * connection rules. This way it's possible to block them, but they
         * never use the lower data layers, they send directly over the socket,
         * as they were designed for. We first flush the socket receive buffer
         * in order to avoid emission of an RST by the system. We ignore any
         * error.
         */
        if (unlikely((p->mode == PR_MODE_HEALTH) ||
                     ((l->options & LI_O_CHK_MONNET) &&
                      addr->ss_family == AF_INET &&
                      (((struct sockaddr_in *)addr)->sin_addr.s_addr & p->mon_mask.s_addr) == p->mon_net.s_addr))) {
                /* we have 4 possibilities here :
                 *  - HTTP mode, from monitoring address => send "HTTP/1.0 200 OK"
                 *  - HEALTH mode with HTTP check => send "HTTP/1.0 200 OK"
                 *  - HEALTH mode without HTTP check => just send "OK"
                 *  - TCP mode from monitoring address => just close
                 */
                if (l->proto->drain)
                        l->proto->drain(cfd);
                if (p->mode == PR_MODE_HTTP ||
                    (p->mode == PR_MODE_HEALTH && (p->options2 & PR_O2_CHK_ANY) == PR_O2_HTTP_CHK))
                        send(cfd, "HTTP/1.0 200 OK\r\n\r\n", 19, MSG_DONTWAIT|MSG_NOSIGNAL|MSG_MORE);
                else if (p->mode == PR_MODE_HEALTH)
                        send(cfd, "OK\n", 3, MSG_DONTWAIT|MSG_NOSIGNAL|MSG_MORE);
                ret = 0;
                goto out_free_session;
        }

        /* wait for a PROXY protocol header */
        if (l->options & LI_O_ACC_PROXY) {
                cli_conn->flags |= CO_FL_ACCEPT_PROXY;
                conn_sock_want_recv(cli_conn);
        }

        if (unlikely((t = task_new()) == NULL))
                goto out_free_session;

        t->context = s;
        t->nice = l->nice;
        s->task = t;

        /* Finish setting the callbacks. Right now the transport layer is present
         * but not initialized. Also note we need to be careful as the stream
         * int is not initialized yet.
         */
        conn_data_want_recv(cli_conn);
        if (conn_xprt_init(cli_conn) < 0)
                goto out_free_task;

        /* OK, now either we have a pending handshake to execute, in which case
         * we must return to the I/O layer, or we can proceed with the end of
         * the session initialization. In case of handshake, we also set the
         * I/O timeout to the frontend's client timeout.
         */

        if (cli_conn->flags & CO_FL_HANDSHAKE) {
                t->process = expire_mini_session;
                t->expire = tick_add_ifset(now_ms, p->timeout.client);
                task_queue(t);
                cli_conn->flags |= CO_FL_INIT_DATA | CO_FL_WAKE_DATA;
                return 1;
        }

        /* OK let's complete session initialization since there is no handshake */
        cli_conn->flags |= CO_FL_CONNECTED;
        ret = session_complete(s);
        if (ret > 0)
                return ret;

        /* Error unrolling */
 out_free_task:
        task_free(t);
 out_free_session:
        p->feconn--;
        session_store_counters(s);
        pool_free2(pool2_session, s);
 out_free_conn:
        cli_conn->flags &= ~CO_FL_XPRT_TRACKED;
        conn_xprt_close(cli_conn);
        conn_free(cli_conn);
 out_close:
        if (ret < 0 && l->xprt == &raw_sock && p->mode == PR_MODE_HTTP) {
                /* critical error, no more memory, try to emit a 500 response */
                struct chunk *err_msg = &p->errmsg[HTTP_ERR_500];
                if (!err_msg->str)
                        err_msg = &http_err_chunks[HTTP_ERR_500];
                send(cfd, err_msg->str, err_msg->len, MSG_DONTWAIT|MSG_NOSIGNAL);
        }

        if (fdtab[cfd].owner)
                fd_delete(cfd);
        else
                close(cfd);
        return ret;
}
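
/* Example frontend configuration (for illustration only; names and addresses
 * are made up) showing the directives behind the main branches taken in
 * session_accept() above:
 *
 *     frontend fe-www
 *         bind :8443 accept-proxy         # LI_O_ACC_PROXY: expect a PROXY protocol header
 *         mode http
 *         monitor-net 192.168.0.0/24      # LI_O_CHK_MONNET: reply to monitor requests
 *         tcp-request connection reject if { src 10.0.0.0/8 }   # LI_O_TCP_RULES
 *         timeout client 30s              # arms the embryonic session's expiration
 */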

/* prepare the trash with a log prefix for session <s>. It only works with
 * embryonic sessions based on a real connection. This function requires that
 * s->target still points to the incoming connection.
 */
static void prepare_mini_sess_log_prefix(struct session *s)
{
        struct tm tm;
        char pn[INET6_ADDRSTRLEN];
        int ret;
        char *end;
        struct connection *cli_conn = __objt_conn(s->target);

        ret = addr_to_str(&cli_conn->addr.from, pn, sizeof(pn));
        if (ret <= 0)
                chunk_printf(&trash, "unknown [");
        else if (ret == AF_UNIX)
                chunk_printf(&trash, "%s:%d [", pn, s->listener->luid);
        else
                chunk_printf(&trash, "%s:%d [", pn, get_host_port(&cli_conn->addr.from));

        get_localtime(s->logs.accept_date.tv_sec, &tm);
        end = date2str_log(trash.str + trash.len, &tm, &(s->logs.accept_date), trash.size - trash.len);
        trash.len = end - trash.str;
        if (s->listener->name)
                chunk_appendf(&trash, "] %s/%s", s->fe->id, s->listener->name);
        else
                chunk_appendf(&trash, "] %s/%d", s->fe->id, s->listener->luid);
}
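
/* Example of the prefix built above (for illustration; the exact date format
 * comes from date2str_log() and is only approximated here):
 *
 *     127.0.0.1:34567 [09/Feb/2014:10:34:29.123] fe-www/sock-1
 *
 * i.e. "address:port [accept date] frontend/listener", with "unknown" used
 * when the source address cannot be converted, and the listener's numeric id
 * used when it has no name.
 */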

/* This function kills an existing embryonic session. It stops the connection's
 * transport layer, releases assigned resources, resumes the listener if it was
 * disabled and finally kills the file descriptor. This function requires that
 * s->target still points to the incoming connection.
 */
static void kill_mini_session(struct session *s)
{
        int level = LOG_INFO;
        struct connection *conn = __objt_conn(s->target);
        unsigned int log = s->logs.logwait;
        const char *err_msg;

        if (s->fe->options2 & PR_O2_LOGERRORS)
                level = LOG_ERR;

        if (log && (s->fe->options & PR_O_NULLNOLOG)) {
                /* with "option dontlognull", we don't log connections with no transfer */
                if (!conn->err_code ||
                    conn->err_code == CO_ER_PRX_EMPTY || conn->err_code == CO_ER_PRX_ABORT ||
                    conn->err_code == CO_ER_SSL_EMPTY || conn->err_code == CO_ER_SSL_ABORT)
                        log = 0;
        }

        if (log) {
                if (!conn->err_code && (s->task->state & TASK_WOKEN_TIMER)) {
                        if (conn->flags & CO_FL_ACCEPT_PROXY)
                                conn->err_code = CO_ER_PRX_TIMEOUT;
                        else if (conn->flags & CO_FL_SSL_WAIT_HS)
                                conn->err_code = CO_ER_SSL_TIMEOUT;
                }

                prepare_mini_sess_log_prefix(s);
                err_msg = conn_err_code_str(conn);
                if (err_msg)
                        send_log(s->fe, level, "%s: %s\n", trash.str, err_msg);
                else
                        send_log(s->fe, level, "%s: unknown connection error (code=%d flags=%08x)\n",
                                 trash.str, conn->err_code, conn->flags);
        }

        /* kill the connection now */
        conn_force_close(conn);
        conn_free(conn);

        s->fe->feconn--;
        session_store_counters(s);

        if (!(s->listener->options & LI_O_UNLIMITED))
                actconn--;
        jobs--;
        s->listener->nbconn--;
        if (s->listener->state == LI_FULL)
                resume_listener(s->listener);

        /* Dequeues all of the listeners waiting for a resource */
        if (!LIST_ISEMPTY(&global_listener_queue))
                dequeue_all_listeners(&global_listener_queue);

        if (!LIST_ISEMPTY(&s->fe->listener_queue) &&
            (!s->fe->fe_sps_lim || freq_ctr_remain(&s->fe->fe_sess_per_sec, s->fe->fe_sps_lim, 0) > 0))
                dequeue_all_listeners(&s->fe->listener_queue);

        task_delete(s->task);
        task_free(s->task);
        pool_free2(pool2_session, s);
}
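
/* Example configuration (for illustration only) for the logging behaviour
 * checked above. "option dontlognull" is the directive named in the comment;
 * "option log-separate-errors" is assumed to be the one behind PR_O2_LOGERRORS:
 *
 *     frontend fe-www
 *         option dontlognull            # PR_O_NULLNOLOG: don't log empty/aborted handshakes
 *         option log-separate-errors    # assumed: PR_O2_LOGERRORS, log such errors at "err" level
 */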

/* Finishes initializing a session from a connection, or kills it if the
 * connection shows an error. Returns <0 if the connection was killed.
 */
static int conn_session_complete(struct connection *conn)
{
        struct session *s = conn->owner;

        if (!(conn->flags & CO_FL_ERROR) && (session_complete(s) > 0)) {
                conn->flags &= ~CO_FL_INIT_DATA;
                return 0;
        }

        /* kill the connection now */
        kill_mini_session(s);
        return -1;
}

/* Updates an embryonic session's status. The connection is killed in case of
 * error, and <0 will be returned. Otherwise it does nothing.
 */
static int conn_session_update(struct connection *conn)
{
        if (conn->flags & CO_FL_ERROR) {
                kill_mini_session(conn->owner);
                return -1;
        }
        return 0;
}

/* Manages the embryonic session's timeout. It is only called when the timeout
 * strikes and performs the required cleanup.
 */
static struct task *expire_mini_session(struct task *t)
{
        struct session *s = t->context;

        if (!(t->state & TASK_WOKEN_TIMER))
                return t;

        kill_mini_session(s);
        return NULL;
}
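
/* Note: this task is armed by session_accept() with the frontend's
 * "timeout client" while a handshake (PROXY header, SSL) is still pending,
 * so an embryonic session which never completes its handshake ends up in
 * kill_mini_session(), typically logged as a PROXY or SSL handshake timeout.
 */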

/* This function is called from the I/O handler which detects the end of
 * handshake, in order to complete initialization of a valid session. It must
 * be called with an embryonic session. It returns a positive value upon
 * success, 0 if the connection can be ignored, or a negative value upon
 * critical failure. The accepted file descriptor is closed if we return <= 0.
 * The client-side end point is assumed to be a connection, whose pointer is
 * taken from s->target which is assumed to be valid. If the function fails,
 * it restores s->target.
 */
int session_complete(struct session *s)
{
        struct listener *l = s->listener;
        struct proxy *p = s->fe;
        struct http_txn *txn;
        struct task *t = s->task;
        struct connection *conn = __objt_conn(s->target);
        int ret;
        int i;

        ret = -1; /* assume unrecoverable error by default */

        /* OK, we're keeping the session, so let's properly initialize the session */
        LIST_ADDQ(&sessions, &s->list);
        LIST_INIT(&s->back_refs);
        LIST_INIT(&s->buffer_wait);

        s->flags |= SN_INITIALIZED;
        s->unique_id = NULL;

        t->process = l->handler;
        t->context = s;
        t->expire = TICK_ETERNITY;

        /* Note: initially, the session's backend points to the frontend.
         * This changes later when switching rules are executed or
         * when the default backend is assigned.
         */
        s->be = s->fe;
        s->req = s->rep = NULL; /* will be allocated later */
        s->comp_algo = NULL;

        /* Let's count a session now */
        proxy_inc_fe_sess_ctr(l, p);

        for (i = 0; i < MAX_SESS_STKCTR; i++) {
                void *ptr;

                if (!stkctr_entry(&s->stkctr[i]))
                        continue;

                ptr = stktable_data_ptr(s->stkctr[i].table, stkctr_entry(&s->stkctr[i]), STKTABLE_DT_SESS_CNT);
                if (ptr)
                        stktable_data_cast(ptr, sess_cnt)++;

                ptr = stktable_data_ptr(s->stkctr[i].table, stkctr_entry(&s->stkctr[i]), STKTABLE_DT_SESS_RATE);
                if (ptr)
                        update_freq_ctr_period(&stktable_data_cast(ptr, sess_rate),
                                               s->stkctr[i].table->data_arg[STKTABLE_DT_SESS_RATE].u, 1);
        }

        /* this part should be common with other protocols */
        si_reset(&s->si[0], t);
        si_set_state(&s->si[0], SI_ST_EST);

        /* attach the incoming connection to the stream interface now.
         * We must do that *before* clearing ->target because we need
         * to keep a pointer to the connection in case we have to call
         * kill_mini_session().
         */
        si_attach_conn(&s->si[0], conn);

        if (likely(s->fe->options2 & PR_O2_INDEPSTR))
                s->si[0].flags |= SI_FL_INDEP_STR;

        /* pre-initialize the other side's stream interface to an INIT state. The
         * callbacks will be initialized before attempting to connect.
         */
        si_reset(&s->si[1], t);
        si_detach(&s->si[1]);

        if (likely(s->fe->options2 & PR_O2_INDEPSTR))
                s->si[1].flags |= SI_FL_INDEP_STR;

        session_init_srv_conn(s);
        s->target = NULL;
        s->pend_pos = NULL;

        /* init store persistence */
        s->store_count = 0;

        if (unlikely((s->req = pool_alloc2(pool2_channel)) == NULL))
                goto out_free_task; /* no memory */

        channel_init(s->req);
        s->req->prod = &s->si[0];
        s->req->cons = &s->si[1];
        s->si[0].ib = s->si[1].ob = s->req;
        s->req->flags |= CF_READ_ATTACHED; /* the producer is already connected */

        /* activate default analysers enabled for this listener */
        s->req->analysers = l->analysers;

        s->req->wto = TICK_ETERNITY;
        s->req->rto = TICK_ETERNITY;
        s->req->rex = TICK_ETERNITY;
        s->req->wex = TICK_ETERNITY;
        s->req->analyse_exp = TICK_ETERNITY;

        if (unlikely((s->rep = pool_alloc2(pool2_channel)) == NULL))
                goto out_free_req; /* no memory */

        channel_init(s->rep);
        s->rep->prod = &s->si[1];
        s->rep->cons = &s->si[0];
        s->si[0].ob = s->si[1].ib = s->rep;
        s->rep->analysers = 0;

        if (s->fe->options2 & PR_O2_NODELAY) {
                s->req->flags |= CF_NEVER_WAIT;
                s->rep->flags |= CF_NEVER_WAIT;
        }
|
|
|
|
|
|
2010-06-01 11:45:26 -04:00
|
|
|
s->rep->rto = TICK_ETERNITY;
|
|
|
|
|
s->rep->wto = TICK_ETERNITY;
|
|
|
|
|
s->rep->rex = TICK_ETERNITY;
|
|
|
|
|
s->rep->wex = TICK_ETERNITY;
|
|
|
|
|
s->rep->analyse_exp = TICK_ETERNITY;
|
|
|
|
|
|
2014-11-25 13:54:11 -05:00
|
|
|
if (unlikely(b_alloc(&s->req->buf) == NULL))
|
2014-11-24 05:36:57 -05:00
|
|
|
goto out_free_rep; /* no memory */
|
|
|
|
|
|
2014-11-25 13:54:11 -05:00
|
|
|
if (unlikely(b_alloc(&s->rep->buf) == NULL))
|
|
|
|
|
goto out_free_req_buf; /* no memory */
|
|
|
|
|
|
2012-03-09 05:32:30 -05:00
|
|
|
txn = &s->txn;
|
|
|
|
|
/* Those variables will be checked and freed if non-NULL in
|
|
|
|
|
* session.c:session_free(). It is important that they are
|
|
|
|
|
* properly initialized.
|
|
|
|
|
*/
|
|
|
|
|
txn->sessid = NULL;
|
|
|
|
|
txn->srv_cookie = NULL;
|
|
|
|
|
txn->cli_cookie = NULL;
|
|
|
|
|
txn->uri = NULL;
|
|
|
|
|
txn->req.cap = NULL;
|
|
|
|
|
txn->rsp.cap = NULL;
|
|
|
|
|
txn->hdr_idx.v = NULL;
|
|
|
|
|
txn->hdr_idx.size = txn->hdr_idx.used = 0;
|
2013-12-23 09:11:25 -05:00
|
|
|
txn->flags = 0;
|
2012-03-09 05:32:30 -05:00
|
|
|
txn->req.flags = 0;
|
|
|
|
|
txn->rsp.flags = 0;
|
|
|
|
|
/* the HTTP messages need to know what buffer they're associated with */
|
2012-10-12 16:40:39 -04:00
|
|
|
txn->req.chn = s->req;
|
|
|
|
|
txn->rsp.chn = s->rep;
|
2012-03-09 05:32:30 -05:00
|
|
|
|
2010-06-01 11:45:26 -04:00
|
|
|
/* finish initialization of the accepted file descriptor */
|
2013-10-14 15:32:07 -04:00
|
|
|
conn_data_want_recv(conn);
|
2010-06-01 11:45:26 -04:00
|
|
|
|
2010-11-11 04:56:04 -05:00
|
|
|
if (p->accept && (ret = p->accept(s)) <= 0) {
|
|
|
|
|
/* Either we had an unrecoverable error (<0) or work is
|
|
|
|
|
* finished (=0, eg: monitoring), in both situations,
|
|
|
|
|
* we can release everything and close.
|
|
|
|
|
*/
|
2012-10-12 17:49:43 -04:00
|
|
|
goto out_free_rep_buf;
|
2010-06-01 11:45:26 -04:00
|
|
|
}
|
|
|
|
|
|
2012-10-12 12:01:49 -04:00
|
|
|
/* if logs require transport layer information, note it on the connection */
|
|
|
|
|
if (s->logs.logwait & LW_XPRT)
|
2013-10-14 15:32:07 -04:00
|
|
|
conn->flags |= CO_FL_XPRT_TRACKED;
|
2012-10-12 12:01:49 -04:00
|
|
|
|
2012-08-31 10:01:23 -04:00
|
|
|
/* we want the connection handler to notify the stream interface about updates. */
|
2013-10-14 15:32:07 -04:00
|
|
|
conn->flags |= CO_FL_WAKE_DATA;
|
2012-08-31 10:01:23 -04:00
|
|
|
|
2010-06-01 11:45:26 -04:00
|
|
|
/* it is important not to call the wakeup function directly but to
|
|
|
|
|
* pass through task_wakeup(), because this one knows how to apply
|
|
|
|
|
* priorities to tasks.
|
|
|
|
|
*/
|
|
|
|
|
task_wakeup(t, TASK_WOKEN_INIT);
|
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
|
|
/* Error unrolling */
|
2012-10-12 17:49:43 -04:00
|
|
|
out_free_rep_buf:
|
2014-11-25 13:45:11 -05:00
|
|
|
b_free(&s->rep->buf);
|
2012-10-12 17:49:43 -04:00
|
|
|
out_free_req_buf:
|
2014-11-25 13:45:11 -05:00
|
|
|
b_free(&s->req->buf);
|
2014-11-25 13:54:11 -05:00
|
|
|
out_free_rep:
|
|
|
|
|
pool_free2(pool2_channel, s->rep);
|
2010-06-01 11:45:26 -04:00
|
|
|
out_free_req:
|
2012-08-27 18:06:31 -04:00
|
|
|
pool_free2(pool2_channel, s->req);
|
2010-06-01 11:45:26 -04:00
|
|
|
out_free_task:
|
BUG/MAJOR: session: recover the correct connection pointer in half-initialized sessions
2014-05-08 15:06:11 -04:00
|
|
|
/* and restore the connection pointer in case we destroyed it,
|
|
|
|
|
* because kill_mini_session() will need it.
|
|
|
|
|
*/
|
2014-11-25 11:10:33 -05:00
|
|
|
LIST_DEL(&s->list);
|
BUG/MAJOR: session: recover the correct connection pointer in half-initialized sessions
2014-05-08 15:06:11 -04:00
|
|
|
s->target = &conn->obj_type;
|
2010-11-11 04:56:04 -05:00
|
|
|
return ret;
|
2010-06-01 11:45:26 -04:00
|
|
|
}
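To make the recovery described in the blame notes above explicit, here is a minimal sketch of the invariant session_complete() maintains on its error path; alloc_session_resources() is a purely hypothetical stand-in for the channel and buffer allocations performed in the function above.
/* Sketch only: the s->target invariant restored on session_complete()'s
 * error path. alloc_session_resources() is hypothetical and stands for
 * the channel/buffer allocations above.
 */
static int session_complete_sketch(struct session *s, struct connection *conn)
{
	si_attach_conn(&s->si[0], conn);   /* conn now reachable through si[0] */
	s->target = NULL;                  /* held conn only during the embryonic phase */

	if (!alloc_session_resources(s)) {
		/* kill_mini_session() looks for the connection in s->target,
		 * so it must be put back before reporting the failure.
		 */
		s->target = &conn->obj_type;
		return 0;
	}
	return 1;
}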
|
|
|
|
|
|
2006-06-25 20:48:02 -04:00
|
|
|
/*
|
|
|
|
|
* frees the context associated to a session. It must have been removed first.
|
|
|
|
|
*/
|
2011-06-07 20:19:07 -04:00
|
|
|
static void session_free(struct session *s)
|
2006-06-25 20:48:02 -04:00
|
|
|
{
|
2007-03-03 10:23:22 -05:00
|
|
|
struct http_txn *txn = &s->txn;
|
2007-07-11 04:42:35 -04:00
|
|
|
struct proxy *fe = s->fe;
|
2008-12-07 14:16:23 -05:00
|
|
|
struct bref *bref, *back;
|
2013-10-01 04:45:07 -04:00
|
|
|
struct connection *cli_conn = objt_conn(s->si[0].end);
|
2010-06-06 12:28:49 -04:00
|
|
|
int i;
|
2007-01-07 09:46:13 -05:00
|
|
|
|
2006-06-25 20:48:02 -04:00
|
|
|
if (s->pend_pos)
|
|
|
|
|
pendconn_free(s->pend_pos);
|
2008-12-04 03:33:58 -05:00
|
|
|
|
2012-11-11 18:42:33 -05:00
|
|
|
if (objt_server(s->target)) { /* there may be requests left pending in queue */
|
2008-11-11 14:20:02 -05:00
|
|
|
if (s->flags & SN_CURR_SESS) {
|
|
|
|
|
s->flags &= ~SN_CURR_SESS;
|
2012-11-11 18:42:33 -05:00
|
|
|
objt_server(s->target)->cur_sess--;
|
2008-11-11 14:20:02 -05:00
|
|
|
}
|
2012-11-11 18:42:33 -05:00
|
|
|
if (may_dequeue_tasks(objt_server(s->target), s->be))
|
|
|
|
|
process_srv_queue(objt_server(s->target));
|
2008-11-11 14:20:02 -05:00
|
|
|
}
|
2008-12-04 03:33:58 -05:00
|
|
|
|
[BUG] fix the dequeuing logic to ensure that all requests get served
2008-06-20 09:04:11 -04:00
|
|
|
if (unlikely(s->srv_conn)) {
|
|
|
|
|
/* the session still has a reserved slot on a server, but
|
|
|
|
|
* it should normally only be the same as the one above,
|
|
|
|
|
* so this should not happen in fact.
|
|
|
|
|
*/
|
|
|
|
|
sess_change_server(s, NULL);
|
|
|
|
|
}
|
|
|
|
|
|
2009-01-25 07:56:13 -05:00
|
|
|
if (s->req->pipe)
|
|
|
|
|
put_pipe(s->req->pipe);
|
2009-01-18 15:56:21 -05:00
|
|
|
|
2009-01-25 07:56:13 -05:00
|
|
|
if (s->rep->pipe)
|
|
|
|
|
put_pipe(s->rep->pipe);
|
2009-01-18 15:56:21 -05:00
|
|
|
|
MAJOR: session: implement a wait-queue for sessions who need a buffer
When a session_alloc_buffers() fails to allocate one or two buffers,
it subscribes the session to buffer_wq, and waits for another session
to release buffers. It's then removed from the queue and woken up with
TASK_WOKEN_RES, and can attempt its allocation again.
We decide to try to wake as many waiters as we release buffers so
that if we release 2 and two waiters each need only one, they both have
their chance. We must never come to the situation where we don't wake
enough tasks up.
It's common to release buffers after the completion of an I/O callback,
which can happen even if the I/O could not be performed due to half a
failure on memory allocation. In this situation, we don't want to move
out of the wait queue the session that was just added, otherwise it
will never get any buffer. Thus, we only force ourselves out of the
queue when freeing the session.
Note: at the moment, since session_alloc_buffers() is not used, no task
is subscribed to the wait queue.
2014-11-25 15:10:35 -05:00
|
|
|
/* We may still be present in the buffer wait queue */
|
|
|
|
|
if (!LIST_ISEMPTY(&s->buffer_wait)) {
|
|
|
|
|
LIST_DEL(&s->buffer_wait);
|
|
|
|
|
LIST_INIT(&s->buffer_wait);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
b_drop(&s->req->buf);
|
|
|
|
|
b_drop(&s->rep->buf);
|
|
|
|
|
if (!LIST_ISEMPTY(&buffer_wq))
|
|
|
|
|
session_offer_buffers(1);
|
2012-10-12 17:49:43 -04:00
|
|
|
|
2012-08-27 18:06:31 -04:00
|
|
|
pool_free2(pool2_channel, s->req);
|
|
|
|
|
pool_free2(pool2_channel, s->rep);
|
2006-06-25 20:48:02 -04:00
|
|
|
|
2010-01-07 16:51:47 -05:00
|
|
|
http_end_txn(s);
|
|
|
|
|
|
2012-10-12 11:50:05 -04:00
|
|
|
/* ensure the client-side transport layer is destroyed */
|
MAJOR: connection: add two new flags to indicate readiness of control/transport
Currently the control and transport layers of a connection are supposed
to be initialized when their respective pointers are not NULL. This will
not work anymore when we plan to reuse connections, because there is an
asymmetry between the accept() side and the connect() side :
- on accept() side, the fd is set first, then the ctrl layer then the
transport layer ; upon error, they must be undone in the reverse order,
then the FD must be closed. The FD must not be deleted if the control
layer was not yet initialized ;
- on the connect() side, the fd is set last and there is no reliable way
to know if it has been initialized or not. In practice it's initialized
to -1 first but this is hackish and supposes that local FDs only will
be used forever. Also, there are even less solutions for keeping trace
of the transport layer's state.
Also it is possible to support delayed close() when something (eg: logs)
tracks some information requiring the transport and/or control layers,
making it even more difficult to clean them.
So the proposed solution is to add two flags to the connection :
- CO_FL_CTRL_READY is set when the control layer is initialized (fd_insert)
and cleared after it's released (fd_delete).
- CO_FL_XPRT_READY is set when the transport layer is initialized (xprt->init)
and cleared after it's released (xprt->close).
The functions have been adapted to rely on this and not on the pointers
anymore. conn_xprt_close() was unused and dangerous: it did not close
the control layer (eg: the socket itself) but still marks the transport
layer as closed, preventing any future call to conn_full_close() from
finishing the job.
The problem comes from conn_full_close() in fact. It needs to close the
xprt and ctrl layers independently. After that we still have an issue:
we don't know based on ->ctrl alone whether the fd was registered or not.
For this we use the two new flags CO_FL_XPRT_READY and CO_FL_CTRL_READY. We
now rely on this and not on conn->xprt nor conn->ctrl anymore to decide what
remains to be done on the connection.
In order not to miss some flag assignments, we introduce conn_ctrl_init()
to initialize the control layer, register the fd using fd_insert() and set
the flag, and conn_ctrl_close() which unregisters the fd and removes the
flag, but only if the transport layer was closed.
Similarly, at the transport layer, conn_xprt_init() calls ->init and sets
the flag, while conn_xprt_close() checks the flag, calls ->close and clears
the flag, regardless of xprt_ctx or xprt_st. This also ensures that the ->init
and the ->close functions are called only once each and in the correct order.
Note that conn_xprt_close() does nothing if the transport layer is still
tracked.
conn_full_close() now simply calls conn_xprt_close() then conn_ctrl_close()
in turn, which do nothing if CO_FL_XPRT_TRACKED is set.
In order to handle the error path, we also provide conn_force_close() which
ignores CO_FL_XPRT_TRACKED and closes the transport and the control layers
in turns. All relevant instances of fd_delete() have been replaced with
conn_force_close(). Now we always know what state the connection is in and
we can expect to split its initialization.
2013-10-21 10:30:56 -04:00
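The conn_force_close() call just below is the teardown variant described in this message (it ignores CO_FL_XPRT_TRACKED). As a rough sketch of the ordering the two readiness flags are meant to enforce, using only the helper names introduced above; the real definitions live in the connection layer, not in this file:
/* Illustrative only: the close ordering gated by CO_FL_XPRT_READY and
 * CO_FL_CTRL_READY. Each helper checks its own flag and refrains from
 * acting while CO_FL_XPRT_TRACKED is set (eg: logs still need the layers).
 */
static inline void full_close_sketch(struct connection *conn)
{
	conn_xprt_close(conn);   /* calls ->close and clears CO_FL_XPRT_READY */
	conn_ctrl_close(conn);   /* fd_delete() and clears CO_FL_CTRL_READY */
}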
|
|
|
if (cli_conn)
|
|
|
|
|
conn_force_close(cli_conn);
|
2012-10-12 11:50:05 -04:00
|
|
|
|
2010-06-06 12:28:49 -04:00
|
|
|
for (i = 0; i < s->store_count; i++) {
|
|
|
|
|
if (!s->store[i].ts)
|
|
|
|
|
continue;
|
|
|
|
|
stksess_free(s->store[i].table, s->store[i].ts);
|
|
|
|
|
s->store[i].ts = NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2011-10-24 12:15:04 -04:00
|
|
|
pool_free2(pool2_hdr_idx, txn->hdr_idx.v);
|
2007-10-16 11:34:28 -04:00
|
|
|
if (fe) {
|
2010-01-07 16:51:47 -05:00
|
|
|
pool_free2(fe->rsp_cap_pool, txn->rsp.cap);
|
|
|
|
|
pool_free2(fe->req_cap_pool, txn->req.cap);
|
2006-06-25 20:48:02 -04:00
|
|
|
}
|
2009-12-22 09:03:09 -05:00
|
|
|
|
2013-07-01 12:07:03 -04:00
|
|
|
session_store_counters(s);
|
2010-06-14 15:04:55 -04:00
|
|
|
|
2008-12-07 14:16:23 -05:00
|
|
|
list_for_each_entry_safe(bref, back, &s->back_refs, users) {
|
2009-02-22 09:17:24 -05:00
|
|
|
/* we have to unlink all watchers. We must not relink them if
|
|
|
|
|
* this session was the last one in the list.
|
|
|
|
|
*/
|
2008-12-07 14:16:23 -05:00
|
|
|
LIST_DEL(&bref->users);
|
2009-02-22 09:17:24 -05:00
|
|
|
LIST_INIT(&bref->users);
|
|
|
|
|
if (s->list.n != &sessions)
|
|
|
|
|
LIST_ADDQ(&LIST_ELEM(s->list.n, struct session *, list)->back_refs, &bref->users);
|
2008-12-07 14:16:23 -05:00
|
|
|
bref->ref = s->list.n;
|
|
|
|
|
}
|
2008-11-23 13:53:55 -05:00
|
|
|
LIST_DEL(&s->list);
|
2013-10-11 13:34:20 -04:00
|
|
|
si_release_endpoint(&s->si[1]);
|
|
|
|
|
si_release_endpoint(&s->si[0]);
|
2007-05-13 13:43:47 -04:00
|
|
|
pool_free2(pool2_session, s);
|
2007-07-11 04:42:35 -04:00
|
|
|
|
|
|
|
|
/* We may want to free the maximum amount of pools if the proxy is stopping */
|
2007-10-16 11:34:28 -04:00
|
|
|
if (fe && unlikely(fe->state == PR_STSTOPPED)) {
|
2012-10-12 17:49:43 -04:00
|
|
|
pool_flush2(pool2_buffer);
|
2012-08-27 18:06:31 -04:00
|
|
|
pool_flush2(pool2_channel);
|
2011-10-24 12:15:04 -04:00
|
|
|
pool_flush2(pool2_hdr_idx);
|
2008-08-03 11:41:33 -04:00
|
|
|
pool_flush2(pool2_requri);
|
|
|
|
|
pool_flush2(pool2_capture);
|
|
|
|
|
pool_flush2(pool2_session);
|
2014-11-13 10:46:28 -05:00
|
|
|
pool_flush2(pool2_connection);
|
|
|
|
|
pool_flush2(pool2_pendconn);
|
2008-08-03 11:41:33 -04:00
|
|
|
pool_flush2(fe->req_cap_pool);
|
|
|
|
|
pool_flush2(fe->rsp_cap_pool);
|
2007-07-11 04:42:35 -04:00
|
|
|
}
|
2007-05-13 13:43:47 -04:00
|
|
|
}
|
|
|
|
|
|
2014-11-25 13:46:36 -05:00
|
|
|
/* Allocates a single buffer for session <s>, but only if it's guaranteed that
|
|
|
|
|
* it's not the last available buffer. To be called at the beginning of recv()
|
|
|
|
|
* callbacks to ensure that the required buffers are properly allocated.
|
|
|
|
|
* Returns 0 in case of failure, non-zero otherwise.
|
|
|
|
|
*/
|
|
|
|
|
int session_alloc_recv_buffer(struct session *s, struct buffer **buf)
|
|
|
|
|
{
|
|
|
|
|
struct buffer *b;
|
|
|
|
|
|
|
|
|
|
b = b_alloc_margin(buf, 2);
|
|
|
|
|
if (b)
|
|
|
|
|
return 1;
|
|
|
|
|
|
MAJOR: session: implement a wait-queue for sessions who need a buffer
2014-11-25 15:10:35 -05:00
|
|
|
if (LIST_ISEMPTY(&s->buffer_wait))
|
|
|
|
|
LIST_ADDQ(&buffer_wq, &s->buffer_wait);
|
2014-11-25 13:46:36 -05:00
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Allocates up to two buffers for session <s>. Only succeeds if both buffers
|
|
|
|
|
* are properly allocated. It is meant to be called inside process_session() so
|
MAJOR: session: implement a wait-queue for sessions who need a buffer
2014-11-25 15:10:35 -05:00
|
|
|
* that both request and response buffers are allocated. Returns 0 in case of
|
2014-11-25 13:46:36 -05:00
|
|
|
* failure, non-zero otherwise.
|
|
|
|
|
*/
|
|
|
|
|
int session_alloc_buffers(struct session *s)
|
|
|
|
|
{
|
MAJOR: session: implement a wait-queue for sessions who need a buffer
2014-11-25 15:10:35 -05:00
|
|
|
if (!LIST_ISEMPTY(&s->buffer_wait)) {
|
|
|
|
|
LIST_DEL(&s->buffer_wait);
|
|
|
|
|
LIST_INIT(&s->buffer_wait);
|
|
|
|
|
}
|
2014-11-25 13:46:36 -05:00
|
|
|
|
MAJOR: session: implement a wait-queue for sessions who need a buffer
2014-11-25 15:10:35 -05:00
|
|
|
if ((s->req->buf->size || b_alloc(&s->req->buf)) &&
|
|
|
|
|
(s->rep->buf->size || b_alloc(&s->rep->buf)))
|
2014-11-25 13:46:36 -05:00
|
|
|
return 1;
|
|
|
|
|
|
MAJOR: session: implement a wait-queue for sessions who need a buffer
2014-11-25 15:10:35 -05:00
|
|
|
session_release_buffers(s);
|
|
|
|
|
LIST_ADDQ(&buffer_wq, &s->buffer_wait);
|
2014-11-25 13:46:36 -05:00
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* releases unused buffers after processing. Typically used at the end of the
|
MAJOR: session: implement a wait-queue for sessions who need a buffer
2014-11-25 15:10:35 -05:00
|
|
|
* update() functions. It will try to wake up as many tasks as the number of
|
|
|
|
|
* buffers that it releases. In practice, most often sessions are blocked on
|
|
|
|
|
* a single buffer, so it makes sense to try to wake two up when two buffers
|
|
|
|
|
* are released at once.
|
2014-11-25 13:46:36 -05:00
|
|
|
*/
|
|
|
|
|
void session_release_buffers(struct session *s)
|
|
|
|
|
{
|
MAJOR: session: implement a wait-queue for sessions who need a buffer
2014-11-25 15:10:35 -05:00
|
|
|
int release_count = 0;
|
|
|
|
|
|
|
|
|
|
release_count = !!s->req->buf->size + !!s->rep->buf->size;
|
|
|
|
|
|
2014-11-25 13:46:36 -05:00
|
|
|
if (s->req->buf->size && buffer_empty(s->req->buf))
|
|
|
|
|
b_free(&s->req->buf);
|
|
|
|
|
|
|
|
|
|
if (s->rep->buf->size && buffer_empty(s->rep->buf))
|
|
|
|
|
b_free(&s->rep->buf);
|
|
|
|
|
|
MAJOR: session: implement a wait-queue for sessions who need a buffer
2014-11-25 15:10:35 -05:00
|
|
|
/* if we're certain to have at least 1 buffer available, and there is
|
|
|
|
|
* someone waiting, we can wake up a waiter and offer it a buffer.
|
|
|
|
|
*/
|
|
|
|
|
if (release_count >= 1 && !LIST_ISEMPTY(&buffer_wq))
|
|
|
|
|
session_offer_buffers(release_count);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* run across the list of pending sessions waiting for a buffer and wake
|
|
|
|
|
* up to <count> of them if buffers are available.
|
|
|
|
|
*/
|
|
|
|
|
void session_offer_buffers(int count)
|
|
|
|
|
{
|
|
|
|
|
struct session *sess, *bak;
|
|
|
|
|
|
|
|
|
|
list_for_each_entry_safe(sess, bak, &buffer_wq, buffer_wait) {
|
|
|
|
|
if (sess->task->state & TASK_RUNNING)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
LIST_DEL(&sess->buffer_wait);
|
|
|
|
|
LIST_INIT(&sess->buffer_wait);
|
|
|
|
|
|
|
|
|
|
task_wakeup(sess->task, TASK_WOKEN_RES);
|
|
|
|
|
if (--count <= 0)
|
|
|
|
|
break;
|
|
|
|
|
}
|
2014-11-25 13:46:36 -05:00
|
|
|
}
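A hedged usage sketch of the allocator/wait-queue pair defined above: a receive path would typically try session_alloc_recv_buffer() and simply bail out when it fails, counting on the TASK_WOKEN_RES wakeup issued by session_offer_buffers() to retry later. The callback name is illustrative, not a function of this file.
/* Illustrative caller of the wait queue (not actual code from this file). */
static void recv_path_sketch(struct session *s, struct stream_interface *si)
{
	if (!session_alloc_recv_buffer(s, &si->ib->buf))
		return; /* session is now queued on buffer_wq; retried on TASK_WOKEN_RES */

	/* ... from here on it is safe to read into si->ib->buf ... */
}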
|
2007-05-13 13:43:47 -04:00
|
|
|
|
|
|
|
|
/* perform minimal initializations, report 0 in case of error, 1 if OK. */
|
|
|
|
|
int init_session()
|
|
|
|
|
{
|
2008-11-23 13:53:55 -05:00
|
|
|
LIST_INIT(&sessions);
|
2007-05-13 13:43:47 -04:00
|
|
|
pool2_session = create_pool("session", sizeof(struct session), MEM_F_SHARED);
|
|
|
|
|
return pool2_session != NULL;
|
2006-06-25 20:48:02 -04:00
|
|
|
}
|
|
|
|
|
|
2007-11-26 14:15:35 -05:00
|
|
|
void session_process_counters(struct session *s)
|
|
|
|
|
{
|
2007-11-24 16:12:47 -05:00
|
|
|
unsigned long long bytes;
|
2012-12-09 09:55:40 -05:00
|
|
|
void *ptr;
|
|
|
|
|
int i;
|
2007-11-24 16:12:47 -05:00
|
|
|
|
2007-11-26 14:15:35 -05:00
|
|
|
if (s->req) {
|
2007-11-24 16:12:47 -05:00
|
|
|
bytes = s->req->total - s->logs.bytes_in;
|
2007-11-26 14:15:35 -05:00
|
|
|
s->logs.bytes_in = s->req->total;
|
|
|
|
|
if (bytes) {
|
2012-12-09 09:55:40 -05:00
|
|
|
s->fe->fe_counters.bytes_in += bytes;
|
2007-11-24 16:12:47 -05:00
|
|
|
|
2012-12-09 09:55:40 -05:00
|
|
|
s->be->be_counters.bytes_in += bytes;
|
2007-11-24 16:12:47 -05:00
|
|
|
|
2012-11-11 18:42:33 -05:00
|
|
|
if (objt_server(s->target))
|
2012-12-09 09:55:40 -05:00
|
|
|
objt_server(s->target)->counters.bytes_in += bytes;
|
2009-10-04 09:43:17 -04:00
|
|
|
|
2014-03-20 10:42:53 -04:00
|
|
|
if (s->listener && s->listener->counters)
|
2012-12-09 09:55:40 -05:00
|
|
|
s->listener->counters->bytes_in += bytes;
|
2010-06-18 12:33:32 -04:00
|
|
|
|
2013-07-23 13:15:30 -04:00
|
|
|
for (i = 0; i < MAX_SESS_STKCTR; i++) {
|
2014-01-28 17:18:23 -05:00
|
|
|
if (!stkctr_entry(&s->stkctr[i]))
|
2012-12-09 09:55:40 -05:00
|
|
|
continue;
|
2010-06-20 05:56:30 -04:00
|
|
|
|
2012-12-09 09:55:40 -05:00
|
|
|
ptr = stktable_data_ptr(s->stkctr[i].table,
|
2014-01-28 17:18:23 -05:00
|
|
|
stkctr_entry(&s->stkctr[i]),
|
2012-12-09 09:55:40 -05:00
|
|
|
STKTABLE_DT_BYTES_IN_CNT);
|
2010-06-18 12:33:32 -04:00
|
|
|
if (ptr)
|
|
|
|
|
stktable_data_cast(ptr, bytes_in_cnt) += bytes;
|
2010-06-20 05:56:30 -04:00
|
|
|
|
2012-12-09 09:55:40 -05:00
|
|
|
ptr = stktable_data_ptr(s->stkctr[i].table,
|
2014-01-28 17:18:23 -05:00
|
|
|
stkctr_entry(&s->stkctr[i]),
|
2012-12-09 09:55:40 -05:00
|
|
|
STKTABLE_DT_BYTES_IN_RATE);
|
2010-06-20 05:56:30 -04:00
|
|
|
if (ptr)
|
|
|
|
|
update_freq_ctr_period(&stktable_data_cast(ptr, bytes_in_rate),
|
2012-12-09 09:55:40 -05:00
|
|
|
s->stkctr[i].table->data_arg[STKTABLE_DT_BYTES_IN_RATE].u, bytes);
|
2010-06-18 12:33:32 -04:00
|
|
|
}
|
2007-11-26 14:15:35 -05:00
|
|
|
}
|
2007-11-24 16:12:47 -05:00
|
|
|
}
|
|
|
|
|
|
2007-11-26 14:15:35 -05:00
|
|
|
if (s->rep) {
|
2007-11-24 16:12:47 -05:00
|
|
|
bytes = s->rep->total - s->logs.bytes_out;
|
2007-11-26 14:15:35 -05:00
|
|
|
s->logs.bytes_out = s->rep->total;
|
|
|
|
|
if (bytes) {
|
2012-12-09 09:55:40 -05:00
|
|
|
s->fe->fe_counters.bytes_out += bytes;
|
2007-11-24 16:12:47 -05:00
|
|
|
|
2012-12-09 09:55:40 -05:00
|
|
|
s->be->be_counters.bytes_out += bytes;
|
2007-11-24 16:12:47 -05:00
|
|
|
|
2012-11-11 18:42:33 -05:00
|
|
|
if (objt_server(s->target))
|
2012-12-09 09:55:40 -05:00
|
|
|
objt_server(s->target)->counters.bytes_out += bytes;
|
2009-10-04 09:43:17 -04:00
|
|
|
|
2014-03-20 10:42:53 -04:00
|
|
|
if (s->listener && s->listener->counters)
|
2012-12-09 09:55:40 -05:00
|
|
|
s->listener->counters->bytes_out += bytes;
|
2010-08-03 10:29:52 -04:00
|
|
|
|
2013-07-23 13:15:30 -04:00
|
|
|
for (i = 0; i < MAX_SESS_STKCTR; i++) {
|
2014-01-28 17:18:23 -05:00
|
|
|
if (!stkctr_entry(&s->stkctr[i]))
|
2012-12-09 09:55:40 -05:00
|
|
|
continue;
|
2010-06-20 05:56:30 -04:00
|
|
|
|
2012-12-09 09:55:40 -05:00
|
|
|
ptr = stktable_data_ptr(s->stkctr[i].table,
|
2014-01-28 17:18:23 -05:00
|
|
|
stkctr_entry(&s->stkctr[i]),
|
2012-12-09 09:55:40 -05:00
|
|
|
STKTABLE_DT_BYTES_OUT_CNT);
|
2010-06-18 12:33:32 -04:00
|
|
|
if (ptr)
|
|
|
|
|
stktable_data_cast(ptr, bytes_out_cnt) += bytes;
|
2010-06-20 05:56:30 -04:00
|
|
|
|
2012-12-09 09:55:40 -05:00
|
|
|
ptr = stktable_data_ptr(s->stkctr[i].table,
|
2014-01-28 17:18:23 -05:00
|
|
|
stkctr_entry(&s->stkctr[i]),
|
2012-12-09 09:55:40 -05:00
|
|
|
STKTABLE_DT_BYTES_OUT_RATE);
|
2010-06-20 05:56:30 -04:00
|
|
|
if (ptr)
|
|
|
|
|
update_freq_ctr_period(&stktable_data_cast(ptr, bytes_out_rate),
|
2012-12-09 09:55:40 -05:00
|
|
|
s->stkctr[i].table->data_arg[STKTABLE_DT_BYTES_OUT_RATE].u, bytes);
|
2010-06-18 12:33:32 -04:00
|
|
|
}
|
2007-11-26 14:15:35 -05:00
|
|
|
}
|
2007-11-24 16:12:47 -05:00
|
|
|
}
|
|
|
|
|
}
|
2006-06-25 20:48:02 -04:00
|
|
|
|
2008-11-30 12:47:21 -05:00
|
|
|
/* This function is called with (si->state == SI_ST_CON) meaning that a
|
|
|
|
|
* connection was attempted and that the file descriptor is already allocated.
|
|
|
|
|
* We must check for establishment, error and abort. Possible output states
|
|
|
|
|
* are SI_ST_EST (established), SI_ST_CER (error), SI_ST_DIS (abort), and
|
|
|
|
|
* SI_ST_CON (no change). The function returns 0 if it switches to SI_ST_CER,
|
2013-10-01 04:45:07 -04:00
|
|
|
* otherwise 1. This only works with connection-based sessions.
|
2008-11-30 12:47:21 -05:00
|
|
|
*/
|
2011-06-07 20:19:07 -04:00
|
|
|
static int sess_update_st_con_tcp(struct session *s, struct stream_interface *si)
|
2008-11-30 12:47:21 -05:00
|
|
|
{
|
2012-07-02 09:11:27 -04:00
|
|
|
struct channel *req = si->ob;
|
|
|
|
|
struct channel *rep = si->ib;
|
2013-10-01 04:45:07 -04:00
|
|
|
struct connection *srv_conn = __objt_conn(si->end);
|
2008-11-30 12:47:21 -05:00
|
|
|
|
|
|
|
|
/* If we got an error, or if nothing happened and the connection timed
|
|
|
|
|
* out, we must give up. The CER state handler will take care of retry
|
|
|
|
|
* attempts and error reports.
|
|
|
|
|
*/
|
|
|
|
|
if (unlikely(si->flags & (SI_FL_EXP|SI_FL_ERR))) {
|
2012-10-29 17:41:31 -04:00
|
|
|
if (unlikely(si->ob->flags & CF_WRITE_PARTIAL)) {
|
|
|
|
|
/* Some data were sent past the connection establishment,
|
|
|
|
|
* so we need to pretend we're established to log correctly
|
|
|
|
|
* and let later states handle the failure.
|
|
|
|
|
*/
|
|
|
|
|
si->state = SI_ST_EST;
|
|
|
|
|
si->err_type = SI_ET_DATA_ERR;
|
|
|
|
|
si->ib->flags |= CF_READ_ERROR | CF_WRITE_ERROR;
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
2009-03-28 05:47:26 -04:00
|
|
|
si->exp = TICK_ETERNITY;
|
2008-11-30 12:47:21 -05:00
|
|
|
si->state = SI_ST_CER;
|
|
|
|
|
|
MAJOR: connection: add two new flags to indicate readiness of control/transport
2013-10-21 10:30:56 -04:00
|
|
|
conn_force_close(srv_conn);
|
2012-12-08 02:44:02 -05:00
|
|
|
|
2008-11-30 12:47:21 -05:00
|
|
|
if (si->err_type)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
if (si->flags & SI_FL_ERR)
|
|
|
|
|
si->err_type = SI_ET_CONN_ERR;
|
|
|
|
|
else
|
|
|
|
|
si->err_type = SI_ET_CONN_TO;
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* OK, maybe we want to abort */
|
2012-12-29 18:50:35 -05:00
|
|
|
if (!(req->flags & CF_WRITE_PARTIAL) &&
|
|
|
|
|
unlikely((rep->flags & CF_SHUTW) ||
|
2012-08-27 17:14:58 -04:00
|
|
|
((req->flags & CF_SHUTW_NOW) && /* FIXME: this should not prevent a connection from establishing */
|
|
|
|
|
((!(req->flags & CF_WRITE_ACTIVITY) && channel_is_empty(req)) ||
|
2008-11-30 12:47:21 -05:00
|
|
|
s->be->options & PR_O_ABRT_CLOSE)))) {
|
|
|
|
|
/* give up */
|
2012-05-21 10:31:45 -04:00
|
|
|
si_shutw(si);
|
2008-11-30 12:47:21 -05:00
|
|
|
si->err_type |= SI_ET_CONN_ABRT;
|
2009-03-15 17:34:05 -04:00
|
|
|
if (s->srv_error)
|
|
|
|
|
s->srv_error(s, si);
|
2008-11-30 12:47:21 -05:00
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* we need to wait a bit more if there was no activity either */
|
2012-08-27 17:14:58 -04:00
|
|
|
if (!(req->flags & CF_WRITE_ACTIVITY))
|
2008-11-30 12:47:21 -05:00
|
|
|
return 1;
|
|
|
|
|
|
|
|
|
|
/* OK, this means that a connection succeeded. The caller will be
|
|
|
|
|
* responsible for handling the transition from CON to EST.
|
|
|
|
|
*/
|
|
|
|
|
si->state = SI_ST_EST;
|
|
|
|
|
si->err_type = SI_ET_NONE;
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* This function is called with (si->state == SI_ST_CER) meaning that a
|
|
|
|
|
* previous connection attempt has failed and that the file descriptor
|
|
|
|
|
* has already been released. Possible causes include asynchronous error
|
|
|
|
|
* notification and time out. Possible output states are SI_ST_CLO when
|
|
|
|
|
* retries are exhausted, SI_ST_TAR when a delay is wanted before a new
|
|
|
|
|
* connection attempt, SI_ST_ASS when it's wise to retry on the same server,
|
|
|
|
|
* and SI_ST_REQ when an immediate redispatch is wanted. The buffers are
|
|
|
|
|
* marked as in error state. It returns 0.
|
|
|
|
|
*/
|
2011-06-07 20:19:07 -04:00
|
|
|
static int sess_update_st_cer(struct session *s, struct stream_interface *si)
|
2008-11-30 12:47:21 -05:00
|
|
|
{
|
|
|
|
|
/* we probably have to release last session from the server */
|
2012-11-11 18:42:33 -05:00
|
|
|
if (objt_server(s->target)) {
|
|
|
|
|
health_adjust(objt_server(s->target), HANA_STATUS_L4_ERR);
|
2009-12-15 16:31:24 -05:00
|
|
|
|
2008-11-30 12:47:21 -05:00
|
|
|
if (s->flags & SN_CURR_SESS) {
|
|
|
|
|
s->flags &= ~SN_CURR_SESS;
|
2012-11-11 18:42:33 -05:00
|
|
|
objt_server(s->target)->cur_sess--;
|
2008-11-30 12:47:21 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* ensure that we have enough retries left */
|
2010-06-01 03:51:00 -04:00
|
|
|
si->conn_retries--;
|
|
|
|
|
if (si->conn_retries < 0) {
|
2008-11-30 12:47:21 -05:00
|
|
|
if (!si->err_type) {
|
|
|
|
|
si->err_type = SI_ET_CONN_ERR;
|
|
|
|
|
}
|
|
|
|
|
|
2012-11-11 18:42:33 -05:00
|
|
|
if (objt_server(s->target))
|
|
|
|
|
objt_server(s->target)->counters.failed_conns++;
|
2011-03-10 17:25:56 -05:00
|
|
|
s->be->be_counters.failed_conns++;
|
2010-12-29 08:32:28 -05:00
|
|
|
sess_change_server(s, NULL);
|
2012-11-11 18:42:33 -05:00
|
|
|
if (may_dequeue_tasks(objt_server(s->target), s->be))
|
|
|
|
|
process_srv_queue(objt_server(s->target));
|
2008-11-30 12:47:21 -05:00
|
|
|
|
|
|
|
|
/* shutw is enough to stop a connecting socket */
|
2012-05-21 10:31:45 -04:00
|
|
|
si_shutw(si);
|
2012-08-27 17:14:58 -04:00
|
|
|
si->ob->flags |= CF_WRITE_ERROR;
|
|
|
|
|
si->ib->flags |= CF_READ_ERROR;
|
2008-11-30 12:47:21 -05:00
|
|
|
|
|
|
|
|
si->state = SI_ST_CLO;
|
2008-11-30 14:44:17 -05:00
|
|
|
if (s->srv_error)
|
|
|
|
|
s->srv_error(s, si);
|
2008-11-30 12:47:21 -05:00
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* If the "redispatch" option is set on the backend, we are allowed to
|
|
|
|
|
* retry on another server for the last retry. In order to achieve this,
|
|
|
|
|
* we must mark the session unassigned, and possibly clear the DIRECT
|
|
|
|
|
* bit to ignore any persistence cookie. We won't count a retry nor a
|
|
|
|
|
* redispatch yet, because this will depend on what server is selected.
|
2014-06-13 11:49:40 -04:00
|
|
|
* If the connection is not persistent, the balancing algorithm is not
|
|
|
|
|
* deterministic (round robin) and there is more than one active server,
|
|
|
|
|
* we allow an immediate redispatch without waiting, since
|
|
|
|
|
* we don't care about this particular server.
|
2008-11-30 12:47:21 -05:00
|
|
|
*/
|
2014-06-13 11:49:40 -04:00
|
|
|
if (objt_server(s->target) &&
|
|
|
|
|
(si->conn_retries == 0 ||
|
|
|
|
|
(!(s->flags & SN_DIRECT) && s->be->srv_act > 1 &&
|
|
|
|
|
((s->be->lbprm.algo & BE_LB_KIND) == BE_LB_KIND_RR))) &&
|
2010-01-22 13:10:05 -05:00
|
|
|
s->be->options & PR_O_REDISP && !(s->flags & SN_FORCE_PRST)) {
|
2010-12-29 08:32:28 -05:00
|
|
|
sess_change_server(s, NULL);
|
2012-11-11 18:42:33 -05:00
|
|
|
if (may_dequeue_tasks(objt_server(s->target), s->be))
|
|
|
|
|
process_srv_queue(objt_server(s->target));
|
2008-11-30 12:47:21 -05:00
|
|
|
|
|
|
|
|
s->flags &= ~(SN_DIRECT | SN_ASSIGNED | SN_ADDR_SET);
|
|
|
|
|
si->state = SI_ST_REQ;
|
|
|
|
|
} else {
|
2012-11-11 18:42:33 -05:00
|
|
|
if (objt_server(s->target))
|
|
|
|
|
objt_server(s->target)->counters.retries++;
|
2011-03-10 17:25:56 -05:00
|
|
|
s->be->be_counters.retries++;
|
2008-11-30 12:47:21 -05:00
|
|
|
si->state = SI_ST_ASS;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (si->flags & SI_FL_ERR) {
|
|
|
|
|
/* The error was an asynchronous connection error, and we will
|
|
|
|
|
* likely have to retry connecting to the same server, most
|
|
|
|
|
* likely leading to the same result. To avoid this, we wait
|
2014-06-13 11:04:44 -04:00
|
|
|
* MIN(one second, connect timeout) before retrying.
|
2008-11-30 12:47:21 -05:00
|
|
|
*/
|
|
|
|
|
|
2014-06-13 11:04:44 -04:00
|
|
|
int delay = 1000;
|
|
|
|
|
|
|
|
|
|
if (s->be->timeout.connect && s->be->timeout.connect < delay)
|
|
|
|
|
delay = s->be->timeout.connect;
|
|
|
|
|
|
2008-11-30 12:47:21 -05:00
|
|
|
if (!si->err_type)
|
|
|
|
|
si->err_type = SI_ET_CONN_ERR;
|
|
|
|
|
|
2014-06-13 11:40:15 -04:00
|
|
|
/* only wait when we're retrying on the same server */
|
|
|
|
|
if (si->state == SI_ST_ASS ||
|
|
|
|
|
(s->be->lbprm.algo & BE_LB_KIND) != BE_LB_KIND_RR ||
|
|
|
|
|
(s->be->srv_act <= 1)) {
|
|
|
|
|
si->state = SI_ST_TAR;
|
|
|
|
|
si->exp = tick_add(now_ms, MS_TO_TICKS(delay));
|
|
|
|
|
}
|
2008-11-30 12:47:21 -05:00
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* This function handles the transition between the SI_ST_CON state and the
|
2010-05-31 05:57:51 -04:00
|
|
|
* SI_ST_EST state. It must only be called after switching from SI_ST_CON (or
|
2012-05-07 12:12:14 -04:00
|
|
|
* SI_ST_INI) to SI_ST_EST, but only when a ->proto is defined.
|
2008-11-30 12:47:21 -05:00
|
|
|
*/
|
2011-06-07 20:19:07 -04:00
|
|
|
static void sess_establish(struct session *s, struct stream_interface *si)
|
2008-11-30 12:47:21 -05:00
|
|
|
{
|
2012-07-02 09:11:27 -04:00
|
|
|
struct channel *req = si->ob;
|
|
|
|
|
struct channel *rep = si->ib;
|
2008-11-30 12:47:21 -05:00
|
|
|
|
2013-12-31 17:06:46 -05:00
|
|
|
/* First, centralize the timers information */
|
|
|
|
|
s->logs.t_connect = tv_ms_elapsed(&s->logs.tv_accept, &now);
|
|
|
|
|
si->exp = TICK_ETERNITY;
|
|
|
|
|
|
2012-11-11 18:42:33 -05:00
|
|
|
if (objt_server(s->target))
|
|
|
|
|
health_adjust(objt_server(s->target), HANA_STATUS_L4_OK);
|
2009-12-15 16:31:24 -05:00
|
|
|
|
2008-11-30 12:47:21 -05:00
|
|
|
if (s->be->mode == PR_MODE_TCP) { /* let's allow immediate data connection in this case */
|
|
|
|
|
/* if the user wants to log as soon as possible, without counting
|
|
|
|
|
* bytes from the server, then this is the right moment. */
|
BUG/MINOR: log: make log-format, unique-id-format and add-header more independent
It happens that all of them call parse_logformat_line() which sets
proxy->to_log with a number of flags affecting the line format for
all three users. For example, having a unique-id specified disables
the default log-format since fe->to_log is tested when the session
is established.
Similarly, having "option logasap" will cause "+" to be inserted in
unique-id or headers referencing some of the fields depending on
LW_BYTES.
This patch first removes most of the dependency on fe->to_log whenever
possible. The first possible cleanup is to stop checking fe->to_log
for being null, considering that it always contains at least LW_INIT
when any such usage is made of the log-format!
Also, some checks are wrong. s->logs.logwait cannot be nulled by
"logwait &= ~LW_*" since LW_INIT is always there. This results in
getting the wrong log at the end of a request or session when a
unique-id or add-header is set, because logwait is still not null
but the log-format is not checked.
Further cleanups are required. Most LW_* flags should be removed or at
least replaced with what they really mean (eg: depend on client-side
connection, depend on server-side connection, etc...) and this should
only affect logging, not other mechanisms.
This patch fixes the default log-format and tries to limit interferences
between the log formats, but does not pretend to do more for the moment,
since it's the most visible breakage.
2012-12-28 03:40:16 -05:00
|
|
|
if (!LIST_ISEMPTY(&s->fe->logformat) && !(s->logs.logwait & LW_BYTES)) {
|
2008-11-30 12:47:21 -05:00
|
|
|
s->logs.t_close = s->logs.t_connect; /* to get a valid end date */
|
2008-11-30 13:02:32 -05:00
|
|
|
s->do_log(s);
|
2008-11-30 12:47:21 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
else {
|
|
|
|
|
s->txn.rsp.msg_state = HTTP_MSG_RPBEFORE;
|
2013-12-31 16:33:13 -05:00
|
|
|
rep->flags |= CF_READ_DONTWAIT; /* a single read is enough to get response headers */
|
2008-11-30 12:47:21 -05:00
|
|
|
}
|
|
|
|
|
|
2009-08-16 16:57:50 -04:00
|
|
|
rep->analysers |= s->fe->fe_rsp_ana | s->be->be_rsp_ana;
|
2012-08-27 17:14:58 -04:00
|
|
|
rep->flags |= CF_READ_ATTACHED; /* producer is now attached */
|
BUG/MAJOR: http: connection setup may stall on balance url_param
On the mailing list, seri0528@naver.com reported an issue when
using balance url_param or balance uri. The request would sometimes
stall forever.
Cyril Bonté managed to reproduce it with the configuration below:
listen test :80
mode http
balance url_param q
hash-type consistent
server s demo.1wt.eu:80
and found it appeared with this commit: 80a92c0 ("BUG/MEDIUM: http:
don't start to forward request data before the connect").
The bug is subtle but real. The problem is that the HTTP request
forwarding analyzer refrains from starting to parse the request
body when some LB algorithms might need the body contents, in order
to preserve the data pointer and avoid moving things around during
analysis in case a redispatch is later needed. And in order to detect
that the connection establishes, it watches the response channel's
CF_READ_ATTACHED flag.
The problem is that a request analyzer is not subscribed to a response
channel, so it will only see changes when woken for other (generally
correlated) reasons, such as the fact that part of the request could
be sent. And since the CF_READ_ATTACHED flag is cleared once leaving
process_session(), it is important not to miss it. It simply happens
that sometimes the server starts to respond in a sequence that validates
the connection in the middle of process_session(), that it is detected
after the analysers, and that the newly assigned CF_READ_ATTACHED is
not used to detect that the request analysers need to be called again,
then the flag is lost.
The CF_WAKE_WRITE flag doesn't work either because it's cleared upon
entry into process_session(), ie if we spend more than one call not
connecting.
Thus we need a new flag to tell the connection initiator that we are
specifically interested in being notified about connection establishment.
This new flag is CF_WAKE_CONNECT. It is set by the requester, and is
cleared once the connection succeeds, where CF_WAKE_ONCE is set instead,
causing the request analysers to be scanned again.
For future versions, some better options will have to be considered :
- let all analysers subscribe to both request and response events ;
- let analysers subscribe to stream interface events (reduces number
of useless calls)
- change CF_WAKE_WRITE's semantics to persist across calls to
process_session(), but that is different from validating a
connection establishment (eg: no data sent, or no data to send)
The bug was introduced in 1.5-dev23, no backport is needed.
2014-04-30 12:11:11 -04:00
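The block just below is the clearing side of that mechanism, run once the connection is confirmed. The setting side belongs to the request forwarding analyser; a hedged sketch of it follows, with the guard condition named hypothetically since the real test is more involved:
/* Illustrative requester side (not the actual forwarding analyser code):
 * when the LB algorithm may need the request body, ask to be woken up
 * once the connection is established instead of forwarding data now.
 */
if (lb_may_need_body && !(s->rep->flags & CF_READ_ATTACHED)) {   /* hypothetical guard */
	s->req->flags |= CF_WAKE_CONNECT;
	return 0;   /* the analyser will be called again thanks to CF_WAKE_ONCE */
}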
|
|
|
if (req->flags & CF_WAKE_CONNECT) {
|
|
|
|
|
req->flags |= CF_WAKE_ONCE;
|
|
|
|
|
req->flags &= ~CF_WAKE_CONNECT;
|
|
|
|
|
}
|
2013-10-01 04:45:07 -04:00
|
|
|
if (objt_conn(si->end)) {
|
2010-05-31 06:31:35 -04:00
|
|
|
/* real connections have timeouts */
|
|
|
|
|
req->wto = s->be->timeout.server;
|
|
|
|
|
rep->rto = s->be->timeout.server;
|
|
|
|
|
}
|
2008-11-30 12:47:21 -05:00
|
|
|
req->wex = TICK_ETERNITY;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Update stream interface status for input states SI_ST_ASS, SI_ST_QUE, SI_ST_TAR.
|
|
|
|
|
* Other input states are simply ignored.
|
2013-12-31 17:32:12 -05:00
|
|
|
* Possible output states are SI_ST_CLO, SI_ST_TAR, SI_ST_ASS, SI_ST_REQ, SI_ST_CON
|
|
|
|
|
* and SI_ST_EST. Flags must have previously been updated for timeouts and other
|
|
|
|
|
* conditions.
|
2008-11-30 12:47:21 -05:00
|
|
|
*/
|
2011-06-07 20:19:07 -04:00
|
|
|
static void sess_update_stream_int(struct session *s, struct stream_interface *si)
|
2008-11-30 12:47:21 -05:00
|
|
|
{
|
2012-11-11 18:42:33 -05:00
|
|
|
struct server *srv = objt_server(s->target);
|
2011-03-10 10:55:02 -05:00
|
|
|
|
2012-03-01 12:19:58 -05:00
|
|
|
DPRINTF(stderr,"[%u] %s: sess=%p rq=%p, rp=%p, exp(r,w)=%u,%u rqf=%08x rpf=%08x rqh=%d rqt=%d rph=%d rpt=%d cs=%d ss=%d\n",
|
2008-11-30 12:47:21 -05:00
|
|
|
now_ms, __FUNCTION__,
|
|
|
|
|
s,
|
|
|
|
|
s->req, s->rep,
|
|
|
|
|
s->req->rex, s->rep->wex,
|
|
|
|
|
s->req->flags, s->rep->flags,
|
2012-10-12 17:49:43 -04:00
|
|
|
s->req->buf->i, s->req->buf->o, s->rep->buf->i, s->rep->buf->o, s->rep->cons->state, s->req->cons->state);
|
2008-11-30 12:47:21 -05:00
|
|
|
|
|
|
|
|
if (si->state == SI_ST_ASS) {
|
|
|
|
|
/* Server assigned to connection request, we have to try to connect now */
|
|
|
|
|
int conn_err;
|
|
|
|
|
|
|
|
|
|
conn_err = connect_server(s);
|
2012-11-11 18:42:33 -05:00
|
|
|
srv = objt_server(s->target);
|
2011-03-10 10:55:02 -05:00
|
|
|
|
2008-11-30 12:47:21 -05:00
|
|
|
if (conn_err == SN_ERR_NONE) {
|
2013-12-31 17:32:12 -05:00
|
|
|
/* state = SI_ST_CON or SI_ST_EST now */
|
2011-03-10 10:55:02 -05:00
|
|
|
if (srv)
|
|
|
|
|
srv_inc_sess_ctr(srv);
|
2014-02-03 16:26:46 -05:00
|
|
|
if (srv)
|
|
|
|
|
srv_set_sess_last(srv);
|
2008-11-30 12:47:21 -05:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* We have received a synchronous error. We might have to
|
|
|
|
|
* abort, retry immediately or redispatch.
|
|
|
|
|
*/
|
|
|
|
|
if (conn_err == SN_ERR_INTERNAL) {
|
|
|
|
|
if (!si->err_type) {
|
|
|
|
|
si->err_type = SI_ET_CONN_OTHER;
|
|
|
|
|
}
|
|
|
|
|
|
2011-03-10 10:55:02 -05:00
|
|
|
if (srv)
|
|
|
|
|
srv_inc_sess_ctr(srv);
|
2014-02-03 16:26:46 -05:00
|
|
|
if (srv)
|
|
|
|
|
srv_set_sess_last(srv);
|
2011-03-10 10:55:02 -05:00
|
|
|
if (srv)
|
|
|
|
|
srv->counters.failed_conns++;
|
2011-03-10 17:25:56 -05:00
|
|
|
s->be->be_counters.failed_conns++;
|
2008-11-30 12:47:21 -05:00
|
|
|
|
|
|
|
|
/* release other sessions waiting for this server */
|
2010-12-29 08:32:28 -05:00
|
|
|
sess_change_server(s, NULL);
|
2011-03-10 10:55:02 -05:00
|
|
|
if (may_dequeue_tasks(srv, s->be))
|
|
|
|
|
process_srv_queue(srv);
|
2008-11-30 12:47:21 -05:00
|
|
|
|
|
|
|
|
/* Failed and not retryable. */
|
2012-05-21 10:31:45 -04:00
|
|
|
si_shutr(si);
|
|
|
|
|
si_shutw(si);
|
2012-08-27 17:14:58 -04:00
|
|
|
si->ob->flags |= CF_WRITE_ERROR;
|
2008-11-30 12:47:21 -05:00
|
|
|
|
|
|
|
|
s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);
|
|
|
|
|
|
|
|
|
|
/* no session was ever accounted for this server */
|
|
|
|
|
si->state = SI_ST_CLO;
|
2008-11-30 14:44:17 -05:00
|
|
|
if (s->srv_error)
|
|
|
|
|
s->srv_error(s, si);
|
2008-11-30 12:47:21 -05:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* We are facing a retryable error, but we don't want to run a
|
|
|
|
|
* turn-around now, as the problem is likely a source port
|
|
|
|
|
* allocation issue, so we want to retry immediately.
|
|
|
|
|
*/
|
|
|
|
|
si->state = SI_ST_CER;
|
|
|
|
|
si->flags &= ~SI_FL_ERR;
|
|
|
|
|
sess_update_st_cer(s, si);
|
|
|
|
|
/* now si->state is one of SI_ST_CLO, SI_ST_TAR, SI_ST_ASS, SI_ST_REQ */
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
else if (si->state == SI_ST_QUE) {
|
|
|
|
|
/* connection request was queued, check for any update */
|
|
|
|
|
if (!s->pend_pos) {
|
|
|
|
|
/* The connection is not in the queue anymore. Either
|
|
|
|
|
* we have a server connection slot available and we
|
|
|
|
|
* go directly to the assigned state, or we need to
|
|
|
|
|
* load-balance first and go to the REQ state.
|
|
|
|
|
*/
|
|
|
|
|
si->exp = TICK_ETERNITY;
|
|
|
|
|
if (unlikely(!(s->flags & SN_ASSIGNED)))
|
|
|
|
|
si->state = SI_ST_REQ;
|
|
|
|
|
else {
|
|
|
|
|
s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);
|
|
|
|
|
si->state = SI_ST_ASS;
|
|
|
|
|
}
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Connection request still in queue... */
|
|
|
|
|
if (si->flags & SI_FL_EXP) {
|
|
|
|
|
/* ... and timeout expired */
|
|
|
|
|
si->exp = TICK_ETERNITY;
|
|
|
|
|
s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);
|
2011-03-10 10:55:02 -05:00
|
|
|
if (srv)
|
|
|
|
|
srv->counters.failed_conns++;
|
2011-03-10 17:25:56 -05:00
|
|
|
s->be->be_counters.failed_conns++;
|
2012-05-21 10:31:45 -04:00
|
|
|
si_shutr(si);
|
|
|
|
|
si_shutw(si);
|
2012-08-27 17:14:58 -04:00
|
|
|
si->ob->flags |= CF_WRITE_TIMEOUT;
|
2008-11-30 12:47:21 -05:00
|
|
|
if (!si->err_type)
|
|
|
|
|
si->err_type = SI_ET_QUEUE_TO;
|
|
|
|
|
si->state = SI_ST_CLO;
|
2008-11-30 14:44:17 -05:00
|
|
|
if (s->srv_error)
|
|
|
|
|
s->srv_error(s, si);
|
2008-11-30 12:47:21 -05:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Connection remains in queue, check if we have to abort it */
|
2012-08-27 17:14:58 -04:00
|
|
|
if ((si->ob->flags & (CF_READ_ERROR)) ||
|
|
|
|
|
((si->ob->flags & CF_SHUTW_NOW) && /* empty and client aborted */
|
2012-08-24 16:40:29 -04:00
|
|
|
(channel_is_empty(si->ob) || s->be->options & PR_O_ABRT_CLOSE))) {
|
2008-11-30 12:47:21 -05:00
|
|
|
/* give up */
|
|
|
|
|
si->exp = TICK_ETERNITY;
|
|
|
|
|
s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);
|
2012-05-21 10:31:45 -04:00
|
|
|
si_shutr(si);
|
|
|
|
|
si_shutw(si);
|
2008-11-30 12:47:21 -05:00
|
|
|
si->err_type |= SI_ET_QUEUE_ABRT;
|
|
|
|
|
si->state = SI_ST_CLO;
|
2008-11-30 14:44:17 -05:00
|
|
|
if (s->srv_error)
|
|
|
|
|
s->srv_error(s, si);
|
2008-11-30 12:47:21 -05:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Nothing changed */
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
else if (si->state == SI_ST_TAR) {
|
|
|
|
|
/* Connection request might be aborted */
|
2012-08-27 17:14:58 -04:00
|
|
|
if ((si->ob->flags & (CF_READ_ERROR)) ||
|
|
|
|
|
((si->ob->flags & CF_SHUTW_NOW) && /* empty and client aborted */
|
2012-08-24 16:40:29 -04:00
|
|
|
(channel_is_empty(si->ob) || s->be->options & PR_O_ABRT_CLOSE))) {
|
2008-11-30 12:47:21 -05:00
|
|
|
/* give up */
|
|
|
|
|
si->exp = TICK_ETERNITY;
|
2012-05-21 10:31:45 -04:00
|
|
|
si_shutr(si);
|
|
|
|
|
si_shutw(si);
|
2008-11-30 12:47:21 -05:00
|
|
|
si->err_type |= SI_ET_CONN_ABRT;
|
|
|
|
|
si->state = SI_ST_CLO;
|
2008-11-30 14:44:17 -05:00
|
|
|
if (s->srv_error)
|
|
|
|
|
s->srv_error(s, si);
|
2008-11-30 12:47:21 -05:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!(si->flags & SI_FL_EXP))
|
|
|
|
|
return; /* still in turn-around */
|
|
|
|
|
|
|
|
|
|
si->exp = TICK_ETERNITY;
|
|
|
|
|
|
|
|
|
|
/* we keep trying on the same server as long as the session is
|
|
|
|
|
* marked "assigned".
|
|
|
|
|
* FIXME: Should we force a redispatch attempt when the server is down ?
|
|
|
|
|
*/
|
|
|
|
|
if (s->flags & SN_ASSIGNED)
|
|
|
|
|
si->state = SI_ST_ASS;
|
|
|
|
|
else
|
|
|
|
|
si->state = SI_ST_REQ;
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2011-06-07 20:19:07 -04:00
|
|
|
/* Set correct session termination flags in case no analyser has done it. It
|
|
|
|
|
* also counts a failed request if the server state has not reached the request
|
|
|
|
|
* stage.
|
|
|
|
|
*/
|
|
|
|
|
static void sess_set_term_flags(struct session *s)
|
|
|
|
|
{
|
|
|
|
|
if (!(s->flags & SN_FINST_MASK)) {
|
|
|
|
|
if (s->si[1].state < SI_ST_REQ) {
|
|
|
|
|
|
|
|
|
|
s->fe->fe_counters.failed_req++;
|
|
|
|
|
if (s->listener->counters)
|
|
|
|
|
s->listener->counters->failed_req++;
|
|
|
|
|
|
|
|
|
|
s->flags |= SN_FINST_R;
|
|
|
|
|
}
|
|
|
|
|
else if (s->si[1].state == SI_ST_QUE)
|
|
|
|
|
s->flags |= SN_FINST_Q;
|
|
|
|
|
else if (s->si[1].state < SI_ST_EST)
|
|
|
|
|
s->flags |= SN_FINST_C;
|
|
|
|
|
else if (s->si[1].state == SI_ST_EST || s->si[1].prev_state == SI_ST_EST)
|
|
|
|
|
s->flags |= SN_FINST_D;
|
|
|
|
|
else
|
|
|
|
|
s->flags |= SN_FINST_L;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2008-11-30 12:47:21 -05:00
|
|
|
/* This function initiates a server connection request on a stream interface
|
2013-11-30 03:06:53 -05:00
|
|
|
* already in SI_ST_REQ state. Upon success, the state goes to SI_ST_ASS for
|
|
|
|
|
* a real connection to a server, indicating that a server has been assigned,
|
|
|
|
|
* or SI_ST_EST for a successful connection to an applet. It may also return
|
|
|
|
|
* SI_ST_QUE, or SI_ST_CLO upon error.
|
2008-11-30 12:47:21 -05:00
|
|
|
*/
|
2012-03-01 12:19:58 -05:00
|
|
|
static void sess_prepare_conn_req(struct session *s, struct stream_interface *si)
|
|
|
|
|
{
|
|
|
|
|
DPRINTF(stderr,"[%u] %s: sess=%p rq=%p, rp=%p, exp(r,w)=%u,%u rqf=%08x rpf=%08x rqh=%d rqt=%d rph=%d rpt=%d cs=%d ss=%d\n",
|
2008-11-30 12:47:21 -05:00
|
|
|
now_ms, __FUNCTION__,
|
|
|
|
|
s,
|
|
|
|
|
s->req, s->rep,
|
|
|
|
|
s->req->rex, s->rep->wex,
|
|
|
|
|
s->req->flags, s->rep->flags,
|
2012-10-12 17:49:43 -04:00
|
|
|
s->req->buf->i, s->req->buf->o, s->rep->buf->i, s->rep->buf->o, s->rep->cons->state, s->req->cons->state);
|
2008-11-30 12:47:21 -05:00
|
|
|
|
|
|
|
|
if (si->state != SI_ST_REQ)
|
|
|
|
|
return;
|
|
|
|
|
|
2013-11-30 03:06:53 -05:00
|
|
|
if (unlikely(obj_type(s->target) == OBJ_TYPE_APPLET)) {
|
|
|
|
|
/* the applet directly goes to the EST state */
|
2013-12-01 06:25:52 -05:00
|
|
|
struct appctx *appctx = objt_appctx(si->end);
|
|
|
|
|
|
|
|
|
|
if (!appctx || appctx->applet != __objt_applet(s->target))
|
|
|
|
|
appctx = stream_int_register_handler(si, objt_applet(s->target));
|
|
|
|
|
|
|
|
|
|
if (!appctx) {
|
|
|
|
|
/* No more memory, let's immediately abort. Force the
|
|
|
|
|
* error code to ignore the ERR_LOCAL which is not a
|
|
|
|
|
* real error.
|
|
|
|
|
*/
|
2013-12-09 11:14:23 -05:00
|
|
|
s->flags &= ~(SN_ERR_MASK | SN_FINST_MASK);
|
2013-12-01 06:25:52 -05:00
|
|
|
|
|
|
|
|
si_shutr(si);
|
|
|
|
|
si_shutw(si);
|
|
|
|
|
si->ob->flags |= CF_WRITE_ERROR;
|
2013-12-09 11:14:23 -05:00
|
|
|
si->err_type = SI_ET_CONN_RES;
|
2013-12-01 06:25:52 -05:00
|
|
|
si->state = SI_ST_CLO;
|
|
|
|
|
if (s->srv_error)
|
|
|
|
|
s->srv_error(s, si);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2013-11-30 03:06:53 -05:00
|
|
|
s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);
|
|
|
|
|
si->state = SI_ST_EST;
|
|
|
|
|
si->err_type = SI_ET_NONE;
|
2014-04-22 18:35:17 -04:00
|
|
|
be_set_sess_last(s->be);
|
2013-11-30 03:21:49 -05:00
|
|
|
/* let sess_establish() finish the job */
|
2013-11-30 03:06:53 -05:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2008-11-30 12:47:21 -05:00
|
|
|
/* Try to assign a server */
|
|
|
|
|
if (srv_redispatch_connect(s) != 0) {
|
|
|
|
|
/* We did not get a server. Either we queued the
|
|
|
|
|
* connection request, or we encountered an error.
|
|
|
|
|
*/
|
|
|
|
|
if (si->state == SI_ST_QUE)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
/* we did not get any server, let's check the cause */
|
2012-05-21 10:31:45 -04:00
|
|
|
si_shutr(si);
|
|
|
|
|
si_shutw(si);
|
2012-08-27 17:14:58 -04:00
|
|
|
si->ob->flags |= CF_WRITE_ERROR;
|
2008-11-30 12:47:21 -05:00
|
|
|
if (!si->err_type)
|
|
|
|
|
si->err_type = SI_ET_CONN_OTHER;
|
|
|
|
|
si->state = SI_ST_CLO;
|
2008-11-30 14:44:17 -05:00
|
|
|
if (s->srv_error)
|
|
|
|
|
s->srv_error(s, si);
|
2008-11-30 12:47:21 -05:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* The server is assigned */
|
|
|
|
|
s->logs.t_queue = tv_ms_elapsed(&s->logs.tv_accept, &now);
|
|
|
|
|
si->state = SI_ST_ASS;
|
2014-04-22 18:35:17 -04:00
|
|
|
be_set_sess_last(s->be);
|
2008-11-30 12:47:21 -05:00
|
|
|
}
|
|
|
|
|
|
2009-07-07 09:10:31 -04:00
|
|
|
/* This stream analyser checks the switching rules and changes the backend
|
2010-01-22 13:10:05 -05:00
|
|
|
* if appropriate. The default_backend rule is also considered, then the
|
|
|
|
|
* target backend's forced persistence rules are evaluated last, if any.
|
2009-07-07 09:10:31 -04:00
|
|
|
* It returns 1 if the processing can continue on next analysers, or zero if it
|
|
|
|
|
* either needs more data or wants to immediately abort the request.
|
|
|
|
|
*/
|
2012-07-02 09:11:27 -04:00
|
|
|
static int process_switching_rules(struct session *s, struct channel *req, int an_bit)
|
2009-07-07 09:10:31 -04:00
|
|
|
{
|
2010-04-24 18:00:51 -04:00
|
|
|
struct persist_rule *prst_rule;
|
2010-01-22 13:10:05 -05:00
|
|
|
|
2009-07-07 09:10:31 -04:00
|
|
|
req->analysers &= ~an_bit;
|
|
|
|
|
req->analyse_exp = TICK_ETERNITY;
|
|
|
|
|
|
2012-03-01 12:19:58 -05:00
|
|
|
DPRINTF(stderr,"[%u] %s: session=%p b=%p, exp(r,w)=%u,%u bf=%08x bh=%d analysers=%02x\n",
|
2009-07-07 09:10:31 -04:00
|
|
|
now_ms, __FUNCTION__,
|
|
|
|
|
s,
|
|
|
|
|
req,
|
|
|
|
|
req->rex, req->wex,
|
|
|
|
|
req->flags,
|
2012-10-12 17:49:43 -04:00
|
|
|
req->buf->i,
|
2009-07-07 09:10:31 -04:00
|
|
|
req->analysers);
|
|
|
|
|
|
|
|
|
|
/* now check whether we have some switching rules for this request */
|
|
|
|
|
if (!(s->flags & SN_BE_ASSIGNED)) {
|
|
|
|
|
struct switching_rule *rule;
|
|
|
|
|
|
|
|
|
|
list_for_each_entry(rule, &s->fe->switching_rules, list) {
|
2014-04-22 19:21:56 -04:00
|
|
|
int ret = 1;
|
2009-07-07 09:10:31 -04:00
|
|
|
|
2014-04-22 19:21:56 -04:00
|
|
|
if (rule->cond) {
|
|
|
|
|
ret = acl_exec_cond(rule->cond, s->fe, s, &s->txn, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
|
|
|
|
|
ret = acl_pass(ret);
|
|
|
|
|
if (rule->cond->pol == ACL_COND_UNLESS)
|
|
|
|
|
ret = !ret;
|
|
|
|
|
}
|
2009-07-07 09:10:31 -04:00
|
|
|
|
|
|
|
|
if (ret) {
|
2013-11-19 05:43:06 -05:00
|
|
|
/* If the backend name is dynamic, try to resolve the name.
|
|
|
|
|
* If we can't resolve the name, or if any error occurs, break
|
|
|
|
|
* the loop and fall back to the default backend.
|
|
|
|
|
*/
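/* A dynamic rule is typically written with a log-format expression in the
 * backend name (illustrative example, hypothetical names):
 *     use_backend bk_%[req.hdr(host),lower] if { req.hdr(host) -m found }
 * The expression is rendered by build_logline() below and the result is
 * resolved with findproxy(); if either step fails, we fall back to the
 * default backend as described above.
 */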
|
|
|
|
|
struct proxy *backend;
|
|
|
|
|
|
|
|
|
|
if (rule->dynamic) {
|
|
|
|
|
struct chunk *tmp = get_trash_chunk();
|
|
|
|
|
if (!build_logline(s, tmp->str, tmp->size, &rule->be.expr))
|
|
|
|
|
break;
|
|
|
|
|
backend = findproxy(tmp->str, PR_CAP_BE);
|
|
|
|
|
if (!backend)
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
backend = rule->be.backend;
|
|
|
|
|
|
|
|
|
|
if (!session_set_backend(s, backend))
|
2009-07-12 02:27:39 -04:00
|
|
|
goto sw_failed;
|
2009-07-07 09:10:31 -04:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* To ensure correct connection accounting on the backend, we
|
|
|
|
|
* have to assign one if it was not set (eg: a listen). This
|
|
|
|
|
* measure also takes care of correctly setting the default
|
|
|
|
|
* backend if any.
|
|
|
|
|
*/
|
|
|
|
|
if (!(s->flags & SN_BE_ASSIGNED))
|
2009-07-12 02:27:39 -04:00
|
|
|
if (!session_set_backend(s, s->fe->defbe.be ? s->fe->defbe.be : s->be))
|
|
|
|
|
goto sw_failed;
|
2009-07-07 09:10:31 -04:00
|
|
|
}
|
|
|
|
|
|
2010-08-03 08:02:05 -04:00
|
|
|
/* we don't want to run the TCP or HTTP filters again if the backend has not changed */
|
|
|
|
|
if (s->fe == s->be) {
|
|
|
|
|
s->req->analysers &= ~AN_REQ_INSPECT_BE;
|
2009-07-07 09:10:31 -04:00
|
|
|
s->req->analysers &= ~AN_REQ_HTTP_PROCESS_BE;
|
2010-08-03 08:02:05 -04:00
|
|
|
}
|
2009-07-07 09:10:31 -04:00
|
|
|
|
2010-04-24 18:00:51 -04:00
|
|
|
/* as soon as we know the backend, we must check if we have a matching forced or ignored
|
2010-01-22 13:10:05 -05:00
|
|
|
* persistence rule, and report that in the session.
|
|
|
|
|
*/
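/* Illustrative configuration (hypothetical ACLs) for such rules:
 *     force-persist  if { src 10.0.0.0/8 }
 *     ignore-persist if { path_beg /static }
 * A matching rule sets SN_FORCE_PRST or SN_IGNORE_PRST on the session below.
 */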
|
2010-04-24 18:00:51 -04:00
|
|
|
list_for_each_entry(prst_rule, &s->be->persist_rules, list) {
|
2010-01-22 13:10:05 -05:00
|
|
|
int ret = 1;
|
|
|
|
|
|
|
|
|
|
if (prst_rule->cond) {
|
2012-04-25 04:13:36 -04:00
|
|
|
ret = acl_exec_cond(prst_rule->cond, s->be, s, &s->txn, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
|
2010-01-22 13:10:05 -05:00
|
|
|
ret = acl_pass(ret);
|
|
|
|
|
if (prst_rule->cond->pol == ACL_COND_UNLESS)
|
|
|
|
|
ret = !ret;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (ret) {
|
|
|
|
|
/* no rule, or the rule matches */
|
2010-04-24 18:00:51 -04:00
|
|
|
if (prst_rule->type == PERSIST_TYPE_FORCE) {
|
|
|
|
|
s->flags |= SN_FORCE_PRST;
|
|
|
|
|
} else {
|
|
|
|
|
s->flags |= SN_IGNORE_PRST;
|
|
|
|
|
}
|
2010-01-22 13:10:05 -05:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2009-07-07 09:10:31 -04:00
|
|
|
return 1;
|
2009-07-12 02:27:39 -04:00
|
|
|
|
|
|
|
|
sw_failed:
|
|
|
|
|
/* immediately abort this request in case of allocation failure */
|
2012-08-27 18:06:31 -04:00
|
|
|
channel_abort(s->req);
|
|
|
|
|
channel_abort(s->rep);
|
2009-07-12 02:27:39 -04:00
|
|
|
|
|
|
|
|
if (!(s->flags & SN_ERR_MASK))
|
|
|
|
|
s->flags |= SN_ERR_RESOURCE;
|
|
|
|
|
if (!(s->flags & SN_FINST_MASK))
|
|
|
|
|
s->flags |= SN_FINST_R;
|
|
|
|
|
|
|
|
|
|
s->txn.status = 500;
|
|
|
|
|
s->req->analysers = 0;
|
|
|
|
|
s->req->analyse_exp = TICK_ETERNITY;
|
|
|
|
|
return 0;
|
2009-07-07 09:10:31 -04:00
|
|
|
}
|
|
|
|
|
|
2012-04-05 15:09:48 -04:00
|
|
|
/* This stream analyser works on a request. It applies all use-server rules on
|
|
|
|
|
* it then returns 1. The data must already be present in the buffer otherwise
|
|
|
|
|
* they won't match. It always returns 1.
|
|
|
|
|
*/
|
2012-07-02 09:11:27 -04:00
|
|
|
static int process_server_rules(struct session *s, struct channel *req, int an_bit)
|
2012-04-05 15:09:48 -04:00
|
|
|
{
|
|
|
|
|
struct proxy *px = s->be;
|
|
|
|
|
struct server_rule *rule;
|
|
|
|
|
|
|
|
|
|
DPRINTF(stderr,"[%u] %s: session=%p b=%p, exp(r,w)=%u,%u bf=%08x bl=%d analysers=%02x\n",
|
|
|
|
|
now_ms, __FUNCTION__,
|
|
|
|
|
s,
|
|
|
|
|
req,
|
|
|
|
|
req->rex, req->wex,
|
|
|
|
|
req->flags,
|
2012-10-12 17:49:43 -04:00
|
|
|
req->buf->i + req->buf->o,
|
2012-04-05 15:09:48 -04:00
|
|
|
req->analysers);
|
|
|
|
|
|
|
|
|
|
if (!(s->flags & SN_ASSIGNED)) {
|
|
|
|
|
list_for_each_entry(rule, &px->server_rules, list) {
|
|
|
|
|
int ret;
|
|
|
|
|
|
2012-04-25 04:13:36 -04:00
|
|
|
ret = acl_exec_cond(rule->cond, s->be, s, &s->txn, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
|
2012-04-05 15:09:48 -04:00
|
|
|
ret = acl_pass(ret);
|
|
|
|
|
if (rule->cond->pol == ACL_COND_UNLESS)
|
|
|
|
|
ret = !ret;
|
|
|
|
|
|
|
|
|
|
if (ret) {
|
|
|
|
|
struct server *srv = rule->srv.ptr;
|
|
|
|
|
|
2014-05-13 17:41:20 -04:00
|
|
|
if ((srv->state != SRV_ST_STOPPED) ||
|
2012-04-05 15:09:48 -04:00
|
|
|
(px->options & PR_O_PERSIST) ||
|
|
|
|
|
(s->flags & SN_FORCE_PRST)) {
|
|
|
|
|
s->flags |= SN_DIRECT | SN_ASSIGNED;
|
2012-11-11 18:42:33 -05:00
|
|
|
s->target = &srv->obj_type;
|
2012-04-05 15:09:48 -04:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
/* if the server is not UP, let's go on with next rules
|
|
|
|
|
* just in case another one is suited.
|
|
|
|
|
*/
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
req->analysers &= ~an_bit;
|
|
|
|
|
req->analyse_exp = TICK_ETERNITY;
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
2010-01-04 09:47:17 -05:00
|
|
|
/* This stream analyser works on a request. It applies all sticking rules on
|
|
|
|
|
* it then returns 1. The data must already be present in the buffer otherwise
|
|
|
|
|
* they won't match. It always returns 1.
|
|
|
|
|
*/
|
2012-07-02 09:11:27 -04:00
|
|
|
static int process_sticking_rules(struct session *s, struct channel *req, int an_bit)
|
2010-01-04 09:47:17 -05:00
|
|
|
{
|
|
|
|
|
struct proxy *px = s->be;
|
|
|
|
|
struct sticking_rule *rule;
|
|
|
|
|
|
2012-03-01 12:19:58 -05:00
|
|
|
DPRINTF(stderr,"[%u] %s: session=%p b=%p, exp(r,w)=%u,%u bf=%08x bh=%d analysers=%02x\n",
|
2010-01-04 09:47:17 -05:00
|
|
|
now_ms, __FUNCTION__,
|
|
|
|
|
s,
|
|
|
|
|
req,
|
|
|
|
|
req->rex, req->wex,
|
|
|
|
|
req->flags,
|
2012-10-12 17:49:43 -04:00
|
|
|
req->buf->i,
|
2010-01-04 09:47:17 -05:00
|
|
|
req->analysers);
|
|
|
|
|
|
|
|
|
|
list_for_each_entry(rule, &px->sticking_rules, list) {
|
|
|
|
|
int ret = 1 ;
|
|
|
|
|
int i;
|
|
|
|
|
|
2013-12-09 06:52:13 -05:00
|
|
|
/* Only the first stick store-request of each table is applied
|
|
|
|
|
* and other ones are ignored. The purpose is to allow complex
|
|
|
|
|
* configurations which look for multiple entries by decreasing
|
|
|
|
|
* order of precision and to stop at the first which matches.
|
|
|
|
|
* An example could be a store of the IP address from an HTTP
|
|
|
|
|
* header first, then from the source if not found.
|
|
|
|
|
*/
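/* Illustrative configuration (hypothetical names) for the example above:
 *     stick store-request req.hdr_ip(X-Forwarded-For) table per_client
 *     stick store-request src                         table per_client
 * Only the first rule that produces a key for table "per_client" is kept,
 * so the source address is only stored when no usable header is found.
 */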
|
2010-01-04 09:47:17 -05:00
|
|
|
for (i = 0; i < s->store_count; i++) {
|
|
|
|
|
if (rule->table.t == s->store[i].table)
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (i != s->store_count)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
if (rule->cond) {
|
2012-04-25 04:13:36 -04:00
|
|
|
ret = acl_exec_cond(rule->cond, px, s, &s->txn, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
|
2010-01-04 09:47:17 -05:00
|
|
|
ret = acl_pass(ret);
|
|
|
|
|
if (rule->cond->pol == ACL_COND_UNLESS)
|
|
|
|
|
ret = !ret;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (ret) {
|
|
|
|
|
struct stktable_key *key;
|
|
|
|
|
|
2014-06-25 10:20:53 -04:00
|
|
|
key = stktable_fetch_key(rule->table.t, px, s, &s->txn, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, rule->expr, NULL);
|
2010-01-04 09:47:17 -05:00
|
|
|
if (!key)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
if (rule->flags & STK_IS_MATCH) {
|
|
|
|
|
struct stksess *ts;
|
|
|
|
|
|
2010-06-06 09:38:59 -04:00
|
|
|
if ((ts = stktable_lookup_key(rule->table.t, key)) != NULL) {
|
2010-01-04 09:47:17 -05:00
|
|
|
if (!(s->flags & SN_ASSIGNED)) {
|
|
|
|
|
struct eb32_node *node;
|
2010-06-06 10:40:39 -04:00
|
|
|
void *ptr;
|
2010-01-04 09:47:17 -05:00
|
|
|
|
|
|
|
|
/* srv found in table */
|
2010-06-06 10:40:39 -04:00
|
|
|
ptr = stktable_data_ptr(rule->table.t, ts, STKTABLE_DT_SERVER_ID);
|
|
|
|
|
node = eb32_lookup(&px->conf.used_server_id, stktable_data_cast(ptr, server_id));
|
2010-01-04 09:47:17 -05:00
|
|
|
if (node) {
|
|
|
|
|
struct server *srv;
|
|
|
|
|
|
|
|
|
|
srv = container_of(node, struct server, conf.id);
|
2014-05-13 17:41:20 -04:00
|
|
|
if ((srv->state != SRV_ST_STOPPED) ||
|
2010-01-22 13:10:05 -05:00
|
|
|
(px->options & PR_O_PERSIST) ||
|
|
|
|
|
(s->flags & SN_FORCE_PRST)) {
|
2010-01-04 09:47:17 -05:00
|
|
|
s->flags |= SN_DIRECT | SN_ASSIGNED;
|
2012-11-11 18:42:33 -05:00
|
|
|
s->target = &srv->obj_type;
|
2010-01-04 09:47:17 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2010-09-23 12:16:52 -04:00
|
|
|
stktable_touch(rule->table.t, ts, 1);
|
2010-01-04 09:47:17 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (rule->flags & STK_IS_STORE) {
|
|
|
|
|
if (s->store_count < (sizeof(s->store) / sizeof(s->store[0]))) {
|
|
|
|
|
struct stksess *ts;
|
|
|
|
|
|
|
|
|
|
ts = stksess_new(rule->table.t, key);
|
|
|
|
|
if (ts) {
|
|
|
|
|
s->store[s->store_count].table = rule->table.t;
|
|
|
|
|
s->store[s->store_count++].ts = ts;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
req->analysers &= ~an_bit;
|
|
|
|
|
req->analyse_exp = TICK_ETERNITY;
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* This stream analyser works on a response. It applies all store rules on it
|
|
|
|
|
* then returns 1. The data must already be present in the buffer otherwise
|
|
|
|
|
* they won't match. It always returns 1.
|
|
|
|
|
*/
|
2012-07-02 09:11:27 -04:00
|
|
|
static int process_store_rules(struct session *s, struct channel *rep, int an_bit)
|
2010-01-04 09:47:17 -05:00
|
|
|
{
|
|
|
|
|
struct proxy *px = s->be;
|
|
|
|
|
struct sticking_rule *rule;
|
|
|
|
|
int i;
|
2013-12-09 06:52:13 -05:00
|
|
|
int nbreq = s->store_count;
|
2010-01-04 09:47:17 -05:00
|
|
|
|
2012-03-01 12:19:58 -05:00
|
|
|
DPRINTF(stderr,"[%u] %s: session=%p b=%p, exp(r,w)=%u,%u bf=%08x bh=%d analysers=%02x\n",
|
2010-01-04 09:47:17 -05:00
|
|
|
now_ms, __FUNCTION__,
|
|
|
|
|
s,
|
2010-02-09 14:55:44 -05:00
|
|
|
rep,
|
|
|
|
|
rep->rex, rep->wex,
|
|
|
|
|
rep->flags,
|
2012-10-12 17:49:43 -04:00
|
|
|
rep->buf->i,
|
2010-02-09 14:55:44 -05:00
|
|
|
rep->analysers);
|
2010-01-04 09:47:17 -05:00
|
|
|
|
|
|
|
|
list_for_each_entry(rule, &px->storersp_rules, list) {
|
|
|
|
|
int ret = 1 ;
|
|
|
|
|
|
2013-12-09 06:52:13 -05:00
|
|
|
/* Only the first stick store-response of each table is applied
|
|
|
|
|
* and other ones are ignored. The purpose is to allow complex
|
|
|
|
|
* configurations which look for multiple entries by decreasing
|
|
|
|
|
* order of precision and to stop at the first which matches.
|
|
|
|
|
* An example could be a store of a set-cookie value, with a
|
|
|
|
|
* fallback to a parameter found in a 302 redirect.
|
|
|
|
|
*
|
|
|
|
|
* The store-response rules are not allowed to override the
|
|
|
|
|
* store-request rules for the same table, but they may coexist.
|
|
|
|
|
* Thus we can have up to one store-request entry and one store-
|
|
|
|
|
* response entry for the same table at any time.
|
|
|
|
|
*/
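/* Illustrative configuration (hypothetical names) for the example above:
 *     stick store-response res.cook(SRVID) table per_cookie
 * Such a rule may coexist with a store-request rule on the same table, as
 * described in the comment above.
 */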
|
|
|
|
|
for (i = nbreq; i < s->store_count; i++) {
|
|
|
|
|
if (rule->table.t == s->store[i].table)
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* skip existing entries for this table */
|
|
|
|
|
if (i < s->store_count)
|
|
|
|
|
continue;
|
|
|
|
|
|
2010-01-04 09:47:17 -05:00
|
|
|
if (rule->cond) {
|
2012-04-25 04:13:36 -04:00
|
|
|
ret = acl_exec_cond(rule->cond, px, s, &s->txn, SMP_OPT_DIR_RES|SMP_OPT_FINAL);
|
2010-01-04 09:47:17 -05:00
|
|
|
ret = acl_pass(ret);
|
|
|
|
|
if (rule->cond->pol == ACL_COND_UNLESS)
|
|
|
|
|
ret = !ret;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (ret) {
|
|
|
|
|
struct stktable_key *key;
|
|
|
|
|
|
2014-06-25 10:20:53 -04:00
|
|
|
key = stktable_fetch_key(rule->table.t, px, s, &s->txn, SMP_OPT_DIR_RES|SMP_OPT_FINAL, rule->expr, NULL);
|
2010-01-04 09:47:17 -05:00
|
|
|
if (!key)
|
|
|
|
|
continue;
|
|
|
|
|
|
BUG/MEDIUM: stick: completely remove the unused flag from the store entries
The store[] array in the session holds a flag which probably aimed to
differentiate store entries learned from the request from those learned
from the response, and to allow responses to overwrite only the request
ones (eg: have a server set a response cookie which overwrites the request
one).
But this flag is set when response data is stored, and is never cleared.
So in practice, haproxy always runs with this flag set, meaning that
responses prevent themselves from overriding the request data.
It is desirable anyway to keep the ability not to override data, because
the override is performed only based on the table and not on the key,
which would make it impossible to retrieve two different keys to store
into the same table. For example, if a client sets a cookie and a server
another one, both need to be updated in the table in the proper order.
This is especially true when multiple keys may be tracked on each side
into the same table (eg: a list of IP addresses in a header).
So the correct fix, which also maintains the current behaviour, consists
in simply removing this flag and never trying to optimize for the
overwrite case. This fix also has the benefit of significantly reducing
the session size by 64 bytes, due to alignment issues caused by this flag!
The bug has been there forever (since 1.4-dev7), so a backport to 1.4
would be appropriate.
2013-12-06 17:05:21 -05:00
|
|
|
if (s->store_count < (sizeof(s->store) / sizeof(s->store[0]))) {
|
2010-01-04 09:47:17 -05:00
|
|
|
struct stksess *ts;
|
|
|
|
|
|
|
|
|
|
ts = stksess_new(rule->table.t, key);
|
|
|
|
|
if (ts) {
|
|
|
|
|
s->store[s->store_count].table = rule->table.t;
|
|
|
|
|
s->store[s->store_count++].ts = ts;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* process store request and store response */
|
|
|
|
|
for (i = 0; i < s->store_count; i++) {
|
2010-06-06 09:38:59 -04:00
|
|
|
struct stksess *ts;
|
2010-06-06 10:40:39 -04:00
|
|
|
void *ptr;
|
2010-06-06 09:38:59 -04:00
|
|
|
|
2014-05-13 09:54:22 -04:00
|
|
|
if (objt_server(s->target) && objt_server(s->target)->flags & SRV_F_NON_STICK) {
|
2011-06-24 20:39:49 -04:00
|
|
|
stksess_free(s->store[i].table, s->store[i].ts);
|
|
|
|
|
s->store[i].ts = NULL;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2010-06-06 09:38:59 -04:00
|
|
|
ts = stktable_lookup(s->store[i].table, s->store[i].ts);
|
|
|
|
|
if (ts) {
|
|
|
|
|
/* the entry already existed, we can free ours */
|
2010-09-23 12:16:52 -04:00
|
|
|
stktable_touch(s->store[i].table, ts, 1);
|
2010-01-04 09:47:17 -05:00
|
|
|
stksess_free(s->store[i].table, s->store[i].ts);
|
|
|
|
|
}
|
2010-06-06 09:38:59 -04:00
|
|
|
else
|
2010-09-23 12:16:52 -04:00
|
|
|
ts = stktable_store(s->store[i].table, s->store[i].ts, 1);
|
2010-06-06 09:38:59 -04:00
|
|
|
|
|
|
|
|
s->store[i].ts = NULL;
|
2010-06-06 10:40:39 -04:00
|
|
|
ptr = stktable_data_ptr(s->store[i].table, ts, STKTABLE_DT_SERVER_ID);
|
2012-11-11 18:42:33 -05:00
|
|
|
stktable_data_cast(ptr, server_id) = objt_server(s->target)->puid;
|
2010-01-04 09:47:17 -05:00
|
|
|
}
|
2010-06-18 03:57:45 -04:00
|
|
|
s->store_count = 0; /* everything is stored */
|
2010-01-04 09:47:17 -05:00
|
|
|
|
|
|
|
|
rep->analysers &= ~an_bit;
|
|
|
|
|
rep->analyse_exp = TICK_ETERNITY;
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
2010-01-06 17:53:24 -05:00
|
|
|
/* This macro is very specific to the function below. See the comments in
|
|
|
|
|
* process_session() below to understand the logic and the tests.
|
|
|
|
|
*/
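/* A short walk-through of the macro: <real> is the current analyser bit
 * field, <list> the work list, <back> the copy taken before the previous
 * analyser call, and <flag> the analyser which was just run. The first
 * statement removes <flag> from the work list and adds the bits which were
 * not present in <back> (ie: analysers enabled during the call), keeping
 * only those still set in <real>. The expression
 * ((list) ^ ((list) & ((list) - 1))) isolates the lowest bit still set in
 * <list>; if it is lower than <flag>, an analyser located before the
 * current one was enabled, so the scan must restart from the beginning.
 */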
|
|
|
|
|
#define UPDATE_ANALYSERS(real, list, back, flag) { \
|
|
|
|
|
list = (((list) & ~(flag)) | ~(back)) & (real); \
|
|
|
|
|
back = real; \
|
|
|
|
|
if (!(list)) \
|
|
|
|
|
break; \
|
|
|
|
|
if (((list) ^ ((list) & ((list) - 1))) < (flag)) \
|
|
|
|
|
continue; \
|
|
|
|
|
}
|
|
|
|
|
|
2008-11-30 12:47:21 -05:00
|
|
|
/* Processes the client, server, request and response jobs of a session task,
|
|
|
|
|
* then puts it back to the wait queue in a clean state, or cleans up its
|
|
|
|
|
* resources if it must be deleted. Returns in <next> the date the task wants
|
|
|
|
|
* to be woken up, or TICK_ETERNITY. In order not to call all functions for
|
|
|
|
|
* nothing too many times, the request and response buffers flags are monitored
|
|
|
|
|
* and each function is called only if at least another function has changed at
|
|
|
|
|
* least one flag it is interested in.
|
|
|
|
|
*/
|
2009-03-08 04:38:41 -04:00
|
|
|
struct task *process_session(struct task *t)
|
2008-11-30 12:47:21 -05:00
|
|
|
{
|
2011-03-10 10:55:02 -05:00
|
|
|
struct server *srv;
|
2008-11-30 12:47:21 -05:00
|
|
|
struct session *s = t->context;
|
|
|
|
|
unsigned int rqf_last, rpf_last;
|
2010-07-27 11:15:12 -04:00
|
|
|
unsigned int rq_prod_last, rq_cons_last;
|
|
|
|
|
unsigned int rp_cons_last, rp_prod_last;
|
2010-01-06 18:09:04 -05:00
|
|
|
unsigned int req_ana_back;
|
2008-11-30 12:47:21 -05:00
|
|
|
|
|
|
|
|
//DPRINTF(stderr, "%s:%d: cs=%d ss=%d(%d) rqf=0x%08x rpf=0x%08x\n", __FUNCTION__, __LINE__,
|
|
|
|
|
// s->si[0].state, s->si[1].state, s->si[1].err_type, s->req->flags, s->rep->flags);
|
|
|
|
|
|
2010-01-29 13:26:18 -05:00
|
|
|
/* this data may no longer be valid, clear it */
|
|
|
|
|
memset(&s->txn.auth, 0, sizeof(s->txn.auth));
|
|
|
|
|
|
2014-06-23 09:22:31 -04:00
|
|
|
/* This flag must explicitly be set every time */
|
|
|
|
|
s->req->flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE);
|
|
|
|
|
s->rep->flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE);
|
2009-06-21 16:03:51 -04:00
|
|
|
|
|
|
|
|
/* Keep a copy of req/rep flags so that we can detect shutdowns */
|
2012-08-27 17:14:58 -04:00
|
|
|
rqf_last = s->req->flags & ~CF_MASK_ANALYSER;
|
|
|
|
|
rpf_last = s->rep->flags & ~CF_MASK_ANALYSER;
|
2009-06-21 16:03:51 -04:00
|
|
|
|
2009-09-05 14:57:35 -04:00
|
|
|
/* we don't want the stream interface functions to recursively wake us up */
|
|
|
|
|
if (s->req->prod->owner == t)
|
|
|
|
|
s->req->prod->flags |= SI_FL_DONT_WAKE;
|
|
|
|
|
if (s->req->cons->owner == t)
|
|
|
|
|
s->req->cons->flags |= SI_FL_DONT_WAKE;
|
|
|
|
|
|
2008-11-30 12:47:21 -05:00
|
|
|
/* 1a: Check for low level timeouts if needed. We just set a flag on
|
|
|
|
|
* stream interfaces when their timeouts have expired.
|
|
|
|
|
*/
|
|
|
|
|
if (unlikely(t->state & TASK_WOKEN_TIMER)) {
|
|
|
|
|
stream_int_check_timeouts(&s->si[0]);
|
|
|
|
|
stream_int_check_timeouts(&s->si[1]);
|
2009-06-21 16:03:51 -04:00
|
|
|
|
2012-08-27 18:06:31 -04:00
|
|
|
/* check channel timeouts, and close the corresponding stream interfaces
|
2009-06-21 16:03:51 -04:00
|
|
|
* for future reads or writes. Note: this will also concern upper layers
|
|
|
|
|
* but we do not touch any other flag. We must be careful and correctly
|
|
|
|
|
* detect state changes when calling them.
|
|
|
|
|
*/
|
|
|
|
|
|
2012-08-27 18:06:31 -04:00
|
|
|
channel_check_timeouts(s->req);
|
2009-06-21 16:03:51 -04:00
|
|
|
|
2012-08-27 17:14:58 -04:00
|
|
|
if (unlikely((s->req->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
|
2009-12-29 08:49:56 -05:00
|
|
|
s->req->cons->flags |= SI_FL_NOLINGER;
|
2012-05-21 10:31:45 -04:00
|
|
|
si_shutw(s->req->cons);
|
2009-12-29 08:49:56 -05:00
|
|
|
}
|
|
|
|
|
|
2012-08-27 17:14:58 -04:00
|
|
|
if (unlikely((s->req->flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
|
2012-05-13 08:48:59 -04:00
|
|
|
if (s->req->prod->flags & SI_FL_NOHALF)
|
|
|
|
|
s->req->prod->flags |= SI_FL_NOLINGER;
|
2012-05-21 10:31:45 -04:00
|
|
|
si_shutr(s->req->prod);
|
2012-05-13 08:48:59 -04:00
|
|
|
}
|
2009-06-21 16:03:51 -04:00
|
|
|
|
2012-08-27 18:06:31 -04:00
|
|
|
channel_check_timeouts(s->rep);
|
2008-11-30 12:47:21 -05:00
|
|
|
|
2012-08-27 17:14:58 -04:00
|
|
|
if (unlikely((s->rep->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
|
2009-12-29 08:49:56 -05:00
|
|
|
s->rep->cons->flags |= SI_FL_NOLINGER;
|
2012-05-21 10:31:45 -04:00
|
|
|
si_shutw(s->rep->cons);
|
2009-12-29 08:49:56 -05:00
|
|
|
}
|
|
|
|
|
|
2012-08-27 17:14:58 -04:00
|
|
|
if (unlikely((s->rep->flags & (CF_SHUTR|CF_READ_TIMEOUT)) == CF_READ_TIMEOUT)) {
|
2012-05-13 08:48:59 -04:00
|
|
|
if (s->rep->prod->flags & SI_FL_NOHALF)
|
|
|
|
|
s->rep->prod->flags |= SI_FL_NOLINGER;
|
2012-05-21 10:31:45 -04:00
|
|
|
si_shutr(s->rep->prod);
|
2012-05-13 08:48:59 -04:00
|
|
|
}
|
2012-11-08 08:49:17 -05:00
|
|
|
|
|
|
|
|
/* Once in a while we're woken up because the task expires. But
|
|
|
|
|
* this does not necessarily mean that a timeout has been reached.
|
|
|
|
|
* So let's not run a whole session processing if only an expiration
|
|
|
|
|
* timeout needs to be refreshed.
|
|
|
|
|
*/
|
|
|
|
|
if (!((s->req->flags | s->rep->flags) &
|
|
|
|
|
(CF_SHUTR|CF_READ_ACTIVITY|CF_READ_TIMEOUT|CF_SHUTW|
|
|
|
|
|
CF_WRITE_ACTIVITY|CF_WRITE_TIMEOUT|CF_ANA_TIMEOUT)) &&
|
|
|
|
|
!((s->si[0].flags | s->si[1].flags) & (SI_FL_EXP|SI_FL_ERR)) &&
|
|
|
|
|
((t->state & TASK_WOKEN_ANY) == TASK_WOKEN_TIMER))
|
|
|
|
|
goto update_exp_and_leave;
|
2009-06-21 16:03:51 -04:00
|
|
|
}
|
2008-11-30 12:47:21 -05:00
|
|
|
|
|
|
|
|
/* 1b: check for low-level errors reported at the stream interface.
|
|
|
|
|
* First we check if it's a retryable error (in which case we don't
|
|
|
|
|
* want to tell the buffer). Otherwise we report the error one level
|
|
|
|
|
* upper by setting flags into the buffers. Note that the side towards
|
|
|
|
|
* the client cannot have connect (hence retryable) errors. Also, the
|
|
|
|
|
* connection setup code must be able to deal with any type of abort.
|
|
|
|
|
*/
|
2012-11-11 18:42:33 -05:00
|
|
|
srv = objt_server(s->target);
|
2008-11-30 12:47:21 -05:00
|
|
|
if (unlikely(s->si[0].flags & SI_FL_ERR)) {
|
|
|
|
|
if (s->si[0].state == SI_ST_EST || s->si[0].state == SI_ST_DIS) {
|
2012-05-21 10:31:45 -04:00
|
|
|
si_shutr(&s->si[0]);
|
|
|
|
|
si_shutw(&s->si[0]);
|
2008-11-30 12:47:21 -05:00
|
|
|
stream_int_report_error(&s->si[0]);
|
2008-12-14 05:44:04 -05:00
|
|
|
if (!(s->req->analysers) && !(s->rep->analysers)) {
|
2011-03-10 17:25:56 -05:00
|
|
|
s->be->be_counters.cli_aborts++;
|
|
|
|
|
s->fe->fe_counters.cli_aborts++;
|
2011-03-10 10:55:02 -05:00
|
|
|
if (srv)
|
|
|
|
|
srv->counters.cli_aborts++;
|
2008-12-14 05:44:04 -05:00
|
|
|
if (!(s->flags & SN_ERR_MASK))
|
|
|
|
|
s->flags |= SN_ERR_CLICL;
|
|
|
|
|
if (!(s->flags & SN_FINST_MASK))
|
|
|
|
|
s->flags |= SN_FINST_D;
|
|
|
|
|
}
|
2008-11-30 12:47:21 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (unlikely(s->si[1].flags & SI_FL_ERR)) {
|
|
|
|
|
if (s->si[1].state == SI_ST_EST || s->si[1].state == SI_ST_DIS) {
|
2012-05-21 10:31:45 -04:00
|
|
|
si_shutr(&s->si[1]);
|
|
|
|
|
si_shutw(&s->si[1]);
|
2008-11-30 12:47:21 -05:00
|
|
|
stream_int_report_error(&s->si[1]);
|
2011-03-10 17:25:56 -05:00
|
|
|
s->be->be_counters.failed_resp++;
|
2011-03-10 10:55:02 -05:00
|
|
|
if (srv)
|
|
|
|
|
srv->counters.failed_resp++;
|
2008-12-14 05:44:04 -05:00
|
|
|
if (!(s->req->analysers) && !(s->rep->analysers)) {
|
2011-03-10 17:25:56 -05:00
|
|
|
s->be->be_counters.srv_aborts++;
|
|
|
|
|
s->fe->fe_counters.srv_aborts++;
|
2011-03-10 10:55:02 -05:00
|
|
|
if (srv)
|
|
|
|
|
srv->counters.srv_aborts++;
|
2008-12-14 05:44:04 -05:00
|
|
|
if (!(s->flags & SN_ERR_MASK))
|
|
|
|
|
s->flags |= SN_ERR_SRVCL;
|
|
|
|
|
if (!(s->flags & SN_FINST_MASK))
|
|
|
|
|
s->flags |= SN_FINST_D;
|
|
|
|
|
}
|
2008-11-30 12:47:21 -05:00
|
|
|
}
|
|
|
|
|
/* note: maybe we should process connection errors here ? */
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (s->si[1].state == SI_ST_CON) {
|
|
|
|
|
/* we were trying to establish a connection on the server side,
|
|
|
|
|
* maybe it succeeded, maybe it failed, maybe we timed out, ...
|
|
|
|
|
*/
|
|
|
|
|
if (unlikely(!sess_update_st_con_tcp(s, &s->si[1])))
|
|
|
|
|
sess_update_st_cer(s, &s->si[1]);
|
|
|
|
|
else if (s->si[1].state == SI_ST_EST)
|
|
|
|
|
sess_establish(s, &s->si[1]);
|
|
|
|
|
|
|
|
|
|
/* state is now one of SI_ST_CON (still in progress), SI_ST_EST
|
|
|
|
|
* (established), SI_ST_DIS (abort), SI_ST_CLO (last error),
|
|
|
|
|
* SI_ST_ASS/SI_ST_TAR/SI_ST_REQ for retryable errors.
|
|
|
|
|
*/
|
|
|
|
|
}
|
|
|
|
|
|
2010-07-27 11:15:12 -04:00
|
|
|
rq_prod_last = s->si[0].state;
|
|
|
|
|
rq_cons_last = s->si[1].state;
|
|
|
|
|
rp_cons_last = s->si[0].state;
|
|
|
|
|
rp_prod_last = s->si[1].state;
|
|
|
|
|
|
|
|
|
|
resync_stream_interface:
|
2008-11-30 12:47:21 -05:00
|
|
|
/* Check for connection closure */
|
|
|
|
|
|
|
|
|
|
DPRINTF(stderr,
|
2012-03-01 12:19:58 -05:00
|
|
|
"[%u] %s:%d: task=%p s=%p, sfl=0x%08x, rq=%p, rp=%p, exp(r,w)=%u,%u rqf=%08x rpf=%08x rqh=%d rqt=%d rph=%d rpt=%d cs=%d ss=%d, cet=0x%x set=0x%x retr=%d\n",
|
2008-11-30 12:47:21 -05:00
|
|
|
now_ms, __FUNCTION__, __LINE__,
|
|
|
|
|
t,
|
|
|
|
|
s, s->flags,
|
|
|
|
|
s->req, s->rep,
|
|
|
|
|
s->req->rex, s->rep->wex,
|
|
|
|
|
s->req->flags, s->rep->flags,
|
2012-10-12 17:49:43 -04:00
|
|
|
s->req->buf->i, s->req->buf->o, s->rep->buf->i, s->rep->buf->o, s->rep->cons->state, s->req->cons->state,
|
2008-11-30 12:47:21 -05:00
|
|
|
s->rep->cons->err_type, s->req->cons->err_type,
|
2010-06-01 03:51:00 -04:00
|
|
|
s->req->cons->conn_retries);
|
2008-11-30 12:47:21 -05:00
|
|
|
|
|
|
|
|
/* nothing special to be done on client side */
|
|
|
|
|
if (unlikely(s->req->prod->state == SI_ST_DIS))
|
|
|
|
|
s->req->prod->state = SI_ST_CLO;
|
|
|
|
|
|
|
|
|
|
/* When a server-side connection is released, we have to count it and
|
|
|
|
|
* check for pending connections on this server.
|
|
|
|
|
*/
|
|
|
|
|
if (unlikely(s->req->cons->state == SI_ST_DIS)) {
|
|
|
|
|
s->req->cons->state = SI_ST_CLO;
|
2012-11-11 18:42:33 -05:00
|
|
|
srv = objt_server(s->target);
|
2011-03-10 10:55:02 -05:00
|
|
|
if (srv) {
|
2008-11-30 12:47:21 -05:00
|
|
|
if (s->flags & SN_CURR_SESS) {
|
|
|
|
|
s->flags &= ~SN_CURR_SESS;
|
2011-03-10 10:55:02 -05:00
|
|
|
srv->cur_sess--;
|
2008-11-30 12:47:21 -05:00
|
|
|
}
|
|
|
|
|
sess_change_server(s, NULL);
|
2011-03-10 10:55:02 -05:00
|
|
|
if (may_dequeue_tasks(srv, s->be))
|
|
|
|
|
process_srv_queue(srv);
|
2008-11-30 12:47:21 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Note: of the transient states (REQ, CER, DIS), only REQ may remain
|
|
|
|
|
* at this point.
|
|
|
|
|
*/
|
|
|
|
|
|
2009-03-08 14:20:25 -04:00
|
|
|
resync_request:
|
2008-11-30 12:47:21 -05:00
|
|
|
/* Analyse request */
|
2012-08-27 17:14:58 -04:00
|
|
|
if (((s->req->flags & ~rqf_last) & CF_MASK_ANALYSER) ||
|
|
|
|
|
((s->req->flags ^ rqf_last) & CF_MASK_STATIC) ||
|
2010-07-27 11:15:12 -04:00
|
|
|
s->si[0].state != rq_prod_last ||
|
|
|
|
|
s->si[1].state != rq_cons_last) {
|
2008-11-30 12:47:21 -05:00
|
|
|
unsigned int flags = s->req->flags;
|
|
|
|
|
|
|
|
|
|
if (s->req->prod->state >= SI_ST_EST) {
|
2010-01-07 18:32:27 -05:00
|
|
|
int max_loops = global.tune.maxpollevents;
|
2010-01-06 17:53:24 -05:00
|
|
|
unsigned int ana_list;
|
|
|
|
|
unsigned int ana_back;
|
2009-06-28 13:37:53 -04:00
|
|
|
|
2010-01-06 18:20:41 -05:00
|
|
|
/* it's up to the analysers to stop new connections,
|
|
|
|
|
* disable reading or closing. Note: if an analyser
|
|
|
|
|
* disables any of these bits, it is responsible for
|
|
|
|
|
* enabling them again when it disables itself, so
|
|
|
|
|
* that other analysers are called in similar conditions.
|
|
|
|
|
*/
|
2012-08-27 18:06:31 -04:00
|
|
|
channel_auto_read(s->req);
|
|
|
|
|
channel_auto_connect(s->req);
|
|
|
|
|
channel_auto_close(s->req);
|
2008-11-30 17:15:34 -05:00
|
|
|
|
|
|
|
|
/* We will call all analysers for which a bit is set in
|
|
|
|
|
* s->req->analysers, following the bit order from LSB
|
|
|
|
|
* to MSB. The analysers must remove themselves from
|
2009-06-28 13:37:53 -04:00
|
|
|
* the list when not needed. Any analyser may return 0
|
|
|
|
|
* to break out of the loop, either because of missing
|
|
|
|
|
* data to take a decision, or because it decides to
|
|
|
|
|
* kill the session. We loop at least once through each
|
|
|
|
|
* analyser, and we may loop again if other analysers
|
|
|
|
|
* are added in the middle.
|
2010-01-06 17:53:24 -05:00
|
|
|
*
|
|
|
|
|
* We build a list of analysers to run. We evaluate all
|
|
|
|
|
* of these analysers in the order of the lower bit to
|
|
|
|
|
* the higher bit. This ordering is very important.
|
|
|
|
|
* An analyser will often add/remove other analysers,
|
|
|
|
|
* including itself. Any changes to itself have no effect
|
|
|
|
|
* on the loop. If it removes any other analysers, we
|
|
|
|
|
* want those analysers not to be called anymore during
|
|
|
|
|
* this loop. If it adds an analyser that is located
|
|
|
|
|
* after itself, we want it to be scheduled for being
|
|
|
|
|
* processed during the loop. If it adds an analyser
|
|
|
|
|
* which is located before it, we want it to switch to
|
|
|
|
|
* it immediately, even if it has already been called
|
|
|
|
|
* once but removed since.
|
|
|
|
|
*
|
|
|
|
|
* In order to achieve this, we compare the analyser
|
|
|
|
|
* list after the call with a copy of it before the
|
|
|
|
|
* call. The work list is fed with analyser bits that
|
|
|
|
|
* appeared during the call. Then we compare previous
|
|
|
|
|
* work list with the new one, and check the bits that
|
|
|
|
|
* appeared. If the lowest of these bits is lower than
|
|
|
|
|
* the current bit, it means we have enabled a previous
|
|
|
|
|
* analyser and must immediately loop again.
|
2008-11-30 17:15:34 -05:00
|
|
|
*/
|
2009-06-28 13:37:53 -04:00
|
|
|
|
2010-01-06 17:53:24 -05:00
|
|
|
ana_list = ana_back = s->req->analysers;
|
2010-01-07 18:32:27 -05:00
|
|
|
while (ana_list && max_loops--) {
|
2010-01-06 17:53:24 -05:00
|
|
|
/* Warning! ensure that analysers are always placed in ascending order! */
|
|
|
|
|
|
2010-08-03 08:02:05 -04:00
|
|
|
if (ana_list & AN_REQ_INSPECT_FE) {
|
|
|
|
|
if (!tcp_inspect_request(s, s->req, AN_REQ_INSPECT_FE))
|
2008-11-30 17:15:34 -05:00
|
|
|
break;
|
2010-08-03 08:02:05 -04:00
|
|
|
UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_INSPECT_FE);
|
2009-06-28 13:37:53 -04:00
|
|
|
}
|
2008-11-30 17:15:34 -05:00
|
|
|
|
2010-01-06 17:53:24 -05:00
|
|
|
if (ana_list & AN_REQ_WAIT_HTTP) {
|
2009-07-07 04:55:49 -04:00
|
|
|
if (!http_wait_for_request(s, s->req, AN_REQ_WAIT_HTTP))
|
2009-07-07 04:14:51 -04:00
|
|
|
break;
|
2010-01-06 17:53:24 -05:00
|
|
|
UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_WAIT_HTTP);
|
2009-07-07 04:14:51 -04:00
|
|
|
}
|
|
|
|
|
|
2010-01-06 17:53:24 -05:00
|
|
|
if (ana_list & AN_REQ_HTTP_PROCESS_FE) {
|
2009-07-07 09:10:31 -04:00
|
|
|
if (!http_process_req_common(s, s->req, AN_REQ_HTTP_PROCESS_FE, s->fe))
|
|
|
|
|
break;
|
2010-01-06 17:53:24 -05:00
|
|
|
UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_HTTP_PROCESS_FE);
|
2009-07-07 09:10:31 -04:00
|
|
|
}
|
|
|
|
|
|
2010-01-06 17:53:24 -05:00
|
|
|
if (ana_list & AN_REQ_SWITCHING_RULES) {
|
2009-07-07 09:10:31 -04:00
|
|
|
if (!process_switching_rules(s, s->req, AN_REQ_SWITCHING_RULES))
|
|
|
|
|
break;
|
2010-01-06 17:53:24 -05:00
|
|
|
UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_SWITCHING_RULES);
|
2009-07-07 09:10:31 -04:00
|
|
|
}
|
|
|
|
|
|
2010-08-03 08:02:05 -04:00
|
|
|
if (ana_list & AN_REQ_INSPECT_BE) {
|
|
|
|
|
if (!tcp_inspect_request(s, s->req, AN_REQ_INSPECT_BE))
|
|
|
|
|
break;
|
|
|
|
|
UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_INSPECT_BE);
|
|
|
|
|
}
|
|
|
|
|
|
2010-01-06 17:53:24 -05:00
|
|
|
if (ana_list & AN_REQ_HTTP_PROCESS_BE) {
|
2009-07-07 09:10:31 -04:00
|
|
|
if (!http_process_req_common(s, s->req, AN_REQ_HTTP_PROCESS_BE, s->be))
|
|
|
|
|
break;
|
2010-01-06 17:53:24 -05:00
|
|
|
UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_HTTP_PROCESS_BE);
|
2009-07-07 09:10:31 -04:00
|
|
|
}
|
|
|
|
|
|
2010-01-06 17:53:24 -05:00
|
|
|
if (ana_list & AN_REQ_HTTP_TARPIT) {
|
2009-07-07 04:55:49 -04:00
|
|
|
if (!http_process_tarpit(s, s->req, AN_REQ_HTTP_TARPIT))
|
2008-11-30 17:28:40 -05:00
|
|
|
break;
|
2010-01-06 17:53:24 -05:00
|
|
|
UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_HTTP_TARPIT);
|
2009-06-28 13:37:53 -04:00
|
|
|
}
|
2008-11-30 17:28:40 -05:00
|
|
|
|
2012-04-05 15:09:48 -04:00
|
|
|
if (ana_list & AN_REQ_SRV_RULES) {
|
|
|
|
|
if (!process_server_rules(s, s->req, AN_REQ_SRV_RULES))
|
|
|
|
|
break;
|
|
|
|
|
UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_SRV_RULES);
|
|
|
|
|
}
|
|
|
|
|
|
2010-01-06 17:53:24 -05:00
|
|
|
if (ana_list & AN_REQ_HTTP_INNER) {
|
2009-08-30 18:17:18 -04:00
|
|
|
if (!http_process_request(s, s->req, AN_REQ_HTTP_INNER))
|
|
|
|
|
break;
|
2010-01-06 17:53:24 -05:00
|
|
|
UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_HTTP_INNER);
|
2009-08-30 18:17:18 -04:00
|
|
|
}
|
|
|
|
|
|
2010-01-06 17:53:24 -05:00
|
|
|
if (ana_list & AN_REQ_HTTP_BODY) {
|
2014-04-10 05:16:06 -04:00
|
|
|
if (!http_wait_for_request_body(s, s->req, AN_REQ_HTTP_BODY))
|
2008-11-30 17:36:37 -05:00
|
|
|
break;
|
2010-01-06 17:53:24 -05:00
|
|
|
UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_HTTP_BODY);
|
2009-06-28 13:37:53 -04:00
|
|
|
}
|
2009-06-30 11:57:00 -04:00
|
|
|
|
2010-01-06 17:53:24 -05:00
|
|
|
if (ana_list & AN_REQ_PRST_RDP_COOKIE) {
|
2009-06-30 11:57:00 -04:00
|
|
|
if (!tcp_persist_rdp_cookie(s, s->req, AN_REQ_PRST_RDP_COOKIE))
|
|
|
|
|
break;
|
2010-01-06 17:53:24 -05:00
|
|
|
UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_PRST_RDP_COOKIE);
|
2009-06-30 11:57:00 -04:00
|
|
|
}
|
2009-12-27 16:54:55 -05:00
|
|
|
|
2010-01-04 09:47:17 -05:00
|
|
|
if (ana_list & AN_REQ_STICKING_RULES) {
|
|
|
|
|
if (!process_sticking_rules(s, s->req, AN_REQ_STICKING_RULES))
|
|
|
|
|
break;
|
|
|
|
|
UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_STICKING_RULES);
|
|
|
|
|
}
|
|
|
|
|
|
2010-01-06 17:53:24 -05:00
|
|
|
if (ana_list & AN_REQ_HTTP_XFER_BODY) {
|
2009-12-27 16:54:55 -05:00
|
|
|
if (!http_request_forward_body(s, s->req, AN_REQ_HTTP_XFER_BODY))
|
|
|
|
|
break;
|
2010-01-06 17:53:24 -05:00
|
|
|
UPDATE_ANALYSERS(s->req->analysers, ana_list, ana_back, AN_REQ_HTTP_XFER_BODY);
|
2009-12-27 16:54:55 -05:00
|
|
|
}
|
2010-01-07 18:32:27 -05:00
|
|
|
break;
|
|
|
|
|
}
|
2008-11-30 12:47:21 -05:00
|
|
|
}
|
2009-03-15 17:34:05 -04:00
|
|
|
|
2010-07-27 11:15:12 -04:00
|
|
|
rq_prod_last = s->si[0].state;
|
|
|
|
|
rq_cons_last = s->si[1].state;
|
2012-08-27 17:14:58 -04:00
|
|
|
s->req->flags &= ~CF_WAKE_ONCE;
|
2010-07-27 11:15:12 -04:00
|
|
|
rqf_last = s->req->flags;
|
|
|
|
|
|
2012-08-27 17:14:58 -04:00
|
|
|
if ((s->req->flags ^ flags) & CF_MASK_STATIC)
|
2009-06-21 16:43:05 -04:00
|
|
|
goto resync_request;
|
|
|
|
|
}
|
|
|
|
|
|
2010-01-06 18:09:04 -05:00
|
|
|
/* we'll monitor the request analysers while parsing the response,
|
|
|
|
|
* because some response analysers may indirectly enable new request
|
|
|
|
|
* analysers (eg: HTTP keep-alive).
|
|
|
|
|
*/
|
|
|
|
|
req_ana_back = s->req->analysers;
|
|
|
|
|
|
2009-06-21 16:43:05 -04:00
|
|
|
resync_response:
|
|
|
|
|
/* Analyse response */
|
|
|
|
|
|
2012-11-11 17:05:39 -05:00
|
|
|
if (((s->rep->flags & ~rpf_last) & CF_MASK_ANALYSER) ||
|
2014-06-23 09:22:31 -04:00
|
|
|
(s->rep->flags ^ rpf_last) & CF_MASK_STATIC ||
|
|
|
|
|
s->si[0].state != rp_cons_last ||
|
|
|
|
|
s->si[1].state != rp_prod_last) {
|
2009-06-21 16:43:05 -04:00
|
|
|
unsigned int flags = s->rep->flags;
|
|
|
|
|
|
2012-08-27 17:14:58 -04:00
|
|
|
if ((s->rep->flags & CF_MASK_ANALYSER) &&
|
2010-12-17 01:13:42 -05:00
|
|
|
(s->rep->analysers & AN_REQ_WAIT_HTTP)) {
|
|
|
|
|
/* Due to HTTP pipelining, the HTTP request analyser might be waiting
|
|
|
|
|
* for some free space in the response buffer, so we might need to call
|
|
|
|
|
* it when something changes in the response buffer, but still we pass
|
|
|
|
|
* it the request buffer. Note that the SI state might very well still
|
|
|
|
|
* be zero due to us returning a flow of redirects!
|
|
|
|
|
*/
|
|
|
|
|
s->rep->analysers &= ~AN_REQ_WAIT_HTTP;
|
2012-08-27 17:14:58 -04:00
|
|
|
s->req->flags |= CF_WAKE_ONCE;
|
2010-12-17 01:13:42 -05:00
|
|
|
}
|
|
|
|
|
|
2009-06-21 16:43:05 -04:00
|
|
|
if (s->rep->prod->state >= SI_ST_EST) {
|
2010-01-07 18:32:27 -05:00
|
|
|
int max_loops = global.tune.maxpollevents;
|
2010-01-06 17:53:24 -05:00
|
|
|
unsigned int ana_list;
|
|
|
|
|
unsigned int ana_back;
|
2009-10-18 16:53:08 -04:00
|
|
|
|
2010-01-06 18:20:41 -05:00
|
|
|
/* it's up to the analysers to disable reading or
|
|
|
|
|
* closing. Note: if an analyser disables any of these
|
|
|
|
|
* bits, it is responsible for enabling them again when
|
|
|
|
|
* it disables itself, so that other analysers are called
|
|
|
|
|
* in similar conditions.
|
|
|
|
|
*/
|
2012-08-27 18:06:31 -04:00
|
|
|
channel_auto_read(s->rep);
|
|
|
|
|
channel_auto_close(s->rep);
|
2009-10-18 16:53:08 -04:00
|
|
|
|
|
|
|
|
/* We will call all analysers for which a bit is set in
|
|
|
|
|
* s->rep->analysers, following the bit order from LSB
|
|
|
|
|
* to MSB. The analysers must remove themselves from
|
|
|
|
|
* the list when not needed. Any analyser may return 0
|
|
|
|
|
* to break out of the loop, either because of missing
|
|
|
|
|
* data to take a decision, or because it decides to
|
|
|
|
|
* kill the session. We loop at least once through each
|
|
|
|
|
* analyser, and we may loop again if other analysers
|
|
|
|
|
* are added in the middle.
|
|
|
|
|
*/
|
|
|
|
|
|
2010-01-06 17:53:24 -05:00
|
|
|
ana_list = ana_back = s->rep->analysers;
|
2010-01-07 18:32:27 -05:00
|
|
|
while (ana_list && max_loops--) {
|
2010-01-06 17:53:24 -05:00
|
|
|
/* Warning! ensure that analysers are always placed in ascending order! */
|
|
|
|
|
|
2010-09-23 11:56:44 -04:00
|
|
|
if (ana_list & AN_RES_INSPECT) {
|
|
|
|
|
if (!tcp_inspect_response(s, s->rep, AN_RES_INSPECT))
|
|
|
|
|
break;
|
|
|
|
|
UPDATE_ANALYSERS(s->rep->analysers, ana_list, ana_back, AN_RES_INSPECT);
|
|
|
|
|
}
|
|
|
|
|
|
2010-01-06 17:53:24 -05:00
|
|
|
if (ana_list & AN_RES_WAIT_HTTP) {
|
2009-10-18 16:53:08 -04:00
|
|
|
if (!http_wait_for_response(s, s->rep, AN_RES_WAIT_HTTP))
|
|
|
|
|
break;
|
2010-01-06 17:53:24 -05:00
|
|
|
UPDATE_ANALYSERS(s->rep->analysers, ana_list, ana_back, AN_RES_WAIT_HTTP);
|
2009-10-18 16:53:08 -04:00
|
|
|
}
|
|
|
|
|
|
2010-01-04 09:47:17 -05:00
|
|
|
if (ana_list & AN_RES_STORE_RULES) {
|
|
|
|
|
if (!process_store_rules(s, s->rep, AN_RES_STORE_RULES))
|
|
|
|
|
break;
|
|
|
|
|
UPDATE_ANALYSERS(s->rep->analysers, ana_list, ana_back, AN_RES_STORE_RULES);
|
|
|
|
|
}
|
|
|
|
|
|
2010-01-06 17:53:24 -05:00
|
|
|
if (ana_list & AN_RES_HTTP_PROCESS_BE) {
|
2009-10-18 16:53:08 -04:00
|
|
|
if (!http_process_res_common(s, s->rep, AN_RES_HTTP_PROCESS_BE, s->be))
|
|
|
|
|
break;
|
2010-01-06 17:53:24 -05:00
|
|
|
UPDATE_ANALYSERS(s->rep->analysers, ana_list, ana_back, AN_RES_HTTP_PROCESS_BE);
|
2009-10-18 16:53:08 -04:00
|
|
|
}
|
2009-12-27 16:54:55 -05:00
|
|
|
|
2010-01-06 17:53:24 -05:00
|
|
|
if (ana_list & AN_RES_HTTP_XFER_BODY) {
|
2009-12-27 16:54:55 -05:00
|
|
|
if (!http_response_forward_body(s, s->rep, AN_RES_HTTP_XFER_BODY))
|
|
|
|
|
break;
|
2010-01-06 17:53:24 -05:00
|
|
|
UPDATE_ANALYSERS(s->rep->analysers, ana_list, ana_back, AN_RES_HTTP_XFER_BODY);
|
2009-12-27 16:54:55 -05:00
|
|
|
}
|
2010-01-07 18:32:27 -05:00
|
|
|
break;
|
|
|
|
|
}
|
2009-06-21 16:43:05 -04:00
|
|
|
}
|
|
|
|
|
|
2010-07-27 11:15:12 -04:00
|
|
|
rp_cons_last = s->si[0].state;
|
|
|
|
|
rp_prod_last = s->si[1].state;
|
|
|
|
|
rpf_last = s->rep->flags;
|
|
|
|
|
|
2012-08-27 17:14:58 -04:00
|
|
|
if ((s->rep->flags ^ flags) & CF_MASK_STATIC)
|
2009-06-21 16:43:05 -04:00
|
|
|
goto resync_response;
|
|
|
|
|
}
|
|
|
|
|
|
2010-01-06 18:09:04 -05:00
|
|
|
/* maybe someone has added some request analysers, so we must check and loop */
|
|
|
|
|
if (s->req->analysers & ~req_ana_back)
|
|
|
|
|
goto resync_request;
|
|
|
|
|
|
2012-08-27 17:14:58 -04:00
|
|
|
if ((s->req->flags & ~rqf_last) & CF_MASK_ANALYSER)
|
2010-12-17 01:13:42 -05:00
|
|
|
goto resync_request;
|
|
|
|
|
|
2009-06-21 16:43:05 -04:00
|
|
|
/* FIXME: here we should call protocol handlers which rely on
|
|
|
|
|
* both buffers.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
	/*
	 * Now we propagate unhandled errors to the session. Normally
	 * we're just in a data phase here since it means we have not
	 * seen any analyser who could set an error status.
	 */
	srv = objt_server(s->target);
	if (unlikely(!(s->flags & SN_ERR_MASK))) {
		if (s->req->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
			/* Report it if the client got an error or a read timeout expired */
			s->req->analysers = 0;
			if (s->req->flags & CF_READ_ERROR) {
				s->be->be_counters.cli_aborts++;
				s->fe->fe_counters.cli_aborts++;
				if (srv)
					srv->counters.cli_aborts++;
				s->flags |= SN_ERR_CLICL;
			}
			else if (s->req->flags & CF_READ_TIMEOUT) {
				s->be->be_counters.cli_aborts++;
				s->fe->fe_counters.cli_aborts++;
				if (srv)
					srv->counters.cli_aborts++;
				s->flags |= SN_ERR_CLITO;
			}
			else if (s->req->flags & CF_WRITE_ERROR) {
				s->be->be_counters.srv_aborts++;
				s->fe->fe_counters.srv_aborts++;
				if (srv)
					srv->counters.srv_aborts++;
				s->flags |= SN_ERR_SRVCL;
			}
			else {
				s->be->be_counters.srv_aborts++;
				s->fe->fe_counters.srv_aborts++;
				if (srv)
					srv->counters.srv_aborts++;
				s->flags |= SN_ERR_SRVTO;
			}
			sess_set_term_flags(s);
		}
		else if (s->rep->flags & (CF_READ_ERROR|CF_READ_TIMEOUT|CF_WRITE_ERROR|CF_WRITE_TIMEOUT)) {
			/* Report it if the server got an error or a read timeout expired */
			s->rep->analysers = 0;
			if (s->rep->flags & CF_READ_ERROR) {
				s->be->be_counters.srv_aborts++;
				s->fe->fe_counters.srv_aborts++;
				if (srv)
					srv->counters.srv_aborts++;
				s->flags |= SN_ERR_SRVCL;
			}
			else if (s->rep->flags & CF_READ_TIMEOUT) {
				s->be->be_counters.srv_aborts++;
				s->fe->fe_counters.srv_aborts++;
				if (srv)
					srv->counters.srv_aborts++;
				s->flags |= SN_ERR_SRVTO;
			}
			else if (s->rep->flags & CF_WRITE_ERROR) {
				s->be->be_counters.cli_aborts++;
				s->fe->fe_counters.cli_aborts++;
				if (srv)
					srv->counters.cli_aborts++;
				s->flags |= SN_ERR_CLICL;
			}
			else {
				s->be->be_counters.cli_aborts++;
				s->fe->fe_counters.cli_aborts++;
				if (srv)
					srv->counters.cli_aborts++;
				s->flags |= SN_ERR_CLITO;
			}
			sess_set_term_flags(s);
		}
	}
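	/* Summary of the mapping applied above (it only restates the checks
	 * themselves): a read error or read timeout on the request channel is
	 * attributed to the client (SN_ERR_CLICL/CLITO), a write error or
	 * timeout on that channel to the server (SN_ERR_SRVCL/SRVTO); the
	 * response channel is handled as the mirror image.
	 */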
	/*
	 * Here we take care of forwarding unhandled data. This also includes
	 * connection establishments and shutdown requests.
	 */

	/* If no one is interested in analysing data, it's time to forward
	 * everything. We configure the buffer to forward indefinitely.
	 * Note that we're checking CF_SHUTR_NOW as an indication of a possible
	 * recent call to channel_abort().
	 */
	if (unlikely(!s->req->analysers &&
	    !(s->req->flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
	    (s->req->prod->state >= SI_ST_EST) &&
	    (s->req->to_forward != CHN_INFINITE_FORWARD))) {
		/* This buffer is freewheeling, there's no analyser
		 * attached to it. If any data are left in, we'll permit them to
		 * move.
		 */
		channel_auto_read(s->req);
		channel_auto_connect(s->req);
		channel_auto_close(s->req);
		buffer_flush(s->req->buf);

		/* We'll let data flow between the producer (if still connected)
		 * to the consumer (which might possibly not be connected yet).
		 */
		if (!(s->req->flags & (CF_SHUTR|CF_SHUTW_NOW)))
			channel_forward(s->req, CHN_INFINITE_FORWARD);
	}

	/* check if it is wise to enable kernel splicing to forward request data */
	if (!(s->req->flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
	    s->req->to_forward &&
	    (global.tune.options & GTUNE_USE_SPLICE) &&
	    (objt_conn(s->si[0].end) && __objt_conn(s->si[0].end)->xprt && __objt_conn(s->si[0].end)->xprt->rcv_pipe) &&
	    (objt_conn(s->si[1].end) && __objt_conn(s->si[1].end)->xprt && __objt_conn(s->si[1].end)->xprt->snd_pipe) &&
	    (pipes_used < global.maxpipes) &&
	    (((s->fe->options2|s->be->options2) & PR_O2_SPLIC_REQ) ||
	     (((s->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
	      (s->req->flags & CF_STREAMER_FAST)))) {
		s->req->flags |= CF_KERN_SPLICING;
	}

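	/* Note: the PR_O2_SPLIC_REQ/RTR/AUT bits tested above are expected to
	 * come from the "option splice-request", "option splice-response" and
	 * "option splice-auto" proxy options, and GTUNE_USE_SPLICE plus
	 * global.maxpipes from the global tuning section (assumption based on
	 * the option names, not re-checked against the config parser here).
	 */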
	/* reflect what the L7 analysers have seen last */
	rqf_last = s->req->flags;

	/*
	 * Now forward all shutdown requests between both sides of the buffer
	 */

	/* first, let's check if the request buffer needs to shutdown(write), which may
	 * happen either because the input is closed or because we want to force a close
	 * once the server has begun to respond. If a half-closed timeout is set, we adjust
	 * the other side's timeout as well.
	 */
	if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
		     (CF_AUTO_CLOSE|CF_SHUTR))) {
		channel_shutw_now(s->req);
		if (tick_isset(s->fe->timeout.clientfin)) {
			s->rep->wto = s->fe->timeout.clientfin;
			s->rep->wex = tick_add(now_ms, s->rep->wto);
		}
	}

	/* shutdown(write) pending */
	if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
		     channel_is_empty(s->req))) {
		if (s->req->flags & CF_READ_ERROR)
			s->req->cons->flags |= SI_FL_NOLINGER;
		si_shutw(s->req->cons);
		if (tick_isset(s->be->timeout.serverfin)) {
			s->rep->rto = s->be->timeout.serverfin;
			s->rep->rex = tick_add(now_ms, s->rep->rto);
		}
	}

	/* shutdown(write) done on server side, we must stop the client too */
	if (unlikely((s->req->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW &&
		     !s->req->analysers))
		channel_shutr_now(s->req);

	/* shutdown(read) pending */
	if (unlikely((s->req->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
		if (s->req->prod->flags & SI_FL_NOHALF)
			s->req->prod->flags |= SI_FL_NOLINGER;
		si_shutr(s->req->prod);
		if (tick_isset(s->fe->timeout.clientfin)) {
			s->rep->wto = s->fe->timeout.clientfin;
			s->rep->wex = tick_add(now_ms, s->rep->wto);
		}
	}

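	/* The clientfin/serverfin values applied above are the half-close
	 * timeouts; they are expected to map to the "timeout client-fin" and
	 * "timeout server-fin" directives (assumed from the field names).
	 */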
	/* it's possible that an upper layer has requested a connection setup or abort.
	 * There are 2 situations where we decide to establish a new connection :
	 *  - there are data scheduled for emission in the buffer
	 *  - the CF_AUTO_CONNECT flag is set (active connection)
	 */
	if (s->req->cons->state == SI_ST_INI) {
		if (!(s->req->flags & CF_SHUTW)) {
			if ((s->req->flags & CF_AUTO_CONNECT) || !channel_is_empty(s->req)) {
				/* If we have an appctx, there is no connect method, so we
				 * immediately switch to the connected state, otherwise we
				 * perform a connection request.
				 */
				s->req->cons->state = SI_ST_REQ; /* new connection requested */
				s->req->cons->conn_retries = s->be->conn_retries;
			}
		}
		else {
			s->req->cons->state = SI_ST_CLO; /* shutw+ini = abort */
			channel_shutw_now(s->req);        /* fix buffer flags upon abort */
			channel_shutr_now(s->rep);
		}
	}

	/* we may have a pending connection request, or a connection waiting
	 * for completion.
	 */
	if (s->si[1].state >= SI_ST_REQ && s->si[1].state < SI_ST_CON) {
		do {
			/* nb: step 1 might switch from QUE to ASS, but we first want
			 * to give a chance to step 2 to perform a redirect if needed.
			 */
			if (s->si[1].state != SI_ST_REQ)
				sess_update_stream_int(s, &s->si[1]);
			if (s->si[1].state == SI_ST_REQ)
				sess_prepare_conn_req(s, &s->si[1]);

			/* applets directly go to the ESTABLISHED state. Similarly,
			 * servers experience the same fate when their connection
			 * is reused.
			 */
			if (unlikely(s->si[1].state == SI_ST_EST))
				sess_establish(s, &s->si[1]);

			/* Now we can add the server name to a header (if requested) */
			/* check for HTTP mode and proxy server_id_hdr_name != NULL */
			if ((s->si[1].state >= SI_ST_CON) &&
			    (s->be->server_id_hdr_name != NULL) &&
			    (s->be->mode == PR_MODE_HTTP) &&
			    objt_server(s->target)) {
				http_send_name_header(&s->txn, s->be, objt_server(s->target)->id);
			}

			srv = objt_server(s->target);
			if (s->si[1].state == SI_ST_ASS && srv && srv->rdr_len && (s->flags & SN_REDIRECTABLE))
				http_perform_server_redirect(s, &s->si[1]);
		} while (s->si[1].state == SI_ST_ASS);
	}

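	/* server_id_hdr_name is expected to be set by the backend's
	 * "http-send-name-header" directive (assumption based on the field
	 * name); when present, the assigned server's name is inserted into
	 * the request by http_send_name_header().
	 */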
	/* Benchmarks have shown that it's optimal to do a full resync now */
	if (s->req->prod->state == SI_ST_DIS || s->req->cons->state == SI_ST_DIS)
		goto resync_stream_interface;

	/* otherwise we want to check if we need to resync the req buffer or not */
	if ((s->req->flags ^ rqf_last) & CF_MASK_STATIC)
		goto resync_request;

	/* perform output updates to the response buffer */

	/* If no one is interested in analysing data, it's time to forward
	 * everything. We configure the buffer to forward indefinitely.
	 * Note that we're checking CF_SHUTR_NOW as an indication of a possible
	 * recent call to channel_abort().
	 */
	if (unlikely(!s->rep->analysers &&
	    !(s->rep->flags & (CF_SHUTW|CF_SHUTR_NOW)) &&
	    (s->rep->prod->state >= SI_ST_EST) &&
	    (s->rep->to_forward != CHN_INFINITE_FORWARD))) {
		/* This buffer is freewheeling, there's no analyser
		 * attached to it. If any data are left in, we'll permit them to
		 * move.
		 */
		channel_auto_read(s->rep);
		channel_auto_close(s->rep);
		buffer_flush(s->rep->buf);

		/* We'll let data flow between the producer (if still connected)
		 * to the consumer.
		 */
		if (!(s->rep->flags & (CF_SHUTR|CF_SHUTW_NOW)))
			channel_forward(s->rep, CHN_INFINITE_FORWARD);

		/* if we have no analyser anymore in any direction and have a
		 * tunnel timeout set, use it now. Note that we must respect
		 * the half-closed timeouts as well.
		 */
		if (!s->req->analysers && s->be->timeout.tunnel) {
			s->req->rto = s->req->wto = s->rep->rto = s->rep->wto =
				s->be->timeout.tunnel;

			if ((s->req->flags & CF_SHUTR) && tick_isset(s->fe->timeout.clientfin))
				s->rep->wto = s->fe->timeout.clientfin;
			if ((s->req->flags & CF_SHUTW) && tick_isset(s->be->timeout.serverfin))
				s->rep->rto = s->be->timeout.serverfin;
			if ((s->rep->flags & CF_SHUTR) && tick_isset(s->be->timeout.serverfin))
				s->req->wto = s->be->timeout.serverfin;
			if ((s->rep->flags & CF_SHUTW) && tick_isset(s->fe->timeout.clientfin))
				s->req->rto = s->fe->timeout.clientfin;

			s->req->rex = tick_add(now_ms, s->req->rto);
			s->req->wex = tick_add(now_ms, s->req->wto);
			s->rep->rex = tick_add(now_ms, s->rep->rto);
			s->rep->wex = tick_add(now_ms, s->rep->wto);
		}
	}

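	/* Once both directions run without analysers, the per-side timeouts
	 * are replaced by the backend's tunnel timeout ("timeout tunnel"),
	 * while still honouring the half-closed timeouts applied above.
	 */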
	/* check if it is wise to enable kernel splicing to forward response data */
	if (!(s->rep->flags & (CF_KERN_SPLICING|CF_SHUTR)) &&
	    s->rep->to_forward &&
	    (global.tune.options & GTUNE_USE_SPLICE) &&
	    (objt_conn(s->si[0].end) && __objt_conn(s->si[0].end)->xprt && __objt_conn(s->si[0].end)->xprt->snd_pipe) &&
	    (objt_conn(s->si[1].end) && __objt_conn(s->si[1].end)->xprt && __objt_conn(s->si[1].end)->xprt->rcv_pipe) &&
	    (pipes_used < global.maxpipes) &&
	    (((s->fe->options2|s->be->options2) & PR_O2_SPLIC_RTR) ||
	     (((s->fe->options2|s->be->options2) & PR_O2_SPLIC_AUT) &&
	      (s->rep->flags & CF_STREAMER_FAST)))) {
		s->rep->flags |= CF_KERN_SPLICING;
	}

	/* reflect what the L7 analysers have seen last */
	rpf_last = s->rep->flags;

	/*
	 * Now forward all shutdown requests between both sides of the buffer
	 */

	/*
	 * FIXME: this is probably where we should produce error responses.
	 */

	/* first, let's check if the response buffer needs to shutdown(write) */
	if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CLOSE|CF_SHUTR)) ==
		     (CF_AUTO_CLOSE|CF_SHUTR))) {
		channel_shutw_now(s->rep);
		if (tick_isset(s->be->timeout.serverfin)) {
			s->req->wto = s->be->timeout.serverfin;
			s->req->wex = tick_add(now_ms, s->req->wto);
		}
	}

	/* shutdown(write) pending */
	if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW &&
		     channel_is_empty(s->rep))) {
		si_shutw(s->rep->cons);
		if (tick_isset(s->fe->timeout.clientfin)) {
			s->req->rto = s->fe->timeout.clientfin;
			s->req->rex = tick_add(now_ms, s->req->rto);
		}
	}

	/* shutdown(write) done on the client side, we must stop the server too */
	if (unlikely((s->rep->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTW) &&
	    !s->rep->analysers)
		channel_shutr_now(s->rep);

	/* shutdown(read) pending */
	if (unlikely((s->rep->flags & (CF_SHUTR|CF_SHUTR_NOW)) == CF_SHUTR_NOW)) {
		if (s->rep->prod->flags & SI_FL_NOHALF)
			s->rep->prod->flags |= SI_FL_NOLINGER;
		si_shutr(s->rep->prod);
		if (tick_isset(s->be->timeout.serverfin)) {
			s->req->wto = s->be->timeout.serverfin;
			s->req->wex = tick_add(now_ms, s->req->wto);
		}
	}

	if (s->req->prod->state == SI_ST_DIS || s->req->cons->state == SI_ST_DIS)
		goto resync_stream_interface;

	if (s->req->flags != rqf_last)
		goto resync_request;

	if ((s->rep->flags ^ rpf_last) & CF_MASK_STATIC)
		goto resync_response;

	/* we're interested in getting wakeups again */
	s->req->prod->flags &= ~SI_FL_DONT_WAKE;
	s->req->cons->flags &= ~SI_FL_DONT_WAKE;

	/* This is needed only when debugging is enabled, to indicate
	 * client-side or server-side close. Please note that in the unlikely
	 * event where both sides would close at once, the sequence is reported
	 * on the server side first.
	 */
	if (unlikely((global.mode & MODE_DEBUG) &&
		     (!(global.mode & MODE_QUIET) ||
		      (global.mode & MODE_VERBOSE)))) {
		if (s->si[1].state == SI_ST_CLO &&
		    s->si[1].prev_state == SI_ST_EST) {
			chunk_printf(&trash, "%08x:%s.srvcls[%04x:%04x]\n",
				     s->uniq_id, s->be->id,
				     objt_conn(s->si[0].end) ? (unsigned short)objt_conn(s->si[0].end)->t.sock.fd : -1,
				     objt_conn(s->si[1].end) ? (unsigned short)objt_conn(s->si[1].end)->t.sock.fd : -1);
			shut_your_big_mouth_gcc(write(1, trash.str, trash.len));
		}

		if (s->si[0].state == SI_ST_CLO &&
		    s->si[0].prev_state == SI_ST_EST) {
			chunk_printf(&trash, "%08x:%s.clicls[%04x:%04x]\n",
				     s->uniq_id, s->be->id,
				     objt_conn(s->si[0].end) ? (unsigned short)objt_conn(s->si[0].end)->t.sock.fd : -1,
				     objt_conn(s->si[1].end) ? (unsigned short)objt_conn(s->si[1].end)->t.sock.fd : -1);
			shut_your_big_mouth_gcc(write(1, trash.str, trash.len));
		}
	}

	if (likely((s->rep->cons->state != SI_ST_CLO) ||
		   (s->req->cons->state > SI_ST_INI && s->req->cons->state < SI_ST_CLO))) {

		if ((s->fe->options & PR_O_CONTSTATS) && (s->flags & SN_BE_ASSIGNED))
			session_process_counters(s);

		if (s->rep->cons->state == SI_ST_EST && obj_type(s->rep->cons->end) != OBJ_TYPE_APPCTX)
			si_update(s->rep->cons);

		if (s->req->cons->state == SI_ST_EST && obj_type(s->req->cons->end) != OBJ_TYPE_APPCTX)
			si_update(s->req->cons);

		s->req->flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_WRITE_NULL|CF_WRITE_PARTIAL|CF_READ_ATTACHED);
		s->rep->flags &= ~(CF_READ_NULL|CF_READ_PARTIAL|CF_WRITE_NULL|CF_WRITE_PARTIAL|CF_READ_ATTACHED);
		s->si[0].prev_state = s->si[0].state;
		s->si[1].prev_state = s->si[1].state;
		s->si[0].flags &= ~(SI_FL_ERR|SI_FL_EXP);
		s->si[1].flags &= ~(SI_FL_ERR|SI_FL_EXP);

		/* Trick: if a request is waiting for the server to respond,
		 * and if we know the server can timeout, we don't want the timeout
		 * to expire on the client side first, but we're still interested
		 * in passing data from the client to the server (eg: POST). Thus,
		 * we can cancel the client's request timeout if the server's
		 * request timeout is set and the server has not yet sent a response.
		 */
		if ((s->rep->flags & (CF_AUTO_CLOSE|CF_SHUTR)) == 0 &&
		    (tick_isset(s->req->wex) || tick_isset(s->rep->rex))) {
			s->req->flags |= CF_READ_NOEXP;
			s->req->rex = TICK_ETERNITY;
		}

		/* When any of the stream interfaces is attached to an applet,
		 * we have to call it here. Note that this one may wake the
		 * task up again. If at least one applet was called, the current
		 * task might have been woken up, in which case we don't want it
		 * to be requeued to the wait queue but rather to the run queue
		 * to run ASAP. The bitwise "or" in the condition ensures that
		 * both functions are always called and that we wake up if at
		 * least one did something.
		 */
		if ((si_applet_call(s->req->cons) | si_applet_call(s->rep->cons)) != 0) {
			if (task_in_rq(t)) {
				t->expire = TICK_ETERNITY;
				return t;
			}
		}

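		/* The task is requeued with the earliest of all channel
		 * read/write timeouts, the request analyser timeout, and both
		 * stream interface expirations computed below.
		 */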
 update_exp_and_leave:
		t->expire = tick_first(tick_first(s->req->rex, s->req->wex),
				       tick_first(s->rep->rex, s->rep->wex));
		if (s->req->analysers)
			t->expire = tick_first(t->expire, s->req->analyse_exp);

		if (s->si[0].exp)
			t->expire = tick_first(t->expire, s->si[0].exp);

		if (s->si[1].exp)
			t->expire = tick_first(t->expire, s->si[1].exp);

#ifdef DEBUG_FULL
		fprintf(stderr,
			"[%u] queuing with exp=%u req->rex=%u req->wex=%u req->ana_exp=%u"
			" rep->rex=%u rep->wex=%u, si[0].exp=%u, si[1].exp=%u, cs=%d, ss=%d\n",
			now_ms, t->expire, s->req->rex, s->req->wex, s->req->analyse_exp,
			s->rep->rex, s->rep->wex, s->si[0].exp, s->si[1].exp, s->si[0].state, s->si[1].state);
#endif

#ifdef DEBUG_DEV
		/* this may only happen when no timeout is set or in case of an FSM bug */
		if (!tick_isset(t->expire))
			ABORT_NOW();
#endif
		return t; /* nothing more to do */
	}

	s->fe->feconn--;
	if (s->flags & SN_BE_ASSIGNED)
		s->be->beconn--;
	jobs--;
	if (s->listener) {
		if (!(s->listener->options & LI_O_UNLIMITED))
			actconn--;
		s->listener->nbconn--;
		if (s->listener->state == LI_FULL)
			resume_listener(s->listener);

		/* Dequeues all of the listeners waiting for a resource */
		if (!LIST_ISEMPTY(&global_listener_queue))
			dequeue_all_listeners(&global_listener_queue);

		if (!LIST_ISEMPTY(&s->fe->listener_queue) &&
		    (!s->fe->fe_sps_lim || freq_ctr_remain(&s->fe->fe_sess_per_sec, s->fe->fe_sps_lim, 0) > 0))
			dequeue_all_listeners(&s->fe->listener_queue);
	}

	if (unlikely((global.mode & MODE_DEBUG) &&
		     (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
		chunk_printf(&trash, "%08x:%s.closed[%04x:%04x]\n",
			     s->uniq_id, s->be->id,
			     objt_conn(s->si[0].end) ? (unsigned short)objt_conn(s->si[0].end)->t.sock.fd : -1,
			     objt_conn(s->si[1].end) ? (unsigned short)objt_conn(s->si[1].end)->t.sock.fd : -1);
		shut_your_big_mouth_gcc(write(1, trash.str, trash.len));
	}

	s->logs.t_close = tv_ms_elapsed(&s->logs.tv_accept, &now);
	session_process_counters(s);

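	/* Account the response status by class: n is the first digit of the
	 * HTTP status (1..5), with 0 used for anything out of range.
	 */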
	if (s->txn.status) {
		int n;

		n = s->txn.status / 100;
		if (n < 1 || n > 5)
			n = 0;

		if (s->fe->mode == PR_MODE_HTTP) {
			s->fe->fe_counters.p.http.rsp[n]++;
			if (s->comp_algo && (s->flags & SN_COMP_READY))
				s->fe->fe_counters.p.http.comp_rsp++;
		}
		if ((s->flags & SN_BE_ASSIGNED) &&
		    (s->be->mode == PR_MODE_HTTP)) {
			s->be->be_counters.p.http.rsp[n]++;
			s->be->be_counters.p.http.cum_req++;
			if (s->comp_algo && (s->flags & SN_COMP_READY))
				s->be->be_counters.p.http.comp_rsp++;
		}
	}

	/* let's do a final log if we need it */
	if (!LIST_ISEMPTY(&s->fe->logformat) && s->logs.logwait &&
	    !(s->flags & SN_MONITOR) &&
	    (!(s->fe->options & PR_O_NULLNOLOG) || s->req->total)) {
		s->do_log(s);
	}

	/* update time stats for this session */
	session_update_time_stats(s);

	/* the task MUST not be in the run queue anymore */
	session_free(s);
	task_delete(t);
	task_free(t);
	return NULL;
}

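/* The s->logs.t_* values are cumulative milliseconds measured from the accept
 * date; the function below converts them into per-phase durations (queue,
 * connect, data, total) and feeds them into TIME_STATS_SAMPLES-sample sliding
 * averages via swrate_add(), for both the server and the backend counters.
 */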
/* Update the session's backend and server time stats */
void session_update_time_stats(struct session *s)
{
	int t_request;
	int t_queue;
	int t_connect;
	int t_data;
	int t_close;
	struct server *srv;

	t_request = 0;
	t_queue   = s->logs.t_queue;
	t_connect = s->logs.t_connect;
	t_close   = s->logs.t_close;
	t_data    = s->logs.t_data;

	if (s->be->mode != PR_MODE_HTTP)
		t_data = t_connect;

	if (t_connect < 0 || t_data < 0)
		return;

	if (tv_isge(&s->logs.tv_request, &s->logs.tv_accept))
		t_request = tv_ms_elapsed(&s->logs.tv_accept, &s->logs.tv_request);

	t_data    -= t_connect;
	t_connect -= t_queue;
	t_queue   -= t_request;

	srv = objt_server(s->target);
	if (srv) {
		swrate_add(&srv->counters.q_time, TIME_STATS_SAMPLES, t_queue);
		swrate_add(&srv->counters.c_time, TIME_STATS_SAMPLES, t_connect);
		swrate_add(&srv->counters.d_time, TIME_STATS_SAMPLES, t_data);
		swrate_add(&srv->counters.t_time, TIME_STATS_SAMPLES, t_close);
	}
	swrate_add(&s->be->be_counters.q_time, TIME_STATS_SAMPLES, t_queue);
	swrate_add(&s->be->be_counters.c_time, TIME_STATS_SAMPLES, t_connect);
	swrate_add(&s->be->be_counters.d_time, TIME_STATS_SAMPLES, t_data);
	swrate_add(&s->be->be_counters.t_time, TIME_STATS_SAMPLES, t_close);
}

/*
 * This function adjusts sess->srv_conn and maintains the previous and new
 * server's served session counts. Setting newsrv to NULL is enough to release
 * current connection slot. This function also notifies any LB algo which might
 * expect to be informed about any change in the number of active sessions on a
 * server.
 */
void sess_change_server(struct session *sess, struct server *newsrv)
{
	if (sess->srv_conn == newsrv)
		return;

	if (sess->srv_conn) {
		sess->srv_conn->served--;
		if (sess->srv_conn->proxy->lbprm.server_drop_conn)
			sess->srv_conn->proxy->lbprm.server_drop_conn(sess->srv_conn);
		session_del_srv_conn(sess);
	}

	if (newsrv) {
		newsrv->served++;
		if (newsrv->proxy->lbprm.server_take_conn)
			newsrv->proxy->lbprm.server_take_conn(newsrv);
		session_add_srv_conn(sess, newsrv);
	}
}

/* Handle server-side errors for default protocols. It is called whenever a
 * connection setup is aborted or a request is aborted in queue. It sets the
 * session termination flags so that the caller does not have to worry about
 * them. It's installed as ->srv_error for the server-side stream_interface.
 */
void default_srv_error(struct session *s, struct stream_interface *si)
{
	int err_type = si->err_type;
	int err = 0, fin = 0;

	if (err_type & SI_ET_QUEUE_ABRT) {
		err = SN_ERR_CLICL;
		fin = SN_FINST_Q;
	}
	else if (err_type & SI_ET_CONN_ABRT) {
		err = SN_ERR_CLICL;
		fin = SN_FINST_C;
	}
	else if (err_type & SI_ET_QUEUE_TO) {
		err = SN_ERR_SRVTO;
		fin = SN_FINST_Q;
	}
	else if (err_type & SI_ET_QUEUE_ERR) {
		err = SN_ERR_SRVCL;
		fin = SN_FINST_Q;
	}
	else if (err_type & SI_ET_CONN_TO) {
		err = SN_ERR_SRVTO;
		fin = SN_FINST_C;
	}
	else if (err_type & SI_ET_CONN_ERR) {
		err = SN_ERR_SRVCL;
		fin = SN_FINST_C;
	}
	else if (err_type & SI_ET_CONN_RES) {
		err = SN_ERR_RESOURCE;
		fin = SN_FINST_C;
	}
	else /* SI_ET_CONN_OTHER and others */ {
		err = SN_ERR_INTERNAL;
		fin = SN_FINST_C;
	}

	if (!(s->flags & SN_ERR_MASK))
		s->flags |= err;
	if (!(s->flags & SN_FINST_MASK))
		s->flags |= fin;
}

/* kill a session and set the termination flags to <why> (one of SN_ERR_*) */
void session_shutdown(struct session *session, int why)
{
	if (session->req->flags & (CF_SHUTW|CF_SHUTW_NOW))
		return;

	channel_shutw_now(session->req);
	channel_shutr_now(session->rep);
	session->task->nice = 1024;
	if (!(session->flags & SN_ERR_MASK))
		session->flags |= why;
	task_wakeup(session->task, TASK_WOKEN_OTHER);
}

/************************************************************************/
/*           All supported ACL keywords must be declared here.          */
/************************************************************************/

/* Returns a pointer to a stkctr depending on the fetch keyword name.
 * It is designed to be called as sc[0-9]_*, sc_* or src_* exclusively.
 * sc[0-9]_* will return a pointer to the respective field in the
 * session <l4>. sc_* requires a UINT argument specifying the stick
 * counter number. src_* will fill a locally allocated structure with
 * the table and entry corresponding to what is specified with src_*.
 * NULL may be returned if the designated stkctr is not tracked. For
 * the sc_* and sc[0-9]_* forms, an optional table argument may be
 * passed. When present, the currently tracked key is then looked up
 * in the specified table instead of the current table. The purpose is
 * to be able to convey multiple values per key (eg: have gpc0 from
 * multiple tables).
 */
struct stkctr *
smp_fetch_sc_stkctr(struct session *l4, const struct arg *args, const char *kw)
{
	static struct stkctr stkctr;
	struct stksess *stksess;
	unsigned int num = kw[2] - '0';
	int arg = 0;

	if (num == '_' - '0') {
		/* sc_* variant, args[0] = ctr# (mandatory) */
		num = args[arg++].data.uint;
		if (num >= MAX_SESS_STKCTR)
			return NULL;
	}
	else if (num > 9) { /* src_* variant, args[0] = table */
		struct stktable_key *key;
		struct connection *conn = objt_conn(l4->si[0].end);

		if (!conn)
			return NULL;

		key = addr_to_stktable_key(&conn->addr.from, args->data.prx->table.type);
		if (!key)
			return NULL;

		stkctr.table = &args->data.prx->table;
		stkctr_set_entry(&stkctr, stktable_lookup_key(stkctr.table, key));
		return &stkctr;
	}

	/* Here, <num> contains the counter number from 0 to 9 for
	 * the sc[0-9]_ form, or even higher using sc_(num) if needed.
	 * args[arg] is the first optional argument.
	 */
	stksess = stkctr_entry(&l4->stkctr[num]);
	if (!stksess)
		return NULL;

	if (unlikely(args[arg].type == ARGT_TAB)) {
		/* an alternate table was specified, let's look up the same key there */
		stkctr.table = &args[arg].data.prx->table;
		stkctr_set_entry(&stkctr, stktable_lookup(stkctr.table, stksess));
		return &stkctr;
	}
	return &l4->stkctr[num];
}

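/* Illustrative example (not taken from this file): with a frontend such as
 *
 *     stick-table type ip size 200k expire 1m store gpc0,conn_rate(10s)
 *     tcp-request connection track-sc0 src
 *     tcp-request connection reject if { sc0_get_gpc0 gt 0 }
 *
 * the sc0_get_gpc0 fetch resolves its stkctr through the function above,
 * while src_get_gpc0 would perform the lookup on the source address instead.
 * The configuration keywords are assumed from the fetch names documented in
 * the surrounding comments.
 */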
/* set <smp> to a boolean indicating whether the requested session counter is
 * currently being tracked or not.
 * Supports being called as "sc[0-9]_tracked" only.
 */
static int
smp_fetch_sc_tracked(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                     const struct arg *args, struct sample *smp, const char *kw)
{
	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_BOOL;
	smp->data.uint = !!smp_fetch_sc_stkctr(l4, args, kw);
	return 1;
}

/* set <smp> to the General Purpose Counter 0 value from the session's tracked
 * frontend counters or from the src.
 * Supports being called as "sc[0-9]_get_gpc0" or "src_get_gpc0" only. Value
 * zero is returned if the key is new.
 */
static int
smp_fetch_sc_get_gpc0(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                      const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = 0;

	if (stkctr_entry(stkctr) != NULL) {
		void *ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0);
		if (!ptr)
			return 0; /* parameter not stored */
		smp->data.uint = stktable_data_cast(ptr, gpc0);
	}
	return 1;
}

/* set <smp> to the General Purpose Counter 0's event rate from the session's
 * tracked frontend counters or from the src.
 * Supports being called as "sc[0-9]_gpc0_rate" or "src_gpc0_rate" only.
 * Value zero is returned if the key is new.
 */
static int
smp_fetch_sc_gpc0_rate(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                       const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = 0;
	if (stkctr_entry(stkctr) != NULL) {
		void *ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0_RATE);
		if (!ptr)
			return 0; /* parameter not stored */
		smp->data.uint = read_freq_ctr_period(&stktable_data_cast(ptr, gpc0_rate),
						      stkctr->table->data_arg[STKTABLE_DT_GPC0_RATE].u);
	}
	return 1;
}

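/* The rate period used above (data_arg[STKTABLE_DT_GPC0_RATE].u) is the one
 * declared on the stick-table, e.g. "store gpc0_rate(30s)" (example syntax
 * assumed from the data type name).
 */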
/* Increment the General Purpose Counter 0 value from the session's tracked
 * frontend counters and return the new value.
 * Supports being called as "sc[0-9]_inc_gpc0" or "src_inc_gpc0" only.
 */
static int
smp_fetch_sc_inc_gpc0(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                      const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = 0;
	if (stkctr_entry(stkctr) != NULL) {
		void *ptr;

		/* First, update gpc0_rate if it's tracked. Second, update its
		 * gpc0 if tracked. Returns gpc0's value otherwise the curr_ctr.
		 */
		ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0_RATE);
		if (ptr) {
			update_freq_ctr_period(&stktable_data_cast(ptr, gpc0_rate),
					       stkctr->table->data_arg[STKTABLE_DT_GPC0_RATE].u, 1);
			smp->data.uint = (&stktable_data_cast(ptr, gpc0_rate))->curr_ctr;
		}

		ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0);
		if (ptr)
			smp->data.uint = ++stktable_data_cast(ptr, gpc0);
tcp-request reject if { src_conn_rate gt 100 } { src_inc_gpc0 gt 0}
The '0' at the end of the counter name is there because if we find that more
counters may be useful, other ones will be added.
2010-06-20 06:47:25 -04:00
|
|
|
}
|
|
|
|
|
return 1;
|
|
|
|
|
}
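
/* Illustrative configuration sketch (not compiled code): gpc0 is a free-form
 * counter commonly used to tag abusive sources. Assuming a stick-table which
 * stores conn_rate(5s) and gpc0, a source gets marked once its connection
 * rate exceeds 100 per 5s and stays rejected while the mark is set:
 *
 *     stick-table type ip size 200k expire 1m store conn_rate(5s),gpc0
 *     tcp-request reject if { src_get_gpc0 gt 0 }
 *     tcp-request reject if { src_conn_rate gt 100 } { src_inc_gpc0 gt 0 }
 *
 * Exact directive syntax may vary between versions.
 */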

/* Clear the General Purpose Counter 0 value from the session's tracked
 * frontend counters and return its previous value into temp integer.
 * Supports being called as "sc[0-9]_clr_gpc0" or "src_clr_gpc0" only.
 */
static int
smp_fetch_sc_clr_gpc0(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                      const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = 0;
	if (stkctr_entry(stkctr) != NULL) {
		void *ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0);
		if (!ptr)
			return 0; /* parameter not stored */
		smp->data.uint = stktable_data_cast(ptr, gpc0);
		stktable_data_cast(ptr, gpc0) = 0;
	}
	return 1;
}

/* set <smp> to the cumulated number of connections from the session's tracked
 * frontend counters. Supports being called as "sc[0-9]_conn_cnt" or
 * "src_conn_cnt" only.
 */
static int
smp_fetch_sc_conn_cnt(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                      const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = 0;
	if (stkctr_entry(stkctr) != NULL) {
		void *ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_CONN_CNT);
		if (!ptr)
			return 0; /* parameter not stored */
		smp->data.uint = stktable_data_cast(ptr, conn_cnt);
	}
	return 1;
}

/* set <smp> to the connection rate from the session's tracked frontend
 * counters. Supports being called as "sc[0-9]_conn_rate" or "src_conn_rate"
 * only.
 */
static int
smp_fetch_sc_conn_rate(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                       const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = 0;
	if (stkctr_entry(stkctr) != NULL) {
		void *ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_CONN_RATE);
		if (!ptr)
			return 0; /* parameter not stored */
		smp->data.uint = read_freq_ctr_period(&stktable_data_cast(ptr, conn_rate),
		                                      stkctr->table->data_arg[STKTABLE_DT_CONN_RATE].u);
	}
	return 1;
}
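
/* Illustrative configuration sketch (not compiled code): the connection rate
 * fetch is typically used to reject fast-connecting sources, assuming sc0
 * tracks the source address in a table declared with conn_rate(10s):
 *
 *     tcp-request connection track-sc0 src
 *     tcp-request connection reject if { sc0_conn_rate gt 100 }
 *
 * Exact directive syntax may vary between versions.
 */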

/* set temp integer to the number of connections from the session's source address
 * in the table pointed to by expr, after updating it.
 * Accepts exactly 1 argument of type table.
 */
static int
smp_fetch_src_updt_conn_cnt(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                            const struct arg *args, struct sample *smp, const char *kw)
{
	struct connection *conn = objt_conn(l4->si[0].end);
	struct stksess *ts;
	struct stktable_key *key;
	void *ptr;

	if (!conn)
		return 0;

	key = addr_to_stktable_key(&conn->addr.from, px->table.type);
	if (!key)
		return 0;

	px = args->data.prx;

	if ((ts = stktable_update_key(&px->table, key)) == NULL)
		/* entry does not exist and could not be created */
		return 0;

	ptr = stktable_data_ptr(&px->table, ts, STKTABLE_DT_CONN_CNT);
	if (!ptr)
		return 0; /* parameter not stored in this table */

	smp->type = SMP_T_UINT;
	smp->data.uint = ++stktable_data_cast(ptr, conn_cnt);
	smp->flags = SMP_F_VOL_TEST;
	return 1;
}
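
/* Illustrative configuration sketch (not compiled code): unlike the other
 * src_* fetches above, src_updt_conn_cnt also creates and updates the entry,
 * so it may be used on its own at connection time, assuming a table named
 * "tbl":
 *
 *     tcp-request connection reject if { src_updt_conn_cnt(tbl) gt 50 }
 *
 * Exact directive syntax may vary between versions.
 */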

/* set <smp> to the number of concurrent connections from the session's tracked
 * frontend counters. Supports being called as "sc[0-9]_conn_cur" or
 * "src_conn_cur" only.
 */
static int
smp_fetch_sc_conn_cur(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                      const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = 0;
	if (stkctr_entry(stkctr) != NULL) {
		void *ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_CONN_CUR);
		if (!ptr)
			return 0; /* parameter not stored */
		smp->data.uint = stktable_data_cast(ptr, conn_cur);
	}
	return 1;
}

/* set <smp> to the cumulated number of sessions from the session's tracked
 * frontend counters. Supports being called as "sc[0-9]_sess_cnt" or
 * "src_sess_cnt" only.
 */
static int
smp_fetch_sc_sess_cnt(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                      const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = 0;
	if (stkctr_entry(stkctr) != NULL) {
		void *ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_SESS_CNT);
		if (!ptr)
			return 0; /* parameter not stored */
		smp->data.uint = stktable_data_cast(ptr, sess_cnt);
	}
	return 1;
}

/* set <smp> to the session rate from the session's tracked frontend counters.
 * Supports being called as "sc[0-9]_sess_rate" or "src_sess_rate" only.
 */
static int
smp_fetch_sc_sess_rate(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                       const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = 0;
	if (stkctr_entry(stkctr) != NULL) {
		void *ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_SESS_RATE);
		if (!ptr)
			return 0; /* parameter not stored */
		smp->data.uint = read_freq_ctr_period(&stktable_data_cast(ptr, sess_rate),
		                                      stkctr->table->data_arg[STKTABLE_DT_SESS_RATE].u);
	}
	return 1;
}

/* set <smp> to the cumulated number of HTTP requests from the session's tracked
 * frontend counters. Supports being called as "sc[0-9]_http_req_cnt" or
 * "src_http_req_cnt" only.
 */
static int
smp_fetch_sc_http_req_cnt(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                          const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = 0;
	if (stkctr_entry(stkctr) != NULL) {
		void *ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_HTTP_REQ_CNT);
		if (!ptr)
			return 0; /* parameter not stored */
		smp->data.uint = stktable_data_cast(ptr, http_req_cnt);
	}
	return 1;
}

/* set <smp> to the HTTP request rate from the session's tracked frontend
 * counters. Supports being called as "sc[0-9]_http_req_rate" or
 * "src_http_req_rate" only.
 */
static int
smp_fetch_sc_http_req_rate(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                           const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = 0;
	if (stkctr_entry(stkctr) != NULL) {
		void *ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_HTTP_REQ_RATE);
		if (!ptr)
			return 0; /* parameter not stored */
		smp->data.uint = read_freq_ctr_period(&stktable_data_cast(ptr, http_req_rate),
		                                      stkctr->table->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u);
	}
	return 1;
}
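
/* Illustrative configuration sketch (not compiled code): the HTTP request rate
 * is commonly used for per-source request rate limiting, assuming sc0 tracks
 * src in a table storing http_req_rate(10s):
 *
 *     tcp-request connection track-sc0 src
 *     http-request deny if { sc0_http_req_rate gt 20 }
 *
 * Exact directive syntax may vary between versions.
 */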

/* set <smp> to the cumulated number of HTTP requests errors from the session's
 * tracked frontend counters. Supports being called as "sc[0-9]_http_err_cnt" or
 * "src_http_err_cnt" only.
 */
static int
smp_fetch_sc_http_err_cnt(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                          const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = 0;
	if (stkctr_entry(stkctr) != NULL) {
		void *ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_HTTP_ERR_CNT);
		if (!ptr)
			return 0; /* parameter not stored */
		smp->data.uint = stktable_data_cast(ptr, http_err_cnt);
	}
	return 1;
}

/* set <smp> to the HTTP request error rate from the session's tracked frontend
 * counters. Supports being called as "sc[0-9]_http_err_rate" or
 * "src_http_err_rate" only.
 */
static int
smp_fetch_sc_http_err_rate(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                           const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = 0;
	if (stkctr_entry(stkctr) != NULL) {
		void *ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_HTTP_ERR_RATE);
		if (!ptr)
			return 0; /* parameter not stored */
		smp->data.uint = read_freq_ctr_period(&stktable_data_cast(ptr, http_err_rate),
		                                      stkctr->table->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u);
	}
	return 1;
}

/* set <smp> to the number of kbytes received from clients, as found in the
 * session's tracked frontend counters. Supports being called as
 * "sc[0-9]_kbytes_in" or "src_kbytes_in" only.
 */
static int
smp_fetch_sc_kbytes_in(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                       const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = 0;
	if (stkctr_entry(stkctr) != NULL) {
		void *ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_BYTES_IN_CNT);
		if (!ptr)
			return 0; /* parameter not stored */
		smp->data.uint = stktable_data_cast(ptr, bytes_in_cnt) >> 10;
	}
	return 1;
}

/* set <smp> to the data rate received from clients in bytes/s, as found
 * in the session's tracked frontend counters. Supports being called as
 * "sc[0-9]_bytes_in_rate" or "src_bytes_in_rate" only.
 */
static int
smp_fetch_sc_bytes_in_rate(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                           const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = 0;
	if (stkctr_entry(stkctr) != NULL) {
		void *ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_BYTES_IN_RATE);
		if (!ptr)
			return 0; /* parameter not stored */
		smp->data.uint = read_freq_ctr_period(&stktable_data_cast(ptr, bytes_in_rate),
		                                      stkctr->table->data_arg[STKTABLE_DT_BYTES_IN_RATE].u);
	}
	return 1;
}

/* set <smp> to the number of kbytes sent to clients, as found in the
 * session's tracked frontend counters. Supports being called as
 * "sc[0-9]_kbytes_out" or "src_kbytes_out" only.
 */
static int
smp_fetch_sc_kbytes_out(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                        const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = 0;
	if (stkctr_entry(stkctr) != NULL) {
		void *ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_BYTES_OUT_CNT);
		if (!ptr)
			return 0; /* parameter not stored */
		smp->data.uint = stktable_data_cast(ptr, bytes_out_cnt) >> 10;
	}
	return 1;
}

/* set <smp> to the data rate sent to clients in bytes/s, as found in the
 * session's tracked frontend counters. Supports being called as
 * "sc[0-9]_bytes_out_rate" or "src_bytes_out_rate" only.
 */
static int
smp_fetch_sc_bytes_out_rate(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                            const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = 0;
	if (stkctr_entry(stkctr) != NULL) {
		void *ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_BYTES_OUT_RATE);
		if (!ptr)
			return 0; /* parameter not stored */
		smp->data.uint = read_freq_ctr_period(&stktable_data_cast(ptr, bytes_out_rate),
		                                      stkctr->table->data_arg[STKTABLE_DT_BYTES_OUT_RATE].u);
	}
	return 1;
}

/* set <smp> to the number of active trackers on the SC entry in the session's
 * tracked frontend counters. Supports being called as "sc[0-9]_trackers" only.
 */
static int
smp_fetch_sc_trackers(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                      const struct arg *args, struct sample *smp, const char *kw)
{
	struct stkctr *stkctr = smp_fetch_sc_stkctr(l4, args, kw);

	if (!stkctr)
		return 0;

	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = stkctr_entry(stkctr)->ref_cnt;
	return 1;
}

/* set temp integer to the number of used entries in the table pointed to by expr.
 * Accepts exactly 1 argument of type table.
 */
static int
smp_fetch_table_cnt(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                    const struct arg *args, struct sample *smp, const char *kw)
{
	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = args->data.prx->table.current;
	return 1;
}

/* set temp integer to the number of free entries in the table pointed to by expr.
 * Accepts exactly 1 argument of type table.
 */
static int
smp_fetch_table_avl(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
                    const struct arg *args, struct sample *smp, const char *kw)
{
	px = args->data.prx;
	smp->flags = SMP_F_VOL_TEST;
	smp->type = SMP_T_UINT;
	smp->data.uint = px->table.size - px->table.current;
	return 1;
}
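
/* Illustrative configuration sketch (not compiled code): table_cnt and
 * table_avl take a table name and may be used to stop accepting new entries
 * when a table is nearly full, assuming a backend "tbl" holds the table:
 *
 *     tcp-request connection reject if { table_avl(tbl) lt 100 }
 *
 * Exact directive syntax may vary between versions.
 */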

/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted.
 */
static struct acl_kw_list acl_kws = {ILH, {
	{ /* END */ },
}};

/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted.
 */
static struct sample_fetch_kw_list smp_fetch_keywords = {ILH, {
	{ "sc_bytes_in_rate",   smp_fetch_sc_bytes_in_rate,  ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc_bytes_out_rate",  smp_fetch_sc_bytes_out_rate, ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc_clr_gpc0",        smp_fetch_sc_clr_gpc0,       ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc_conn_cnt",        smp_fetch_sc_conn_cnt,       ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc_conn_cur",        smp_fetch_sc_conn_cur,       ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc_conn_rate",       smp_fetch_sc_conn_rate,      ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc_get_gpc0",        smp_fetch_sc_get_gpc0,       ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc_gpc0_rate",       smp_fetch_sc_gpc0_rate,      ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc_http_err_cnt",    smp_fetch_sc_http_err_cnt,   ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc_http_err_rate",   smp_fetch_sc_http_err_rate,  ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc_http_req_cnt",    smp_fetch_sc_http_req_cnt,   ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc_http_req_rate",   smp_fetch_sc_http_req_rate,  ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc_inc_gpc0",        smp_fetch_sc_inc_gpc0,       ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc_kbytes_in",       smp_fetch_sc_kbytes_in,      ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "sc_kbytes_out",      smp_fetch_sc_kbytes_out,     ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "sc_sess_cnt",        smp_fetch_sc_sess_cnt,       ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc_sess_rate",       smp_fetch_sc_sess_rate,      ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc_tracked",         smp_fetch_sc_tracked,        ARG2(1,UINT,TAB), NULL, SMP_T_BOOL, SMP_USE_INTRN, },
	{ "sc_trackers",        smp_fetch_sc_trackers,       ARG2(1,UINT,TAB), NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc0_bytes_in_rate",  smp_fetch_sc_bytes_in_rate,  ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc0_bytes_out_rate", smp_fetch_sc_bytes_out_rate, ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc0_clr_gpc0",       smp_fetch_sc_clr_gpc0,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc0_conn_cnt",       smp_fetch_sc_conn_cnt,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc0_conn_cur",       smp_fetch_sc_conn_cur,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc0_conn_rate",      smp_fetch_sc_conn_rate,      ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc0_get_gpc0",       smp_fetch_sc_get_gpc0,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc0_gpc0_rate",      smp_fetch_sc_gpc0_rate,      ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc0_http_err_cnt",   smp_fetch_sc_http_err_cnt,   ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc0_http_err_rate",  smp_fetch_sc_http_err_rate,  ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc0_http_req_cnt",   smp_fetch_sc_http_req_cnt,   ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc0_http_req_rate",  smp_fetch_sc_http_req_rate,  ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc0_inc_gpc0",       smp_fetch_sc_inc_gpc0,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc0_kbytes_in",      smp_fetch_sc_kbytes_in,      ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "sc0_kbytes_out",     smp_fetch_sc_kbytes_out,     ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "sc0_sess_cnt",       smp_fetch_sc_sess_cnt,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc0_sess_rate",      smp_fetch_sc_sess_rate,      ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc0_tracked",        smp_fetch_sc_tracked,        ARG1(0,TAB),      NULL, SMP_T_BOOL, SMP_USE_INTRN, },
	{ "sc0_trackers",       smp_fetch_sc_trackers,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc1_bytes_in_rate",  smp_fetch_sc_bytes_in_rate,  ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc1_bytes_out_rate", smp_fetch_sc_bytes_out_rate, ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc1_clr_gpc0",       smp_fetch_sc_clr_gpc0,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc1_conn_cnt",       smp_fetch_sc_conn_cnt,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc1_conn_cur",       smp_fetch_sc_conn_cur,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc1_conn_rate",      smp_fetch_sc_conn_rate,      ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc1_get_gpc0",       smp_fetch_sc_get_gpc0,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc1_gpc0_rate",      smp_fetch_sc_gpc0_rate,      ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc1_http_err_cnt",   smp_fetch_sc_http_err_cnt,   ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc1_http_err_rate",  smp_fetch_sc_http_err_rate,  ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc1_http_req_cnt",   smp_fetch_sc_http_req_cnt,   ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc1_http_req_rate",  smp_fetch_sc_http_req_rate,  ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc1_inc_gpc0",       smp_fetch_sc_inc_gpc0,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc1_kbytes_in",      smp_fetch_sc_kbytes_in,      ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "sc1_kbytes_out",     smp_fetch_sc_kbytes_out,     ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "sc1_sess_cnt",       smp_fetch_sc_sess_cnt,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc1_sess_rate",      smp_fetch_sc_sess_rate,      ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc1_tracked",        smp_fetch_sc_tracked,        ARG1(0,TAB),      NULL, SMP_T_BOOL, SMP_USE_INTRN, },
	{ "sc1_trackers",       smp_fetch_sc_trackers,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc2_bytes_in_rate",  smp_fetch_sc_bytes_in_rate,  ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc2_bytes_out_rate", smp_fetch_sc_bytes_out_rate, ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc2_clr_gpc0",       smp_fetch_sc_clr_gpc0,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc2_conn_cnt",       smp_fetch_sc_conn_cnt,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc2_conn_cur",       smp_fetch_sc_conn_cur,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc2_conn_rate",      smp_fetch_sc_conn_rate,      ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc2_get_gpc0",       smp_fetch_sc_get_gpc0,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc2_gpc0_rate",      smp_fetch_sc_gpc0_rate,      ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc2_http_err_cnt",   smp_fetch_sc_http_err_cnt,   ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc2_http_err_rate",  smp_fetch_sc_http_err_rate,  ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc2_http_req_cnt",   smp_fetch_sc_http_req_cnt,   ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc2_http_req_rate",  smp_fetch_sc_http_req_rate,  ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc2_inc_gpc0",       smp_fetch_sc_inc_gpc0,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc2_kbytes_in",      smp_fetch_sc_kbytes_in,      ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "sc2_kbytes_out",     smp_fetch_sc_kbytes_out,     ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "sc2_sess_cnt",       smp_fetch_sc_sess_cnt,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc2_sess_rate",      smp_fetch_sc_sess_rate,      ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "sc2_tracked",        smp_fetch_sc_tracked,        ARG1(0,TAB),      NULL, SMP_T_BOOL, SMP_USE_INTRN, },
	{ "sc2_trackers",       smp_fetch_sc_trackers,       ARG1(0,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "src_bytes_in_rate",  smp_fetch_sc_bytes_in_rate,  ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "src_bytes_out_rate", smp_fetch_sc_bytes_out_rate, ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "src_clr_gpc0",       smp_fetch_sc_clr_gpc0,       ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "src_conn_cnt",       smp_fetch_sc_conn_cnt,       ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "src_conn_cur",       smp_fetch_sc_conn_cur,       ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "src_conn_rate",      smp_fetch_sc_conn_rate,      ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "src_get_gpc0",       smp_fetch_sc_get_gpc0,       ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "src_gpc0_rate",      smp_fetch_sc_gpc0_rate,      ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "src_http_err_cnt",   smp_fetch_sc_http_err_cnt,   ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "src_http_err_rate",  smp_fetch_sc_http_err_rate,  ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "src_http_req_cnt",   smp_fetch_sc_http_req_cnt,   ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "src_http_req_rate",  smp_fetch_sc_http_req_rate,  ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "src_inc_gpc0",       smp_fetch_sc_inc_gpc0,       ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "src_kbytes_in",      smp_fetch_sc_kbytes_in,      ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "src_kbytes_out",     smp_fetch_sc_kbytes_out,     ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "src_sess_cnt",       smp_fetch_sc_sess_cnt,       ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "src_sess_rate",      smp_fetch_sc_sess_rate,      ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "src_updt_conn_cnt",  smp_fetch_src_updt_conn_cnt, ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_L4CLI, },
	{ "table_avl",          smp_fetch_table_avl,         ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ "table_cnt",          smp_fetch_table_cnt,         ARG1(1,TAB),      NULL, SMP_T_UINT, SMP_USE_INTRN, },
	{ /* END */ },
}};
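
/* Note on the declarations above (a reading of the ARG* specifications, not
 * authoritative documentation): the generic "sc_*" keywords take the tracked
 * counter number as their first argument, so "sc_conn_rate(0)" designates the
 * same counter as "sc0_conn_rate", while the "src_*" variants look up the
 * connection's source address in the designated table, e.g.
 * "src_conn_rate(tbl)".
 */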

__attribute__((constructor))
static void __session_init(void)
{
	sample_register_fetches(&smp_fetch_keywords);
	acl_register_keywords(&acl_kws);
}

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */