/*
 * include/types/global.h
 * Global variables.
 *
 * Copyright (C) 2000-2012 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _TYPES_GLOBAL_H
#define _TYPES_GLOBAL_H

#include <netinet/in.h>

#include <common/config.h>
#include <common/standard.h>
#include <common/hathreads.h>

#include <types/freq_ctr.h>
#include <types/listener.h>
#include <types/proxy.h>
#include <types/task.h>
#include <types/vars.h>

#ifndef UNIX_MAX_PATH
#define UNIX_MAX_PATH 108
#endif

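/* Note: 108 matches the size of the sun_path member of struct sockaddr_un
 * on Linux; the #ifndef guard above lets a platform or build flag override
 * this fallback.
 */
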
/* modes of operation (global.mode) */
#define MODE_DEBUG      0x01
#define MODE_DAEMON     0x02
#define MODE_QUIET      0x04
#define MODE_CHECK      0x08
#define MODE_VERBOSE    0x10
#define MODE_STARTING   0x20
#define MODE_FOREGROUND 0x40
#define MODE_MWORKER    0x80    /* Master Worker */

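/*
 * Example (illustrative, not from the source): the MODE_* values are
 * independent bits OR-ed into global.mode at startup, so several modes
 * may be active at once and each one is tested with a bitwise AND:
 *
 *     global.mode |= MODE_DAEMON | MODE_QUIET;
 *     ...
 *     if (!(global.mode & MODE_QUIET))
 *             fprintf(stderr, "setup messages go here\n");
 *
 * A sketch only; the real flag handling lives in haproxy.c's argument
 * parser.
 */
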
/* list of last checks to perform, depending on config options */
#define LSTCHK_CAP_BIND   0x00000001 /* check that we can bind to any port */
#define LSTCHK_NETADM     0x00000002 /* check that we have CAP_NET_ADMIN */

/* Global tuning options */
/* available polling mechanisms */
#define GTUNE_USE_SELECT       (1<<0)
#define GTUNE_USE_POLL         (1<<1)
#define GTUNE_USE_EPOLL        (1<<2)
#define GTUNE_USE_KQUEUE       (1<<3)
/* platform-specific options */
#define GTUNE_USE_SPLICE       (1<<4)
#define GTUNE_USE_GAI          (1<<5)
#define GTUNE_USE_REUSEPORT    (1<<6)
#define GTUNE_RESOLVE_DONTFAIL (1<<7)

#define GTUNE_SOCKET_TRANSFER  (1<<8)
#define GTUNE_NOEXIT_ONFAILURE (1<<9)
#define GTUNE_USE_SYSTEMD      (1<<10)

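/*
 * Example (illustrative): these flags accumulate in global.tune.options
 * (see the "tune" sub-struct below). A poller or feature test sets its
 * bit once, and runtime code checks the bit before using the facility:
 *
 *     global.tune.options |= GTUNE_USE_EPOLL;
 *     ...
 *     if (global.tune.options & GTUNE_USE_SPLICE)
 *             ... splice() may be used for zero-copy forwarding ...
 *
 * A sketch under the assumption that the pollers and the config parser
 * do the real wiring.
 */
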
/* Access level for a stats socket */
#define ACCESS_LVL_NONE     0
#define ACCESS_LVL_USER     1
#define ACCESS_LVL_OPER     2
#define ACCESS_LVL_ADMIN    3
#define ACCESS_LVL_MASK     0x3

#define ACCESS_FD_LISTENERS 0x4 /* expose listeners FDs on stats socket */

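/*
 * Example (illustrative): the two low-order bits encode an ordered
 * privilege level while the remaining bits are independent capability
 * flags, so both fit in one word. With a hypothetical "rights" word:
 *
 *     int level = rights & ACCESS_LVL_MASK;   // 0..3, ordered
 *     if (level >= ACCESS_LVL_OPER)
 *             ... operator-level commands allowed ...
 *     if (rights & ACCESS_FD_LISTENERS)
 *             ... FD passing over the stats socket allowed ...
 */
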
/* SSL server verify mode */
enum {
        SSL_SERVER_VERIFY_NONE = 0,
        SSL_SERVER_VERIFY_REQUIRED = 1,
};

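/*
 * Example (illustrative): global.ssl_server_verify (declared below)
 * carries the default applied to server lines that don't set "verify":
 *
 *     if (global.ssl_server_verify == SSL_SERVER_VERIFY_REQUIRED)
 *             ... enforce certificate verification on the server ...
 */
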
/* FIXME : this will have to be redefined correctly */
struct global {
        int uid;
        int gid;
        int external_check;
        int nbproc;
        int nbthread;
        unsigned int hard_stop_after; /* maximum time allowed to perform a soft-stop */
        int maxconn, hardmaxconn;
        int maxsslconn;
        int ssl_session_max_cost;   /* how many bytes an SSL session may cost */
        int ssl_handshake_max_cost; /* how many bytes an SSL handshake may use */
        int ssl_used_frontend;      /* non-zero if SSL is used in a frontend */
        int ssl_used_backend;       /* non-zero if SSL is used in a backend */
        int ssl_used_async_engines; /* number of used async engines */
        unsigned int ssl_server_verify; /* default verify mode on servers side */
        struct freq_ctr conn_per_sec;
        struct freq_ctr sess_per_sec;
        struct freq_ctr ssl_per_sec;
        struct freq_ctr ssl_fe_keys_per_sec;
        struct freq_ctr ssl_be_keys_per_sec;
        struct freq_ctr comp_bps_in;  /* bytes per second, before http compression */
        struct freq_ctr comp_bps_out; /* bytes per second, after http compression */
        int cps_lim, cps_max;
        int sps_lim, sps_max;
        int ssl_lim, ssl_max;
        int ssl_fe_keys_max, ssl_be_keys_max;
        unsigned int shctx_lookups, shctx_misses;
        int comp_rate_lim;      /* HTTP compression rate limit */
        int maxpipes;           /* max # of pipes */
        int maxsock;            /* max # of sockets */
        int rlimit_nofile;      /* default ulimit-n value : 0=unset */
        int rlimit_memmax_all;  /* default all-process memory limit in megs ; 0=unset */
        int rlimit_memmax;      /* default per-process memory limit in megs ; 0=unset */
        long maxzlibmem;        /* max RAM for zlib in bytes */
        int mode;
        unsigned int req_count; /* request counter (HTTP or TCP session) for logs and unique_id */
        int last_checks;
        int spread_checks;
        int max_spread_checks;
        int max_syslog_len;
        char *chroot;
        char *pidfile;
        char *node, *desc;       /* node name & description */
        struct buffer log_tag;   /* name for syslog */
        struct list logsrvs;
        char *log_send_hostname; /* set hostname in syslog header */
        char *server_state_base; /* path to a directory where server state files can be found */
        char *server_state_file; /* path to the file where server states are loaded from */
        struct {
                int maxpollevents; /* max number of poll events at once */
                int maxaccept;     /* max number of consecutive accept() */
                int options;       /* various tuning options */
                int runqueue_depth;/* max number of tasks to run at once */
                int recv_enough;   /* how many input bytes at once are "enough" */
                int bufsize;       /* buffer size in bytes, defaults to BUFSIZE */
                int maxrewrite;    /* buffer max rewrite size in bytes, defaults to MAXREWRITE */
                int reserved_bufs; /* how many buffers can only be allocated for response */
                int buf_limit;     /* if not null, how many total buffers may only be allocated */
                int client_sndbuf; /* set client sndbuf to this value if not null */
                int client_rcvbuf; /* set client rcvbuf to this value if not null */
                int server_sndbuf; /* set server sndbuf to this value if not null */
                int server_rcvbuf; /* set server rcvbuf to this value if not null */
                int chksize;       /* check buffer size in bytes, defaults to BUFSIZE */
                int pipesize;      /* pipe size in bytes, system defaults if zero */
                int max_http_hdr;  /* max number of HTTP headers, use MAX_HTTP_HDR if zero */
                int requri_len;    /* max len of request URI, use REQURI_LEN if zero */
                int cookie_len;    /* max length of cookie captures */
                int pattern_cache; /* max number of entries in the pattern cache. */
                int sslcachesize;  /* SSL cache size in session, defaults to 20000 */
                int comp_maxlevel; /* max HTTP compression level */
                unsigned short idle_timer; /* how long before an empty buffer is considered idle (ms) */
        } tune;
        struct {
                char *prefix;        /* path prefix of unix bind socket */
                struct {             /* UNIX socket permissions */
                        uid_t uid;   /* -1 to leave unchanged */
                        gid_t gid;   /* -1 to leave unchanged */
                        mode_t mode; /* 0 to leave unchanged */
                } ux;
        } unix_bind;
        struct proxy *stats_fe; /* the frontend holding the stats settings */
        struct vars   vars;     /* list of variables for the process scope. */
#ifdef USE_CPU_AFFINITY
        struct {
                unsigned long proc[LONGBITS];                /* list of CPU masks for the 32/64 first processes */
                unsigned long thread[LONGBITS][MAX_THREADS]; /* list of CPU masks for the 32/64 first threads per process */
        } cpu_map;
#endif
};

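/*
 * Example (illustrative): the freq_ctr members above feed the rates
 * reported on the stats socket. A producer accounts events as they
 * happen and a reader samples the current per-second rate, assuming
 * the usual helpers from freq_ctr.h:
 *
 *     update_freq_ctr(&global.sess_per_sec, 1);   // one new session
 *     if (read_freq_ctr(&global.sess_per_sec) > global.sps_max)
 *             global.sps_max = read_freq_ctr(&global.sess_per_sec);
 *
 * A sketch only; the real accounting sites are in the stream code.
 */
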
/* per-thread activity reports. It's important that it's aligned on cache lines
 * because some elements will be updated very often. Most counters are OK on
 * 32-bit since this will be used during debugging sessions for troubleshooting
 * in iterative mode.
 */
struct activity {
        unsigned int loops;        // complete loops in run_poll_loop()
        unsigned int wake_cache;   // active fd_cache prevented poll() from sleeping
        unsigned int wake_tasks;   // active tasks prevented poll() from sleeping
        unsigned int wake_signal;  // pending signal prevented poll() from sleeping
        unsigned int poll_exp;     // number of times poll() sees an expired timeout (includes wake_*)
        unsigned int poll_drop;    // poller dropped a dead FD from the update list
        unsigned int poll_dead;    // poller woke up with a dead FD
        unsigned int poll_skip;    // poller skipped another thread's FD
        unsigned int fd_skip;      // fd cache skipped another thread's FD
        unsigned int fd_lock;      // fd cache skipped a locked FD
        unsigned int fd_del;       // fd cache detected a deleted FD
        unsigned int conn_dead;    // conn_fd_handler woke up on an FD indicating a dead connection
        unsigned int stream;       // calls to process_stream()
        unsigned int empty_rq;     // calls to process_runnable_tasks() with nothing for the thread
        unsigned int long_rq;      // process_runnable_tasks() left with tasks in the run queue
        unsigned int cpust_total;         // sum of half-ms stolen per thread
        struct freq_ctr cpust_1s;         // avg amount of half-ms stolen over last second
        struct freq_ctr_period cpust_15s; // avg amount of half-ms stolen over last 15s
        char __pad[0]; // unused except to check remaining room
        char __end[0] __attribute__((aligned(64))); // align size to 64.
};

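/*
 * Example (illustrative): each thread owns one slot of the activity[]
 * array declared below and bumps its counters without locking; the
 * 64-byte alignment above keeps threads from false-sharing each
 * other's slots. Assuming "tid" is the thread index from hathreads.h:
 *
 *     activity[tid].loops++;        // once per run_poll_loop() turn
 *     activity[tid].wake_tasks++;   // a task kept poll() from sleeping
 *
 * These counters are dumped by "show activity" on the CLI.
 */
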
extern struct global global;
extern struct activity activity[MAX_THREADS];
extern int  pid;          /* current process id */
extern int  relative_pid; /* process id starting at 1 */
extern unsigned long pid_bit; /* bit corresponding to the process id */
extern int  actconn;      /* # of active sessions */
extern int  listeners;
extern int  jobs;         /* # of active jobs (listeners, sessions, open devices) */
extern THREAD_LOCAL struct buffer trash;
extern int nb_oldpids;    /* contains the number of old pids found */
extern const int zero;
extern const int one;
extern const struct linger nolinger;
extern int stopping;      /* non zero means stopping in progress */
extern int killed;        /* non zero means a hard-stop is triggered */
extern char hostname[MAX_HOSTNAME_LEN];
extern char localpeer[MAX_HOSTNAME_LEN];
extern struct list global_listener_queue; /* list of the temporarily limited listeners */
extern struct task *global_listener_queue_task;
extern unsigned int warned; /* bitfield of a few warnings to emit just once */
extern volatile unsigned long sleeping_thread_mask;

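/*
 * Example (illustrative): relative_pid starts at 1 for the first
 * process and pid_bit caches the matching mask, so per-process
 * configuration masks can be tested with a single AND:
 *
 *     pid_bit = 1UL << (relative_pid - 1);
 *     if (proc_mask & pid_bit)
 *             ... enabled in the current process ...
 *
 * "proc_mask" is a hypothetical per-object process mask.
 */
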
/* bit values to go with "warned" above */
#define WARN_BLOCK_DEPRECATED       0x00000001
/* unassigned : 0x00000002 */
#define WARN_REDISPATCH_DEPRECATED  0x00000004
#define WARN_CLITO_DEPRECATED       0x00000008
#define WARN_SRVTO_DEPRECATED       0x00000010
#define WARN_CONTO_DEPRECATED       0x00000020

/* to be used with warned and WARN_* */
static inline int already_warned(unsigned int warning)
{
        if (warned & warning)
                return 1;
        warned |= warning;
        return 0;
}

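/*
 * Example (illustrative): already_warned() both tests and sets the bit,
 * so a deprecation warning is emitted at most once per process:
 *
 *     if (!already_warned(WARN_BLOCK_DEPRECATED))
 *             ha_warning("'block' is deprecated\n");
 *
 * The ha_warning() call and message text are assumptions; the real
 * callers live in the config parser.
 */
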
void deinit(void);
void hap_register_build_opts(const char *str, int must_free);
void hap_register_post_check(int (*fct)());
void hap_register_post_deinit(void (*fct)());

void hap_register_per_thread_init(int (*fct)());
void hap_register_per_thread_deinit(void (*fct)());

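/*
 * Example (illustrative): modules register hooks at load time so that
 * haproxy.c can run them without knowing about each module. A minimal
 * sketch, assuming per-thread init callbacks return non-zero on
 * success:
 *
 *     static int my_thread_init()              // hypothetical module
 *     {
 *             return 1;
 *     }
 *
 *     __attribute__((constructor))
 *     static void register_my_module(void)
 *     {
 *             hap_register_per_thread_init(my_thread_init);
 *     }
 */
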
#endif /* _TYPES_GLOBAL_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */