2006-06-25 20:48:02 -04:00
|
|
|
/*
|
2009-10-18 01:25:52 -04:00
|
|
|
* include/types/fd.h
|
MAJOR: polling: rework the whole polling system
This commit heavily changes the polling system in order to definitely
fix the frequent breakage of SSL which needs to remember the last
EAGAIN before deciding whether to poll or not. Now we have a state per
direction for each FD, as opposed to a previous and current state
previously. An FD can have up to 8 different states for each direction,
each of which being the result of a 3-bit combination. These 3 bits
indicate a wish to access the FD, the readiness of the FD and the
subscription of the FD to the polling system.
This means that it will now be possible to remember the state of a
file descriptor across disable/enable sequences that generally happen
during forwarding, where enabling reading on a previously disabled FD
would result in forgetting the EAGAIN flag it met last time.
Several new state manipulation functions have been introduced or
adapted :
- fd_want_{recv,send} : enable receiving/sending on the FD regardless
of its state (sets the ACTIVE flag) ;
- fd_stop_{recv,send} : stop receiving/sending on the FD regardless
of its state (clears the ACTIVE flag) ;
- fd_cant_{recv,send} : report a failure to receive/send on the FD
corresponding to EAGAIN (clears the READY flag) ;
- fd_may_{recv,send} : report the ability to receive/send on the FD
as reported by poll() (sets the READY flag) ;
Some functions are used to report the current FD status :
- fd_{recv,send}_active
- fd_{recv,send}_ready
- fd_{recv,send}_polled
Some functions were removed :
- fd_ev_clr(), fd_ev_set(), fd_ev_rem(), fd_ev_wai()
The POLLHUP/POLLERR flags are now reported as ready so that the I/O layers
knows it can try to access the file descriptor to get this information.
In order to simplify the conditions to add/remove cache entries, a new
function fd_alloc_or_release_cache_entry() was created to be used from
pollers while scanning for updates.
The following pollers have been updated :
ev_select() : done, built, tested on Linux 3.10
ev_poll() : done, built, tested on Linux 3.10
ev_epoll() : done, built, tested on Linux 3.10 & 3.13
ev_kqueue() : done, built, tested on OpenBSD 5.2
2014-01-10 10:58:45 -05:00
|
|
|
* File descriptors states - check src/fd.c for explanations.
|
2009-10-18 01:25:52 -04:00
|
|
|
*
|
MAJOR: polling: rework the whole polling system
This commit heavily changes the polling system in order to definitely
fix the frequent breakage of SSL which needs to remember the last
EAGAIN before deciding whether to poll or not. Now we have a state per
direction for each FD, as opposed to a previous and current state
previously. An FD can have up to 8 different states for each direction,
each of which being the result of a 3-bit combination. These 3 bits
indicate a wish to access the FD, the readiness of the FD and the
subscription of the FD to the polling system.
This means that it will now be possible to remember the state of a
file descriptor across disable/enable sequences that generally happen
during forwarding, where enabling reading on a previously disabled FD
would result in forgetting the EAGAIN flag it met last time.
Several new state manipulation functions have been introduced or
adapted :
- fd_want_{recv,send} : enable receiving/sending on the FD regardless
of its state (sets the ACTIVE flag) ;
- fd_stop_{recv,send} : stop receiving/sending on the FD regardless
of its state (clears the ACTIVE flag) ;
- fd_cant_{recv,send} : report a failure to receive/send on the FD
corresponding to EAGAIN (clears the READY flag) ;
- fd_may_{recv,send} : report the ability to receive/send on the FD
as reported by poll() (sets the READY flag) ;
Some functions are used to report the current FD status :
- fd_{recv,send}_active
- fd_{recv,send}_ready
- fd_{recv,send}_polled
Some functions were removed :
- fd_ev_clr(), fd_ev_set(), fd_ev_rem(), fd_ev_wai()
The POLLHUP/POLLERR flags are now reported as ready so that the I/O layers
knows it can try to access the file descriptor to get this information.
In order to simplify the conditions to add/remove cache entries, a new
function fd_alloc_or_release_cache_entry() was created to be used from
pollers while scanning for updates.
The following pollers have been updated :
ev_select() : done, built, tested on Linux 3.10
ev_poll() : done, built, tested on Linux 3.10
ev_epoll() : done, built, tested on Linux 3.10 & 3.13
ev_kqueue() : done, built, tested on OpenBSD 5.2
2014-01-10 10:58:45 -05:00
|
|
|
* Copyright (C) 2000-2014 Willy Tarreau - w@1wt.eu
|
2009-10-18 01:25:52 -04:00
|
|
|
*
|
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
|
* License as published by the Free Software Foundation, version 2.1
|
|
|
|
|
* exclusively.
|
|
|
|
|
*
|
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
|
*
|
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
|
* License along with this library; if not, write to the Free Software
|
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
|
|
|
*/
|
2006-06-25 20:48:02 -04:00
|
|
|
|
|
|
|
|
#ifndef _TYPES_FD_H
|
|
|
|
|
#define _TYPES_FD_H
|
|
|
|
|
|
2006-06-29 11:53:05 -04:00
|
|
|
#include <common/config.h>
|
MAJOR: threads/fd: Make fd stuffs thread-safe
Many changes have been made to do so. First, the fd_updt array, where all
pending FDs for polling are stored, is now a thread-local array. Then 3 locks
have been added to protect, respectively, the fdtab array, the fd_cache array
and poll information. In addition, a lock for each entry in the fdtab array has
been added to protect all accesses to a specific FD or its information.
For pollers, according to the poller, the way to manage the concurrency is
different. There is a poller loop on each thread. So the set of monitored FDs
may need to be protected. epoll and kqueue are thread-safe per-se, so there few
things to do to protect these pollers. This is not possible with select and
poll, so there is no sharing between the threads. The poller on each thread is
independant from others.
Finally, per-thread init/deinit functions are used for each pollers and for FD
part for manage thread-local ressources.
Now, you must be carefull when a FD is created during the HAProxy startup. All
update on the FD state must be made in the threads context and never before
their creation. This is mandatory because fd_updt array is thread-local and
initialized only for threads. Because there is no pollers for the main one, this
array remains uninitialized in this context. For this reason, listeners are now
enabled in run_thread_poll_loop function, just like the worker pipe.
2017-05-29 04:40:41 -04:00
|
|
|
#include <common/hathreads.h>
|
2019-08-27 05:08:17 -04:00
|
|
|
#include <common/ist.h>
|
2012-09-02 16:34:23 -04:00
|
|
|
#include <types/port_range.h>
|
2006-06-25 20:48:02 -04:00
|
|
|
|
2012-11-11 09:02:54 -05:00
|
|
|
/* Direction for each FD event update */
|
2006-07-29 10:59:06 -04:00
|
|
|
enum {
	DIR_RD=0,  /* read direction */
	DIR_WR=1,  /* write direction */
};
|
2006-06-25 20:48:02 -04:00
|
|
|
|
2012-11-11 09:02:54 -05:00
|
|
|
/* Polling status flags returned in fdtab[].ev :
|
2008-01-18 11:20:13 -05:00
|
|
|
* FD_POLL_IN remains set as long as some data is pending for read.
|
|
|
|
|
* FD_POLL_OUT remains set as long as the fd accepts to write data.
|
|
|
|
|
 * FD_POLL_ERR and FD_POLL_HUP remain set forever (until processed).
|
|
|
|
|
*/
|
2007-04-15 05:31:05 -04:00
|
|
|
#define FD_POLL_IN 0x01
|
|
|
|
|
#define FD_POLL_PRI 0x02
|
|
|
|
|
#define FD_POLL_OUT 0x04
|
|
|
|
|
#define FD_POLL_ERR 0x08
|
|
|
|
|
#define FD_POLL_HUP 0x10
|
|
|
|
|
|
2008-01-18 11:20:13 -05:00
|
|
|
#define FD_POLL_DATA (FD_POLL_IN | FD_POLL_OUT)
|
|
|
|
|
#define FD_POLL_STICKY (FD_POLL_ERR | FD_POLL_HUP)
|
2007-04-15 05:31:05 -04:00
|
|
|
|
2020-02-21 10:26:19 -05:00
|
|
|
/* FD_EV_* are the values used in fdtab[].state to define the polling states in
|
|
|
|
|
* each direction. Most of them are manipulated using test-and-set operations
|
|
|
|
|
* which require the bit position in the mask, which is given in the _BIT
|
|
|
|
|
* variant.
|
|
|
|
|
*/
|
MAJOR: polling: rework the whole polling system
This commit heavily changes the polling system in order to definitely
fix the frequent breakage of SSL which needs to remember the last
EAGAIN before deciding whether to poll or not. Now we have a state per
direction for each FD, as opposed to a previous and current state
previously. An FD can have up to 8 different states for each direction,
each of which being the result of a 3-bit combination. These 3 bits
indicate a wish to access the FD, the readiness of the FD and the
subscription of the FD to the polling system.
This means that it will now be possible to remember the state of a
file descriptor across disable/enable sequences that generally happen
during forwarding, where enabling reading on a previously disabled FD
would result in forgetting the EAGAIN flag it met last time.
Several new state manipulation functions have been introduced or
adapted :
- fd_want_{recv,send} : enable receiving/sending on the FD regardless
of its state (sets the ACTIVE flag) ;
- fd_stop_{recv,send} : stop receiving/sending on the FD regardless
of its state (clears the ACTIVE flag) ;
- fd_cant_{recv,send} : report a failure to receive/send on the FD
corresponding to EAGAIN (clears the READY flag) ;
- fd_may_{recv,send} : report the ability to receive/send on the FD
as reported by poll() (sets the READY flag) ;
Some functions are used to report the current FD status :
- fd_{recv,send}_active
- fd_{recv,send}_ready
- fd_{recv,send}_polled
Some functions were removed :
- fd_ev_clr(), fd_ev_set(), fd_ev_rem(), fd_ev_wai()
The POLLHUP/POLLERR flags are now reported as ready so that the I/O layers
knows it can try to access the file descriptor to get this information.
In order to simplify the conditions to add/remove cache entries, a new
function fd_alloc_or_release_cache_entry() was created to be used from
pollers while scanning for updates.
The following pollers have been updated :
ev_select() : done, built, tested on Linux 3.10
ev_poll() : done, built, tested on Linux 3.10
ev_epoll() : done, built, tested on Linux 3.10 & 3.13
ev_kqueue() : done, built, tested on OpenBSD 5.2
2014-01-10 10:58:45 -05:00
|
|
|
|
BUG/MEDIUM: fd/threads: fix excessive CPU usage on multi-thread accept
While experimenting with potentially improved fairness and latency using
ticket locks on a Ryzen 16-thread/8-core, a very strange situation happened
a lot for some levels of traffic. Around 300k connections per second, no
more connections would be accepted on the multi-threaded listener but all
others would continue to work fine. All attempts to trace showed that the
threads were all in the trylock in the fd cache, or in the spinlock of
fd_update_events(), or in the one of fd_may_recv(). But as indicated this
was not a deadlock since the process continues to work fine.
After quite some investigation it appeared that the issue is caused by a
lack of fairness between the fdcache's trylock and these functions' spin
locks above. In fact, regardless of the success or failure of the fdcache's
attempt at grabbing the lock, the poller was calling fd_update_events()
which locks the FD once for something that can be done with a CAS, and
then calls fd_may_recv() with another lock for something that most often
didn't change. The high contention on these spinlocks leaves no chance to
any other thread to grab the lock using trylock(), and once this happens,
there is no thread left to process incoming connection events nor to stop
polling on the FD, leaving all threads at 100% CPU but partially operational.
This patch addresses the issue by using bit-test-and-set instead of the OR
in fd_may_recv() / fd_may_send() so that nothing is done if the FD was
already configured as expected. It does the same in fd_update_events()
using a CAS to check if the FD's events need to be changed at all or not.
With this patch applied, it became impossible to reproduce the issue, and
now there's no way to saturate all 16 CPUs with the load used for testing,
as no more than 1350-1400 were noticed at 300+kcps vs 1600.
Ideally this patch should go further and try to remove the remaining
incarnations of the fdlock as this seems possible, but it's difficult
enough to be done in a distinct patch that will not have to be backported.
It is possible that workloads involving a high connection rate may slightly
benefit from this patch and observe a slightly lower CPU usage even when
the service doesn't misbehave.
This patch must be backported to 2.0 and 1.9.
2019-07-08 17:09:03 -04:00
|
|
|
/* bits positions for a few flags */
|
2019-09-04 07:22:50 -04:00
|
|
|
#define FD_EV_ACTIVE_R_BIT 0
|
2019-09-06 12:27:02 -04:00
|
|
|
#define FD_EV_READY_R_BIT 1
|
|
|
|
|
#define FD_EV_SHUT_R_BIT 2
|
2020-02-26 10:12:45 -05:00
|
|
|
/* unused: 3 */
|
2019-09-04 07:22:50 -04:00
|
|
|
|
|
|
|
|
#define FD_EV_ACTIVE_W_BIT 4
|
2019-09-06 12:27:02 -04:00
|
|
|
#define FD_EV_READY_W_BIT 5
|
|
|
|
|
#define FD_EV_SHUT_W_BIT 6
|
2020-02-26 10:12:45 -05:00
|
|
|
#define FD_EV_ERR_RW_BIT 7
|
BUG/MEDIUM: fd/threads: fix excessive CPU usage on multi-thread accept
While experimenting with potentially improved fairness and latency using
ticket locks on a Ryzen 16-thread/8-core, a very strange situation happened
a lot for some levels of traffic. Around 300k connections per second, no
more connections would be accepted on the multi-threaded listener but all
others would continue to work fine. All attempts to trace showed that the
threads were all in the trylock in the fd cache, or in the spinlock of
fd_update_events(), or in the one of fd_may_recv(). But as indicated this
was not a deadlock since the process continues to work fine.
After quite some investigation it appeared that the issue is caused by a
lack of fairness between the fdcache's trylock and these functions' spin
locks above. In fact, regardless of the success or failure of the fdcache's
attempt at grabbing the lock, the poller was calling fd_update_events()
which locks the FD once for something that can be done with a CAS, and
then calls fd_may_recv() with another lock for something that most often
didn't change. The high contention on these spinlocks leaves no chance to
any other thread to grab the lock using trylock(), and once this happens,
there is no thread left to process incoming connection events nor to stop
polling on the FD, leaving all threads at 100% CPU but partially operational.
This patch addresses the issue by using bit-test-and-set instead of the OR
in fd_may_recv() / fd_may_send() so that nothing is done if the FD was
already configured as expected. It does the same in fd_update_events()
using a CAS to check if the FD's events need to be changed at all or not.
With this patch applied, it became impossible to reproduce the issue, and
now there's no way to saturate all 16 CPUs with the load used for testing,
as no more than 1350-1400 were noticed at 300+kcps vs 1600.
Ideally this patch should go further and try to remove the remaining
incarnations of the fdlock as this seems possible, but it's difficult
enough to be done in a distinct patch that will not have to be backported.
It is possible that workloads involving a high connection rate may slightly
benefit from this patch and observe a slightly lower CPU usage even when
the service doesn't misbehave.
This patch must be backported to 2.0 and 1.9.
2019-07-08 17:09:03 -04:00
|
|
|
|
2020-02-21 10:26:19 -05:00
|
|
|
/* and flag values */
|
|
|
|
|
#define FD_EV_ACTIVE_R (1U << FD_EV_ACTIVE_R_BIT)
|
|
|
|
|
#define FD_EV_ACTIVE_W (1U << FD_EV_ACTIVE_W_BIT)
|
2012-11-11 09:02:54 -05:00
|
|
|
#define FD_EV_ACTIVE_RW (FD_EV_ACTIVE_R | FD_EV_ACTIVE_W)
|
|
|
|
|
|
2020-02-21 10:26:19 -05:00
|
|
|
#define FD_EV_READY_R (1U << FD_EV_READY_R_BIT)
|
|
|
|
|
#define FD_EV_READY_W (1U << FD_EV_READY_W_BIT)
|
MAJOR: polling: rework the whole polling system
This commit heavily changes the polling system in order to definitely
fix the frequent breakage of SSL which needs to remember the last
EAGAIN before deciding whether to poll or not. Now we have a state per
direction for each FD, as opposed to a previous and current state
previously. An FD can have up to 8 different states for each direction,
each of which being the result of a 3-bit combination. These 3 bits
indicate a wish to access the FD, the readiness of the FD and the
subscription of the FD to the polling system.
This means that it will now be possible to remember the state of a
file descriptor across disable/enable sequences that generally happen
during forwarding, where enabling reading on a previously disabled FD
would result in forgetting the EAGAIN flag it met last time.
Several new state manipulation functions have been introduced or
adapted :
- fd_want_{recv,send} : enable receiving/sending on the FD regardless
of its state (sets the ACTIVE flag) ;
- fd_stop_{recv,send} : stop receiving/sending on the FD regardless
of its state (clears the ACTIVE flag) ;
- fd_cant_{recv,send} : report a failure to receive/send on the FD
corresponding to EAGAIN (clears the READY flag) ;
- fd_may_{recv,send} : report the ability to receive/send on the FD
as reported by poll() (sets the READY flag) ;
Some functions are used to report the current FD status :
- fd_{recv,send}_active
- fd_{recv,send}_ready
- fd_{recv,send}_polled
Some functions were removed :
- fd_ev_clr(), fd_ev_set(), fd_ev_rem(), fd_ev_wai()
The POLLHUP/POLLERR flags are now reported as ready so that the I/O layers
knows it can try to access the file descriptor to get this information.
In order to simplify the conditions to add/remove cache entries, a new
function fd_alloc_or_release_cache_entry() was created to be used from
pollers while scanning for updates.
The following pollers have been updated :
ev_select() : done, built, tested on Linux 3.10
ev_poll() : done, built, tested on Linux 3.10
ev_epoll() : done, built, tested on Linux 3.10 & 3.13
ev_kqueue() : done, built, tested on OpenBSD 5.2
2014-01-10 10:58:45 -05:00
|
|
|
#define FD_EV_READY_RW (FD_EV_READY_R | FD_EV_READY_W)
|
|
|
|
|
|
2019-09-06 12:27:02 -04:00
|
|
|
/* note that when FD_EV_SHUT is set, ACTIVE and READY are cleared */
|
2020-02-21 10:26:19 -05:00
|
|
|
#define FD_EV_SHUT_R (1U << FD_EV_SHUT_R_BIT)
|
|
|
|
|
#define FD_EV_SHUT_W (1U << FD_EV_SHUT_W_BIT)
|
2019-09-06 12:27:02 -04:00
|
|
|
#define FD_EV_SHUT_RW (FD_EV_SHUT_R | FD_EV_SHUT_W)
|
|
|
|
|
|
2020-02-26 10:12:45 -05:00
|
|
|
/* note that when FD_EV_ERR is set, SHUT is also set. Also, ERR is for both
|
|
|
|
|
* directions at once (write error, socket dead, etc).
|
|
|
|
|
*/
|
|
|
|
|
#define FD_EV_ERR_RW (1U << FD_EV_ERR_RW_BIT)
|
2019-09-06 12:27:02 -04:00
|
|
|
|
2016-11-17 08:22:52 -05:00
|
|
|
|
|
|
|
|
/* This is the value used to mark a file descriptor as dead. This value is
|
|
|
|
|
* negative, this is important so that tests on fd < 0 properly match. It
|
2018-11-25 16:34:43 -05:00
|
|
|
* also has the nice property of being highly negative but neither overflowing
|
|
|
|
|
* nor changing sign on 32-bit machines when multiplied by sizeof(fdtab).
|
2016-11-17 08:22:52 -05:00
|
|
|
* This ensures that any unexpected dereference of such an uninitialized
|
|
|
|
|
* file descriptor will lead to so large a dereference that it will crash
|
|
|
|
|
* the process at the exact location of the bug with a clean stack trace
|
|
|
|
|
* instead of causing silent manipulation of other FDs. And it's readable
|
|
|
|
|
* when found in a dump.
|
|
|
|
|
*/
|
|
|
|
|
#define DEAD_FD_MAGIC 0xFDDEADFD
|
|
|
|
|
|
MEDIUM: fd/threads: Make sure we don't miss a fd cache entry.
An fd cache entry might be removed and added at the end of the list, while
another thread is parsing it, if that happens, we may miss fd cache entries,
to avoid that, add a new field in the struct fdtab, "added_mask", which
contains a mask for potentially affected threads, if it is set, the
corresponding thread will set its bit in fd_cache_mask, to avoid waiting in
poll while it may have more work to do.
2018-01-31 12:07:29 -05:00
|
|
|
/* fdlist_entry: entry used by the fd cache.
|
|
|
|
|
* >= 0 means we're in the cache and gives the FD of the next in the cache,
|
|
|
|
|
* -1 means we're in the cache and the last element,
|
|
|
|
|
* -2 means the entry is locked,
|
|
|
|
|
* <= -3 means not in the cache, and next element is -4-fd
|
|
|
|
|
*
|
|
|
|
|
* It must remain 8-aligned so that aligned CAS operations may be done on both
|
|
|
|
|
* entries at once.
|
|
|
|
|
*/
|
2018-01-24 12:17:56 -05:00
|
|
|
struct fdlist_entry {
	int next;   /* next FD in the cache list; -1 = last, -2 = locked, <= -3 = not in cache (encoded as -4-fd), per the comment above */
	int prev;   /* previous FD in the cache list; presumably same encoding as <next> — TODO confirm in src/fd.c */
} __attribute__ ((aligned(8)));  /* 8-aligned so both fields can be modified by a single aligned CAS */
|
|
|
|
|
|
MEDIUM: fd/threads: Make sure we don't miss a fd cache entry.
An fd cache entry might be removed and added at the end of the list, while
another thread is parsing it, if that happens, we may miss fd cache entries,
to avoid that, add a new field in the struct fdtab, "added_mask", which
contains a mask for potentially affected threads, if it is set, the
corresponding thread will set its bit in fd_cache_mask, to avoid waiting in
poll while it may have more work to do.
2018-01-31 12:07:29 -05:00
|
|
|
/* head of the fd cache */
|
2018-01-24 12:17:56 -05:00
|
|
|
struct fdlist {
	int first;  /* FD of the first element in the cache list */
	int last;   /* FD of the last element in the cache list */
} __attribute__ ((aligned(8)));  /* 8-aligned so both fields can be modified by a single aligned CAS */
|
|
|
|
|
|
2006-06-25 20:48:02 -04:00
|
|
|
/* info about one given fd */
struct fdtab {
	unsigned long running_mask;          /* mask of thread IDs currently using the fd */
	unsigned long thread_mask;           /* mask of thread IDs authorized to process the fd */
	unsigned long update_mask;           /* mask of thread IDs having an update for fd */
	struct fdlist_entry update;          /* Entry in the global update list */
	void (*iocb)(int fd);                /* I/O handler */
	void *owner;                         /* the connection or listener associated with this fd, NULL if closed */
	unsigned char state;                 /* FD state for read and write directions (FD_EV_*) */
	unsigned char ev;                    /* event seen in return of poll() : FD_POLL_* */
	unsigned char linger_risk:1;         /* 1 if we must kill lingering before closing */
	unsigned char cloned:1;              /* 1 if a cloned socket, requires EPOLL_CTL_DEL on close */
	unsigned char initialized:1;         /* 1 if init phase was done on this fd (e.g. set non-blocking) */
}
#ifdef USE_THREAD
/* only align on cache lines when using threads; 32-bit small archs
 * can put everything in 32-bytes when threads are disabled.
 */
__attribute__((aligned(64)))
#endif
;
|
2009-10-18 01:25:52 -04:00
|
|
|
|
|
|
|
|
/* less often used information */
struct fdinfo {
	struct port_range *port_range;       /* optional port range to bind to */
	int local_port;                      /* optional local port */
};
|
|
|
|
|
|
2007-04-08 10:39:58 -04:00
|
|
|
/*
|
|
|
|
|
* Poller descriptors.
|
|
|
|
|
* - <name> is initialized by the poller's register() function, and should not
|
|
|
|
|
* be allocated, just linked to.
|
|
|
|
|
* - <pref> is initialized by the poller's register() function. It is set to 0
|
|
|
|
|
* by default, meaning the poller is disabled. init() should set it to 0 in
|
|
|
|
|
* case of failure. term() must set it to 0. A generic unoptimized select()
|
|
|
|
|
* poller should set it to 100.
|
|
|
|
|
* - <private> is initialized by the poller's init() function, and cleaned by
|
|
|
|
|
* the term() function.
|
2012-11-11 15:02:34 -05:00
|
|
|
 * - clo() should be used to indicate to the poller that the fd will be closed.
|
2019-05-28 10:44:05 -04:00
|
|
|
* - poll() calls the poller, expiring at <exp>, or immediately if <wake> is set
|
2017-03-13 06:38:28 -04:00
|
|
|
* - flags indicate what the poller supports (HAP_POLL_F_*)
|
2007-04-08 10:39:58 -04:00
|
|
|
*/
|
2017-03-13 06:38:28 -04:00
|
|
|
|
2019-11-28 12:17:33 -05:00
|
|
|
#define HAP_POLL_F_RDHUP 0x00000001 /* the poller notifies of HUP with reads */
|
|
|
|
|
#define HAP_POLL_F_ERRHUP 0x00000002 /* the poller reports ERR and HUP */
|
2017-03-13 06:38:28 -04:00
|
|
|
|
2007-04-08 10:39:58 -04:00
|
|
|
struct poller {
	void   *private;                                     /* any private data for the poller */
	void (*clo)(const int fd);                           /* mark <fd> as closed */
	void (*poll)(struct poller *p, int exp, int wake);   /* the poller itself */
	int  (*init)(struct poller *p);                      /* poller initialization */
	void (*term)(struct poller *p);                      /* termination of this poller */
	int  (*test)(struct poller *p);                      /* pre-init check of the poller */
	int  (*fork)(struct poller *p);                      /* post-fork re-opening */
	const char   *name;                                  /* poller name */
	unsigned int flags;                                  /* HAP_POLL_F_* */
	int    pref;                                         /* try pollers with higher preference first */
};
|
|
|
|
|
|
|
|
|
|
extern struct poller cur_poller; /* the current poller */
|
|
|
|
|
extern int nbpollers;
|
|
|
|
|
#define MAX_POLLERS 10
|
|
|
|
|
extern struct poller pollers[MAX_POLLERS]; /* all registered pollers */
|
|
|
|
|
|
2006-06-25 20:48:02 -04:00
|
|
|
extern struct fdtab *fdtab; /* array of all the file descriptors */
|
2009-10-18 01:25:52 -04:00
|
|
|
extern struct fdinfo *fdinfo; /* less-often used infos for file descriptors */
|
2006-06-25 20:48:02 -04:00
|
|
|
extern int totalconn; /* total # of terminated sessions */
|
|
|
|
|
extern int actconn; /* # of active sessions */
|
|
|
|
|
|
|
|
|
|
#endif /* _TYPES_FD_H */
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Local variables:
|
|
|
|
|
* c-indent-level: 8
|
|
|
|
|
* c-basic-offset: 8
|
|
|
|
|
* End:
|
|
|
|
|
*/
|