mirror of
https://github.com/haproxy/haproxy.git
synced 2026-02-12 15:23:08 -05:00
Compare commits
161 commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f47b800ac3 | ||
|
|
d13164e105 | ||
|
|
b90b312a50 | ||
|
|
1592ed9854 | ||
|
|
f9b3319f48 | ||
|
|
aad212954f | ||
|
|
b26f0cc45a | ||
|
|
b3a44158fb | ||
|
|
8e16fd2cf1 | ||
|
|
4aa974f949 | ||
|
|
d80f0143c9 | ||
|
|
b65df062be | ||
|
|
a8f50cff7e | ||
|
|
c622ed23c8 | ||
|
|
f5f9c008b1 | ||
|
|
ea92b0ef01 | ||
|
|
2ac0d12790 | ||
|
|
c724693b95 | ||
|
|
e2631ee5f7 | ||
|
|
a7b2353cb3 | ||
|
|
3b45beb465 | ||
|
|
64c5d45a26 | ||
|
|
62239539bf | ||
|
|
91a5b67b25 | ||
|
|
ecffaa6d5a | ||
|
|
a1db464c3e | ||
|
|
5dff6e439d | ||
|
|
d7cdd2c7f4 | ||
|
|
5753c14e84 | ||
|
|
3115eb82a6 | ||
|
|
07195a1af4 | ||
|
|
a603811aac | ||
|
|
e152913327 | ||
|
|
7ac5088c50 | ||
|
|
817003aa31 | ||
|
|
dc6cf224dd | ||
|
|
87ea407cce | ||
|
|
a8bc83bea5 | ||
|
|
2c8ad11b73 | ||
|
|
2a07dc9c24 | ||
|
|
9dd7cf769e | ||
|
|
bf7a2808fc | ||
|
|
9766211cf0 | ||
|
|
9e023ae930 | ||
|
|
68e9fb73fd | ||
|
|
143f5a5c0d | ||
|
|
b6bdb2553b | ||
|
|
3edf600859 | ||
|
|
cddeea58cd | ||
|
|
3674afe8a0 | ||
|
|
2527d9dcd1 | ||
|
|
f26562bcb7 | ||
|
|
abc1947e19 | ||
|
|
02e6375017 | ||
|
|
da728aa0f6 | ||
|
|
23e8ed6ea6 | ||
|
|
fa094d0b61 | ||
|
|
869a997a68 | ||
|
|
48d9c90ff2 | ||
|
|
35d63cc3c7 | ||
|
|
bb36836d76 | ||
|
|
a79a67b52f | ||
|
|
a9df6947b4 | ||
|
|
3ca2a83fc0 | ||
|
|
cb3fd012cd | ||
|
|
bbab0ac4d0 | ||
|
|
6995fe60c3 | ||
|
|
0ea601127e | ||
|
|
0ebef67132 | ||
|
|
9b1faee4c9 | ||
|
|
d2ccc19fde | ||
|
|
f4cd1e74ba | ||
|
|
1a3252e956 | ||
|
|
e9e4821db5 | ||
|
|
4e7c07736a | ||
|
|
c267d24f57 | ||
|
|
a3e9a04435 | ||
|
|
be68ecc37d | ||
|
|
a66b4881d7 | ||
|
|
9e9083d0e2 | ||
|
|
2eda6e1cbe | ||
|
|
b52c60d366 | ||
|
|
116983ad94 | ||
|
|
848e0cd052 | ||
|
|
434e979046 | ||
|
|
6c0ea1fe73 | ||
|
|
f535d3e031 | ||
|
|
ac877a25dd | ||
|
|
b3f7d43248 | ||
|
|
21b192e799 | ||
|
|
c7004be964 | ||
|
|
eb5279b154 | ||
|
|
fbc98ebcda | ||
|
|
2d8d2b4247 | ||
|
|
9f766b2056 | ||
|
|
95e8483b35 | ||
|
|
25564b6075 | ||
|
|
7e85391a9e | ||
|
|
44c491ae6b | ||
|
|
0c3b212aab | ||
|
|
6f5def3cbd | ||
|
|
9156d5f775 | ||
|
|
14e890d85e | ||
|
|
818b32addc | ||
|
|
b4f64c0abf | ||
|
|
d38b918da1 | ||
|
|
12dc9325a7 | ||
|
|
eebb448f49 | ||
|
|
0a464215c5 | ||
|
|
b8e91f619a | ||
|
|
bd8d70413e | ||
|
|
90c5618ed5 | ||
|
|
a3ee35cbfc | ||
|
|
447d73dc99 | ||
|
|
362ff2628f | ||
|
|
aba18bac71 | ||
|
|
39da1845fc | ||
|
|
4b73a3ed29 | ||
|
|
e3a782adb5 | ||
|
|
416b87d5db | ||
|
|
2b45b7bf4f | ||
|
|
c431034037 | ||
|
|
f0e64de753 | ||
|
|
6870551a57 | ||
|
|
16f035d555 | ||
|
|
82907d5621 | ||
|
|
797ec6ede5 | ||
|
|
21fb0a3f58 | ||
|
|
2d26d353ce | ||
|
|
12975c5c37 | ||
|
|
2f6aab9211 | ||
|
|
a209c35f30 | ||
|
|
6249698840 | ||
|
|
1397982599 | ||
|
|
7e1fed4b7a | ||
|
|
2ec387cdc2 | ||
|
|
7f4b053b26 | ||
|
|
7aa839296d | ||
|
|
82196eb74e | ||
|
|
da813ae4d7 | ||
|
|
5495c88441 | ||
|
|
37057feb80 | ||
|
|
fcd4d4a7aa | ||
|
|
04545cb2b7 | ||
|
|
b1cfeeef21 | ||
|
|
022cb3ab7f | ||
|
|
c0f64fc36a | ||
|
|
96faf71f87 | ||
|
|
2560cce7c5 | ||
|
|
880bbeeda4 | ||
|
|
875bbaa7fc | ||
|
|
46088b7ad0 | ||
|
|
a203ce6854 | ||
|
|
6e1718ce4b | ||
|
|
dbe52cc23e | ||
|
|
623aa725a2 | ||
|
|
dbba442740 | ||
|
|
c17ed69bf3 | ||
|
|
91cff75908 | ||
|
|
4aff6d1c25 | ||
|
|
5322bd3785 |
122 changed files with 7205 additions and 2868 deletions
4
.github/matrix.py
vendored
4
.github/matrix.py
vendored
|
|
@ -222,7 +222,7 @@ def main(ref_name):
|
|||
"OPENSSL_VERSION=1.0.2u",
|
||||
"OPENSSL_VERSION=1.1.1s",
|
||||
"OPENSSL_VERSION=3.5.1",
|
||||
"QUICTLS=yes",
|
||||
"QUICTLS_VERSION=OpenSSL_1_1_1w-quic1",
|
||||
"WOLFSSL_VERSION=5.7.0",
|
||||
"AWS_LC_VERSION=1.39.0",
|
||||
# "BORINGSSL=yes",
|
||||
|
|
@ -261,7 +261,7 @@ def main(ref_name):
|
|||
except:
|
||||
pass
|
||||
|
||||
if ssl == "BORINGSSL=yes" or ssl == "QUICTLS=yes" or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl or openssl_supports_quic:
|
||||
if ssl == "BORINGSSL=yes" or "QUICTLS" in ssl or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl or openssl_supports_quic:
|
||||
flags.append("USE_QUIC=1")
|
||||
|
||||
matrix.append(
|
||||
|
|
|
|||
2
.github/workflows/coverity.yml
vendored
2
.github/workflows/coverity.yml
vendored
|
|
@ -27,7 +27,7 @@ jobs:
|
|||
libsystemd-dev
|
||||
- name: Install QUICTLS
|
||||
run: |
|
||||
QUICTLS=yes scripts/build-ssl.sh
|
||||
QUICTLS_VERSION=OpenSSL_1_1_1w-quic1 scripts/build-ssl.sh
|
||||
- name: Download Coverity build tool
|
||||
run: |
|
||||
wget -c -N https://scan.coverity.com/download/linux64 --post-data "token=${{ secrets.COVERITY_SCAN_TOKEN }}&project=Haproxy" -O coverity_tool.tar.gz
|
||||
|
|
|
|||
2
.github/workflows/cross-zoo.yml
vendored
2
.github/workflows/cross-zoo.yml
vendored
|
|
@ -104,7 +104,7 @@ jobs:
|
|||
|
||||
- name: install quictls
|
||||
run: |
|
||||
QUICTLS_EXTRA_ARGS="--cross-compile-prefix=${{ matrix.platform.arch }}- ${{ matrix.platform.target }}" QUICTLS=yes scripts/build-ssl.sh
|
||||
QUICTLS_EXTRA_ARGS="--cross-compile-prefix=${{ matrix.platform.arch }}- ${{ matrix.platform.target }}" QUICTLS_VERSION=OpenSSL_1_1_1w-quic1 scripts/build-ssl.sh
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
|
|
|
|||
25
.github/workflows/fedora-rawhide.yml
vendored
25
.github/workflows/fedora-rawhide.yml
vendored
|
|
@ -1,4 +1,4 @@
|
|||
name: Fedora/Rawhide/QuicTLS
|
||||
name: Fedora/Rawhide/OpenSSL
|
||||
|
||||
on:
|
||||
schedule:
|
||||
|
|
@ -13,10 +13,10 @@ jobs:
|
|||
strategy:
|
||||
matrix:
|
||||
platform: [
|
||||
{ name: x64, cc: gcc, QUICTLS_EXTRA_ARGS: "", ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
|
||||
{ name: x64, cc: clang, QUICTLS_EXTRA_ARGS: "", ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
|
||||
{ name: x86, cc: gcc, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" },
|
||||
{ name: x86, cc: clang, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }
|
||||
{ name: x64, cc: gcc, ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
|
||||
{ name: x64, cc: clang, ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
|
||||
{ name: x86, cc: gcc, ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" },
|
||||
{ name: x86, cc: clang, ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }
|
||||
]
|
||||
fail-fast: false
|
||||
name: ${{ matrix.platform.cc }}.${{ matrix.platform.name }}
|
||||
|
|
@ -28,11 +28,9 @@ jobs:
|
|||
- uses: actions/checkout@v5
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
dnf -y install awk diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang
|
||||
dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686
|
||||
dnf -y install awk diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang openssl-devel.x86_64
|
||||
dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686 openssl-devel.i686
|
||||
- uses: ./.github/actions/setup-vtest
|
||||
- name: Install QuicTLS
|
||||
run: QUICTLS=yes QUICTLS_EXTRA_ARGS="${{ matrix.platform.QUICTLS_EXTRA_ARGS }}" scripts/build-ssl.sh
|
||||
- name: Build contrib tools
|
||||
run: |
|
||||
make admin/halog/halog
|
||||
|
|
@ -41,7 +39,7 @@ jobs:
|
|||
make dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
|
||||
- name: Compile HAProxy with ${{ matrix.platform.cc }}
|
||||
run: |
|
||||
make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }} -Wl,-rpath,${HOME}/opt/lib" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
|
||||
make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" USE_PROMEX=1 USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }}" ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
|
||||
make install
|
||||
- name: Show HAProxy version
|
||||
id: show-version
|
||||
|
|
@ -51,6 +49,13 @@ jobs:
|
|||
echo "::endgroup::"
|
||||
haproxy -vv
|
||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||
#
|
||||
# TODO: review this workaround later
|
||||
- name: relax crypto policies
|
||||
run: |
|
||||
dnf -y install crypto-policies-scripts
|
||||
echo LEGACY > /etc/crypto-policies/config
|
||||
update-crypto-policies
|
||||
- name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
|
||||
id: vtest
|
||||
run: |
|
||||
|
|
|
|||
2
.github/workflows/openssl-ech.yml
vendored
2
.github/workflows/openssl-ech.yml
vendored
|
|
@ -28,7 +28,7 @@ jobs:
|
|||
run: env SSL_LIB=${HOME}/opt/ scripts/build-curl.sh
|
||||
- name: Compile HAProxy
|
||||
run: |
|
||||
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
||||
make -j$(nproc) CC=gcc TARGET=linux-glibc \
|
||||
USE_QUIC=1 USE_OPENSSL=1 USE_ECH=1 \
|
||||
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
||||
|
|
|
|||
2
.github/workflows/quictls.yml
vendored
2
.github/workflows/quictls.yml
vendored
|
|
@ -23,7 +23,7 @@ jobs:
|
|||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
||||
sudo apt-get --no-install-recommends -y install socat gdb
|
||||
- name: Install QuicTLS
|
||||
run: env QUICTLS=yes QUICTLS_URL=https://github.com/quictls/quictls scripts/build-ssl.sh
|
||||
run: env QUICTLS_VERSION=main QUICTLS_URL=https://github.com/quictls/quictls scripts/build-ssl.sh
|
||||
- name: Compile HAProxy
|
||||
run: |
|
||||
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
||||
|
|
|
|||
2
.github/workflows/vtest.yml
vendored
2
.github/workflows/vtest.yml
vendored
|
|
@ -57,7 +57,7 @@ jobs:
|
|||
echo "key=$(echo ${{ matrix.name }} | sha256sum | awk '{print $1}')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache SSL libs
|
||||
if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 'BORINGSSL=yes' && matrix.ssl != 'QUICTLS=yes' }}
|
||||
if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 'BORINGSSL=yes' && !contains(matrix.ssl, 'QUICTLS') }}
|
||||
id: cache_ssl
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
|
|
|
|||
119
CHANGELOG
119
CHANGELOG
|
|
@ -1,6 +1,125 @@
|
|||
ChangeLog :
|
||||
===========
|
||||
|
||||
2026/02/04 : 3.4-dev4
|
||||
- BUG/MEDIUM: hlua: fix invalid lua_pcall() usage in hlua_traceback()
|
||||
- BUG/MINOR: hlua: consume error object if ignored after a failing lua_pcall()
|
||||
- BUG/MINOR: promex: Detach promex from the server on error dump its metrics dump
|
||||
- BUG/MEDIUM: mux-h1: Skip UNUSED htx block when formating the start line
|
||||
- BUG/MINOR: proto_tcp: Properly report support for HAVE_TCP_MD5SIG feature
|
||||
- BUG/MINOR: config: check capture pool creations for failures
|
||||
- BUG/MINOR: stick-tables: abort startup on stk_ctr pool creation failure
|
||||
- MEDIUM: pools: better check for size rounding overflow on registration
|
||||
- DOC: reg-tests: update VTest upstream link in the starting guide
|
||||
- BUG/MINOR: ssl: Properly manage alloc failures in SSL passphrase callback
|
||||
- BUG/MINOR: ssl: Encrypted keys could not be loaded when given alongside certificate
|
||||
- MINOR: ssl: display libssl errors on private key loading
|
||||
- BUG/MAJOR: applet: Don't call I/O handler if the applet was shut
|
||||
- MINOR: ssl: allow to disable certificate compression
|
||||
- BUG/MINOR: ssl: fix error message of tune.ssl.certificate-compression
|
||||
- DOC: config: mention some possible TLS versions restrictions for kTLS
|
||||
- OPTIM: server: move queueslength in server struct
|
||||
- OPTIM: proxy: separate queues fields from served
|
||||
- OPTIM: server: get rid of the last use of _ha_barrier_full()
|
||||
- DOC: config: mention that idle connection sharing is per thread-group
|
||||
- MEDIUM: h1: strictly verify quoting in chunk extensions
|
||||
- BUG/MINOR: config/ssl: fix spelling of "expose-experimental-directives"
|
||||
- BUG/MEDIUM: ssl: fix msg callbacks on QUIC connections
|
||||
- MEDIUM: ssl: remove connection from msg callback args
|
||||
- MEDIUM: ssl: porting to X509_STORE_get1_objects() for OpenSSL 4.0
|
||||
- REGTESTS: ssl: make reg-tests compatible with OpenSSL 4.0
|
||||
- DOC: internals: cleanup few typos in master-worker documentation
|
||||
- BUG/MEDIUM: applet: Fix test on shut flags for legacy applets
|
||||
- MINOR: quic: Fix build with USE_QUIC_OPENSSL_COMPAT
|
||||
- MEDIUM: tcpcheck: add post-80 option for mysql-check to support MySQL 8.x
|
||||
- BUG/MEDIUM: threads: Atomically set TH_FL_SLEEPING and clr FL_NOTIFIED
|
||||
- BUG/MINOR: cpu-topo: count cores not cpus to distinguish core types
|
||||
- DOC: config: mention the limitation on server id range for consistent hash
|
||||
- MEDIUM: backend: make "balance random" consider req rate when loads are equal
|
||||
- BUG/MINOR: config: Fix setting of alt_proto
|
||||
|
||||
2026/01/22 : 3.4-dev3
|
||||
- BUILD: ssl: strchr definition changed in C23
|
||||
- BUILD: tools: memchr definition changed in C23
|
||||
- BUG/MINOR: cfgparse: wrong section name upon error
|
||||
- MINOR: cfgparse: Refactor "userlist" parser to print it in -dKall operation
|
||||
- BUILD: sockpair: fix build issue on macOS related to variable-length arrays
|
||||
- BUG/MINOR: cli/stick-tables: argument to "show table" is optional
|
||||
- REGTESTS: ssl: Fix reg-tests curve check
|
||||
- CI: github: remove ERR=1 temporarly from the ECH job
|
||||
- BUG/MINOR: ech/quic: enable ech configuration also for quic listeners
|
||||
- MEDIUM: config: warn if some userlist hashes are too slow
|
||||
- MINOR: cfgparse: remove duplicate "force-persist" in common kw list
|
||||
- MINOR: sample: also support retrieving fc.timer.handshake without a stream
|
||||
- MINOR: tcp-sample: permit retrieving tcp_info from the connection/session stage
|
||||
- CLEANUP: connection: Remove outdated note about CO_FL `0x00002000` being unused
|
||||
- MINOR: receiver: Dynamically alloc the "members" field of shard_info
|
||||
- MINOR: stats: Increase the tgid from 8bits to 16bits
|
||||
- BUG/MINOR: stats-file: Use a 16bits variable when loading tgid
|
||||
- BUG/MINOR: hlua_fcn: fix broken yield for Patref:add_bulk()
|
||||
- BUG/MINOR: hlua_fcn: ensure Patref:add_bulk() is given a table object before using it
|
||||
- BUG/MINOR: net_helper: fix IPv6 header length processing
|
||||
- MEDIUM: counters: Dynamically allocate per-thread group counters
|
||||
- MEDIUM: counters: Remove some extra tests
|
||||
- BUG/MEDIUM: threads: Fix binding thread on bind.
|
||||
- BUG/MEDIUM: quic: fix ACK ECN frame parsing
|
||||
- MEDIUM: counters: mostly revert da813ae4d7cb77137ed
|
||||
- BUG/MINOR: http_act: fix deinit performed on uninitialized lf_expr in release_http_map()
|
||||
- MINOR: queues: Turn non_empty_tgids into a long array.
|
||||
- MINOR: threads: Eliminate all_tgroups_mask.
|
||||
- BUG/MEDIUM: queues: Fix arithmetic when feeling non_empty_tgids
|
||||
- MEDIUM: thread: Turn the group mask in thread set into a group counter
|
||||
- BUG/MINOR: proxy: free persist_rules
|
||||
- MEDIUM: stream: refactor switching-rules processing
|
||||
- REGTESTS: add test on backend switching rules selection
|
||||
- MEDIUM: proxy: do not select a backend if disabled
|
||||
- MEDIUM: proxy: implement publish/unpublish backend CLI
|
||||
- MINOR: stats: report BE unpublished status
|
||||
- MINOR: cfgparse: adapt warnif_cond_conflicts() error output
|
||||
- MEDIUM: proxy: force traffic on unpublished/disabled backends
|
||||
- MINOR: ssl: Factorize AES GCM data processing
|
||||
- MINOR: ssl: Add new aes_cbc_enc/_dec converters
|
||||
- REGTESTS: ssl: Add tests for new aes cbc converters
|
||||
- MINOR: jwe: Add new jwt_decrypt_secret converter
|
||||
- MINOR: jwe: Add new jwt_decrypt_cert converter
|
||||
- REGTESTS: jwe: Add jwt_decrypt_secret and jwt_decrypt_cert tests
|
||||
- DOC: jwe: Add doc for jwt_decrypt converters
|
||||
- MINOR: jwe: Some algorithms not supported by AWS-LC
|
||||
- REGTESTS: jwe: Fix tests of algorithms not supported by AWS-LC
|
||||
- BUG/MINOR: cfgparse: fix "default" prefix parsing
|
||||
- REORG/MINOR: cfgparse: eliminate code duplication by lshift_args()
|
||||
- MEDIUM: systemd: implement directory loading
|
||||
- CI: github: switch monthly Fedora Rawhide build to OpenSSL
|
||||
- SCRIPTS: build-ssl: use QUICTLS_VERSION instead of QUICTLS=yes
|
||||
- CI: github: define the right quictls version in each jobs
|
||||
- CI: github: fix vtest.yml with "not quictls"
|
||||
- MINOR: cli: use srv_drop() when server was created using new_server()
|
||||
- BUG/MINOR: server: ensure server is detached from proxy list before being freed
|
||||
- BUG/MEDIUM: promex: server iteration may rely on stale server
|
||||
- SCRIPTS: build-ssl: clone the quictls branch directly
|
||||
- SCRIPTS: build-ssl: fix quictls build for 1.1.1 versions
|
||||
- BUG/MEDIUM: log: parsing log-forward options may result in segfault
|
||||
- DOC: proxy-protocol: Add SSL client certificate TLV
|
||||
- DOC: fix typos in the documentation files
|
||||
- DOC: fix mismatched quotes typos around words in the documentation files
|
||||
- REORG: cfgparse: move peers parsing to cfgparse-peers.c
|
||||
- MINOR: tools: add chunk_escape_string() helper function
|
||||
- MINOR: vars: store variable names for runtime access
|
||||
- MINOR: vars: implement dump_all_vars() sample fetch
|
||||
- DOC: vars: document dump_all_vars() sample fetch
|
||||
- BUG/MEDIUM: ssl: fix error path on generate-certificates
|
||||
- BUG/MEDIUM: ssl: fix generate-certificates option when SNI greater than 64bytes
|
||||
- BUG/MEDIUM: mux-quic: prevent BUG_ON() on aborted uni stream close
|
||||
- REGTESTS: ssl: fix generate-certificates w/ LibreSSL
|
||||
- SCRIPTS: build: enable symbols in AWS-LC builds
|
||||
- BUG/MINOR: proxy: fix deinit crash on defaults with duplicate name
|
||||
- BUG/MEDIUM: debug: only dump Lua state when panicking
|
||||
- MINOR: proxy: remove proxy_preset_defaults()
|
||||
- MINOR: proxy: refactor defaults proxies API
|
||||
- MINOR: proxy: simplify defaults proxies list storage
|
||||
- MEDIUM: cfgparse: do not store unnamed defaults in name tree
|
||||
- MEDIUM: proxy: implement persistent named defaults
|
||||
|
||||
2026/01/07 : 3.4-dev2
|
||||
- BUG/MEDIUM: mworker/listener: ambiguous use of RX_F_INHERITED with shards
|
||||
- BUG/MEDIUM: http-ana: Properly detect client abort when forwarding response (v2)
|
||||
|
|
|
|||
5
Makefile
5
Makefile
|
|
@ -643,7 +643,7 @@ ifneq ($(USE_OPENSSL:0=),)
|
|||
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o \
|
||||
src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o \
|
||||
src/ssl_utils.o src/jwt.o src/ssl_clienthello.o src/jws.o src/acme.o \
|
||||
src/ssl_trace.o
|
||||
src/ssl_trace.o src/jwe.o
|
||||
endif
|
||||
|
||||
ifneq ($(USE_ENGINE:0=),)
|
||||
|
|
@ -1002,7 +1002,8 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
|
|||
src/ebsttree.o src/freq_ctr.o src/systemd.o src/init.o \
|
||||
src/http_acl.o src/dict.o src/dgram.o src/pipe.o \
|
||||
src/hpack-huff.o src/hpack-enc.o src/ebtree.o src/hash.o \
|
||||
src/httpclient_cli.o src/version.o src/ncbmbuf.o src/ech.o
|
||||
src/httpclient_cli.o src/version.o src/ncbmbuf.o src/ech.o \
|
||||
src/cfgparse-peers.o
|
||||
|
||||
ifneq ($(TRACE),)
|
||||
OBJS += src/calltrace.o
|
||||
|
|
|
|||
2
VERDATE
2
VERDATE
|
|
@ -1,2 +1,2 @@
|
|||
$Format:%ci$
|
||||
2026/01/07
|
||||
2026/02/04
|
||||
|
|
|
|||
2
VERSION
2
VERSION
|
|
@ -1 +1 @@
|
|||
3.4-dev2
|
||||
3.4-dev4
|
||||
|
|
|
|||
|
|
@ -82,6 +82,7 @@ struct promex_ctx {
|
|||
unsigned field_num; /* current field number (ST_I_PX_* etc) */
|
||||
unsigned mod_field_num; /* first field number of the current module (ST_I_PX_* etc) */
|
||||
int obj_state; /* current state among PROMEX_{FRONT|BACK|SRV|LI}_STATE_* */
|
||||
struct watcher srv_watch; /* watcher to automatically update next pointer */
|
||||
struct list modules; /* list of promex modules to export */
|
||||
struct eb_root filters; /* list of filters to apply on metrics name */
|
||||
};
|
||||
|
|
@ -1244,15 +1245,17 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
|||
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
|
||||
goto next_px;
|
||||
|
||||
if (!sv)
|
||||
if (!sv) {
|
||||
watcher_attach(&ctx->srv_watch, px->srv);
|
||||
sv = px->srv;
|
||||
}
|
||||
|
||||
while (sv) {
|
||||
labels[lb_idx].name = ist("server");
|
||||
labels[lb_idx].value = ist2(sv->id, strlen(sv->id));
|
||||
|
||||
if (!stats_fill_sv_line(px, sv, 0, stats, ST_I_PX_MAX, &(ctx->field_num)))
|
||||
return -1;
|
||||
goto error;
|
||||
|
||||
if ((ctx->flags & PROMEX_FL_NO_MAINT_SRV) && (sv->cur_admin & SRV_ADMF_MAINT))
|
||||
goto next_sv;
|
||||
|
|
@ -1397,10 +1400,11 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
|||
&val, labels, &out, max))
|
||||
goto full;
|
||||
next_sv:
|
||||
sv = sv->next;
|
||||
sv = watcher_next(&ctx->srv_watch, sv->next);
|
||||
}
|
||||
|
||||
next_px:
|
||||
watcher_detach(&ctx->srv_watch);
|
||||
px = px->next;
|
||||
}
|
||||
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
||||
|
|
@ -1451,8 +1455,10 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
|||
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
|
||||
goto next_px2;
|
||||
|
||||
if (!sv)
|
||||
if (!sv) {
|
||||
watcher_attach(&ctx->srv_watch, px->srv);
|
||||
sv = px->srv;
|
||||
}
|
||||
|
||||
while (sv) {
|
||||
labels[lb_idx].name = ist("server");
|
||||
|
|
@ -1467,7 +1473,7 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
|||
|
||||
counters = EXTRA_COUNTERS_GET(sv->extra_counters, mod);
|
||||
if (!mod->fill_stats(counters, stats + ctx->field_num, &ctx->mod_field_num))
|
||||
return -1;
|
||||
goto error;
|
||||
|
||||
val = stats[ctx->field_num + ctx->mod_field_num];
|
||||
metric.type = ((val.type == FN_GAUGE) ? PROMEX_MT_GAUGE : PROMEX_MT_COUNTER);
|
||||
|
|
@ -1477,10 +1483,11 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
|||
goto full;
|
||||
|
||||
next_sv2:
|
||||
sv = sv->next;
|
||||
sv = watcher_next(&ctx->srv_watch, sv->next);
|
||||
}
|
||||
|
||||
next_px2:
|
||||
watcher_detach(&ctx->srv_watch);
|
||||
px = px->next;
|
||||
}
|
||||
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
||||
|
|
@ -1500,11 +1507,6 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
|||
return -1; /* Unexpected and unrecoverable error */
|
||||
}
|
||||
|
||||
/* Decrement server refcount if it was saved through ctx.p[1]. */
|
||||
srv_drop(ctx->p[1]);
|
||||
if (sv)
|
||||
srv_take(sv);
|
||||
|
||||
/* Save pointers (0=current proxy, 1=current server, 2=current stats module) of the current context */
|
||||
ctx->p[0] = px;
|
||||
ctx->p[1] = sv;
|
||||
|
|
@ -1513,6 +1515,10 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
|||
full:
|
||||
ret = 0;
|
||||
goto end;
|
||||
|
||||
error:
|
||||
watcher_detach(&ctx->srv_watch);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Dump metrics of module <mod>. It returns 1 on success, 0 if <out> is full and
|
||||
|
|
@ -2027,6 +2033,7 @@ static int promex_appctx_init(struct appctx *appctx)
|
|||
LIST_INIT(&ctx->modules);
|
||||
ctx->filters = EB_ROOT;
|
||||
appctx->st0 = PROMEX_ST_INIT;
|
||||
watcher_init(&ctx->srv_watch, &ctx->p[1], offsetof(struct server, watcher_list));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -2040,10 +2047,8 @@ static void promex_appctx_release(struct appctx *appctx)
|
|||
struct promex_metric_filter *flt;
|
||||
struct eb32_node *node, *next;
|
||||
|
||||
if (appctx->st1 == PROMEX_DUMPER_SRV) {
|
||||
struct server *srv = objt_server(ctx->p[1]);
|
||||
srv_drop(srv);
|
||||
}
|
||||
if (appctx->st1 == PROMEX_DUMPER_SRV)
|
||||
watcher_detach(&ctx->srv_watch);
|
||||
|
||||
list_for_each_entry_safe(ref, back, &ctx->modules, list) {
|
||||
LIST_DELETE(&ref->list);
|
||||
|
|
|
|||
|
|
@ -6,9 +6,9 @@ Wants=network-online.target
|
|||
[Service]
|
||||
EnvironmentFile=-/etc/default/haproxy
|
||||
EnvironmentFile=-/etc/sysconfig/haproxy
|
||||
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "EXTRAOPTS=-S /run/haproxy-master.sock"
|
||||
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -p $PIDFILE $EXTRAOPTS
|
||||
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -c $EXTRAOPTS
|
||||
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "CFGDIR=/etc/haproxy/conf.d" "EXTRAOPTS=-S /run/haproxy-master.sock"
|
||||
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -p $PIDFILE $EXTRAOPTS
|
||||
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -c $EXTRAOPTS
|
||||
ExecReload=/bin/kill -USR2 $MAINPID
|
||||
KillMode=mixed
|
||||
Restart=always
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@
|
|||
|
||||
A number of contributors are often embarrassed with coding style issues, they
|
||||
don't always know if they're doing it right, especially since the coding style
|
||||
has elvoved along the years. What is explained here is not necessarily what is
|
||||
has evolved along the years. What is explained here is not necessarily what is
|
||||
applied in the code, but new code should as much as possible conform to this
|
||||
style. Coding style fixes happen when code is replaced. It is useless to send
|
||||
patches to fix coding style only, they will be rejected, unless they belong to
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@
|
|||
Configuration Manual
|
||||
----------------------
|
||||
version 3.4
|
||||
2026/01/07
|
||||
2026/02/04
|
||||
|
||||
|
||||
This document covers the configuration language as implemented in the version
|
||||
|
|
@ -1871,6 +1871,7 @@ The following keywords are supported in the "global" section :
|
|||
- tune.bufsize
|
||||
- tune.bufsize.small
|
||||
- tune.comp.maxlevel
|
||||
- tune.defaults.purge
|
||||
- tune.disable-fast-forward
|
||||
- tune.disable-zero-copy-forwarding
|
||||
- tune.epoll.mask-events
|
||||
|
|
@ -1982,6 +1983,7 @@ The following keywords are supported in the "global" section :
|
|||
- tune.ssl.cachesize
|
||||
- tune.ssl.capture-buffer-size
|
||||
- tune.ssl.capture-cipherlist-size (deprecated)
|
||||
- tune.ssl.certificate-compression
|
||||
- tune.ssl.default-dh-param
|
||||
- tune.ssl.force-private-cache
|
||||
- tune.ssl.hard-maxrecord
|
||||
|
|
@ -2753,7 +2755,7 @@ h2-workaround-bogus-websocket-clients
|
|||
automatically downgrade to http/1.1 for the websocket tunnel, specify h2
|
||||
support on the bind line using "alpn" without an explicit "proto" keyword. If
|
||||
this statement was previously activated, this can be disabled by prefixing
|
||||
the keyword with "no'.
|
||||
the keyword with "no".
|
||||
|
||||
hard-stop-after <time>
|
||||
Defines the maximum time allowed to perform a clean soft-stop.
|
||||
|
|
@ -4003,7 +4005,7 @@ profiling.memory { on | off }
|
|||
use in production. The same may be achieved at run time on the CLI using the
|
||||
"set profiling memory" command, please consult the management manual.
|
||||
|
||||
profiling.tasks { auto | on | off }
|
||||
profiling.tasks { auto | on | off | lock | no-lock | memory | no-memory }*
|
||||
Enables ('on') or disables ('off') per-task CPU profiling. When set to 'auto'
|
||||
the profiling automatically turns on a thread when it starts to suffer from
|
||||
an average latency of 1000 microseconds or higher as reported in the
|
||||
|
|
@ -4014,6 +4016,18 @@ profiling.tasks { auto | on | off }
|
|||
systems, containers, or virtual machines, or when the system swaps (which
|
||||
must absolutely never happen on a load balancer).
|
||||
|
||||
When task profiling is enabled, HAProxy can also collect the time each task
|
||||
spends with a lock held or waiting for a lock, as well as the time spent
|
||||
waiting for a memory allocation to succeed in case of a pool cache miss. This
|
||||
can sometimes help understand certain causes of latency. For this, the extra
|
||||
keywords "lock" (to enable lock time collection), "no-lock" (to disable it),
|
||||
"memory" (to enable memory allocation time collection) or "no-memory" (to
|
||||
disable it) may additionally be passed. By default they are not enabled since
|
||||
they can have a non-negligible CPU impact on highly loaded systems (3-10%).
|
||||
Note that the overhead is only taken when profiling is effectively running,
|
||||
so that when running in "auto" mode, it will only appear when HAProxy decides
|
||||
to turn it on.
|
||||
|
||||
CPU profiling per task can be very convenient to report where the time is
|
||||
spent and which requests have what effect on which other request. Enabling
|
||||
it will typically affect the overall's performance by less than 1%, thus it
|
||||
|
|
@ -4121,6 +4135,19 @@ tune.comp.maxlevel <number>
|
|||
Each stream using compression initializes the compression algorithm with
|
||||
this value. The default value is 1.
|
||||
|
||||
tune.defaults.purge
|
||||
For dynamic backends support, all named defaults sections are now kept in
|
||||
memory after parsing. This is necessary as backend added at runtime must be
|
||||
based on a named defaults for its configuration.
|
||||
|
||||
This may consume significant memory if the number of defaults instances is
|
||||
important. In this case and if dynamic backend feature is unnecessary, it's
|
||||
possible to use this option to force deletion of defaults section after
|
||||
parsing. It is still mandatory though to keep referenced defaults section
|
||||
which contain settings whose cannot be copied by their referencing proxies.
|
||||
For example, this is the case if the defaults section defines TCP/HTTP rules
|
||||
or a tcpcheck ruleset.
|
||||
|
||||
tune.disable-fast-forward
|
||||
Disables the data fast-forwarding. It is a mechanism to optimize the data
|
||||
forwarding by passing data directly from a side to the other one without
|
||||
|
|
@ -4512,7 +4539,11 @@ tune.idle-pool.shared { on | off }
|
|||
disabling this option without setting a conservative value on "pool-low-conn"
|
||||
for all servers relying on connection reuse to achieve a high performance
|
||||
level, otherwise connections might be closed very often as the thread count
|
||||
increases.
|
||||
increases. Note that in any case, connections are only shared between threads
|
||||
of the same thread group. This means that systems with many NUMA nodes may
|
||||
show slightly more persistent connections while machines with unified caches
|
||||
and many CPU cores per node may experience higher CPU usage. In the latter
|
||||
case, the "max-thread-per-group" tunable may be used to improve the behavior.
|
||||
|
||||
tune.idletimer <timeout>
|
||||
Sets the duration after which HAProxy will consider that an empty buffer is
|
||||
|
|
@ -5296,6 +5327,22 @@ tune.ssl.capture-cipherlist-size <number> (deprecated)
|
|||
formats. If the value is 0 (default value) the capture is disabled,
|
||||
otherwise a buffer is allocated for each SSL/TLS connection.
|
||||
|
||||
tune.ssl.certificate-compression { auto | off }
|
||||
This setting allows to configure the certificate compression support which is
|
||||
an extension (RFC 8879) to TLS 1.3.
|
||||
|
||||
When set to "auto" it uses the default value of the TLS library.
|
||||
|
||||
With "off" it tries to explicitly disable the support of the feature.
|
||||
HAProxy won't try to send compressed certificates anymore nor accept
|
||||
compressed certificates.
|
||||
|
||||
Configures both backend and frontend sides.
|
||||
|
||||
This keyword is supported by OpenSSL >= 3.2.0.
|
||||
|
||||
The default value is auto.
|
||||
|
||||
tune.ssl.default-dh-param <number>
|
||||
Sets the maximum size of the Diffie-Hellman parameters used for generating
|
||||
the ephemeral/temporary Diffie-Hellman key in case of DHE key exchange. The
|
||||
|
|
@ -5718,7 +5765,7 @@ It is possible to chain a TCP frontend to an HTTP backend. It is pointless if
|
|||
only HTTP traffic is handled. But it may be used to handle several protocols
|
||||
within the same frontend. In this case, the client's connection is first handled
|
||||
as a raw tcp connection before being upgraded to HTTP. Before the upgrade, the
|
||||
content processings are performend on raw data. Once upgraded, data is parsed
|
||||
content processings are performed on raw data. Once upgraded, data is parsed
|
||||
and stored using an internal representation called HTX and it is no longer
|
||||
possible to rely on raw representation. There is no way to go back.
|
||||
|
||||
|
|
@ -5736,7 +5783,7 @@ HTTP/2 upgrade, applicative streams are distinct and all frontend rules are
|
|||
evaluated systematically on each one. And as said, the first stream, the TCP
|
||||
one, is destroyed, but only after the frontend rules were evaluated.
|
||||
|
||||
There is another importnat point to understand when HTTP processings are
|
||||
There is another important point to understand when HTTP processings are
|
||||
performed from a TCP proxy. While HAProxy is able to parse HTTP/1 in-fly from
|
||||
tcp-request content rules, it is not possible for HTTP/2. Only the HTTP/2
|
||||
preface can be parsed. This is a huge limitation regarding the HTTP content
|
||||
|
|
@ -5820,6 +5867,7 @@ errorloc302 X X X X
|
|||
errorloc303 X X X X
|
||||
error-log-format X X X -
|
||||
force-persist - - X X
|
||||
force-be-switch - X X -
|
||||
filter - X X X
|
||||
fullconn X - X X
|
||||
guid - X X X
|
||||
|
|
@ -6247,8 +6295,16 @@ balance url_param <param> [check_post]
|
|||
will take away N-1 of the highest loaded servers at the
|
||||
expense of performance. With very high values, the algorithm
|
||||
will converge towards the leastconn's result but much slower.
|
||||
In addition, for large server farms with very low loads (or
|
||||
perfect balance), comparing loads will often lead to a tie,
|
||||
so in case of equal loads between all measured servers, their
|
||||
request rate over the last second are compared, which allows
|
||||
to better balance server usage over time in the same spirit
|
||||
as roundrobin does, and smooth consistent hash unfairness.
|
||||
The default value is 2, which generally shows very good
|
||||
distribution and performance. This algorithm is also known as
|
||||
distribution and performance. For large farms with low loads
|
||||
(less than a few requests per second per server), it may help
|
||||
to raise it to 3 or even 4. This algorithm is also known as
|
||||
the Power of Two Random Choices and is described here :
|
||||
http://www.eecs.harvard.edu/~michaelm/postscripts/handbook2001.pdf
|
||||
|
||||
|
|
@ -7103,6 +7159,9 @@ default_backend <backend>
|
|||
used when no rule has matched. It generally is the dynamic backend which
|
||||
will catch all undetermined requests.
|
||||
|
||||
If a backend is disabled or unpublished, default_backend rules targeting it
|
||||
will be ignored and stream processing will remain on the original proxy.
|
||||
|
||||
Example :
|
||||
|
||||
use_backend dynamic if url_dyn
|
||||
|
|
@ -7146,7 +7205,11 @@ disabled
|
|||
is possible to disable many instances at once by adding the "disabled"
|
||||
keyword in a "defaults" section.
|
||||
|
||||
See also : "enabled"
|
||||
By default, a disabled backend cannot be selected for content-switching.
|
||||
However, a portion of the traffic can ignore this when "force-be-switch" is
|
||||
used.
|
||||
|
||||
See also : "enabled", "force-be-switch"
|
||||
|
||||
|
||||
dispatch <address>:<port> (deprecated)
|
||||
|
|
@ -7556,6 +7619,19 @@ force-persist { if | unless } <condition>
|
|||
and section 7 about ACL usage.
|
||||
|
||||
|
||||
force-be-switch { if | unless } <condition>
|
||||
Allow content switching to select a backend instance even if it is disabled
|
||||
or unpublished. This rule can be used by admins to test traffic to services
|
||||
prior to exposing them to the outside world.
|
||||
|
||||
May be used in the following contexts: tcp, http
|
||||
|
||||
May be used in sections: defaults | frontend | listen | backend
|
||||
no | yes | yes | no
|
||||
|
||||
See also : "disabled"
|
||||
|
||||
|
||||
filter <name> [param*]
|
||||
Add the filter <name> in the filter list attached to the proxy.
|
||||
|
||||
|
|
@ -10765,7 +10841,7 @@ no option logasap
|
|||
logging.
|
||||
|
||||
|
||||
option mysql-check [ user <username> [ { post-41 | pre-41 } ] ]
|
||||
option mysql-check [ user <username> [ { post-41 | pre-41 | post-80 } ] ]
|
||||
Use MySQL health checks for server testing
|
||||
|
||||
May be used in the following contexts: tcp
|
||||
|
|
@ -10778,6 +10854,12 @@ option mysql-check [ user <username> [ { post-41 | pre-41 } ] ]
|
|||
server.
|
||||
post-41 Send post v4.1 client compatible checks (the default)
|
||||
pre-41 Send pre v4.1 client compatible checks
|
||||
post-80 Send post v8.0 client compatible checks with CLIENT_PLUGIN_AUTH
|
||||
capability set and mysql_native_password as the authentication
|
||||
plugin. Use this option when connecting to MySQL 8.0+ servers
|
||||
where the health check user is created with mysql_native_password
|
||||
authentication. Example:
|
||||
CREATE USER 'haproxy'@'%' IDENTIFIED WITH mysql_native_password BY '';
|
||||
|
||||
If you specify a username, the check consists of sending two MySQL packets,
|
||||
one Client Authentication packet, and one QUIT packet, to correctly close
|
||||
|
|
@ -14752,14 +14834,17 @@ use_backend <backend> [{if | unless} <condition>]
|
|||
|
||||
There may be as many "use_backend" rules as desired. All of these rules are
|
||||
evaluated in their declaration order, and the first one which matches will
|
||||
assign the backend.
|
||||
assign the backend. This is even the case if the backend is considered as
|
||||
down. However, if a matching rule targets a disabled or unpublished backend,
|
||||
it is ignored instead and rules evaluation continues.
|
||||
|
||||
In the first form, the backend will be used if the condition is met. In the
|
||||
second form, the backend will be used if the condition is not met. If no
|
||||
condition is valid, the backend defined with "default_backend" will be used.
|
||||
If no default backend is defined, either the servers in the same section are
|
||||
used (in case of a "listen" section) or, in case of a frontend, no server is
|
||||
used and a 503 service unavailable response is returned.
|
||||
condition is valid, the backend defined with "default_backend" will be used
|
||||
unless it is disabled or unpublished. If no default backend is available,
|
||||
either the servers in the same section are used (in case of a "listen"
|
||||
section) or, in case of a frontend, no server is used and a 503 service
|
||||
unavailable response is returned.
|
||||
|
||||
Note that it is possible to switch from a TCP frontend to an HTTP backend. In
|
||||
this case, either the frontend has already checked that the protocol is HTTP,
|
||||
|
|
@ -17165,7 +17250,9 @@ interface <interface>
|
|||
ktls <on|off> [ EXPERIMENTAL ]
|
||||
Enables or disables ktls for those sockets. If enabled, kTLS will be used
|
||||
if the kernel supports it and the cipher is compatible. This is only
|
||||
available on Linux kernel 4.17 and above.
|
||||
available on Linux kernel 4.17 and above. Please note that some network
|
||||
drivers and/or TLS stacks might restrict kTLS usage to TLS v1.2 only. See
|
||||
also "force-tlsv12".
|
||||
|
||||
label <label>
|
||||
Sets an optional label for these sockets. It could be used to group sockets by
|
||||
|
|
@ -18268,7 +18355,10 @@ hash-key <key>
|
|||
|
||||
id The node keys will be derived from the server's numeric
|
||||
identifier as set from "id" or which defaults to its position
|
||||
in the server list.
|
||||
in the server list. This is the default. Note that only the 28
|
||||
lowest bits of the ID will be used (i.e. (id % 268435456)), so
|
||||
better only use values comprised between 1 and this value to
|
||||
avoid overlap.
|
||||
|
||||
addr The node keys will be derived from the server's address, when
|
||||
available, or else fall back on "id".
|
||||
|
|
@ -18280,7 +18370,9 @@ hash-key <key>
|
|||
HAProxy processes are balancing traffic to the same set of servers. If the
|
||||
server order of each process is different (because, for example, DNS records
|
||||
were resolved in different orders) then this will allow each independent
|
||||
HAProxy processes to agree on routing decisions.
|
||||
HAProxy processes to agree on routing decisions. Note: "balance random" also
|
||||
uses "hash-type consistent", and the quality of the distribution will depend
|
||||
on the quality of the keys.
|
||||
|
||||
id <value>
|
||||
May be used in the following contexts: tcp, http, log
|
||||
|
|
@ -18425,9 +18517,10 @@ See also: "option tcp-check", "option httpchk"
|
|||
ktls <on|off> [ EXPERIMENTAL ]
|
||||
May be used in the following contexts: tcp, http, log, peers, ring
|
||||
|
||||
Enables or disables ktls for those sockets. If enabled, kTLS will be used
|
||||
if the kernel supports it and the cipher is compatible.
|
||||
This is only available on Linux.
|
||||
Enables or disables ktls for those sockets. If enabled, kTLS will be used if
|
||||
the kernel supports it and the cipher is compatible. This is only available
|
||||
on Linux 4.17 and above. Please note that some network drivers and/or TLS
|
||||
stacks might restrict kTLS usage to TLS v1.2 only. See also "force-tlsv12".
|
||||
|
||||
log-bufsize <bufsize>
|
||||
May be used in the following contexts: log
|
||||
|
|
@ -19329,7 +19422,7 @@ strict-maxconn
|
|||
then never establish more connections to a server than maxconn, and try to
|
||||
reuse or kill connections if needed. Please note, however, that it may lead
|
||||
to failed requests in case we can't establish a new connection, and no
|
||||
idle connection is available. This can happen when 'private" connections
|
||||
idle connection is available. This can happen when "private" connections
|
||||
are established, connections tied only to a session, because authentication
|
||||
happened.
|
||||
|
||||
|
|
@ -20456,6 +20549,8 @@ The following keywords are supported:
|
|||
51d.single(prop[,prop*]) string string
|
||||
add(value) integer integer
|
||||
add_item(delim[,var[,suff]]) string string
|
||||
aes_cbc_dec(bits,nonce,key[,<aad>]) binary binary
|
||||
aes_cbc_enc(bits,nonce,key[,<aad>]) binary binary
|
||||
aes_gcm_dec(bits,nonce,key,aead_tag[,aad]) binary binary
|
||||
aes_gcm_enc(bits,nonce,key,aead_tag[,aad]) binary binary
|
||||
and(value) integer integer
|
||||
|
|
@ -20512,6 +20607,9 @@ ip.ver binary integer
|
|||
ipmask(mask4[,mask6]) address address
|
||||
json([input-code]) string string
|
||||
json_query(json_path[,output_type]) string _outtype_
|
||||
jwt_decrypt_jwk(<jwk>) string binary
|
||||
jwt_decrypt_cert(<cert>) string binary
|
||||
jwt_decrypt_secret(<secret>) string binary
|
||||
jwt_header_query([json_path[,output_type]]) string string
|
||||
jwt_payload_query([json_path[,output_type]]) string string
|
||||
-- keyword -------------------------------------+- input type + output type -
|
||||
|
|
@ -20680,6 +20778,31 @@ add_item(<delim>[,<var>[,<suff>]])
|
|||
http-request set-var(req.tagged) 'var(req.tagged),add_item(",",req.score1),add_item(",",req.score2)'
|
||||
http-request set-var(req.tagged) 'var(req.tagged),add_item(",",,(site1))' if src,in_table(site1)
|
||||
|
||||
aes_cbc_dec(<bits>,<nonce>,<key>[,<aad>])
|
||||
Decrypts the raw byte input using the AES128-CBC, AES192-CBC or AES256-CBC
|
||||
algorithm, depending on the <bits> parameter. All other parameters need to be
|
||||
base64 encoded and the returned result is in raw byte format. The <aad>
|
||||
parameter is optional. If the <aad> validation fails, the converter doesn't
|
||||
return any data.
|
||||
The <nonce>, <key> and <aad> can either be strings or variables. This
|
||||
converter requires at least OpenSSL 1.0.1.
|
||||
|
||||
Example:
|
||||
http-response set-header X-Decrypted-Text %[var(txn.enc),\
|
||||
aes_cbc_dec(128,txn.nonce,Zm9vb2Zvb29mb29wZm9vbw==)]
|
||||
|
||||
aes_cbc_enc(<bits>,<nonce>,<key>[,<aad>])
|
||||
Encrypts the raw byte input using the AES128-CBC, AES192-CBC or AES256-CBC
|
||||
algorithm, depending on the <bits> parameter. <nonce>, <key> and <aad>
|
||||
parameters must be base64 encoded.
|
||||
The <aad> parameter is optional. The returned result is in raw byte format.
|
||||
The <nonce>, <key> and <aad> can either be strings or variables. This
|
||||
converter requires at least OpenSSL 1.0.1.
|
||||
|
||||
Example:
|
||||
http-response set-header X-Encrypted-Text %[var(txn.plain),\
|
||||
aes_cbc_enc(128,txn.nonce,Zm9vb2Zvb29mb29wZm9vbw==)]
|
||||
|
||||
aes_gcm_dec(<bits>,<nonce>,<key>,<aead_tag>[,<aad>])
|
||||
Decrypts the raw byte input using the AES128-GCM, AES192-GCM or AES256-GCM
|
||||
algorithm, depending on the <bits> parameter. All other parameters need to be
|
||||
|
|
@ -21129,9 +21252,9 @@ ip.fp([<mode>])
|
|||
can be used to distinguish between multiple apparently identical hosts. The
|
||||
real-world use case is to refine the identification of misbehaving hosts
|
||||
between a shared IP address to avoid blocking legitimate users when only one
|
||||
is misbehaving and needs to be blocked. The converter builds a 7-byte binary
|
||||
block based on the input. The bytes of the fingerprint are arranged like
|
||||
this:
|
||||
is misbehaving and needs to be blocked. The converter builds an 8-byte minimum
|
||||
binary block based on the input. The bytes of the fingerprint are arranged
|
||||
like this:
|
||||
- byte 0: IP TOS field (see ip.tos)
|
||||
- byte 1:
|
||||
- bit 7: IPv6 (1) / IPv4 (0)
|
||||
|
|
@ -21146,10 +21269,13 @@ ip.fp([<mode>])
|
|||
- bits 3..0: TCP window scaling + 1 (1..15) / 0 (no WS advertised)
|
||||
- byte 3..4: tcp.win
|
||||
- byte 5..6: tcp.options.mss, or zero if absent
|
||||
- byte 7: 1 bit per present TCP option, with options 2 to 8 being mapped to
|
||||
bits 0..6 respectively, and bit 7 indicating the presence of any
|
||||
option from 9 to 255.
|
||||
|
||||
The <mode> argument permits to append more information to the fingerprint. By
|
||||
default, when the <mode> argument is not set or is zero, the fingerprint is
|
||||
solely made of the 7 bytes described above. If <mode> is specified as another
|
||||
solely made of the 8 bytes described above. If <mode> is specified as another
|
||||
value, it then corresponds to the sum of the following values, and the
|
||||
respective components will be concatenated to the fingerprint, in the order
|
||||
below:
|
||||
|
|
@ -21159,7 +21285,7 @@ ip.fp([<mode>])
|
|||
- 4: the source IP address is appended to the fingerprint, which adds
|
||||
4 bytes for IPv4 and 16 for IPv6.
|
||||
|
||||
Example: make a 12..24 bytes fingerprint using the base FP, the TTL and the
|
||||
Example: make a 13..25 bytes fingerprint using the base FP, the TTL and the
|
||||
source address (1+4=5):
|
||||
|
||||
frontend test
|
||||
|
|
@ -21311,22 +21437,110 @@ json_query(<json_path>[,<output_type>])
|
|||
# get the value of the key 'iss' from a JWT Bearer token
|
||||
http-request set-var(txn.token_payload) req.hdr(Authorization),word(2,.),ub64dec,json_query('$.iss')
|
||||
|
||||
jwt_decrypt_cert(<cert>)
|
||||
Performs a signature validation of a JSON Web Token following the JSON Web
|
||||
Encryption format (see RFC 7516) given in input and returns its content
|
||||
decrypted thanks to the certificate provided.
|
||||
The <cert> parameter must be a path to an already loaded certificate (that
|
||||
can be dumped via the "dump ssl cert" CLI command). The certificate must have
|
||||
its "jwt" option explicitly set to "on" (see "jwt" crt-list option). It can
|
||||
be provided directly or via a variable.
|
||||
The only tokens managed yet are the ones using the Compact Serialization
|
||||
format (five dot-separated base64-url encoded strings).
|
||||
|
||||
This converter can be used for tokens that have an algorithm ("alg" field of
|
||||
the JOSE header) among the following: RSA1_5, RSA-OAEP or RSA-OAEP-256.
|
||||
|
||||
The JWE token must be provided base64url-encoded and the output will be
|
||||
provided "raw". If an error happens during token parsing, signature
|
||||
verification or content decryption, an empty string will be returned.
|
||||
|
||||
Example:
|
||||
# Get a JWT from the authorization header, put its decrypted content in an
|
||||
# HTTP header
|
||||
http-request set-var(txn.bearer) http_auth_bearer
|
||||
http-request set-header X-Decrypted %[var(txn.bearer),jwt_decrypt_cert("/foo/bar.pem")]
|
||||
|
||||
jwt_decrypt_jwk(<jwk>)
|
||||
Performs a signature validation of a JSON Web Token following the JSON Web
|
||||
Encryption format (see RFC 7516) given in input and returns its content
|
||||
decrypted thanks to the provided JSON Web Key (RFC7517).
|
||||
The <jwk> parameter must be a valid JWK of type 'oct' or 'RSA' ('kty' field
|
||||
of the JSON key) that can be provided either as a string or via a variable.
|
||||
|
||||
The only tokens managed yet are the ones using the Compact Serialization
|
||||
format (five dot-separated base64-url encoded strings).
|
||||
|
||||
This converter can be used to decode token that have a symmetric-type
|
||||
algorithm ("alg" field of the JOSE header) among the following: A128KW,
|
||||
A192KW, A256KW, A128GCMKW, A192GCMKW, A256GCMKW, dir. In this case, we expect
|
||||
the provided JWK to be of the 'oct' type. Please note that the A128KW and
|
||||
A192KW algorithms are not available on AWS-LC and decryption will not work.
|
||||
This converter also manages tokens that have an algorithm ("alg" field of
|
||||
the JOSE header) among the following: RSA1_5, RSA-OAEP or RSA-OAEP-256. In
|
||||
such a case an 'RSA' type JWK representing a private key must be provided.
|
||||
|
||||
The JWE token must be provided base64url-encoded and the output will be
|
||||
provided "raw". If an error happens during token parsing, signature
|
||||
verification or content decryption, an empty string will be returned.
|
||||
|
||||
Because of the way quotes, commas and double quotes are treated in the
|
||||
configuration, the contents of the JWK must be properly escaped for this
|
||||
converter to work properly (see section 2.2 for more information).
|
||||
|
||||
Example:
|
||||
# Get a JWT from the authorization header, put its decrypted content in an
|
||||
# HTTP header
|
||||
http-request set-var(txn.bearer) http_auth_bearer
|
||||
http-request set-header X-Decrypted %[var(txn.bearer),jwt_decrypt_jwk(\'{\"kty\":\"oct\",\"k\":\"wAsgsg\"}\')]
|
||||
|
||||
# or via a variable
|
||||
http-request set-var(txn.bearer) http_auth_bearer
|
||||
http-request set-var(txn.jwk) str(\'{\"kty\":\"oct\",\"k\":\"Q-NFLlghQ\"}\')
|
||||
http-request set-header X-Decrypted %[var(txn.bearer),jwt_decrypt_jwk(txn.jwk)]
|
||||
|
||||
jwt_decrypt_secret(<secret>)
|
||||
Performs a signature validation of a JSON Web Token following the JSON Web
|
||||
Encryption format (see RFC 7516) given in input and returns its content
|
||||
decrypted thanks to the base64-encoded secret provided. The secret can be
|
||||
given as a string or via a variable.
|
||||
The only tokens managed yet are the ones using the Compact Serialization
|
||||
format (five dot-separated base64-url encoded strings).
|
||||
|
||||
This converter can be used for tokens that have an algorithm ("alg" field of
|
||||
the JOSE header) among the following: A128KW, A192KW, A256KW, A128GCMKW,
|
||||
A192GCMKW, A256GCMKW, dir. Please note that the A128KW and A192KW algorithms
|
||||
are not available on AWS-LC and decryption will not work.
|
||||
|
||||
The JWE token must be provided base64url-encoded and the output will be
|
||||
provided "raw". If an error happens during token parsing, signature
|
||||
verification or content decryption, an empty string will be returned.
|
||||
|
||||
Example:
|
||||
# Get a JWT from the authorization header, put its decrypted content in an
|
||||
# HTTP header
|
||||
http-request set-var(txn.bearer) http_auth_bearer
|
||||
http-request set-header X-Decrypted %[var(txn.bearer),jwt_decrypt_secret("GawgguFyGrWKav7AX4VKUg")]
|
||||
|
||||
jwt_header_query([<json_path>[,<output_type>]])
|
||||
When given a JSON Web Token (JWT) in input, either returns the decoded header
|
||||
part of the token (the first base64-url encoded part of the JWT) if no
|
||||
parameter is given, or performs a json_query on the decoded header part of
|
||||
the token. See "json_query" converter for details about the accepted
|
||||
json_path and output_type parameters.
|
||||
This converter can be used with tokens that are either JWS or JWE tokens as
|
||||
long as they are in the Compact Serialization format.
|
||||
|
||||
Please note that this converter is only available when HAProxy has been
|
||||
compiled with USE_OPENSSL.
|
||||
|
||||
jwt_payload_query([<json_path>[,<output_type>]])
|
||||
When given a JSON Web Token (JWT) in input, either returns the decoded
|
||||
payload part of the token (the second base64-url encoded part of the JWT) if
|
||||
no parameter is given, or performs a json_query on the decoded payload part
|
||||
of the token. See "json_query" converter for details about the accepted
|
||||
json_path and output_type parameters.
|
||||
When given a JSON Web Token (JWT) of the JSON Web Signed (JWS) format in
|
||||
input, either returns the decoded payload part of the token (the second
|
||||
base64-url encoded part of the JWT) if no parameter is given, or performs a
|
||||
json_query on the decoded payload part of the token. See "json_query"
|
||||
converter for details about the accepted json_path and output_type
|
||||
parameters.
|
||||
|
||||
Please note that this converter is only available when HAProxy has been
|
||||
compiled with USE_OPENSSL.
|
||||
|
|
@ -23443,6 +23657,71 @@ var(<var-name>[,<default>]) : undefined
|
|||
return it as a string. Empty strings are permitted. See section 2.8 about
|
||||
variables for details.
|
||||
|
||||
dump_all_vars([<scope>][,<prefix>][,<delimiter>]) : string
|
||||
Returns a list of all variables in the specified scope, optionally filtered
|
||||
by name prefix and with a customizable delimiter.
|
||||
|
||||
Output format: var1=value1<delim>var2=value2<delim>...
|
||||
|
||||
Value encoding by type:
|
||||
- Strings: quoted and escaped (", \, \r, \n, \b, \0)
|
||||
Example: txn.name="John \"Doe\""
|
||||
- Binary: hex-encoded with 'x' prefix, unquoted
|
||||
Example: txn.data=x48656c6c6f
|
||||
- Integers: unquoted decimal
|
||||
Example: txn.count=42
|
||||
- Booleans: unquoted "true" or "false"
|
||||
Example: txn.active=true
|
||||
- Addresses: unquoted IP address string
|
||||
Example: txn.client=192.168.1.1
|
||||
- HTTP Methods: quoted string
|
||||
Example: req.method="GET"
|
||||
|
||||
Arguments:
|
||||
- <scope> (optional): sess, txn, req, res, or proc. If omitted, all these
|
||||
scopes are visited in the same order as presented here.
|
||||
|
||||
- <prefix> (optional): filters variables whose names start with the
|
||||
specified prefix (after removing the scope prefix).
|
||||
Performance note: When using prefix filtering, all variables in the scope
|
||||
are still visited. This should not be used with configurations involving
|
||||
thousands of variables.
|
||||
|
||||
- <delimiter> (optional): string to separate variables. Defaults to ", "
|
||||
(comma-space). Can be customized to any string. As a reminder, in order
|
||||
to pass commas or spaces in a function argument, they need to be enclosed
|
||||
in simple or double quotes (if the expression itself is already within
|
||||
quotes, use the other ones).
|
||||
|
||||
Return value:
|
||||
- On success: string containing all matching variables
|
||||
- On failure: empty (sample fetch fails) if output buffer is too small.
|
||||
The function will not truncate output; it fails completely to avoid
|
||||
partial data.
|
||||
|
||||
This is particularly useful for debugging, logging, or exporting variable
|
||||
states.
|
||||
|
||||
Examples:
|
||||
# Dump all transaction variables
|
||||
http-request return string %[dump_all_vars(txn)]
|
||||
|
||||
# Dump only variables starting with "user"
|
||||
http-request set-header X-User-Vars "%[dump_all_vars(txn,user)]"
|
||||
|
||||
# Dump all process variables
|
||||
http-request return string %[dump_all_vars(proc)]
|
||||
|
||||
# Custom delimiter (semicolon)
|
||||
http-request set-header X-Vars "%[dump_all_vars(txn,,; )]"
|
||||
|
||||
# Force the default delimiter (comma space)
|
||||
http-request set-header X-Vars "%[dump_all_vars(txn,,', ')]"
|
||||
|
||||
# Prefix filter with custom delimiter
|
||||
http-request set-header X-Session "%[dump_all_vars(sess,user,|)]"
|
||||
|
||||
|
||||
wait_end : boolean
|
||||
This fetch either returns true when the inspection period is over, or does
|
||||
not fetch. It is only used in ACLs, in conjunction with content analysis to
|
||||
|
|
@ -31501,8 +31780,9 @@ ocsp-update [ off | on ]
|
|||
failure" or "Error during insertion" errors.
|
||||
|
||||
jwt [ off | on ]
|
||||
Allow for this certificate to be used for JWT validation via the
|
||||
"jwt_verify_cert" converter when set to 'on'. Its value default to 'off'.
|
||||
Allow for this certificate to be used for JWT validation or decryption via
|
||||
the "jwt_verify_cert", "jwt_decrypt_cert" or "jwt_decrypt" converters when
|
||||
set to 'on'. Its value defaults to 'off'.
|
||||
|
||||
When set to 'on' for a given certificate, the CLI command "del ssl cert" will
|
||||
not work. In order to be deleted, a certificate must not be used, either for
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@
|
|||
|
||||
The buffer list API allows one to share a certain amount of buffers between
|
||||
multiple entities, which will each see their own as lists of buffers, while
|
||||
keeping a sharedd free list. The immediate use case is for muxes, which may
|
||||
keeping a shared free list. The immediate use case is for muxes, which may
|
||||
want to allocate up to a certain number of buffers per connection, shared
|
||||
among all streams. In this case, each stream will first request a new list
|
||||
for its own use, then may request extra entries from the free list. At any
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ default init, this was controversial but fedora and archlinux already uses it.
|
|||
At this time HAProxy still had a multi-process model, and the way haproxy is
|
||||
working was incompatible with the daemon mode.
|
||||
|
||||
Systemd is compatible with traditionnal forking services, but somehow HAProxy
|
||||
Systemd is compatible with traditional forking services, but somehow HAProxy
|
||||
is different. To work correctly, systemd needs a main PID, this is the PID of
|
||||
the process that systemd will supervise.
|
||||
|
||||
|
|
@ -45,7 +45,7 @@ However the wrapper suffered from several problems:
|
|||
|
||||
### mworker V1
|
||||
|
||||
HAProxy 1.8 got ride of the wrapper which was replaced by the master worker
|
||||
HAProxy 1.8 got rid of the wrapper which was replaced by the master worker
|
||||
mode. This first version was basically a reintegration of the wrapper features
|
||||
within HAProxy. HAProxy is launched with the -W flag, read the configuration and
|
||||
then fork. In mworker mode, the master is usually launched as a root process,
|
||||
|
|
@ -86,7 +86,7 @@ retrieved automatically.
|
|||
The master is supervising the workers, when a current worker (not a previous one
|
||||
from before the reload) is exiting without being asked for a reload, the master
|
||||
will emit an "exit-on-failure" error and will kill every worker with a SIGTERM
|
||||
and exits with the same error code than the failed master, this behavior can be
|
||||
and exits with the same error code than the failed worker, this behavior can be
|
||||
changed by using the "no exit-on-failure" option in the global section.
|
||||
|
||||
While the master is supervising the workers using the wait() function, the
|
||||
|
|
@ -186,8 +186,8 @@ number that can be found in HAPROXY_PROCESSES. With this change the stats socket
|
|||
in the configuration is less useful and everything can be done from the master
|
||||
CLI.
|
||||
|
||||
With 2.7, the reload mecanism of the master CLI evolved, with previous versions,
|
||||
this mecanism was asynchronous, so once the `reload` command was received, the
|
||||
With 2.7, the reload mechanism of the master CLI evolved, with previous versions,
|
||||
this mechanism was asynchronous, so once the `reload` command was received, the
|
||||
master would reload, the active master CLI connection was closed, and there was
|
||||
no way to return a status as a response to the `reload` command. To achieve a
|
||||
synchronous reload, a dedicated sockpair is used, one side uses a master CLI
|
||||
|
|
@ -208,3 +208,38 @@ starts with -st to achieve a hard stop on the previous worker.
|
|||
Version 3.0 got rid of the libsystemd dependencies for sd_notify() after the
|
||||
events of xz/openssh, the function is now implemented directly in haproxy in
|
||||
src/systemd.c.
|
||||
|
||||
### mworker V3
|
||||
|
||||
This version was implemented with HAProxy 3.1, the goal was to stop parsing and
|
||||
applying the configuration in the master process.
|
||||
|
||||
One of the caveats of the previous implementation was that the parser could take
|
||||
a lot of time, and the master process would be stuck in the parser instead of
|
||||
handling its polling loop, signals etc. Some parts of the configuration parsing
|
||||
could also be less reliable with third-party code (EXTRA_OBJS), it could, for
|
||||
example, allow opening FDs and not closing them before the reload which
|
||||
would crash the master after a few reloads.
|
||||
|
||||
The startup of the master-worker was reorganized this way:
|
||||
|
||||
- the "discovery" mode, which is a lighter configuration parsing step, only
|
||||
applies the configuration which needs to be effective for the master process.
|
||||
For example, "master-worker", "mworker-max-reloads" and less than 20 other
|
||||
keywords that are identified by KWF_DISCOVERY in the code. It is really fast
|
||||
as it doesn't need all the configuration to be applied in the master process.
|
||||
|
||||
- the master will then fork a worker, with a PROC_O_INIT flag. This worker has
|
||||
a temporary sockpair connected to the master CLI. Once the worker is forked,
|
||||
the master initializes its configuration and starts its polling loop.
|
||||
|
||||
- The newly forked worker will try to parse the configuration, which could
|
||||
result in a failure (exit 1), or any bad error code. In case of success, the
|
||||
worker will send a "READY" message to the master CLI then close this FD. At
|
||||
this step everything was initialized and the worker can enter its polling
|
||||
loop.
|
||||
|
||||
- The master then waits for the worker, it could:
|
||||
* receive the READY message over the mCLI, resulting in a successful loading
|
||||
of haproxy
|
||||
* receive a SIGCHLD, meaning the worker exited and couldn't load
|
||||
|
|
|
|||
|
|
@ -1693,7 +1693,7 @@ A small team of trusted developers will receive it and will be able to propose
|
|||
a fix. We usually don't use embargoes and once a fix is available it gets
|
||||
merged. In some rare circumstances it can happen that a release is coordinated
|
||||
with software vendors. Please note that this process usually messes up with
|
||||
eveyone's work, and that rushed up releases can sometimes introduce new bugs,
|
||||
everyone's work, and that rushed up releases can sometimes introduce new bugs,
|
||||
so it's best avoided unless strictly necessary; as such, there is often little
|
||||
consideration for reports that needlessly cause such extra burden, and the best
|
||||
way to see your work credited usually is to provide a working fix, which will
|
||||
|
|
|
|||
|
|
@ -1725,6 +1725,30 @@ add acl [@<ver>] <acl> <pattern>
|
|||
This command cannot be used if the reference <acl> is a name also used with
|
||||
a map. In this case, the "add map" command must be used instead.
|
||||
|
||||
add backend <name> from <defproxy> [mode <mode>] [guid <guid>] [ EXPERIMENTAL ]
|
||||
Instantiate a new backend proxy with the name <name>.
|
||||
|
||||
Only TCP or HTTP proxies can be created. All of the settings are inherited
|
||||
from <defproxy> default proxy instance. By default, it is mandatory to
|
||||
specify the backend mode via the argument of the same name, unless <defproxy>
|
||||
already defines it explicitely. It is also possible to use an optional GUID
|
||||
argument if wanted.
|
||||
|
||||
Servers can be added via the command "add server". The backend is initialized
|
||||
in the unpublished state. Once considered ready for traffic, use "publish
|
||||
backend" to expose the newly created instance.
|
||||
|
||||
All named default proxies can be used, given that they validate the same
|
||||
inheritance rules applied during configuration parsing. There is some
|
||||
exceptions though, for example when the mode is neither TCP nor HTTP. Another
|
||||
exception is that it is not yet possible to use a default proxies which
|
||||
reference custom HTTP errors, for example via the errorfiles or http-rules
|
||||
keywords.
|
||||
|
||||
This command is restricted and can only be issued on sockets configured for
|
||||
level "admin". Moreover, this feature is still considered in development so it
|
||||
also requires experimental mode (see "experimental-mode on").
|
||||
|
||||
add map [@<ver>] <map> <key> <value>
|
||||
add map [@<ver>] <map> <payload>
|
||||
Add an entry into the map <map> to associate the value <value> to the key
|
||||
|
|
@ -2474,6 +2498,11 @@ prompt [help | n | i | p | timed]*
|
|||
advanced scripts, and the non-interactive mode (default) to basic scripts.
|
||||
Note that the non-interactive mode is not available for the master socket.
|
||||
|
||||
publish backend <backend>
|
||||
Activates content switching to a backend instance. This is the reverse
|
||||
operation of "unpublish backend" command. This command is restricted and can
|
||||
only be issued on sockets configured for levels "operator" or "admin".
|
||||
|
||||
quit
|
||||
Close the connection when in interactive mode.
|
||||
|
||||
|
|
@ -2529,7 +2558,8 @@ set maxconn global <maxconn>
|
|||
delayed until the threshold is reached. A value of zero restores the initial
|
||||
setting.
|
||||
|
||||
set profiling { tasks | memory } { auto | on | off }
|
||||
set profiling memory { on | off }
|
||||
set profiling tasks { auto | on | off | lock | no-lock | memory | no-memory }
|
||||
Enables or disables CPU or memory profiling for the indicated subsystem. This
|
||||
is equivalent to setting or clearing the "profiling" settings in the "global"
|
||||
section of the configuration file. Please also see "show profiling". Note
|
||||
|
|
@ -2539,6 +2569,13 @@ set profiling { tasks | memory } { auto | on | off }
|
|||
on the linux-glibc target), and requires USE_MEMORY_PROFILING to be set at
|
||||
compile time.
|
||||
|
||||
. For tasks profiling, it is possible to enable or disable the collection of
|
||||
per-task lock and memory timings at runtime, but the change is only taken
|
||||
into account next time the profiler switches from off/auto to on (either
|
||||
automatically or manually). Thus when using "no-lock" to disable per-task
|
||||
lock profiling and save CPU cycles, it is recommended to flip the task
|
||||
profiling off then on to commit the change.
|
||||
|
||||
set rate-limit connections global <value>
|
||||
Change the process-wide connection rate limit, which is set by the global
|
||||
'maxconnrate' setting. A value of zero disables the limitation. This limit
|
||||
|
|
@ -2842,6 +2879,13 @@ operator
|
|||
increased. It also drops expert and experimental mode. See also "show cli
|
||||
level".
|
||||
|
||||
unpublish backend <backend>
|
||||
Marks the backend as unqualified for future traffic selection. In effect,
|
||||
use_backend / default_backend rules which reference it are ignored and the
|
||||
next content switching rules are evaluated. Contrary to disabled backends,
|
||||
servers health checks remain active. This command is restricted and can only
|
||||
be issued on sockets configured for levels "operator" or "admin".
|
||||
|
||||
user
|
||||
Decrease the CLI level of the current CLI session to user. It can't be
|
||||
increased. It also drops expert and experimental mode. See also "show cli
|
||||
|
|
@ -3615,7 +3659,7 @@ show stat [domain <resolvers|proxy>] [{<iid>|<proxy>} <type> <sid>] \
|
|||
format" described in the section above. In short, the second column (after the
|
||||
first ':') indicates the origin, nature, scope and persistence state of the
|
||||
variable. The third column indicates the field type, among "s32", "s64",
|
||||
"u32", "u64", "flt' and "str". Then the fourth column is the value itself,
|
||||
"u32", "u64", "flt" and "str". Then the fourth column is the value itself,
|
||||
which the consumer knows how to parse thanks to column 3 and how to process
|
||||
thanks to column 2.
|
||||
|
||||
|
|
|
|||
|
|
@ -30,6 +30,7 @@ Revision history
|
|||
2020/03/05 - added the unique ID TLV type (Tim Düsterhus)
|
||||
2025/09/09 - added SSL-related TLVs for key exchange group and signature
|
||||
scheme (Steven Collison)
|
||||
2026/01/15 - added SSL client certificate TLV (Simon Ser)
|
||||
|
||||
1. Background
|
||||
|
||||
|
|
@ -536,20 +537,21 @@ the information they choose to publish.
|
|||
|
||||
The following types have already been registered for the <type> field :
|
||||
|
||||
#define PP2_TYPE_ALPN 0x01
|
||||
#define PP2_TYPE_AUTHORITY 0x02
|
||||
#define PP2_TYPE_CRC32C 0x03
|
||||
#define PP2_TYPE_NOOP 0x04
|
||||
#define PP2_TYPE_UNIQUE_ID 0x05
|
||||
#define PP2_TYPE_SSL 0x20
|
||||
#define PP2_SUBTYPE_SSL_VERSION 0x21
|
||||
#define PP2_SUBTYPE_SSL_CN 0x22
|
||||
#define PP2_SUBTYPE_SSL_CIPHER 0x23
|
||||
#define PP2_SUBTYPE_SSL_SIG_ALG 0x24
|
||||
#define PP2_SUBTYPE_SSL_KEY_ALG 0x25
|
||||
#define PP2_SUBTYPE_SSL_GROUP 0x26
|
||||
#define PP2_SUBTYPE_SSL_SIG_SCHEME 0x27
|
||||
#define PP2_TYPE_NETNS 0x30
|
||||
#define PP2_TYPE_ALPN 0x01
|
||||
#define PP2_TYPE_AUTHORITY 0x02
|
||||
#define PP2_TYPE_CRC32C 0x03
|
||||
#define PP2_TYPE_NOOP 0x04
|
||||
#define PP2_TYPE_UNIQUE_ID 0x05
|
||||
#define PP2_TYPE_SSL 0x20
|
||||
#define PP2_SUBTYPE_SSL_VERSION 0x21
|
||||
#define PP2_SUBTYPE_SSL_CN 0x22
|
||||
#define PP2_SUBTYPE_SSL_CIPHER 0x23
|
||||
#define PP2_SUBTYPE_SSL_SIG_ALG 0x24
|
||||
#define PP2_SUBTYPE_SSL_KEY_ALG 0x25
|
||||
#define PP2_SUBTYPE_SSL_GROUP 0x26
|
||||
#define PP2_SUBTYPE_SSL_SIG_SCHEME 0x27
|
||||
#define PP2_SUBTYPE_SSL_CLIENT_CERT 0x28
|
||||
#define PP2_TYPE_NETNS 0x30
|
||||
|
||||
|
||||
2.2.1 PP2_TYPE_ALPN
|
||||
|
|
@ -625,7 +627,10 @@ For the type PP2_TYPE_SSL, the value is itself a defined like this :
|
|||
uint8_t client;
|
||||
uint32_t verify;
|
||||
struct pp2_tlv sub_tlv[0];
|
||||
};
|
||||
} __attribute__((packed));
|
||||
|
||||
Note the "packed" attribute which indicates that each field starts immediately
|
||||
after the previous one (i.e. without type-specific alignment nor padding).
|
||||
|
||||
The <verify> field will be zero if the client presented a certificate
|
||||
and it was successfully verified, and non-zero otherwise.
|
||||
|
|
@ -672,6 +677,10 @@ The second level TLV PP2_SUBTYPE_SSL_SIG_SCHEME provides the US-ASCII string
|
|||
name of the algorithm the frontend used to sign the ServerKeyExchange or
|
||||
CertificateVerify message, for example "rsa_pss_rsae_sha256".
|
||||
|
||||
The optional second level TLV PP2_SUBTYPE_SSL_CLIENT_CERT provides the raw
|
||||
X.509 client certificate encoded in ASN.1 DER. The frontend may choose to omit
|
||||
this TLV depending on configuration.
|
||||
|
||||
In all cases, the string representation (in UTF8) of the Common Name field
|
||||
(OID: 2.5.4.3) of the client certificate's Distinguished Name, is appended
|
||||
using the TLV format and the type PP2_SUBTYPE_SSL_CN. E.g. "example.com".
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ vtest installation
|
|||
------------------------
|
||||
|
||||
To use vtest you will have to download and compile the recent vtest
|
||||
sources found at https://github.com/vtest/VTest.
|
||||
sources found at https://github.com/vtest/VTest2.
|
||||
|
||||
To compile vtest:
|
||||
|
||||
|
|
|
|||
|
|
@ -33,6 +33,8 @@
|
|||
#define HA_PROF_TASKS_MASK 0x00000003 /* per-task CPU profiling mask */
|
||||
|
||||
#define HA_PROF_MEMORY 0x00000004 /* memory profiling */
|
||||
#define HA_PROF_TASKS_MEM 0x00000008 /* per-task CPU profiling with memory */
|
||||
#define HA_PROF_TASKS_LOCK 0x00000010 /* per-task CPU profiling with locks */
|
||||
|
||||
|
||||
#ifdef USE_MEMORY_PROFILING
|
||||
|
|
|
|||
|
|
@ -192,6 +192,7 @@ struct lbprm {
|
|||
void (*server_requeue)(struct server *); /* function used to place the server where it must be */
|
||||
void (*proxy_deinit)(struct proxy *); /* to be called when we're destroying the proxy */
|
||||
void (*server_deinit)(struct server *); /* to be called when we're destroying the server */
|
||||
int (*server_init)(struct server *); /* initialize a freshly added server (runtime); <0=fail. */
|
||||
};
|
||||
|
||||
#endif /* _HAPROXY_BACKEND_T_H */
|
||||
|
|
|
|||
|
|
@ -69,6 +69,7 @@ int backend_parse_balance(const char **args, char **err, struct proxy *curproxy)
|
|||
int tcp_persist_rdp_cookie(struct stream *s, struct channel *req, int an_bit);
|
||||
|
||||
int be_downtime(struct proxy *px);
|
||||
int be_supports_dynamic_srv(struct proxy *px, char **msg);
|
||||
void recount_servers(struct proxy *px);
|
||||
void update_backend_weight(struct proxy *px);
|
||||
|
||||
|
|
@ -85,10 +86,20 @@ static inline int be_usable_srv(struct proxy *be)
|
|||
return be->srv_bck;
|
||||
}
|
||||
|
||||
/* Returns true if <be> backend can be used as target to a switching rules. */
|
||||
static inline int be_is_eligible(const struct proxy *be)
|
||||
{
|
||||
/* A disabled or unpublished backend cannot be selected for traffic.
|
||||
* Note that STOPPED state is ignored as there is a risk of breaking
|
||||
* requests during soft-stop.
|
||||
*/
|
||||
return !(be->flags & (PR_FL_DISABLED|PR_FL_BE_UNPUBLISHED));
|
||||
}
|
||||
|
||||
/* set the time of last session on the backend */
|
||||
static inline void be_set_sess_last(struct proxy *be)
|
||||
{
|
||||
if (be->be_counters.shared.tg[tgid - 1])
|
||||
if (be->be_counters.shared.tg)
|
||||
HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -111,6 +111,7 @@ extern char *cursection;
|
|||
extern int non_global_section_parsed;
|
||||
|
||||
extern struct proxy *curproxy;
|
||||
extern struct proxy *last_defproxy;
|
||||
extern char initial_cwd[PATH_MAX];
|
||||
|
||||
int cfg_parse_global(const char *file, int linenum, char **args, int inv);
|
||||
|
|
@ -140,7 +141,7 @@ int warnif_misplaced_tcp_req_sess(struct proxy *proxy, const char *file, int lin
|
|||
int warnif_misplaced_tcp_req_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
|
||||
int warnif_misplaced_tcp_res_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
|
||||
int warnif_misplaced_quic_init(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
|
||||
int warnif_cond_conflicts(const struct acl_cond *cond, unsigned int where, const char *file, int line);
|
||||
int warnif_cond_conflicts(const struct acl_cond *cond, unsigned int where, char **err);
|
||||
int warnif_tcp_http_cond(const struct proxy *px, const struct acl_cond *cond);
|
||||
int too_many_args_idx(int maxarg, int index, char **args, char **msg, int *err_code);
|
||||
int too_many_args(int maxarg, char **args, char **msg, int *err_code);
|
||||
|
|
|
|||
|
|
@ -146,7 +146,6 @@ enum {
|
|||
|
||||
CO_FL_WANT_SPLICING = 0x00001000, /* we wish to use splicing on the connection when possible */
|
||||
CO_FL_SSL_NO_CACHED_INFO = 0x00002000, /* Don't use any cached information when creating a new SSL connection */
|
||||
/* unused: 0x00002000 */
|
||||
|
||||
CO_FL_EARLY_SSL_HS = 0x00004000, /* We have early data pending, don't start SSL handshake yet */
|
||||
CO_FL_EARLY_DATA = 0x00008000, /* At least some of the data are early data */
|
||||
|
|
|
|||
|
|
@ -66,7 +66,7 @@ struct counters_shared {
|
|||
COUNTERS_SHARED;
|
||||
struct {
|
||||
COUNTERS_SHARED_TG;
|
||||
} *tg[MAX_TGROUPS];
|
||||
} **tg;
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
@ -101,7 +101,7 @@ struct fe_counters_shared_tg {
|
|||
|
||||
struct fe_counters_shared {
|
||||
COUNTERS_SHARED;
|
||||
struct fe_counters_shared_tg *tg[MAX_TGROUPS];
|
||||
struct fe_counters_shared_tg **tg;
|
||||
};
|
||||
|
||||
/* counters used by listeners and frontends */
|
||||
|
|
@ -160,7 +160,7 @@ struct be_counters_shared_tg {
|
|||
|
||||
struct be_counters_shared {
|
||||
COUNTERS_SHARED;
|
||||
struct be_counters_shared_tg *tg[MAX_TGROUPS];
|
||||
struct be_counters_shared_tg **tg;
|
||||
};
|
||||
|
||||
/* counters used by servers and backends */
|
||||
|
|
|
|||
|
|
@ -43,11 +43,13 @@ void counters_be_shared_drop(struct be_counters_shared *counters);
|
|||
*/
|
||||
#define COUNTERS_SHARED_LAST_OFFSET(scounters, type, offset) \
|
||||
({ \
|
||||
unsigned long last = HA_ATOMIC_LOAD((type *)((char *)scounters[0] + offset));\
|
||||
unsigned long last = 0; \
|
||||
unsigned long now_seconds = ns_to_sec(now_ns); \
|
||||
int it; \
|
||||
\
|
||||
for (it = 1; (it < global.nbtgroups && scounters[it]); it++) { \
|
||||
if (scounters) \
|
||||
last = HA_ATOMIC_LOAD((type *)((char *)scounters[0] + offset));\
|
||||
for (it = 1; (it < global.nbtgroups && scounters); it++) { \
|
||||
unsigned long cur = HA_ATOMIC_LOAD((type *)((char *)scounters[it] + offset));\
|
||||
if ((now_seconds - cur) < (now_seconds - last)) \
|
||||
last = cur; \
|
||||
|
|
@ -74,7 +76,7 @@ void counters_be_shared_drop(struct be_counters_shared *counters);
|
|||
uint64_t __ret = 0; \
|
||||
int it; \
|
||||
\
|
||||
for (it = 0; (it < global.nbtgroups && scounters[it]); it++) \
|
||||
for (it = 0; (it < global.nbtgroups && scounters); it++) \
|
||||
__ret += rfunc((type *)((char *)scounters[it] + offset)); \
|
||||
__ret; \
|
||||
})
|
||||
|
|
@ -94,7 +96,7 @@ void counters_be_shared_drop(struct be_counters_shared *counters);
|
|||
uint64_t __ret = 0; \
|
||||
int it; \
|
||||
\
|
||||
for (it = 0; (it < global.nbtgroups && scounters[it]); it++) \
|
||||
for (it = 0; (it < global.nbtgroups && scounters); it++) \
|
||||
__ret += rfunc(&scounters[it]->elem, arg1, arg2); \
|
||||
__ret; \
|
||||
})
|
||||
|
|
|
|||
|
|
@ -67,7 +67,7 @@
|
|||
#define GTUNE_USE_SYSTEMD (1<<10)
|
||||
|
||||
#define GTUNE_BUSY_POLLING (1<<11)
|
||||
/* (1<<12) unused */
|
||||
#define GTUNE_PURGE_DEFAULTS (1<<12)
|
||||
#define GTUNE_SET_DUMPABLE (1<<13)
|
||||
#define GTUNE_USE_EVPORTS (1<<14)
|
||||
#define GTUNE_STRICT_LIMITS (1<<15)
|
||||
|
|
|
|||
|
|
@ -263,6 +263,8 @@ static inline int h1_parse_chunk_size(const struct buffer *buf, int start, int s
|
|||
const char *ptr_old = ptr;
|
||||
const char *end = b_wrap(buf);
|
||||
uint64_t chunk = 0;
|
||||
int backslash = 0;
|
||||
int quote = 0;
|
||||
|
||||
stop -= start; // bytes left
|
||||
start = stop; // bytes to transfer
|
||||
|
|
@ -327,13 +329,37 @@ static inline int h1_parse_chunk_size(const struct buffer *buf, int start, int s
|
|||
if (--stop == 0)
|
||||
return 0;
|
||||
|
||||
while (!HTTP_IS_CRLF(*ptr)) {
|
||||
/* The loop seeks the first CRLF or non-tab CTL char
|
||||
* and stops there. If a backslash/quote is active,
|
||||
* it's an error. If none, we assume it's the CRLF
|
||||
* and go back to the top of the loop checking for
|
||||
* CR then LF. This way CTLs, lone LF etc are handled
|
||||
* in the fallback path. This allows to protect
|
||||
* remotes against their own possibly non-compliant
|
||||
* chunk-ext parser which could mistakenly skip a
|
||||
* quoted CRLF. Chunk-ext are not used anyway, except
|
||||
* by attacks.
|
||||
*/
|
||||
while (!HTTP_IS_CTL(*ptr) || HTTP_IS_SPHT(*ptr)) {
|
||||
if (backslash)
|
||||
backslash = 0; // escaped char
|
||||
else if (*ptr == '\\' && quote)
|
||||
backslash = 1;
|
||||
else if (*ptr == '\\') // backslash not permitted outside quotes
|
||||
goto error;
|
||||
else if (*ptr == '"') // begin/end of quoted-pair
|
||||
quote = !quote;
|
||||
if (++ptr >= end)
|
||||
ptr = b_orig(buf);
|
||||
if (--stop == 0)
|
||||
return 0;
|
||||
}
|
||||
/* we have a CRLF now, loop above */
|
||||
|
||||
/* mismatched quotes / backslashes end here */
|
||||
if (quote || backslash)
|
||||
goto error;
|
||||
|
||||
/* CTLs (CRLF) fall to the common check */
|
||||
continue;
|
||||
}
|
||||
else
|
||||
|
|
|
|||
|
|
@ -184,6 +184,7 @@ enum {
|
|||
PERSIST_TYPE_NONE = 0, /* no persistence */
|
||||
PERSIST_TYPE_FORCE, /* force-persist */
|
||||
PERSIST_TYPE_IGNORE, /* ignore-persist */
|
||||
PERSIST_TYPE_BE_SWITCH, /* force-be-switch */
|
||||
};
|
||||
|
||||
/* final results for http-request rules */
|
||||
|
|
|
|||
|
|
@ -147,14 +147,14 @@ __attribute__((constructor)) static void __initcb_##linenum() \
|
|||
#define _DECLARE_INITCALL(...) \
|
||||
__DECLARE_INITCALL(__VA_ARGS__)
|
||||
|
||||
/* This requires that function <function> is called with pointer argument
|
||||
* <argument> during init stage <stage> which must be one of init_stage.
|
||||
/* This requires that function <function> is called without arguments
|
||||
* during init stage <stage> which must be one of init_stage.
|
||||
*/
|
||||
#define INITCALL0(stage, function) \
|
||||
_DECLARE_INITCALL(stage, __LINE__, function, 0, 0, 0)
|
||||
|
||||
/* This requires that function <function> is called with pointer argument
|
||||
* <argument> during init stage <stage> which must be one of init_stage.
|
||||
* <arg1> during init stage <stage> which must be one of init_stage.
|
||||
*/
|
||||
#define INITCALL1(stage, function, arg1) \
|
||||
_DECLARE_INITCALL(stage, __LINE__, function, arg1, 0, 0)
|
||||
|
|
|
|||
|
|
@ -380,6 +380,14 @@ static inline unsigned long ERR_peek_error_func(const char **func)
|
|||
|
||||
#endif
|
||||
|
||||
#if (HA_OPENSSL_VERSION_NUMBER >= 0x40000000L) && !defined(OPENSSL_IS_AWSLC) && !defined(LIBRESSL_VERSION_NUMBER) && !defined(USE_OPENSSL_WOLFSSL)
|
||||
# define X509_STORE_getX_objects(x) X509_STORE_get1_objects(x)
|
||||
# define sk_X509_OBJECT_popX_free(x, y) sk_X509_OBJECT_pop_free(x,y)
|
||||
#else
|
||||
# define X509_STORE_getX_objects(x) X509_STORE_get0_objects(x)
|
||||
# define sk_X509_OBJECT_popX_free(x, y) ({})
|
||||
#endif
|
||||
|
||||
#if (HA_OPENSSL_VERSION_NUMBER >= 0x1010000fL) || (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER >= 0x2070200fL)
|
||||
#define __OPENSSL_110_CONST__ const
|
||||
#else
|
||||
|
|
|
|||
|
|
@ -72,8 +72,8 @@ struct pool_registration {
|
|||
struct list list; /* link element */
|
||||
const char *name; /* name of the pool */
|
||||
const char *file; /* where the pool is declared */
|
||||
ullong size; /* expected object size */
|
||||
unsigned int line; /* line in the file where the pool is declared, 0 if none */
|
||||
unsigned int size; /* expected object size */
|
||||
unsigned int flags; /* MEM_F_* */
|
||||
unsigned int type_align; /* type-imposed alignment; 0=unspecified */
|
||||
unsigned int align; /* expected alignment; 0=unspecified */
|
||||
|
|
|
|||
|
|
@ -183,7 +183,7 @@ unsigned long long pool_total_allocated(void);
|
|||
unsigned long long pool_total_used(void);
|
||||
void pool_flush(struct pool_head *pool);
|
||||
void pool_gc(struct pool_head *pool_ctx);
|
||||
struct pool_head *create_pool_with_loc(const char *name, unsigned int size, unsigned int align,
|
||||
struct pool_head *create_pool_with_loc(const char *name, ullong size, unsigned int align,
|
||||
unsigned int flags, const char *file, unsigned int line);
|
||||
struct pool_head *create_pool_from_reg(const char *name, struct pool_registration *reg);
|
||||
void create_pool_callback(struct pool_head **ptr, char *name, struct pool_registration *reg);
|
||||
|
|
|
|||
|
|
@ -242,11 +242,12 @@ enum PR_SRV_STATE_FILE {
|
|||
/* Proxy flags */
|
||||
#define PR_FL_DISABLED 0x01 /* The proxy was disabled in the configuration (not at runtime) */
|
||||
#define PR_FL_STOPPED 0x02 /* The proxy was stopped */
|
||||
#define PR_FL_READY 0x04 /* The proxy is ready to be used (initialized and configured) */
|
||||
#define PR_FL_DEF_EXPLICIT_MODE 0x04 /* Proxy mode is explicitely defined - only used for defaults instance */
|
||||
#define PR_FL_EXPLICIT_REF 0x08 /* The default proxy is explicitly referenced by another proxy */
|
||||
#define PR_FL_IMPLICIT_REF 0x10 /* The default proxy is implicitly referenced by another proxy */
|
||||
#define PR_FL_PAUSED 0x20 /* The proxy was paused at run time (reversible) */
|
||||
#define PR_FL_CHECKED 0x40 /* The proxy configuration was fully checked (including postparsing checks) */
|
||||
#define PR_FL_BE_UNPUBLISHED 0x80 /* The proxy cannot be targetted by content switching rules */
|
||||
|
||||
struct stream;
|
||||
|
||||
|
|
@ -315,6 +316,7 @@ struct proxy {
|
|||
unsigned long last_change; /* internal use only: last time the proxy state was changed */
|
||||
|
||||
struct list global_list; /* list member for global proxy list */
|
||||
struct list el; /* attach point in various list - currently used only on defaults_list for defaults section */
|
||||
|
||||
unsigned int maxconn; /* max # of active streams on the frontend */
|
||||
|
||||
|
|
@ -506,10 +508,16 @@ struct proxy {
|
|||
EXTRA_COUNTERS(extra_counters_be);
|
||||
|
||||
THREAD_ALIGN();
|
||||
unsigned int queueslength; /* Sum of the length of each queue */
|
||||
/* these ones change all the time */
|
||||
int served; /* # of active sessions currently being served */
|
||||
int totpend; /* total number of pending connections on this instance (for stats) */
|
||||
unsigned int feconn, beconn; /* # of active frontend and backends streams */
|
||||
|
||||
THREAD_ALIGN();
|
||||
/* these ones are only changed when queues are involved, but checked
|
||||
* all the time.
|
||||
*/
|
||||
unsigned int queueslength; /* Sum of the length of each queue */
|
||||
int totpend; /* total number of pending connections on this instance (for stats) */
|
||||
};
|
||||
|
||||
struct switching_rule {
|
||||
|
|
|
|||
|
|
@ -39,6 +39,9 @@ extern struct list proxies;
|
|||
extern struct ceb_root *used_proxy_id; /* list of proxy IDs in use */
|
||||
extern unsigned int error_snapshot_id; /* global ID assigned to each error then incremented */
|
||||
extern struct ceb_root *proxy_by_name; /* tree of proxies sorted by name */
|
||||
extern struct list defaults_list; /* all defaults proxies list */
|
||||
|
||||
extern unsigned int dynpx_next_id;
|
||||
|
||||
extern const struct cfg_opt cfg_opts[];
|
||||
extern const struct cfg_opt cfg_opts2[];
|
||||
|
|
@ -58,6 +61,7 @@ void deinit_proxy(struct proxy *p);
|
|||
void free_proxy(struct proxy *p);
|
||||
const char *proxy_cap_str(int cap);
|
||||
const char *proxy_mode_str(int mode);
|
||||
enum pr_mode str_to_proxy_mode(const char *mode);
|
||||
const char *proxy_find_best_option(const char *word, const char **extra);
|
||||
uint proxy_get_next_id(uint from);
|
||||
void proxy_store_name(struct proxy *px);
|
||||
|
|
@ -67,13 +71,14 @@ struct proxy *proxy_find_best_match(int cap, const char *name, int id, int *diff
|
|||
int proxy_cfg_ensure_no_http(struct proxy *curproxy);
|
||||
int proxy_cfg_ensure_no_log(struct proxy *curproxy);
|
||||
void init_new_proxy(struct proxy *p);
|
||||
void proxy_preset_defaults(struct proxy *defproxy);
|
||||
void proxy_free_defaults(struct proxy *defproxy);
|
||||
void proxy_destroy_defaults(struct proxy *px);
|
||||
void proxy_destroy_all_unref_defaults(void);
|
||||
void proxy_ref_defaults(struct proxy *px, struct proxy *defpx);
|
||||
|
||||
void defaults_px_destroy(struct proxy *px);
|
||||
void defaults_px_destroy_all_unref(void);
|
||||
void defaults_px_detach(struct proxy *px);
|
||||
void defaults_px_ref_all(void);
|
||||
void defaults_px_unref_all(void);
|
||||
int proxy_ref_defaults(struct proxy *px, struct proxy *defpx, char **errmsg);
|
||||
void proxy_unref_defaults(struct proxy *px);
|
||||
void proxy_unref_or_destroy_defaults(struct proxy *px);
|
||||
int setup_new_proxy(struct proxy *px, const char *name, unsigned int cap, char **errmsg);
|
||||
struct proxy *alloc_new_proxy(const char *name, unsigned int cap,
|
||||
char **errmsg);
|
||||
|
|
@ -94,6 +99,7 @@ int resolve_stick_rule(struct proxy *curproxy, struct sticking_rule *mrule);
|
|||
void free_stick_rules(struct list *rules);
|
||||
void free_server_rules(struct list *srules);
|
||||
int proxy_init_per_thr(struct proxy *px);
|
||||
int proxy_finalize(struct proxy *px, int *err_code);
|
||||
|
||||
/*
|
||||
* This function returns a string containing the type of the proxy in a format
|
||||
|
|
@ -166,12 +172,12 @@ static inline int proxy_abrt_close(const struct proxy *px)
|
|||
/* increase the number of cumulated connections received on the designated frontend */
|
||||
static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
|
||||
{
|
||||
if (fe->fe_counters.shared.tg[tgid - 1])
|
||||
if (fe->fe_counters.shared.tg) {
|
||||
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_conn);
|
||||
if (l && l->counters && l->counters->shared.tg[tgid - 1])
|
||||
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_conn);
|
||||
if (fe->fe_counters.shared.tg[tgid - 1])
|
||||
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->conn_per_sec, 1);
|
||||
}
|
||||
if (l && l->counters && l->counters->shared.tg)
|
||||
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_conn);
|
||||
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.cps_max,
|
||||
update_freq_ctr(&fe->fe_counters._conn_per_sec, 1));
|
||||
}
|
||||
|
|
@ -179,12 +185,12 @@ static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
|
|||
/* increase the number of cumulated connections accepted by the designated frontend */
|
||||
static inline void proxy_inc_fe_sess_ctr(struct listener *l, struct proxy *fe)
|
||||
{
|
||||
if (fe->fe_counters.shared.tg[tgid - 1])
|
||||
if (fe->fe_counters.shared.tg) {
|
||||
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess);
|
||||
if (l && l->counters && l->counters->shared.tg[tgid - 1])
|
||||
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess);
|
||||
if (fe->fe_counters.shared.tg[tgid - 1])
|
||||
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
|
||||
}
|
||||
if (l && l->counters && l->counters->shared.tg)
|
||||
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess);
|
||||
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.sps_max,
|
||||
update_freq_ctr(&fe->fe_counters._sess_per_sec, 1));
|
||||
}
|
||||
|
|
@ -199,19 +205,19 @@ static inline void proxy_inc_fe_cum_sess_ver_ctr(struct listener *l, struct prox
|
|||
http_ver > sizeof(fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver) / sizeof(*fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver))
|
||||
return;
|
||||
|
||||
if (fe->fe_counters.shared.tg[tgid - 1])
|
||||
if (fe->fe_counters.shared.tg)
|
||||
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
|
||||
if (l && l->counters && l->counters->shared.tg[tgid - 1])
|
||||
if (l && l->counters && l->counters->shared.tg && l->counters->shared.tg[tgid - 1])
|
||||
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
|
||||
}
|
||||
|
||||
/* increase the number of cumulated streams on the designated backend */
|
||||
static inline void proxy_inc_be_ctr(struct proxy *be)
|
||||
{
|
||||
if (be->be_counters.shared.tg[tgid - 1])
|
||||
if (be->be_counters.shared.tg) {
|
||||
_HA_ATOMIC_INC(&be->be_counters.shared.tg[tgid - 1]->cum_sess);
|
||||
if (be->be_counters.shared.tg[tgid - 1])
|
||||
update_freq_ctr(&be->be_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
|
||||
}
|
||||
HA_ATOMIC_UPDATE_MAX(&be->be_counters.sps_max,
|
||||
update_freq_ctr(&be->be_counters._sess_per_sec, 1));
|
||||
}
|
||||
|
|
@ -226,12 +232,12 @@ static inline void proxy_inc_fe_req_ctr(struct listener *l, struct proxy *fe,
|
|||
if (http_ver >= sizeof(fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req) / sizeof(*fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req))
|
||||
return;
|
||||
|
||||
if (fe->fe_counters.shared.tg[tgid - 1])
|
||||
if (fe->fe_counters.shared.tg) {
|
||||
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
|
||||
if (l && l->counters && l->counters->shared.tg[tgid - 1])
|
||||
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
|
||||
if (fe->fe_counters.shared.tg[tgid - 1])
|
||||
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->req_per_sec, 1);
|
||||
}
|
||||
if (l && l->counters && l->counters->shared.tg)
|
||||
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
|
||||
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.p.http.rps_max,
|
||||
update_freq_ctr(&fe->fe_counters.p.http._req_per_sec, 1));
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,8 +20,7 @@
|
|||
#define QUIC_OPENSSL_COMPAT_CLIENT_APPLICATION "CLIENT_TRAFFIC_SECRET_0"
|
||||
#define QUIC_OPENSSL_COMPAT_SERVER_APPLICATION "SERVER_TRAFFIC_SECRET_0"
|
||||
|
||||
void quic_tls_compat_msg_callback(struct connection *conn,
|
||||
int write_p, int version, int content_type,
|
||||
void quic_tls_compat_msg_callback(int write_p, int version, int content_type,
|
||||
const void *buf, size_t len, SSL *ssl);
|
||||
int quic_tls_compat_init(struct bind_conf *bind_conf, SSL_CTX *ctx);
|
||||
void quic_tls_compat_keylog_callback(const SSL *ssl, const char *line);
|
||||
|
|
|
|||
|
|
@ -65,7 +65,7 @@ struct shard_info {
|
|||
uint nbgroups; /* number of groups in this shard (=#rx); Zero = unused. */
|
||||
uint nbthreads; /* number of threads in this shard (>=nbgroups) */
|
||||
struct receiver *ref; /* first one, reference for FDs to duplicate */
|
||||
struct receiver *members[MAX_TGROUPS]; /* all members of the shard (one per thread group) */
|
||||
struct receiver **members; /* all members of the shard (one per thread group) */
|
||||
};
|
||||
|
||||
/* This describes a receiver with all its characteristics (address, options, etc) */
|
||||
|
|
|
|||
|
|
@ -63,6 +63,7 @@ int smp_expr_output_type(struct sample_expr *expr);
|
|||
int c_none(struct sample *smp);
|
||||
int c_pseudo(struct sample *smp);
|
||||
int smp_dup(struct sample *smp);
|
||||
int sample_check_arg_base64(struct arg *arg, char **err);
|
||||
|
||||
/*
|
||||
* This function just apply a cast on sample. It returns 0 if the cast is not
|
||||
|
|
|
|||
|
|
@ -383,7 +383,6 @@ struct server {
|
|||
unsigned next_eweight; /* next pending eweight to commit */
|
||||
unsigned cumulative_weight; /* weight of servers prior to this one in the same group, for chash balancing */
|
||||
int maxqueue; /* maximum number of pending connections allowed */
|
||||
unsigned int queueslength; /* Sum of the length of each queue */
|
||||
int shard; /* shard (in peers protocol context only) */
|
||||
int log_bufsize; /* implicit ring bufsize (for log server only - in log backend) */
|
||||
|
||||
|
|
@ -406,6 +405,7 @@ struct server {
|
|||
unsigned int max_used_conns; /* Max number of used connections (the counter is reset at each connection purges */
|
||||
unsigned int est_need_conns; /* Estimate on the number of needed connections (max of curr and previous max_used) */
|
||||
unsigned int curr_sess_idle_conns; /* Current number of idle connections attached to a session instead of idle/safe trees. */
|
||||
unsigned int queueslength; /* Sum of the length of each queue */
|
||||
|
||||
/* elements only used during boot, do not perturb and plug the hole */
|
||||
struct guid_node guid; /* GUID global tree node */
|
||||
|
|
|
|||
|
|
@ -207,7 +207,7 @@ static inline void server_index_id(struct proxy *px, struct server *srv)
|
|||
/* increase the number of cumulated streams on the designated server */
|
||||
static inline void srv_inc_sess_ctr(struct server *s)
|
||||
{
|
||||
if (s->counters.shared.tg[tgid - 1]) {
|
||||
if (s->counters.shared.tg) {
|
||||
_HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->cum_sess);
|
||||
update_freq_ctr(&s->counters.shared.tg[tgid - 1]->sess_per_sec, 1);
|
||||
}
|
||||
|
|
@ -218,7 +218,7 @@ static inline void srv_inc_sess_ctr(struct server *s)
|
|||
/* set the time of last session on the designated server */
|
||||
static inline void srv_set_sess_last(struct server *s)
|
||||
{
|
||||
if (s->counters.shared.tg[tgid - 1])
|
||||
if (s->counters.shared.tg)
|
||||
HA_ATOMIC_STORE(&s->counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -80,6 +80,7 @@ void ssl_store_delete_cafile_entry(struct cafile_entry *ca_e);
|
|||
int ssl_store_load_ca_from_buf(struct cafile_entry *ca_e, char *cert_buf, int append);
|
||||
int ssl_store_load_locations_file(char *path, int create_if_none, enum cafile_type type);
|
||||
int __ssl_store_load_locations_file(char *path, int create_if_none, enum cafile_type type, int shuterror);
|
||||
const char *ha_default_cert_dir();
|
||||
|
||||
extern struct cert_exts cert_exts[];
|
||||
extern int (*ssl_commit_crlfile_cb)(const char *path, X509_STORE *ctx, char **err);
|
||||
|
|
|
|||
|
|
@ -194,7 +194,7 @@ struct issuer_chain {
|
|||
|
||||
struct connection;
|
||||
|
||||
typedef void (*ssl_sock_msg_callback_func)(struct connection *conn,
|
||||
typedef void (*ssl_sock_msg_callback_func)(
|
||||
int write_p, int version, int content_type,
|
||||
const void *buf, size_t len, SSL *ssl);
|
||||
|
||||
|
|
@ -338,6 +338,8 @@ struct global_ssl {
|
|||
int renegotiate; /* Renegotiate mode (SSL_RENEGOTIATE_ flag) */
|
||||
char **passphrase_cmd;
|
||||
int passphrase_cmd_args_cnt;
|
||||
|
||||
unsigned int certificate_compression:1; /* allow to explicitely disable certificate compression */
|
||||
};
|
||||
|
||||
/* The order here matters for picking a default context,
|
||||
|
|
@ -361,6 +363,7 @@ struct passphrase_cb_data {
|
|||
const char *path;
|
||||
struct ckch_data *ckch_data;
|
||||
int passphrase_idx;
|
||||
int callback_called;
|
||||
};
|
||||
|
||||
#endif /* USE_OPENSSL */
|
||||
|
|
|
|||
|
|
@ -57,6 +57,9 @@ const char *nid2nist(int nid);
|
|||
const char *sigalg2str(int sigalg);
|
||||
const char *curveid2str(int curve_id);
|
||||
|
||||
int aes_process(struct buffer *data, struct buffer *nonce, struct buffer *key, int key_size,
|
||||
struct buffer *aead_tag, struct buffer *aad, struct buffer *out, int decrypt, int gcm);
|
||||
|
||||
#endif /* _HAPROXY_SSL_UTILS_H */
|
||||
#endif /* USE_OPENSSL */
|
||||
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ enum stfile_domain {
|
|||
};
|
||||
|
||||
#define SHM_STATS_FILE_VER_MAJOR 1
|
||||
#define SHM_STATS_FILE_VER_MINOR 1
|
||||
#define SHM_STATS_FILE_VER_MINOR 2
|
||||
|
||||
#define SHM_STATS_FILE_HEARTBEAT_TIMEOUT 60 /* passed this delay (seconds) process which has not
|
||||
* sent heartbeat will be considered down
|
||||
|
|
@ -64,9 +64,9 @@ struct shm_stats_file_hdr {
|
|||
*/
|
||||
struct shm_stats_file_object {
|
||||
char guid[GUID_MAX_LEN + 1];
|
||||
uint8_t tgid; // thread group ID from 1 to 64
|
||||
uint16_t tgid; // thread group ID
|
||||
uint8_t type; // SHM_STATS_FILE_OBJECT_TYPE_* to know how to handle object.data
|
||||
ALWAYS_PAD(6); // 6 bytes hole, ensure it remains the same size 32 vs 64 bits arch
|
||||
ALWAYS_PAD(5); // 5 bytes hole, ensure it remains the same size 32 vs 64 bits arch
|
||||
uint64_t users; // bitfield that corresponds to users of the object (see shm_stats_file_hdr slots)
|
||||
/* as the struct may hold any of the types described here, let's make it
|
||||
* so it may store up to the heaviest one using an union
|
||||
|
|
|
|||
|
|
@ -60,7 +60,6 @@ extern int thread_cpus_enabled_at_boot;
|
|||
/* Only way found to replace variables with constants that are optimized away
|
||||
* at build time.
|
||||
*/
|
||||
enum { all_tgroups_mask = 1UL };
|
||||
enum { tid_bit = 1UL };
|
||||
enum { tid = 0 };
|
||||
enum { tgid = 1 };
|
||||
|
|
@ -208,7 +207,6 @@ void wait_for_threads_completion();
|
|||
void set_thread_cpu_affinity();
|
||||
unsigned long long ha_get_pthread_id(unsigned int thr);
|
||||
|
||||
extern volatile unsigned long all_tgroups_mask;
|
||||
extern volatile unsigned int rdv_requests;
|
||||
extern volatile unsigned int isolated_thread;
|
||||
extern THREAD_LOCAL unsigned int tid; /* The thread id */
|
||||
|
|
@ -364,15 +362,19 @@ static inline unsigned long thread_isolated()
|
|||
extern uint64_t now_mono_time(void); \
|
||||
if (_LK_ != _LK_UN) { \
|
||||
th_ctx->lock_level += bal; \
|
||||
if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) \
|
||||
if (unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L)) == \
|
||||
(TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L))) \
|
||||
lock_start = now_mono_time(); \
|
||||
} \
|
||||
(void)(expr); \
|
||||
if (_LK_ == _LK_UN) { \
|
||||
th_ctx->lock_level += bal; \
|
||||
if (th_ctx->lock_level == 0 && unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) \
|
||||
if (th_ctx->lock_level == 0 &&\
|
||||
unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L)) == \
|
||||
(TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L))) \
|
||||
th_ctx->locked_total += now_mono_time() - th_ctx->lock_start_date; \
|
||||
} else if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) { \
|
||||
} else if (unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L)) == \
|
||||
(TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L))) { \
|
||||
uint64_t now = now_mono_time(); \
|
||||
if (lock_start) \
|
||||
th_ctx->lock_wait_total += now - lock_start; \
|
||||
|
|
@ -386,7 +388,8 @@ static inline unsigned long thread_isolated()
|
|||
typeof(expr) _expr = (expr); \
|
||||
if (_expr == 0) { \
|
||||
th_ctx->lock_level += bal; \
|
||||
if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) { \
|
||||
if (unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L)) == \
|
||||
(TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L))) { \
|
||||
if (_LK_ == _LK_UN && th_ctx->lock_level == 0) \
|
||||
th_ctx->locked_total += now_mono_time() - th_ctx->lock_start_date; \
|
||||
else if (_LK_ != _LK_UN && th_ctx->lock_level == 1) \
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ struct thread_set {
|
|||
ulong abs[(MAX_THREADS + LONGBITS - 1) / LONGBITS];
|
||||
ulong rel[MAX_TGROUPS];
|
||||
};
|
||||
ulong grps; /* bit field of all non-empty groups, 0 for abs */
|
||||
ulong nbgrps; /* Number of thread groups, 0 for abs */
|
||||
};
|
||||
|
||||
/* tasklet classes */
|
||||
|
|
@ -69,6 +69,8 @@ enum {
|
|||
#define TH_FL_IN_DBG_HANDLER 0x00000100 /* thread currently in the debug signal handler */
|
||||
#define TH_FL_IN_WDT_HANDLER 0x00000200 /* thread currently in the wdt signal handler */
|
||||
#define TH_FL_IN_ANY_HANDLER 0x00000380 /* mask to test if the thread is in any signal handler */
|
||||
#define TH_FL_TASK_PROFILING_L 0x00000400 /* task profiling in locks (also requires TASK_PROFILING) */
|
||||
#define TH_FL_TASK_PROFILING_M 0x00000800 /* task profiling in mem alloc (also requires TASK_PROFILING) */
|
||||
|
||||
/* we have 4 buffer-wait queues, in highest to lowest emergency order */
|
||||
#define DYNBUF_NBQ 4
|
||||
|
|
|
|||
|
|
@ -77,7 +77,7 @@ static inline int thread_set_nth_group(const struct thread_set *ts, int n)
|
|||
{
|
||||
int i;
|
||||
|
||||
if (ts->grps) {
|
||||
if (ts->nbgrps) {
|
||||
for (i = 0; i < MAX_TGROUPS; i++)
|
||||
if (ts->rel[i] && !n--)
|
||||
return i + 1;
|
||||
|
|
@ -95,7 +95,7 @@ static inline ulong thread_set_nth_tmask(const struct thread_set *ts, int n)
|
|||
{
|
||||
int i;
|
||||
|
||||
if (ts->grps) {
|
||||
if (ts->nbgrps) {
|
||||
for (i = 0; i < MAX_TGROUPS; i++)
|
||||
if (ts->rel[i] && !n--)
|
||||
return ts->rel[i];
|
||||
|
|
@ -111,7 +111,7 @@ static inline void thread_set_pin_grp1(struct thread_set *ts, ulong mask)
|
|||
{
|
||||
int i;
|
||||
|
||||
ts->grps = 1;
|
||||
ts->nbgrps = 1;
|
||||
ts->rel[0] = mask;
|
||||
for (i = 1; i < MAX_TGROUPS; i++)
|
||||
ts->rel[i] = 0;
|
||||
|
|
|
|||
|
|
@ -466,6 +466,13 @@ char *escape_string(char *start, char *stop,
|
|||
const char escape, const long *map,
|
||||
const char *string, const char *string_stop);
|
||||
|
||||
/*
|
||||
* Appends a quoted and escaped string to a chunk buffer. The string is
|
||||
* enclosed in double quotes and special characters are escaped with backslash.
|
||||
* Returns 0 on success, -1 if the buffer is too small (output is rolled back).
|
||||
*/
|
||||
int chunk_escape_string(struct buffer *chunk, const char *str, size_t len);
|
||||
|
||||
/* Below are RFC8949 compliant cbor encode helper functions, see source
|
||||
* file for functions descriptions
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -57,12 +57,15 @@ struct vars {
|
|||
};
|
||||
|
||||
#define VDF_PARENT_CTX 0x00000001 // Set if the variable is related to the parent stream
|
||||
#define VDF_NAME_ALLOCATED 0x00000002 // Set if name was allocated and must be freed
|
||||
|
||||
/* This struct describes a variable as found in an arg_data */
|
||||
struct var_desc {
|
||||
uint64_t name_hash;
|
||||
enum vars_scope scope;
|
||||
uint flags; /*VDF_* */
|
||||
const char *name; /* variable name (not owned) */
|
||||
size_t name_len; /* variable name length */
|
||||
};
|
||||
|
||||
struct var {
|
||||
|
|
@ -70,6 +73,7 @@ struct var {
|
|||
uint64_t name_hash; /* XXH3() of the variable's name, indexed by <name_node> */
|
||||
uint flags; // VF_*
|
||||
/* 32-bit hole here */
|
||||
char *name; /* variable name (allocated) */
|
||||
struct sample_data data; /* data storage. */
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
varnishtest "Test the support for tcp-md5sig option (linux only)"
|
||||
|
||||
feature cmd "$HAPROXY_PROGRAM -cc 'feature(HAVE_TCP_MD5SIG)'"
|
||||
feature cmd "$HAPROXY_PROGRAM -cc 'feature(HAVE_WORKING_TCP_MD5SIG)'"
|
||||
feature ignore_unknown_macro
|
||||
|
||||
haproxy h1 -conf {
|
||||
|
|
|
|||
85
reg-tests/converter/aes_cbc.vtc
Normal file
85
reg-tests/converter/aes_cbc.vtc
Normal file
|
|
@ -0,0 +1,85 @@
|
|||
varnishtest "aes_cbc converter Test"
|
||||
feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL)'"
|
||||
feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(3.4-dev2)'"
|
||||
|
||||
feature ignore_unknown_macro
|
||||
|
||||
server s1 {
|
||||
rxreq
|
||||
txresp -hdr "Connection: close"
|
||||
} -repeat 2 -start
|
||||
|
||||
|
||||
haproxy h1 -conf {
|
||||
global
|
||||
.if feature(THREAD)
|
||||
thread-groups 1
|
||||
.endif
|
||||
|
||||
# WT: limit false-positives causing "HTTP header incomplete" due to
|
||||
# idle server connections being randomly used and randomly expiring
|
||||
# under us.
|
||||
tune.idle-pool.shared off
|
||||
|
||||
defaults
|
||||
mode http
|
||||
timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
|
||||
frontend fe
|
||||
bind "fd@${fe}"
|
||||
|
||||
http-request set-var(txn.plain) str("Hello from HAProxy AES-CBC")
|
||||
http-request set-var(txn.short_nonce) str("MTIzNDU2Nzg5MDEy")
|
||||
http-request set-var(txn.nonce) str("MTIzNDU2Nzg5MDEyMzQ1Ng==")
|
||||
http-request set-var(txn.key) str("Zm9vb2Zvb29mb29vb29vbw==")
|
||||
|
||||
# AES-CBC enc with vars + dec with strings
|
||||
http-request set-var(txn.encrypted1) var(txn.plain),aes_cbc_enc(128,txn.nonce,txn.key),base64
|
||||
http-after-response set-header X-Encrypted1 %[var(txn.encrypted1)]
|
||||
http-request set-var(txn.decrypted1) var(txn.encrypted1),b64dec,aes_cbc_dec(128,"MTIzNDU2Nzg5MDEyMzQ1Ng==","Zm9vb2Zvb29mb29vb29vbw==")
|
||||
http-after-response set-header X-Decrypted1 %[var(txn.decrypted1)]
|
||||
|
||||
# AES-CBC enc with strings + dec with vars
|
||||
http-request set-var(txn.encrypted2) var(txn.plain),aes_cbc_enc(128,"MTIzNDU2Nzg5MDEyMzQ1Ng==","Zm9vb2Zvb29mb29vb29vbw=="),base64
|
||||
http-after-response set-header X-Encrypted2 %[var(txn.encrypted2)]
|
||||
http-request set-var(txn.decrypted2) var(txn.encrypted2),b64dec,aes_cbc_dec(128,txn.nonce,txn.key)
|
||||
http-after-response set-header X-Decrypted2 %[var(txn.decrypted2)]
|
||||
|
||||
# AES-CBC + AAD enc with vars + dec with strings
|
||||
http-request set-var(txn.aad) str("dGVzdAo=")
|
||||
http-request set-var(txn.encrypted3) var(txn.plain),aes_cbc_enc(128,txn.nonce,txn.key,txn.aad),base64
|
||||
http-after-response set-header X-Encrypted3 %[var(txn.encrypted3)]
|
||||
http-request set-var(txn.decrypted3) var(txn.encrypted3),b64dec,aes_cbc_dec(128,"MTIzNDU2Nzg5MDEyMzQ1Ng==","Zm9vb2Zvb29mb29vb29vbw==","dGVzdAo=")
|
||||
http-after-response set-header X-Decrypted3 %[var(txn.decrypted3)]
|
||||
|
||||
# AES-CBC + AAD enc with strings + enc with strings
|
||||
http-request set-var(txn.encrypted4) var(txn.plain),aes_cbc_enc(128,"MTIzNDU2Nzg5MDEyMzQ1Ng==","Zm9vb2Zvb29mb29vb29vbw==","dGVzdAo="),base64
|
||||
http-after-response set-header X-Encrypted4 %[var(txn.encrypted4)]
|
||||
http-request set-var(txn.decrypted4) var(txn.encrypted4),b64dec,aes_cbc_dec(128,txn.nonce,txn.key,txn.aad)
|
||||
http-after-response set-header X-Decrypted4 %[var(txn.decrypted4)]
|
||||
|
||||
# AES-CBC enc with short nonce (var) + dec with short nonce (string)
|
||||
http-request set-var(txn.encrypted5) var(txn.plain),aes_cbc_enc(128,txn.short_nonce,txn.key),base64
|
||||
http-after-response set-header X-Encrypted5 %[var(txn.encrypted5)]
|
||||
http-request set-var(txn.decrypted5) var(txn.encrypted5),b64dec,aes_cbc_dec(128,"MTIzNDU2Nzg5MDEy","Zm9vb2Zvb29mb29vb29vbw==")
|
||||
http-after-response set-header X-Decrypted5 %[var(txn.decrypted5)]
|
||||
|
||||
default_backend be
|
||||
|
||||
backend be
|
||||
server s1 ${s1_addr}:${s1_port}
|
||||
|
||||
} -start
|
||||
|
||||
client c1 -connect ${h1_fe_sock} {
|
||||
txreq
|
||||
rxresp
|
||||
expect resp.http.x-decrypted1 == "Hello from HAProxy AES-CBC"
|
||||
expect resp.http.x-decrypted2 == "Hello from HAProxy AES-CBC"
|
||||
expect resp.http.x-decrypted3 == "Hello from HAProxy AES-CBC"
|
||||
expect resp.http.x-decrypted4 == "Hello from HAProxy AES-CBC"
|
||||
expect resp.http.x-decrypted5 == "Hello from HAProxy AES-CBC"
|
||||
|
||||
} -run
|
||||
264
reg-tests/jwt/jwt_decrypt.vtc
Normal file
264
reg-tests/jwt/jwt_decrypt.vtc
Normal file
|
|
@ -0,0 +1,264 @@
|
|||
#REGTEST_TYPE=devel
|
||||
|
||||
# This reg-test checks the behaviour of the jwt_decrypt_secret,
|
||||
# jwt_decrypt_cert and jwt_decrypt_jwk converters that decode a JSON Web
|
||||
# Encryption (JWE) token, check its signature and decrypt its content (RFC
|
||||
# 7516).
|
||||
# The tokens have two tiers of encryption, one that is used to encrypt a secret
|
||||
# ("alg" field of the JOSE header) and this secret is then used to
|
||||
# encrypt/decrypt the data contained in the token ("enc" field of the JOSE
|
||||
# header).
|
||||
# This reg-test tests a subset of alg/enc combination.
|
||||
#
|
||||
# AWS-LC does not support A128KW algorithm so for tests that use it, we will
|
||||
# have a hardcoded "AWS-LC UNMANAGED" value put in the response header instead
|
||||
# of the decrypted contents.
|
||||
|
||||
varnishtest "Test the 'jwt_decrypt_jwk' functionalities"
|
||||
feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(3.4-dev2)'"
|
||||
feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && openssl_version_atleast(1.1.1)'"
|
||||
feature ignore_unknown_macro
|
||||
|
||||
server s1 -repeat 20 {
|
||||
rxreq
|
||||
txresp
|
||||
} -start
|
||||
|
||||
haproxy h1 -conf {
|
||||
global
|
||||
.if feature(THREAD)
|
||||
thread-groups 1
|
||||
.endif
|
||||
|
||||
.if !ssllib_name_startswith(AWS-LC)
|
||||
tune.ssl.default-dh-param 2048
|
||||
.endif
|
||||
tune.ssl.capture-buffer-size 1
|
||||
stats socket "${tmpdir}/h1/stats" level admin
|
||||
|
||||
crt-base "${testdir}"
|
||||
key-base "${testdir}"
|
||||
|
||||
defaults
|
||||
mode http
|
||||
timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
|
||||
crt-store
|
||||
# Private key built out of following JWK:
|
||||
# { "kty": "RSA", "e": "AQAB", "n": "wsqJbopx18NQFYLYOq4ZeMSE89yGiEankUpf25yV8QqroKUGrASj_OeqTWUjwPGKTN1vGFFuHYxiJeAUQH2qQPmg9Oqk6-ATBEKn9COKYniQ5459UxCwmZA2RL6ufhrNyq0JF3GfXkjLDBfhU9zJJEOhknsA0L_c-X4AI3d_NbFdMqxNe1V_UWAlLcbKdwO6iC9fAvwUmDQxgy6R0DC1CMouQpenMRcALaSHar1cm4K-syoNobv3HEuqgZ3s6-hOOSqauqAO0GUozPpaIA7OeruyRl5sTWT0r-iz39bchID2bIKtcqLiFcSYPLBcxmsaQCqRlGhmv6stjTCLV1yT9w", "kid": "ff3c5c96-392e-46ef-a839-6ff16027af78", "d": "b9hXfQ8lOtw8mX1dpqPcoElGhbczz_-xq2znCXQpbBPSZBUddZvchRSH5pSSKPEHlgb3CSGIdpLqsBCv0C_XmCM9ViN8uqsYgDO9uCLIDK5plWttbkqA_EufvW03R9UgIKWmOL3W4g4t-C2mBb8aByaGGVNjLnlb6i186uBsPGkvaeLHbQcRQKAvhOUTeNiyiiCbUGJwCm4avMiZrsz1r81Y1Z5izo0ERxdZymxM3FRZ9vjTB-6DtitvTXXnaAm1JTu6TIpj38u2mnNLkGMbflOpgelMNKBZVxSmfobIbFN8CHVc1UqLK2ElsZ9RCQANgkMHlMkOMj-XT0wHa3VBUQ", "p": "8mgriveKJAp1S7SHqirQAfZafxVuAK_A2QBYPsAUhikfBOvN0HtZjgurPXSJSdgR8KbWV7ZjdJM_eOivIb_XiuAaUdIOXbLRet7t9a_NJtmX9iybhoa9VOJFMBq_rbnbbte2kq0-FnXmv3cukbC2LaEw3aEcDgyURLCgWFqt7M0", "q": "zbbTv5421GowOfKVEuVoA35CEWgl8mdasnEZac2LWxMwKExikKU5LLacLQlcOt7A6n1ZGUC2wyH8mstO5tV34Eug3fnNrbnxFUEE_ZB_njs_rtZnwz57AoUXOXVnd194seIZF9PjdzZcuwXwXbrZ2RSVW8if_ZH5OVYEM1EsA9M", "dp": "1BaIYmIKn1X3InGlcSFcNRtSOnaJdFhRpotCqkRssKUx2qBlxs7ln_5dqLtZkx5VM_UE_GE7yzc6BZOwBxtOftdsr8HVh-14ksSR9rAGEsO2zVBiEuW4qZf_aQM-ScWfU--wcczZ0dT-Ou8P87Bk9K9fjcn0PeaLoz3WTPepzNE", "dq": "kYw2u4_UmWvcXVOeV_VKJ5aQZkJ6_sxTpodRBMPyQmkMHKcW4eKU1mcJju_deqWadw5jGPPpm5yTXm5UkAwfOeookoWpGa7CvVf4kPNI6Aphn3GBjunJHNpPuU6w-wvomGsxd-NqQDGNYKHuFFMcyXO_zWXglQdP_1o1tJ1M-BM", "qi": "j94Ens784M8zsfwWoJhYq9prcSZOGgNbtFWQZO8HP8pcNM9ls7YA4snTtAS_B4peWWFAFZ0LSKPCxAvJnrq69ocmEKEk7ss1Jo062f9pLTQ6cnhMjev3IqLocIFt5Vbsg_PWYpFSR7re6FRbF9EYOM7F2-HRv1idxKCWoyQfBqk" }
|
||||
load crt rsa1_5.pem key rsa1_5.key jwt on
|
||||
# Private key built out of following JWK:
|
||||
# { "kty": "RSA", "e": "AQAB", "n": "wsqJbopx18NQFYLYOq4ZeMSE89yGiEankUpf25yV8QqroKUGrASj_OeqTWUjwPGKTN1vGFFuHYxiJeAUQH2qQPmg9Oqk6-ATBEKn9COKYniQ5459UxCwmZA2RL6ufhrNyq0JF3GfXkjLDBfhU9zJJEOhknsA0L_c-X4AI3d_NbFdMqxNe1V_UWAlLcbKdwO6iC9fAvwUmDQxgy6R0DC1CMouQpenMRcALaSHar1cm4K-syoNobv3HEuqgZ3s6-hOOSqauqAO0GUozPpaIA7OeruyRl5sTWT0r-iz39bchID2bIKtcqLiFcSYPLBcxmsaQCqRlGhmv6stjTCLV1yT9w", "kid": "ff3c5c96-392e-46ef-a839-6ff16027af78", "d": "b9hXfQ8lOtw8mX1dpqPcoElGhbczz_-xq2znCXQpbBPSZBUddZvchRSH5pSSKPEHlgb3CSGIdpLqsBCv0C_XmCM9ViN8uqsYgDO9uCLIDK5plWttbkqA_EufvW03R9UgIKWmOL3W4g4t-C2mBb8aByaGGVNjLnlb6i186uBsPGkvaeLHbQcRQKAvhOUTeNiyiiCbUGJwCm4avMiZrsz1r81Y1Z5izo0ERxdZymxM3FRZ9vjTB-6DtitvTXXnaAm1JTu6TIpj38u2mnNLkGMbflOpgelMNKBZVxSmfobIbFN8CHVc1UqLK2ElsZ9RCQANgkMHlMkOMj-XT0wHa3VBUQ", "p": "8mgriveKJAp1S7SHqirQAfZafxVuAK_A2QBYPsAUhikfBOvN0HtZjgurPXSJSdgR8KbWV7ZjdJM_eOivIb_XiuAaUdIOXbLRet7t9a_NJtmX9iybhoa9VOJFMBq_rbnbbte2kq0-FnXmv3cukbC2LaEw3aEcDgyURLCgWFqt7M0", "q": "zbbTv5421GowOfKVEuVoA35CEWgl8mdasnEZac2LWxMwKExikKU5LLacLQlcOt7A6n1ZGUC2wyH8mstO5tV34Eug3fnNrbnxFUEE_ZB_njs_rtZnwz57AoUXOXVnd194seIZF9PjdzZcuwXwXbrZ2RSVW8if_ZH5OVYEM1EsA9M", "dp": "1BaIYmIKn1X3InGlcSFcNRtSOnaJdFhRpotCqkRssKUx2qBlxs7ln_5dqLtZkx5VM_UE_GE7yzc6BZOwBxtOftdsr8HVh-14ksSR9rAGEsO2zVBiEuW4qZf_aQM-ScWfU--wcczZ0dT-Ou8P87Bk9K9fjcn0PeaLoz3WTPepzNE", "dq": "kYw2u4_UmWvcXVOeV_VKJ5aQZkJ6_sxTpodRBMPyQmkMHKcW4eKU1mcJju_deqWadw5jGPPpm5yTXm5UkAwfOeookoWpGa7CvVf4kPNI6Aphn3GBjunJHNpPuU6w-wvomGsxd-NqQDGNYKHuFFMcyXO_zWXglQdP_1o1tJ1M-BM", "qi": "j94Ens784M8zsfwWoJhYq9prcSZOGgNbtFWQZO8HP8pcNM9ls7YA4snTtAS_B4peWWFAFZ0LSKPCxAvJnrq69ocmEKEk7ss1Jo062f9pLTQ6cnhMjev3IqLocIFt5Vbsg_PWYpFSR7re6FRbF9EYOM7F2-HRv1idxKCWoyQfBqk" }
|
||||
load crt rsa_oeap.pem key rsa_oeap.key jwt on
|
||||
|
||||
listen main-fe
|
||||
bind "fd@${mainfe}"
|
||||
|
||||
use_backend secret_based_alg if { path_beg /secret }
|
||||
use_backend pem_based_alg if { path_beg /pem }
|
||||
use_backend jwk if { path_beg /jwk }
|
||||
default_backend dflt
|
||||
|
||||
|
||||
backend secret_based_alg
|
||||
|
||||
http-request set-var(txn.jwe) http_auth_bearer
|
||||
http-request set-var(txn.secret) hdr(X-Secret),ub64dec,base64
|
||||
|
||||
http-request set-var(txn.decrypted) var(txn.jwe),jwt_decrypt_secret(txn.secret)
|
||||
|
||||
.if ssllib_name_startswith(AWS-LC)
|
||||
acl aws_unmanaged var(txn.jwe),jwt_header_query('$.alg') -m str "A128KW"
|
||||
http-request set-var(txn.decrypted) str("AWS-LC UNMANAGED") if aws_unmanaged
|
||||
.endif
|
||||
|
||||
http-response set-header X-Decrypted %[var(txn.decrypted)]
|
||||
server s1 ${s1_addr}:${s1_port}
|
||||
|
||||
backend pem_based_alg
|
||||
|
||||
http-request set-var(txn.jwe) http_auth_bearer
|
||||
http-request set-var(txn.pem) hdr(X-PEM)
|
||||
|
||||
http-request set-var(txn.decrypted) var(txn.jwe),jwt_decrypt_cert(txn.pem)
|
||||
|
||||
http-after-response set-header X-Decrypted %[var(txn.decrypted)]
|
||||
server s1 ${s1_addr}:${s1_port}
|
||||
|
||||
backend jwk
|
||||
|
||||
http-request set-var(txn.jwe) http_auth_bearer
|
||||
http-request set-var(txn.jwk) req.fhdr(X-JWK)
|
||||
|
||||
http-request set-var(txn.decrypted) var(txn.jwe),jwt_decrypt_jwk(txn.jwk)
|
||||
|
||||
.if ssllib_name_startswith(AWS-LC)
|
||||
acl aws_unmanaged var(txn.jwe),jwt_header_query('$.alg') -m str "A128KW"
|
||||
http-request set-var(txn.decrypted) str("AWS-LC UNMANAGED") if aws_unmanaged
|
||||
.endif
|
||||
|
||||
http-after-response set-header X-Decrypted %[var(txn.decrypted)]
|
||||
server s1 ${s1_addr}:${s1_port}
|
||||
|
||||
|
||||
backend dflt
|
||||
server s1 ${s1_addr}:${s1_port}
|
||||
|
||||
|
||||
} -start
|
||||
|
||||
|
||||
|
||||
|
||||
#ALG: dir
|
||||
#ENC: A256GCM
|
||||
#KEY: {"kty":"oct", "k":"ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"}
|
||||
client c1_1 -connect ${h1_mainfe_sock} {
|
||||
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8w" -hdr "X-Secret: ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted == "Setec Astronomy"
|
||||
|
||||
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8w" -hdr "X-JWK: {\"kty\":\"oct\", \"k\":\"ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg\"}"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted == "Setec Astronomy"
|
||||
} -run
|
||||
|
||||
|
||||
#ALG: dir
|
||||
#ENC: A256GCM
|
||||
#KEY: {"kty":"oct", "k":"ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"}
|
||||
# Token is modified to have an invalid tag
|
||||
client c1_2 -connect ${h1_mainfe_sock} {
|
||||
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8v" -hdr "X-Secret: ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted == ""
|
||||
|
||||
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8v" -hdr "X-JWK: {\"kty\":\"oct\", \"k\":\"ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg\"}"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted == ""
|
||||
} -run
|
||||
|
||||
|
||||
#ALG: dir
|
||||
#ENC: A256GCM
|
||||
#KEY: {"kty":"oct", "k":"ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"}
|
||||
# Wrong secret
|
||||
client c1_3 -connect ${h1_mainfe_sock} {
|
||||
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8w" -hdr "X-Secret: zMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted == ""
|
||||
|
||||
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8v" -hdr "X-JWK: {\"kty\":\"oct\", \"k\":\"zMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg\"}"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted == ""
|
||||
} -run
|
||||
|
||||
|
||||
#ALG: A128KW
|
||||
#ENC: A128CBC-HS256
|
||||
#KEY: {"kty":"oct", "k":"3921VrO5TrLvPQ-NFLlghQ"}
|
||||
client c2_1 -connect ${h1_mainfe_sock} {
|
||||
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiQTEyOEtXIiwgImVuYyI6ICJBMTI4Q0JDLUhTMjU2In0.AaOyP1zNjsywJOoQ941JJWT4LQIDlpy3UibM_48HrsoCJ5ENpQhfbQ.h2ZBUiy9ofvcDZOwV2iVJA.K0FhK6ri44ZWmtFUtJRpiZSeT8feKX5grFpU8xG5026bGXAdZADO4ZkQ8DRvSEE9DwNIlK6cIEoSavm12gSzQVXajz3MWv5U6VbK5gPFCeFjJfMPmdQ9THIi-hapcueSxYz2rkcGxo3iP3ixE_bww8UB_XlQvnokhFxtf8NushMkjef4RDrW5vQu4j_qPbqG334msDKmFi8Klprs6JktrADeEJ0bPGN80NKEWp7XPcCbfmcwYe-9z_tPw_KJcQhLpQevfPLfVI4WjPgPxYNGw03qKYnLD7oTjr9qCrQmzUVXutlhxfpD3UQr11SJu8q19Ug82bON-GRd2CjpSrErQq42dd0_mWjG9iDqjqpYFBK9DV_qawy2dxFbfIcCsnb6ewifjoJLiFg2OT7-YdTaC7kqaXeE1JpA-OtMXN72FUDrnQ8r9ifj_VpMNvBf_36dbOCT-cGwIOI8Pf6HH2smXULhtBv9q-qO2zyScpmliqZDXUqmvQ8rxi-xYI2hijV80jo14teZgIotWsZE2FrMPJTkegDmh4cG5UzoUsQxzPhXqHvkss6Hv7h-_fmvXvXY1AZ8T8bL1qM4bS8mKpewmGtjmU6S220tL60ieT2QL0vmTFlJkOE8uFreWlPnxNKBix_zj4Smhg1zS_sl7GoXhp5Q_QY3MOMM5-gCAALY0crqLLWtHswElVOiJSyd64T9HFyXm7Rleqq2kLXmTvDhOR6lzMnA0rcGP7lQGYlLZgFiicsMY722XlKI3v1-cJYvj2RZMPe1ijBLFFTqyPeCBkbsDC3XCpWhMByNHSHKN3t-NJmQBIC-89ZeOMU-WBtqrDDi_CMnaz9mwkyt3P7ja_fVskc4KKBBlMVYDZ3DJeJw3Kg9Pie0XlqHkD6W1vyAWjOM2z76Rh_3553dLAH1HxNRwidLjq3SvoaX3TOU5O2_omFGPBek7QdzhNBGLgv6Zlul_XxZq9UGiVo1jrnkd40_vAZQRL6NyMxGBEij_b8F_wDMz5njrL-a0c2Y5mMno-q8gmM4sFKI1BS5HsrUAw.PFFSFlDslALnebAdaqS_MA" -hdr "X-Secret: 3921VrO5TrLvPQ-NFLlghQ"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted ~ "(Sed ut perspiciatis unde omnis iste natus error sit voluptatem doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo veritatis et quasi architecto beatae vitae dicta sunt explicabo\\. Nemo ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt\\. porro quisquam est, qui dolorem ipsum quia dolor sit amet, adipisci velit, sed quia non numquam eius modi tempora incidunt ut dolore magnam aliquam quaerat voluptatem\\. Ut enim ad minima veniam, nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut ea commodi consequatur\\? Quis autem vel eum iure reprehenderit qui in voluptate velit esse quam nihil molestiae consequatur, vel illum qui eum fugiat quo voluptas nulla pariatur\\?|AWS-LC UNMANAGED)"
|
||||
|
||||
|
||||
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiAiQTEyOEtXIiwgImVuYyI6ICJBMTI4Q0JDLUhTMjU2In0.AaOyP1zNjsywJOoQ941JJWT4LQIDlpy3UibM_48HrsoCJ5ENpQhfbQ.h2ZBUiy9ofvcDZOwV2iVJA.K0FhK6ri44ZWmtFUtJRpiZSeT8feKX5grFpU8xG5026bGXAdZADO4ZkQ8DRvSEE9DwNIlK6cIEoSavm12gSzQVXajz3MWv5U6VbK5gPFCeFjJfMPmdQ9THIi-hapcueSxYz2rkcGxo3iP3ixE_bww8UB_XlQvnokhFxtf8NushMkjef4RDrW5vQu4j_qPbqG334msDKmFi8Klprs6JktrADeEJ0bPGN80NKEWp7XPcCbfmcwYe-9z_tPw_KJcQhLpQevfPLfVI4WjPgPxYNGw03qKYnLD7oTjr9qCrQmzUVXutlhxfpD3UQr11SJu8q19Ug82bON-GRd2CjpSrErQq42dd0_mWjG9iDqjqpYFBK9DV_qawy2dxFbfIcCsnb6ewifjoJLiFg2OT7-YdTaC7kqaXeE1JpA-OtMXN72FUDrnQ8r9ifj_VpMNvBf_36dbOCT-cGwIOI8Pf6HH2smXULhtBv9q-qO2zyScpmliqZDXUqmvQ8rxi-xYI2hijV80jo14teZgIotWsZE2FrMPJTkegDmh4cG5UzoUsQxzPhXqHvkss6Hv7h-_fmvXvXY1AZ8T8bL1qM4bS8mKpewmGtjmU6S220tL60ieT2QL0vmTFlJkOE8uFreWlPnxNKBix_zj4Smhg1zS_sl7GoXhp5Q_QY3MOMM5-gCAALY0crqLLWtHswElVOiJSyd64T9HFyXm7Rleqq2kLXmTvDhOR6lzMnA0rcGP7lQGYlLZgFiicsMY722XlKI3v1-cJYvj2RZMPe1ijBLFFTqyPeCBkbsDC3XCpWhMByNHSHKN3t-NJmQBIC-89ZeOMU-WBtqrDDi_CMnaz9mwkyt3P7ja_fVskc4KKBBlMVYDZ3DJeJw3Kg9Pie0XlqHkD6W1vyAWjOM2z76Rh_3553dLAH1HxNRwidLjq3SvoaX3TOU5O2_omFGPBek7QdzhNBGLgv6Zlul_XxZq9UGiVo1jrnkd40_vAZQRL6NyMxGBEij_b8F_wDMz5njrL-a0c2Y5mMno-q8gmM4sFKI1BS5HsrUAw.PFFSFlDslALnebAdaqS_MA" -hdr "X-JWK: {\"kty\":\"oct\", \"k\":\"3921VrO5TrLvPQ-NFLlghQ\"}"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted ~ "(Sed ut perspiciatis unde omnis iste natus error sit voluptatem doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo veritatis et quasi architecto beatae vitae dicta sunt explicabo\\. Nemo ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt\\. porro quisquam est, qui dolorem ipsum quia dolor sit amet, adipisci velit, sed quia non numquam eius modi tempora incidunt ut dolore magnam aliquam quaerat voluptatem\\. Ut enim ad minima veniam, nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut ea commodi consequatur\\? Quis autem vel eum iure reprehenderit qui in voluptate velit esse quam nihil molestiae consequatur, vel illum qui eum fugiat quo voluptas nulla pariatur\\?|AWS-LC UNMANAGED)"
|
||||
} -run
|
||||
|
||||
|
||||
#ALG: A128KW
|
||||
#ENC: A128CBC-HS256
|
||||
#KEY: {"kty":"oct", "k":"3921VrO5TrLvPQ-NFLlghQ"}
|
||||
# Token is modified to have an invalid tag
|
||||
client c2_2 -connect ${h1_mainfe_sock} {
|
||||
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiQTEyOEtXIiwgImVuYyI6ICJBMTI4Q0JDLUhTMjU2In0.AaOyP1zNjsywJOoQ941JJWT4LQIDlpy3UibM_48HrsoCJ5ENpQhfbQ.h2ZBUiy9ofvcDZOwV2iVJA.K0FhK6ri44ZWmtFUtJRpiZSeT8feKX5grFpU8xG5026bGXAdZADO4ZkQ8DRvSEE9DwNIlK6cIEoSavm12gSzQVXajz3MWv5U6VbK5gPFCeFjJfMPmdQ9THIi-hapcueSxYz2rkcGxo3iP3ixE_bww8UB_XlQvnokhFxtf8NushMkjef4RDrW5vQu4j_qPbqG334msDKmFi8Klprs6JktrADeEJ0bPGN80NKEWp7XPcCbfmcwYe-9z_tPw_KJcQhLpQevfPLfVI4WjPgPxYNGw03qKYnLD7oTjr9qCrQmzUVXutlhxfpD3UQr11SJu8q19Ug82bON-GRd2CjpSrErQq42dd0_mWjG9iDqjqpYFBK9DV_qawy2dxFbfIcCsnb6ewifjoJLiFg2OT7-YdTaC7kqaXeE1JpA-OtMXN72FUDrnQ8r9ifj_VpMNvBf_36dbOCT-cGwIOI8Pf6HH2smXULhtBv9q-qO2zyScpmliqZDXUqmvQ8rxi-xYI2hijV80jo14teZgIotWsZE2FrMPJTkegDmh4cG5UzoUsQxzPhXqHvkss6Hv7h-_fmvXvXY1AZ8T8bL1qM4bS8mKpewmGtjmU6S220tL60ieT2QL0vmTFlJkOE8uFreWlPnxNKBix_zj4Smhg1zS_sl7GoXhp5Q_QY3MOMM5-gCAALY0crqLLWtHswElVOiJSyd64T9HFyXm7Rleqq2kLXmTvDhOR6lzMnA0rcGP7lQGYlLZgFiicsMY722XlKI3v1-cJYvj2RZMPe1ijBLFFTqyPeCBkbsDC3XCpWhMByNHSHKN3t-NJmQBIC-89ZeOMU-WBtqrDDi_CMnaz9mwkyt3P7ja_fVskc4KKBBlMVYDZ3DJeJw3Kg9Pie0XlqHkD6W1vyAWjOM2z76Rh_3553dLAH1HxNRwidLjq3SvoaX3TOU5O2_omFGPBek7QdzhNBGLgv6Zlul_XxZq9UGiVo1jrnkd40_vAZQRL6NyMxGBEij_b8F_wDMz5njrL-a0c2Y5mMno-q8gmM4sFKI1BS5HsrUAw.PFFSFlDslALnebAdaqS_Ma" -hdr "X-Secret: 3921VrO5TrLvPQ-NFLlghQ"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted ~ "(|AWS-LC UNMANAGED)"
|
||||
} -run
|
||||
|
||||
|
||||
|
||||
#ALG: A256GCMKW
|
||||
#ENC: A256CBC-HS512
|
||||
#KEY: {"k":"vof8hNUaHiMw_0o3EGVPtBOPDDWJ62b8kQWE2ufSjIE","kty":"oct"}
|
||||
client c3 -connect ${h1_mainfe_sock} {
|
||||
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiJBMjU2R0NNS1ciLCJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwiaXYiOiJRclluZUNxVmVldExzN1FKIiwidGFnIjoieFEyeFI2SHdBUngzeDJUdFg5UFVSZyJ9.wk4eJtdTKOPsic4IBtVcppO6Sp6LfXmxHzBvHZtU0Sk7JCVqhAghkeAw0qWJ5XsdwSneIlZ4rGygtnafFl4Thw.ylzjPBsgJ4qefDQZ_jUVpA.xX0XhdL4KTSZfRvHuZD1_Dh-XrfZogRsBHpgxkDZdYk.w8LPVak5maNeQpSWgCIGGsj26SLQZTx6nAmkvDQKFIA" -hdr "X-Secret: vof8hNUaHiMw_0o3EGVPtBOPDDWJ62b8kQWE2ufSjIE"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted == "My Encrypted message"
|
||||
} -run
|
||||
|
||||
|
||||
# RFC7516 JWE
|
||||
# https://datatracker.ietf.org/doc/html/rfc7516#appendix-A.3
|
||||
#ALG: A128KW
|
||||
#ENC: A128CBC-HS256
|
||||
#KEY: {"kty":"oct", "k":"GawgguFyGrWKav7AX4VKUg" }
|
||||
client c4 -connect ${h1_mainfe_sock} {
|
||||
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiJBMTI4S1ciLCJlbmMiOiJBMTI4Q0JDLUhTMjU2In0.6KB707dM9YTIgHtLvtgWQ8mKwboJW3of9locizkDTHzBC2IlrT1oOQ.AxY8DCtDaGlsbGljb3RoZQ.KDlTtXchhZTGufMYmOYGS4HffxPSUrfmqCHXaI9wOGY.U0m_YmjN04DJvceFICbCVQ" -hdr "X-Secret: GawgguFyGrWKav7AX4VKUg"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted ~ "(Live long and prosper\\.|AWS-LC UNMANAGED)"
|
||||
} -run
|
||||
|
||||
|
||||
#ALG: A256GCMKW
|
||||
#ENC: A192CBC-HS384
|
||||
#KEY: {"k":"vprpatiNyI-biJY57qr8Gg4--4Rycgb2G5yO1_myYAw","kty":"oct"}
|
||||
client c5 -connect ${h1_mainfe_sock} {
|
||||
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiJBMjU2R0NNS1ciLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiaXYiOiJzVE81QjlPRXFuaUhCX3dYIiwidGFnIjoid2M1ZnRpYUFnNGNOR1JkZzNWQ3FXdyJ9.2zqnM9zeNU-eAMp5h2uFJyxbHHKsZs9YAYKzOcIF3d3Q9uq1TMQAvqOIuXw3kU9o.hh5aObIoIMR6Ke0rXm6V1A.R7U-4OlqOR6f2C1b3nI5bFqZBIGNBgza7FfoPEgrQT8.asJCzUAHCuxS7o8Ut4ENfaY5RluLB35F" -hdr "X-Secret: vprpatiNyI-biJY57qr8Gg4--4Rycgb2G5yO1_myYAw"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted == "My Encrypted message"
|
||||
|
||||
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiJBMjU2R0NNS1ciLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiaXYiOiJzVE81QjlPRXFuaUhCX3dYIiwidGFnIjoid2M1ZnRpYUFnNGNOR1JkZzNWQ3FXdyJ9.2zqnM9zeNU-eAMp5h2uFJyxbHHKsZs9YAYKzOcIF3d3Q9uq1TMQAvqOIuXw3kU9o.hh5aObIoIMR6Ke0rXm6V1A.R7U-4OlqOR6f2C1b3nI5bFqZBIGNBgza7FfoPEgrQT8.asJCzUAHCuxS7o8Ut4ENfaY5RluLB35F" -hdr "X-JWK: {\"k\":\"vprpatiNyI-biJY57qr8Gg4--4Rycgb2G5yO1_myYAw\",\"kty\":\"oct\"}"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted == "My Encrypted message"
|
||||
} -run
|
||||
|
||||
|
||||
#ALG: RSA1_5
|
||||
#ENC: A256GCM
|
||||
client c6 -connect ${h1_mainfe_sock} {
|
||||
txreq -url "/pem" -hdr "Authorization: Bearer eyJhbGciOiAiUlNBMV81IiwgImVuYyI6ICJBMjU2R0NNIn0.ew8AbprGcd_J73-CZPIsE1YonD9rtcL7VCuOOuVkrpS_9UzA9_kMh1yw20u-b5rKJAhmFMCQPXl44ro6IzOeHu8E2X_NlPEnQfyNVQ4R1HB_E9sSk5BLxOH3aHkVUh0I-e2eDDj-pdI3OrdjZtnZEBeQ7tpMcoBEbn1VGg7Pmw4qtdS-0qnDSs-PttU-cejjgPUNLRU8UdoRVC9uJKacJms110QugDuFuMYTTSU2nbIYh0deCMRAuKGWt0Ii6EMYW2JaJ7JfXag59Ar1uylQPyEVrocnOsDuB9xnp2jd796qCPdKxBK9yKUnwjal4SQpYbutr40QzG1S4MsKaUorLg.0el2ruY0mm2s7LUR.X5RI6dF06Y_dbAr8meb-6SG5enj5noto9nzgQU5HDrYdiUofPptIf6E-FikKUM9QR4pY9SyphqbPYeAN1ZYVxBrR8tUf4Do2kw1biuuRAmuIyytpmxwvY946T3ctu1Zw3Ymwe-jWXX08EngzssvzFOGT66gkdufrTkC45Fkr0RBOmWa5OVVg_VR6LwcivtQMmlArlrwbaDmmLqt_2p7afT0UksEz4loq0sskw-p7GbhB2lpzXoDnijdHrQkftRbVCiDbK4-qGr7IRFb0YOHvyVFr-kmDoJv2Zsg_rPKV1LkYmPJUbVDo9T3RAcLinlKPK4ZPC_2bWj3M9BvfOq1HeuyVWzX2Cb1mHFdxXFGqaLPfsE0VOfn0GqL7oHVbuczYYw2eKdmiw5LEMwuuJEdYDE9IIFEe8oRB4hNZ0XMYB6oqqZejD0Fh6nqlj5QUrTYpTSE-3LkgK2zRJ0oZFXZyHCB426bmViuE0mXF7twkQep09g0U35-jFBZcSYBDvZZL1t5d_YEQ0QtO0mEeEpGb0Pvk_EsSMFib7NxClz4_rdtwWCFuM4uFOS5vrQMiMqi_TadhLxrugRFhJpsibuScCiJ7eNDrUvwSWEwv1U593MUX3guDq_ONOo_49EOJSyRJtQCNC6FW6GLWSz9TCo6g5LCnXt-pqwu0Iymr7ZTQ3MTsdq2G55JM2e6SdG43iET8r235hynmXHKPUYHlSjsC2AEAY_pGDO0akIhf4wDVIM5rytn-rjQf-29ZJp05g6KPe-EaN1C-X7aBGhgAEgnX-iaXXbotpGeKRTNj2jAG1UrkYi6BGHxluiXJ8jH_LjHuxKyzIObqK8p28ePDKRL-jyNTrvGW2uorgb_u7HGmWYIWLTI7obnZ5vw3MbkjcwEd4bX5JXUj2rRsUWMlZSSFVO9Wgf7MBvcLsyF0Yqun3p0bi__edmcqNF_uuYZT-8jkUlMborqIDDCYYqIolgi5R1Bmut-gFYq6xyfEncxOi50xmYon50UulVnAH-up_RELGtCjmAivaJb8.upVY733IMAT8YbMab2PZnw" -hdr "X-PEM: ${testdir}/rsa1_5.pem"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted == "Sed ut perspiciatis unde omnis iste natus error sit voluptatem doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. porro quisquam est, qui dolorem ipsum quia dolor sit amet, adipisci velit, sed quia non numquam eius modi tempora incidunt ut dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in voluptate velit esse quam nihil molestiae consequatur, vel illum qui eum fugiat quo voluptas nulla pariatur?"
|
||||
|
||||
|
||||
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiAiUlNBMV81IiwgImVuYyI6ICJBMjU2R0NNIn0.ew8AbprGcd_J73-CZPIsE1YonD9rtcL7VCuOOuVkrpS_9UzA9_kMh1yw20u-b5rKJAhmFMCQPXl44ro6IzOeHu8E2X_NlPEnQfyNVQ4R1HB_E9sSk5BLxOH3aHkVUh0I-e2eDDj-pdI3OrdjZtnZEBeQ7tpMcoBEbn1VGg7Pmw4qtdS-0qnDSs-PttU-cejjgPUNLRU8UdoRVC9uJKacJms110QugDuFuMYTTSU2nbIYh0deCMRAuKGWt0Ii6EMYW2JaJ7JfXag59Ar1uylQPyEVrocnOsDuB9xnp2jd796qCPdKxBK9yKUnwjal4SQpYbutr40QzG1S4MsKaUorLg.0el2ruY0mm2s7LUR.X5RI6dF06Y_dbAr8meb-6SG5enj5noto9nzgQU5HDrYdiUofPptIf6E-FikKUM9QR4pY9SyphqbPYeAN1ZYVxBrR8tUf4Do2kw1biuuRAmuIyytpmxwvY946T3ctu1Zw3Ymwe-jWXX08EngzssvzFOGT66gkdufrTkC45Fkr0RBOmWa5OVVg_VR6LwcivtQMmlArlrwbaDmmLqt_2p7afT0UksEz4loq0sskw-p7GbhB2lpzXoDnijdHrQkftRbVCiDbK4-qGr7IRFb0YOHvyVFr-kmDoJv2Zsg_rPKV1LkYmPJUbVDo9T3RAcLinlKPK4ZPC_2bWj3M9BvfOq1HeuyVWzX2Cb1mHFdxXFGqaLPfsE0VOfn0GqL7oHVbuczYYw2eKdmiw5LEMwuuJEdYDE9IIFEe8oRB4hNZ0XMYB6oqqZejD0Fh6nqlj5QUrTYpTSE-3LkgK2zRJ0oZFXZyHCB426bmViuE0mXF7twkQep09g0U35-jFBZcSYBDvZZL1t5d_YEQ0QtO0mEeEpGb0Pvk_EsSMFib7NxClz4_rdtwWCFuM4uFOS5vrQMiMqi_TadhLxrugRFhJpsibuScCiJ7eNDrUvwSWEwv1U593MUX3guDq_ONOo_49EOJSyRJtQCNC6FW6GLWSz9TCo6g5LCnXt-pqwu0Iymr7ZTQ3MTsdq2G55JM2e6SdG43iET8r235hynmXHKPUYHlSjsC2AEAY_pGDO0akIhf4wDVIM5rytn-rjQf-29ZJp05g6KPe-EaN1C-X7aBGhgAEgnX-iaXXbotpGeKRTNj2jAG1UrkYi6BGHxluiXJ8jH_LjHuxKyzIObqK8p28ePDKRL-jyNTrvGW2uorgb_u7HGmWYIWLTI7obnZ5vw3MbkjcwEd4bX5JXUj2rRsUWMlZSSFVO9Wgf7MBvcLsyF0Yqun3p0bi__edmcqNF_uuYZT-8jkUlMborqIDDCYYqIolgi5R1Bmut-gFYq6xyfEncxOi50xmYon50UulVnAH-up_RELGtCjmAivaJb8.upVY733IMAT8YbMab2PZnw" \
|
||||
-hdr "X-JWK: { \"kty\": \"RSA\", \"e\": \"AQAB\", \"n\": \"wsqJbopx18NQFYLYOq4ZeMSE89yGiEankUpf25yV8QqroKUGrASj_OeqTWUjwPGKTN1vGFFuHYxiJeAUQH2qQPmg9Oqk6-ATBEKn9COKYniQ5459UxCwmZA2RL6ufhrNyq0JF3GfXkjLDBfhU9zJJEOhknsA0L_c-X4AI3d_NbFdMqxNe1V_UWAlLcbKdwO6iC9fAvwUmDQxgy6R0DC1CMouQpenMRcALaSHar1cm4K-syoNobv3HEuqgZ3s6-hOOSqauqAO0GUozPpaIA7OeruyRl5sTWT0r-iz39bchID2bIKtcqLiFcSYPLBcxmsaQCqRlGhmv6stjTCLV1yT9w\", \"kid\": \"ff3c5c96-392e-46ef-a839-6ff16027af78\", \"d\": \"b9hXfQ8lOtw8mX1dpqPcoElGhbczz_-xq2znCXQpbBPSZBUddZvchRSH5pSSKPEHlgb3CSGIdpLqsBCv0C_XmCM9ViN8uqsYgDO9uCLIDK5plWttbkqA_EufvW03R9UgIKWmOL3W4g4t-C2mBb8aByaGGVNjLnlb6i186uBsPGkvaeLHbQcRQKAvhOUTeNiyiiCbUGJwCm4avMiZrsz1r81Y1Z5izo0ERxdZymxM3FRZ9vjTB-6DtitvTXXnaAm1JTu6TIpj38u2mnNLkGMbflOpgelMNKBZVxSmfobIbFN8CHVc1UqLK2ElsZ9RCQANgkMHlMkOMj-XT0wHa3VBUQ\", \"p\": \"8mgriveKJAp1S7SHqirQAfZafxVuAK_A2QBYPsAUhikfBOvN0HtZjgurPXSJSdgR8KbWV7ZjdJM_eOivIb_XiuAaUdIOXbLRet7t9a_NJtmX9iybhoa9VOJFMBq_rbnbbte2kq0-FnXmv3cukbC2LaEw3aEcDgyURLCgWFqt7M0\", \"q\": \"zbbTv5421GowOfKVEuVoA35CEWgl8mdasnEZac2LWxMwKExikKU5LLacLQlcOt7A6n1ZGUC2wyH8mstO5tV34Eug3fnNrbnxFUEE_ZB_njs_rtZnwz57AoUXOXVnd194seIZF9PjdzZcuwXwXbrZ2RSVW8if_ZH5OVYEM1EsA9M\", \"dp\": \"1BaIYmIKn1X3InGlcSFcNRtSOnaJdFhRpotCqkRssKUx2qBlxs7ln_5dqLtZkx5VM_UE_GE7yzc6BZOwBxtOftdsr8HVh-14ksSR9rAGEsO2zVBiEuW4qZf_aQM-ScWfU--wcczZ0dT-Ou8P87Bk9K9fjcn0PeaLoz3WTPepzNE\", \"dq\": \"kYw2u4_UmWvcXVOeV_VKJ5aQZkJ6_sxTpodRBMPyQmkMHKcW4eKU1mcJju_deqWadw5jGPPpm5yTXm5UkAwfOeookoWpGa7CvVf4kPNI6Aphn3GBjunJHNpPuU6w-wvomGsxd-NqQDGNYKHuFFMcyXO_zWXglQdP_1o1tJ1M-BM\", \"qi\": \"j94Ens784M8zsfwWoJhYq9prcSZOGgNbtFWQZO8HP8pcNM9ls7YA4snTtAS_B4peWWFAFZ0LSKPCxAvJnrq69ocmEKEk7ss1Jo062f9pLTQ6cnhMjev3IqLocIFt5Vbsg_PWYpFSR7re6FRbF9EYOM7F2-HRv1idxKCWoyQfBqk\" }"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted == "Sed ut perspiciatis unde omnis iste natus error sit voluptatem doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. porro quisquam est, qui dolorem ipsum quia dolor sit amet, adipisci velit, sed quia non numquam eius modi tempora incidunt ut dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in voluptate velit esse quam nihil molestiae consequatur, vel illum qui eum fugiat quo voluptas nulla pariatur?"
|
||||
} -run
|
||||
|
||||
|
||||
#ALG: RSA-OAEP
|
||||
#ENC: A256GCM
|
||||
client c7 -connect ${h1_mainfe_sock} {
|
||||
txreq -url "/pem" -hdr "Authorization: Bearer eyJhbGciOiAiUlNBLU9BRVAiLCAiZW5jIjogIkEyNTZHQ00ifQ.Os33U1HEY92lrpup2E-HNttBW26shGSCafqNbVfs1rwWB__B-0dRAiKg4OtIrIXVCN7oQMqLr9RFRO6Gb-OAPIr-59FETLSXP8K_3uNcy-jdKrpKLbv8wgisEYqBJj4BysZQjuWgUgJ7Dvx28_zIUg0FJGOwxtpX2SUWxEgw5CPRgRrENJDJ2EYA6wuX9SbfarhQR4uPN7pdRKZ0ZQN6_5H3H9pWJ4WNnsQ0wjChKTsdR3kHOvygiUmdYSEWGe6LBQLSBQCnQim1pr--GBOHvDf2g4Je9EDFrrO1icFDbBdJ8I4ol4ixglLEnBCTHdhYd_lVe0i5JcxxHF8hmemAYQ.IOphaFIcCosKyXcN.KEjWfV2yBKLuMLX20mtEvrQ-P_oKWkdgZabx0FgRLqjSorD7DS3aIXLMEmyrOYd4kGHKCMg2Fvg61xKvI2FsQviA5LgHtx0QKmFARacP8kBl8vFPMEg2WtW0rIImTc1tj4C0PM9A0TbyDohtcoN9UYosrw5GyPOlHwIFwWosLA9WHqp00MAfAu3JOa4CwuMXsORGzeIyb7X-jg_bbG_9xkVUsgZpaCUX447a3QmKLJVBfQpeEO_PuYbds-MvIU9m4uYzWplNeHnf3B1dh9p6o4Ml6OEp-0G_4Nd4UmMz_g9A-TatH-A__MAC9Mx1Wj1cDn5M3upcrAyu2JLQ48A-Qa2ocElhQ4ODzwbgbC5PS34Mlm_x18zqL-0Fw3ckhzgoAyDBoRO6SaNmsKb1wQ6QGbwBJx1jC51hpzBHRv3pUlegsHXgq7OWN1x1tDJvRc_DHMa23Mheg-aKJcliP846Dduq2_Hve3md30C0hbrP1OMF5ZJSVu4kUo7UFaZA_6hhcoGvvyEGDMnPH5SznrrsyHGIre-WOdXCObZNkDV6Qn0sqAP_vkj_6Dj965W8ksCKk6ye409cB4mnqfLv3dUtGLV8o8VtCLIEs2G62lwaDGrX4HB-pZ6jea2qH6UvgwK5WT-VzrypSQcVoWCKopln2gtO1JROKmbOiL9f8dfbLKqYSRB6ppMxh5Euddx_eNikZfLEcXfq2Grwyrj0NLP82AFSxSYf3BpYqpOhSxca0gx0psb8tCwq3sqmh5Bp_qmKIOthXb6k-9R_Ng6cRTp132OnDEXEDtvDv59WJWHuo4qACyrg7jUlrh4dAYwYke1yBgVcqK5JwVnmKDjnx9vRGFSD9esrL8MpGiP6uUeN3AXiv7OSb83hDdwTTQU5nvitHWKS72Mb1FRPdDXUxooiyShAkV5Spo3YNl4EHkm6lnlJ-kC3BFlxYqYd5a_vtqA-ywR7ozWo1GtMBjYycq2s9Kp8FnqI2cTWobOCjMxaej4CXaRA4IwhjC1u6OTCvxP70MWYT0pJPjUS.k9i0Lw9MfJs4Rp-_uwIEeA" -hdr "X-PEM: ${testdir}/rsa_oeap.pem"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted == "Sed ut perspiciatis unde omnis iste natus error sit voluptatem doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. porro quisquam est, qui dolorem ipsum quia dolor sit amet, adipisci velit, sed quia non numquam eius modi tempora incidunt ut dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in voluptate velit esse quam nihil molestiae consequatur, vel illum qui eum fugiat quo voluptas nulla pariatur?"
|
||||
} -run
|
||||
|
||||
|
||||
# Test 'jwt_decrypt_jwk' error cases
|
||||
client c8 -connect ${h1_mainfe_sock} {
|
||||
# Invalid 'oct' JWK
|
||||
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiJBMjU2R0NNS1ciLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiaXYiOiJzVE81QjlPRXFuaUhCX3dYIiwidGFnIjoid2M1ZnRpYUFnNGNOR1JkZzNWQ3FXdyJ9.2zqnM9zeNU-eAMp5h2uFJyxbHHKsZs9YAYKzOcIF3d3Q9uq1TMQAvqOIuXw3kU9o.hh5aObIoIMR6Ke0rXm6V1A.R7U-4OlqOR6f2C1b3nI5bFqZBIGNBgza7FfoPEgrQT8.asJCzUAHCuxS7o8Ut4ENfaY5RluLB35F" -hdr "X-JWK: {\"k\":\"invalid\",\"kty\":\"oct\"}"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted == ""
|
||||
|
||||
# Wrong JWK type
|
||||
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiJBMjU2R0NNS1ciLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiaXYiOiJzVE81QjlPRXFuaUhCX3dYIiwidGFnIjoid2M1ZnRpYUFnNGNOR1JkZzNWQ3FXdyJ9.2zqnM9zeNU-eAMp5h2uFJyxbHHKsZs9YAYKzOcIF3d3Q9uq1TMQAvqOIuXw3kU9o.hh5aObIoIMR6Ke0rXm6V1A.R7U-4OlqOR6f2C1b3nI5bFqZBIGNBgza7FfoPEgrQT8.asJCzUAHCuxS7o8Ut4ENfaY5RluLB35F" -hdr "X-JWK: {\"k\":\"invalid\",\"kty\":\"RSA\"}"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted == ""
|
||||
|
||||
# Invalid 'RSA' JWK (truncated 'qi')
|
||||
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiAiUlNBMV81IiwgImVuYyI6ICJBMjU2R0NNIn0.ew8AbprGcd_J73-CZPIsE1YonD9rtcL7VCuOOuVkrpS_9UzA9_kMh1yw20u-b5rKJAhmFMCQPXl44ro6IzOeHu8E2X_NlPEnQfyNVQ4R1HB_E9sSk5BLxOH3aHkVUh0I-e2eDDj-pdI3OrdjZtnZEBeQ7tpMcoBEbn1VGg7Pmw4qtdS-0qnDSs-PttU-cejjgPUNLRU8UdoRVC9uJKacJms110QugDuFuMYTTSU2nbIYh0deCMRAuKGWt0Ii6EMYW2JaJ7JfXag59Ar1uylQPyEVrocnOsDuB9xnp2jd796qCPdKxBK9yKUnwjal4SQpYbutr40QzG1S4MsKaUorLg.0el2ruY0mm2s7LUR.X5RI6dF06Y_dbAr8meb-6SG5enj5noto9nzgQU5HDrYdiUofPptIf6E-FikKUM9QR4pY9SyphqbPYeAN1ZYVxBrR8tUf4Do2kw1biuuRAmuIyytpmxwvY946T3ctu1Zw3Ymwe-jWXX08EngzssvzFOGT66gkdufrTkC45Fkr0RBOmWa5OVVg_VR6LwcivtQMmlArlrwbaDmmLqt_2p7afT0UksEz4loq0sskw-p7GbhB2lpzXoDnijdHrQkftRbVCiDbK4-qGr7IRFb0YOHvyVFr-kmDoJv2Zsg_rPKV1LkYmPJUbVDo9T3RAcLinlKPK4ZPC_2bWj3M9BvfOq1HeuyVWzX2Cb1mHFdxXFGqaLPfsE0VOfn0GqL7oHVbuczYYw2eKdmiw5LEMwuuJEdYDE9IIFEe8oRB4hNZ0XMYB6oqqZejD0Fh6nqlj5QUrTYpTSE-3LkgK2zRJ0oZFXZyHCB426bmViuE0mXF7twkQep09g0U35-jFBZcSYBDvZZL1t5d_YEQ0QtO0mEeEpGb0Pvk_EsSMFib7NxClz4_rdtwWCFuM4uFOS5vrQMiMqi_TadhLxrugRFhJpsibuScCiJ7eNDrUvwSWEwv1U593MUX3guDq_ONOo_49EOJSyRJtQCNC6FW6GLWSz9TCo6g5LCnXt-pqwu0Iymr7ZTQ3MTsdq2G55JM2e6SdG43iET8r235hynmXHKPUYHlSjsC2AEAY_pGDO0akIhf4wDVIM5rytn-rjQf-29ZJp05g6KPe-EaN1C-X7aBGhgAEgnX-iaXXbotpGeKRTNj2jAG1UrkYi6BGHxluiXJ8jH_LjHuxKyzIObqK8p28ePDKRL-jyNTrvGW2uorgb_u7HGmWYIWLTI7obnZ5vw3MbkjcwEd4bX5JXUj2rRsUWMlZSSFVO9Wgf7MBvcLsyF0Yqun3p0bi__edmcqNF_uuYZT-8jkUlMborqIDDCYYqIolgi5R1Bmut-gFYq6xyfEncxOi50xmYon50UulVnAH-up_RELGtCjmAivaJb8.upVY733IMAT8YbMab2PZnw" \
|
||||
-hdr "X-JWK: { \"kty\": \"RSA\", \"e\": \"AQAB\", \"n\": \"wsqJbopx18NQFYLYOq4ZeMSE89yGiEankUpf25yV8QqroKUGrASj_OeqTWUjwPGKTN1vGFFuHYxiJeAUQH2qQPmg9Oqk6-ATBEKn9COKYniQ5459UxCwmZA2RL6ufhrNyq0JF3GfXkjLDBfhU9zJJEOhknsA0L_c-X4AI3d_NbFdMqxNe1V_UWAlLcbKdwO6iC9fAvwUmDQxgy6R0DC1CMouQpenMRcALaSHar1cm4K-syoNobv3HEuqgZ3s6-hOOSqauqAO0GUozPpaIA7OeruyRl5sTWT0r-iz39bchID2bIKtcqLiFcSYPLBcxmsaQCqRlGhmv6stjTCLV1yT9w\", \"kid\": \"ff3c5c96-392e-46ef-a839-6ff16027af78\", \"d\": \"b9hXfQ8lOtw8mX1dpqPcoElGhbczz_-xq2znCXQpbBPSZBUddZvchRSH5pSSKPEHlgb3CSGIdpLqsBCv0C_XmCM9ViN8uqsYgDO9uCLIDK5plWttbkqA_EufvW03R9UgIKWmOL3W4g4t-C2mBb8aByaGGVNjLnlb6i186uBsPGkvaeLHbQcRQKAvhOUTeNiyiiCbUGJwCm4avMiZrsz1r81Y1Z5izo0ERxdZymxM3FRZ9vjTB-6DtitvTXXnaAm1JTu6TIpj38u2mnNLkGMbflOpgelMNKBZVxSmfobIbFN8CHVc1UqLK2ElsZ9RCQANgkMHlMkOMj-XT0wHa3VBUQ\", \"p\": \"8mgriveKJAp1S7SHqirQAfZafxVuAK_A2QBYPsAUhikfBOvN0HtZjgurPXSJSdgR8KbWV7ZjdJM_eOivIb_XiuAaUdIOXbLRet7t9a_NJtmX9iybhoa9VOJFMBq_rbnbbte2kq0-FnXmv3cukbC2LaEw3aEcDgyURLCgWFqt7M0\", \"q\": \"zbbTv5421GowOfKVEuVoA35CEWgl8mdasnEZac2LWxMwKExikKU5LLacLQlcOt7A6n1ZGUC2wyH8mstO5tV34Eug3fnNrbnxFUEE_ZB_njs_rtZnwz57AoUXOXVnd194seIZF9PjdzZcuwXwXbrZ2RSVW8if_ZH5OVYEM1EsA9M\", \"dp\": \"1BaIYmIKn1X3InGlcSFcNRtSOnaJdFhRpotCqkRssKUx2qBlxs7ln_5dqLtZkx5VM_UE_GE7yzc6BZOwBxtOftdsr8HVh-14ksSR9rAGEsO2zVBiEuW4qZf_aQM-ScWfU--wcczZ0dT-Ou8P87Bk9K9fjcn0PeaLoz3WTPepzNE\", \"dq\": \"kYw2u4_UmWvcXVOeV_VKJ5aQZkJ6_sxTpodRBMPyQmkMHKcW4eKU1mcJju_deqWadw5jGPPpm5yTXm5UkAwfOeookoWpGa7CvVf4kPNI6Aphn3GBjunJHNpPuU6w-wvomGsxd-NqQDGNYKHuFFMcyXO_zWXglQdP_1o1tJ1M-BM\", \"qi\": \"j94Ens784M8zsfwWoJhYq9prcSZOGgNbtFWQZO8HP8pcNM9ls7YA4snTtAS_B4peWWFAFZ0LSKPCxAvJnrq69ocmEKEk7ss1Jo062f9pLTQ6cnhMjev3IqLocIFt5\" }"
|
||||
rxresp
|
||||
expect resp.http.x-decrypted == ""
|
||||
} -run
|
||||
|
||||
27
reg-tests/jwt/rsa1_5.key
Normal file
27
reg-tests/jwt/rsa1_5.key
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEpQIBAAKCAQEAwsqJbopx18NQFYLYOq4ZeMSE89yGiEankUpf25yV8QqroKUG
|
||||
rASj/OeqTWUjwPGKTN1vGFFuHYxiJeAUQH2qQPmg9Oqk6+ATBEKn9COKYniQ5459
|
||||
UxCwmZA2RL6ufhrNyq0JF3GfXkjLDBfhU9zJJEOhknsA0L/c+X4AI3d/NbFdMqxN
|
||||
e1V/UWAlLcbKdwO6iC9fAvwUmDQxgy6R0DC1CMouQpenMRcALaSHar1cm4K+syoN
|
||||
obv3HEuqgZ3s6+hOOSqauqAO0GUozPpaIA7OeruyRl5sTWT0r+iz39bchID2bIKt
|
||||
cqLiFcSYPLBcxmsaQCqRlGhmv6stjTCLV1yT9wIDAQABAoIBAG/YV30PJTrcPJl9
|
||||
Xaaj3KBJRoW3M8//sats5wl0KWwT0mQVHXWb3IUUh+aUkijxB5YG9wkhiHaS6rAQ
|
||||
r9Av15gjPVYjfLqrGIAzvbgiyAyuaZVrbW5KgPxLn71tN0fVICClpji91uIOLfgt
|
||||
pgW/GgcmhhlTYy55W+otfOrgbDxpL2nix20HEUCgL4TlE3jYsoogm1BicApuGrzI
|
||||
ma7M9a/NWNWeYs6NBEcXWcpsTNxUWfb40wfug7Yrb01152gJtSU7ukyKY9/Ltppz
|
||||
S5BjG35TqYHpTDSgWVcUpn6GyGxTfAh1XNVKiythJbGfUQkADYJDB5TJDjI/l09M
|
||||
B2t1QVECgYEA8mgriveKJAp1S7SHqirQAfZafxVuAK/A2QBYPsAUhikfBOvN0HtZ
|
||||
jgurPXSJSdgR8KbWV7ZjdJM/eOivIb/XiuAaUdIOXbLRet7t9a/NJtmX9iybhoa9
|
||||
VOJFMBq/rbnbbte2kq0+FnXmv3cukbC2LaEw3aEcDgyURLCgWFqt7M0CgYEAzbbT
|
||||
v5421GowOfKVEuVoA35CEWgl8mdasnEZac2LWxMwKExikKU5LLacLQlcOt7A6n1Z
|
||||
GUC2wyH8mstO5tV34Eug3fnNrbnxFUEE/ZB/njs/rtZnwz57AoUXOXVnd194seIZ
|
||||
F9PjdzZcuwXwXbrZ2RSVW8if/ZH5OVYEM1EsA9MCgYEA1BaIYmIKn1X3InGlcSFc
|
||||
NRtSOnaJdFhRpotCqkRssKUx2qBlxs7ln/5dqLtZkx5VM/UE/GE7yzc6BZOwBxtO
|
||||
ftdsr8HVh+14ksSR9rAGEsO2zVBiEuW4qZf/aQM+ScWfU++wcczZ0dT+Ou8P87Bk
|
||||
9K9fjcn0PeaLoz3WTPepzNECgYEAkYw2u4/UmWvcXVOeV/VKJ5aQZkJ6/sxTpodR
|
||||
BMPyQmkMHKcW4eKU1mcJju/deqWadw5jGPPpm5yTXm5UkAwfOeookoWpGa7CvVf4
|
||||
kPNI6Aphn3GBjunJHNpPuU6w+wvomGsxd+NqQDGNYKHuFFMcyXO/zWXglQdP/1o1
|
||||
tJ1M+BMCgYEAj94Ens784M8zsfwWoJhYq9prcSZOGgNbtFWQZO8HP8pcNM9ls7YA
|
||||
4snTtAS/B4peWWFAFZ0LSKPCxAvJnrq69ocmEKEk7ss1Jo062f9pLTQ6cnhMjev3
|
||||
IqLocIFt5Vbsg/PWYpFSR7re6FRbF9EYOM7F2+HRv1idxKCWoyQfBqk=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
21
reg-tests/jwt/rsa1_5.pem
Normal file
21
reg-tests/jwt/rsa1_5.pem
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDizCCAnOgAwIBAgIUWKLX2P4KNDw9kBROSjFXWa/kjtowDQYJKoZIhvcNAQEL
|
||||
BQAwVTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
|
||||
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEOMAwGA1UEAwwFYWEuYmIwHhcNMjUx
|
||||
MjA0MTYyMTE2WhcNMjYxMjA0MTYyMTE2WjBVMQswCQYDVQQGEwJBVTETMBEGA1UE
|
||||
CAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRk
|
||||
MQ4wDAYDVQQDDAVhYS5iYjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
|
||||
AMLKiW6KcdfDUBWC2DquGXjEhPPchohGp5FKX9uclfEKq6ClBqwEo/znqk1lI8Dx
|
||||
ikzdbxhRbh2MYiXgFEB9qkD5oPTqpOvgEwRCp/QjimJ4kOeOfVMQsJmQNkS+rn4a
|
||||
zcqtCRdxn15IywwX4VPcySRDoZJ7ANC/3Pl+ACN3fzWxXTKsTXtVf1FgJS3GyncD
|
||||
uogvXwL8FJg0MYMukdAwtQjKLkKXpzEXAC2kh2q9XJuCvrMqDaG79xxLqoGd7Ovo
|
||||
TjkqmrqgDtBlKMz6WiAOznq7skZebE1k9K/os9/W3ISA9myCrXKi4hXEmDywXMZr
|
||||
GkAqkZRoZr+rLY0wi1dck/cCAwEAAaNTMFEwHQYDVR0OBBYEFD+wduQlsKCoxfO5
|
||||
U1W7Urqs+oTbMB8GA1UdIwQYMBaAFD+wduQlsKCoxfO5U1W7Urqs+oTbMA8GA1Ud
|
||||
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAANfh6jY8+3XQ16SH7Pa07MK
|
||||
ncnQuZqMemYUQzieBL15zftdpd0vYjOfaN5UAQ7ODVAb/iTF4nnADl0VwOocqEiR
|
||||
vfaqwJTmKiNDjyIp1SJjhkRcYu3hmDXTZOzhuFxoZALe7OzWFgSjf3fX2IOOBfH+
|
||||
HBqviTuMi53oURWv/ISPXk+Dr7LaCmm1rEjRq8PINJ2Ni6cN90UvHOrHdl+ty2o/
|
||||
C3cQWIZrsNM6agUfiNiPCWz6x+Z4t+zP7+EorCM7CKKLGnycPUJE2I6H8bJmIHHS
|
||||
ITNmUO5juLawQ5h2m5Wu/BCY3rlLU9SLrmWAAHm6lFJb0XzFgqhiCz7lxYofj8c=
|
||||
-----END CERTIFICATE-----
|
||||
28
reg-tests/jwt/rsa_oeap.key
Normal file
28
reg-tests/jwt/rsa_oeap.key
Normal file
|
|
@ -0,0 +1,28 @@
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEpQIBAAKCAQEAwsqJbopx18NQFYLYOq4ZeMSE89yGiEankUpf25yV8QqroKUG
|
||||
rASj/OeqTWUjwPGKTN1vGFFuHYxiJeAUQH2qQPmg9Oqk6+ATBEKn9COKYniQ5459
|
||||
UxCwmZA2RL6ufhrNyq0JF3GfXkjLDBfhU9zJJEOhknsA0L/c+X4AI3d/NbFdMqxN
|
||||
e1V/UWAlLcbKdwO6iC9fAvwUmDQxgy6R0DC1CMouQpenMRcALaSHar1cm4K+syoN
|
||||
obv3HEuqgZ3s6+hOOSqauqAO0GUozPpaIA7OeruyRl5sTWT0r+iz39bchID2bIKt
|
||||
cqLiFcSYPLBcxmsaQCqRlGhmv6stjTCLV1yT9wIDAQABAoIBAG/YV30PJTrcPJl9
|
||||
Xaaj3KBJRoW3M8//sats5wl0KWwT0mQVHXWb3IUUh+aUkijxB5YG9wkhiHaS6rAQ
|
||||
r9Av15gjPVYjfLqrGIAzvbgiyAyuaZVrbW5KgPxLn71tN0fVICClpji91uIOLfgt
|
||||
pgW/GgcmhhlTYy55W+otfOrgbDxpL2nix20HEUCgL4TlE3jYsoogm1BicApuGrzI
|
||||
ma7M9a/NWNWeYs6NBEcXWcpsTNxUWfb40wfug7Yrb01152gJtSU7ukyKY9/Ltppz
|
||||
S5BjG35TqYHpTDSgWVcUpn6GyGxTfAh1XNVKiythJbGfUQkADYJDB5TJDjI/l09M
|
||||
B2t1QVECgYEA8mgriveKJAp1S7SHqirQAfZafxVuAK/A2QBYPsAUhikfBOvN0HtZ
|
||||
jgurPXSJSdgR8KbWV7ZjdJM/eOivIb/XiuAaUdIOXbLRet7t9a/NJtmX9iybhoa9
|
||||
VOJFMBq/rbnbbte2kq0+FnXmv3cukbC2LaEw3aEcDgyURLCgWFqt7M0CgYEAzbbT
|
||||
v5421GowOfKVEuVoA35CEWgl8mdasnEZac2LWxMwKExikKU5LLacLQlcOt7A6n1Z
|
||||
GUC2wyH8mstO5tV34Eug3fnNrbnxFUEE/ZB/njs/rtZnwz57AoUXOXVnd194seIZ
|
||||
F9PjdzZcuwXwXbrZ2RSVW8if/ZH5OVYEM1EsA9MCgYEA1BaIYmIKn1X3InGlcSFc
|
||||
NRtSOnaJdFhRpotCqkRssKUx2qBlxs7ln/5dqLtZkx5VM/UE/GE7yzc6BZOwBxtO
|
||||
ftdsr8HVh+14ksSR9rAGEsO2zVBiEuW4qZf/aQM+ScWfU++wcczZ0dT+Ou8P87Bk
|
||||
9K9fjcn0PeaLoz3WTPepzNECgYEAkYw2u4/UmWvcXVOeV/VKJ5aQZkJ6/sxTpodR
|
||||
BMPyQmkMHKcW4eKU1mcJju/deqWadw5jGPPpm5yTXm5UkAwfOeookoWpGa7CvVf4
|
||||
kPNI6Aphn3GBjunJHNpPuU6w+wvomGsxd+NqQDGNYKHuFFMcyXO/zWXglQdP/1o1
|
||||
tJ1M+BMCgYEAj94Ens784M8zsfwWoJhYq9prcSZOGgNbtFWQZO8HP8pcNM9ls7YA
|
||||
4snTtAS/B4peWWFAFZ0LSKPCxAvJnrq69ocmEKEk7ss1Jo062f9pLTQ6cnhMjev3
|
||||
IqLocIFt5Vbsg/PWYpFSR7re6FRbF9EYOM7F2+HRv1idxKCWoyQfBqk=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
|
||||
22
reg-tests/jwt/rsa_oeap.pem
Normal file
22
reg-tests/jwt/rsa_oeap.pem
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDjTCCAnWgAwIBAgIUHGhD07tC9adNLCkSBNrfrhFUX9IwDQYJKoZIhvcNAQEL
|
||||
BQAwVTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
|
||||
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEOMAwGA1UEAwwFYWEuYmIwIBcNMjUx
|
||||
MjA1MTMxOTQ0WhgPMjA1MzA0MjIxMzE5NDRaMFUxCzAJBgNVBAYTAkFVMRMwEQYD
|
||||
VQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBM
|
||||
dGQxDjAMBgNVBAMMBWFhLmJiMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
|
||||
AQEAwsqJbopx18NQFYLYOq4ZeMSE89yGiEankUpf25yV8QqroKUGrASj/OeqTWUj
|
||||
wPGKTN1vGFFuHYxiJeAUQH2qQPmg9Oqk6+ATBEKn9COKYniQ5459UxCwmZA2RL6u
|
||||
fhrNyq0JF3GfXkjLDBfhU9zJJEOhknsA0L/c+X4AI3d/NbFdMqxNe1V/UWAlLcbK
|
||||
dwO6iC9fAvwUmDQxgy6R0DC1CMouQpenMRcALaSHar1cm4K+syoNobv3HEuqgZ3s
|
||||
6+hOOSqauqAO0GUozPpaIA7OeruyRl5sTWT0r+iz39bchID2bIKtcqLiFcSYPLBc
|
||||
xmsaQCqRlGhmv6stjTCLV1yT9wIDAQABo1MwUTAdBgNVHQ4EFgQUP7B25CWwoKjF
|
||||
87lTVbtSuqz6hNswHwYDVR0jBBgwFoAUP7B25CWwoKjF87lTVbtSuqz6hNswDwYD
|
||||
VR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEArDl4gSwqpriAFjWcAtWE
|
||||
sTLTxNgbnkARDeyhQ1dj6rj9xCccBU6WN07r639c9S0lsMb+jeQU9EJFoVtX91jM
|
||||
fymumOWMDY/CYm41PkHqcF6hEup5dfAeDnN/OoDjXwgTU74Y3lF/sldeS06KorCp
|
||||
O9ROyq3mM9n4EtFAAEEN2Esyy1d1CJiMYKHdYRKycMwgcu1pm9n1up4ivdgLY+BH
|
||||
XhnJPuKmmU3FauYlXzfcijUPAAuJdm3PZ+i4SNGsTa49tXOkHMED31EOjaAEzuX0
|
||||
rWij715QkL/RIp8lPxeAvHqxavQIDtfjojFD21Cx+jIGuNcfrGNkzNjfS7AF+1+W
|
||||
jA==
|
||||
-----END CERTIFICATE-----
|
||||
84
reg-tests/proxy/cli_add_backend.vtc
Normal file
84
reg-tests/proxy/cli_add_backend.vtc
Normal file
|
|
@ -0,0 +1,84 @@
|
|||
varnishtest "Add backend via cli"
|
||||
|
||||
feature ignore_unknown_macro
|
||||
|
||||
haproxy hsrv -conf {
|
||||
global
|
||||
.if feature(THREAD)
|
||||
thread-groups 1
|
||||
.endif
|
||||
|
||||
defaults
|
||||
mode http
|
||||
timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
|
||||
frontend fe
|
||||
bind "fd@${fe}"
|
||||
http-request return status 200
|
||||
} -start
|
||||
|
||||
haproxy h1 -conf {
|
||||
global
|
||||
.if feature(THREAD)
|
||||
thread-groups 1
|
||||
.endif
|
||||
|
||||
defaults
|
||||
mode http
|
||||
timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
|
||||
frontend fe
|
||||
bind "fd@${feS}"
|
||||
force-be-switch if { req.hdr("x-admin") "1" }
|
||||
use_backend %[req.hdr(x-be)]
|
||||
|
||||
defaults def
|
||||
|
||||
defaults def_http
|
||||
mode http
|
||||
} -start
|
||||
|
||||
client c1 -connect ${h1_feS_sock} {
|
||||
txreq -hdr "x-be: be"
|
||||
rxresp
|
||||
expect resp.status == 503
|
||||
} -run
|
||||
|
||||
haproxy h1 -cli {
|
||||
# non existent backend
|
||||
send "experimental-mode on; add backend be from def"
|
||||
expect ~ "Mode is required"
|
||||
|
||||
send "experimental-mode on; add backend be from def_http"
|
||||
expect ~ "New backend registered."
|
||||
|
||||
send "add server be/srv ${hsrv_fe_addr}:${hsrv_fe_port}"
|
||||
expect ~ "New server registered."
|
||||
send "enable server be/srv"
|
||||
expect ~ ".*"
|
||||
}
|
||||
|
||||
client c1 -connect ${h1_feS_sock} {
|
||||
txreq -hdr "x-be: be"
|
||||
rxresp
|
||||
expect resp.status == 503
|
||||
|
||||
txreq -hdr "x-be: be" -hdr "x-admin: 1"
|
||||
rxresp
|
||||
expect resp.status == 200
|
||||
} -run
|
||||
|
||||
haproxy h1 -cli {
|
||||
send "publish backend be"
|
||||
expect ~ "Backend published."
|
||||
}
|
||||
|
||||
client c1 -connect ${h1_feS_sock} {
|
||||
txreq -hdr "x-be: be"
|
||||
rxresp
|
||||
expect resp.status == 200
|
||||
} -run
|
||||
|
|
@ -51,7 +51,7 @@ haproxy h1 -cli {
|
|||
|
||||
# invalid load-balancing algo
|
||||
send "add server other/s1 ${s1_addr}:${s1_port}"
|
||||
expect ~ "Backend must use a dynamic load balancing to support dynamic servers."
|
||||
expect ~ "backend 'other' uses a non dynamic load balancing method"
|
||||
|
||||
# invalid mux proto
|
||||
send "add server other2/s1 ${s1_addr}:${s1_port} proto h2"
|
||||
|
|
|
|||
|
|
@ -145,7 +145,7 @@ haproxy h1 -cli {
|
|||
send "show ssl ca-file ${testdir}/certs/set_cafile_interCA1.crt:2"
|
||||
expect !~ ".*SHA1 FingerPrint: 4FFF535278883264693CEA72C4FAD13F995D0098"
|
||||
send "show ssl ca-file ${testdir}/certs/set_cafile_interCA1.crt:2"
|
||||
expect ~ ".*SHA1 FingerPrint: 3D3D1D10AD74A8135F05A818E10E5FA91433954D"
|
||||
expect ~ ".*SHA1 FingerPrint: 3D3D1D10AD74A8135F05A818E10E5FA91433954D|5F8DAE4B2099A09F9BDDAFD7E9D900F0CE49977C"
|
||||
}
|
||||
|
||||
client c1 -connect ${h1_clearverifiedlst_sock} {
|
||||
|
|
|
|||
|
|
@ -86,9 +86,7 @@ haproxy h1 -cli {
|
|||
expect ~ "\\*${testdir}/certs/interCA2_crl_empty.pem"
|
||||
|
||||
send "show ssl crl-file \\*${testdir}/certs/interCA2_crl_empty.pem"
|
||||
expect ~ "Revoked Certificates:"
|
||||
send "show ssl crl-file \\*${testdir}/certs/interCA2_crl_empty.pem:1"
|
||||
expect ~ "Serial Number: 1008"
|
||||
expect ~ "Revoked Certificates:\n.*Serial Number: 1008"
|
||||
}
|
||||
|
||||
# This connection should still succeed since the transaction was not committed
|
||||
|
|
|
|||
|
|
@ -150,7 +150,7 @@ client c5 -connect ${h1_clearlst_sock} {
|
|||
# Use another SNI - the server certificate should be generated and different
|
||||
# than the default one
|
||||
client c6 -connect ${h1_clearlst_sock} {
|
||||
txreq -url "/P-384" -hdr "x-sni: unknown-sni.com"
|
||||
txreq -url "/P-384" -hdr "x-sni: sni-longer-sni-longer-sni-longer.sni-longer-than-64-bytes-unknown-sni.com"
|
||||
rxresp
|
||||
expect resp.status == 200
|
||||
expect resp.http.x-ssl-sig_alg == "ecdsa-with-SHA256"
|
||||
|
|
@ -165,7 +165,7 @@ client c6 -connect ${h1_clearlst_sock} {
|
|||
# The curve with the highest priority is X25519 for OpenSSL 1.1.1 and later,
|
||||
# and P-256 for OpenSSL 1.0.2.
|
||||
shell {
|
||||
echo "Q" | openssl s_client -unix "${tmpdir}/ssl.sock" -servername server.ecdsa.com -tls1_2 2>/dev/null | grep -E "Server Temp Key: (ECDH, P-256, 256 bits|ECDH, prime256v1, 256 bits|X25519, 253 bits)"
|
||||
echo "Q" | openssl s_client -unix "${tmpdir}/ssl.sock" -servername server.ecdsa.com -tls1_2 2>/dev/null | grep -E "(Server|Peer) Temp Key: (ECDH, P-256, 256 bits|ECDH, prime256v1, 256 bits|X25519, 253 bits)"
|
||||
}
|
||||
|
||||
shell {
|
||||
|
|
|
|||
172
reg-tests/stream/test_content_switching.vtc
Normal file
172
reg-tests/stream/test_content_switching.vtc
Normal file
|
|
@ -0,0 +1,172 @@
|
|||
varnishtest "Ensure switching-rules conformance with backend eligibility"
|
||||
|
||||
feature ignore_unknown_macro
|
||||
|
||||
haproxy hsrv -conf {
|
||||
global
|
||||
.if feature(THREAD)
|
||||
thread-groups 1
|
||||
.endif
|
||||
|
||||
defaults
|
||||
mode http
|
||||
timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
|
||||
frontend fe
|
||||
bind "fd@${feS}"
|
||||
http-request return status 200 hdr "x-be" "li"
|
||||
} -start
|
||||
|
||||
haproxy h1 -conf {
|
||||
global
|
||||
.if feature(THREAD)
|
||||
thread-groups 1
|
||||
.endif
|
||||
|
||||
defaults
|
||||
mode http
|
||||
timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||
|
||||
frontend fe
|
||||
bind "fd@${fe1S}"
|
||||
|
||||
use_backend %[req.hdr("x-target")] if { req.hdr("x-dyn") "1" }
|
||||
use_backend be if { req.hdr("x-target") "be" }
|
||||
|
||||
frontend fe_default
|
||||
bind "fd@${fe2S}"
|
||||
|
||||
force-be-switch if { req.hdr("x-force") "1" }
|
||||
use_backend %[req.hdr("x-target")] if { req.hdr("x-dyn") "1" }
|
||||
use_backend be_disabled if { req.hdr("x-target") "be_disabled" }
|
||||
use_backend be
|
||||
use_backend be2
|
||||
default_backend be_default
|
||||
|
||||
listen li
|
||||
bind "fd@${liS}"
|
||||
use_backend %[req.hdr("x-target")] if { req.hdr("x-dyn") "1" }
|
||||
server srv ${hsrv_feS_sock}
|
||||
|
||||
backend be
|
||||
http-request return status 200 hdr "x-be" %[be_name]
|
||||
|
||||
backend be2
|
||||
http-request return status 200 hdr "x-be" %[be_name]
|
||||
|
||||
backend be_disabled
|
||||
disabled
|
||||
http-request return status 200 hdr "x-be" %[be_name]
|
||||
|
||||
backend be_default
|
||||
http-request return status 200 hdr "x-be" %[be_name]
|
||||
} -start
|
||||
|
||||
client c1 -connect ${h1_fe1S_sock} {
|
||||
# Dynamic rule matching
|
||||
txreq -hdr "x-dyn: 1" -hdr "x-target: be"
|
||||
rxresp
|
||||
expect resp.status == 200
|
||||
expect resp.http.x-be == "be"
|
||||
|
||||
# Dynamic rule no match -> 503 expected
|
||||
txreq -hdr "x-dyn: 1" -hdr "x-target: be_unknown"
|
||||
rxresp
|
||||
expect resp.status == 503
|
||||
} -run
|
||||
|
||||
# Connect to frontend with default backend set
|
||||
client c2 -connect ${h1_fe2S_sock} {
|
||||
# Dynamic rule matching
|
||||
txreq -hdr "x-dyn: 1" -hdr "x-target: be"
|
||||
rxresp
|
||||
expect resp.status == 200
|
||||
expect resp.http.x-be == "be"
|
||||
|
||||
# Dynamic rule no match -> use default backend
|
||||
txreq -hdr "x-dyn: 1" -hdr "x-target: be_unknown"
|
||||
rxresp
|
||||
expect resp.status == 200
|
||||
expect resp.http.x-be == "be_default"
|
||||
|
||||
# Static rule on disabled backend -> continue to next rule
|
||||
txreq -hdr "x-target: be_disabled"
|
||||
rxresp
|
||||
expect resp.status == 200
|
||||
expect resp.http.x-be == "be"
|
||||
} -run
|
||||
|
||||
# Connect to listen proxy type
|
||||
client c3 -connect ${h1_liS_sock} {
|
||||
# Dynamic rule matching
|
||||
txreq -hdr "x-dyn: 1" -hdr "x-target: be"
|
||||
rxresp
|
||||
expect resp.status == 200
|
||||
expect resp.http.x-be == "be"
|
||||
|
||||
# Dynamic rule no match -> stay on current proxy instance
|
||||
txreq -hdr "x-dyn: 1" -hdr "x-target: be_unknown"
|
||||
rxresp
|
||||
expect resp.status == 200
|
||||
expect resp.http.x-be == "li"
|
||||
} -run
|
||||
|
||||
haproxy h1 -cli {
|
||||
send "unpublish backend be_unknown"
|
||||
expect ~ "No such backend."
|
||||
|
||||
send "unpublish backend be_disabled"
|
||||
expect ~ "No effect on a disabled backend."
|
||||
|
||||
send "unpublish backend be"
|
||||
expect ~ "Backend unpublished."
|
||||
}
|
||||
|
||||
client c4 -connect ${h1_fe2S_sock} {
|
||||
# Static rule on unpublished backend -> continue to next rule
|
||||
txreq
|
||||
rxresp
|
||||
expect resp.status == 200
|
||||
expect resp.http.x-be == "be2"
|
||||
|
||||
# Dynamic rule on unpublished backend -> continue to next rule
|
||||
txreq -hdr "x-dyn: 1" -hdr "x-target: be"
|
||||
rxresp
|
||||
expect resp.status == 200
|
||||
expect resp.http.x-be == "be2"
|
||||
|
||||
# Static rule matching on unpublished backend with force-be-switch
|
||||
txreq -hdr "x-force: 1"
|
||||
rxresp
|
||||
expect resp.status == 200
|
||||
expect resp.http.x-be == "be"
|
||||
|
||||
# Dynamic rule matching on unpublished backend with force-be-switch
|
||||
txreq -hdr "x-dyn: 1" -hdr "x-target: be" -hdr "x-force: 1"
|
||||
rxresp
|
||||
expect resp.status == 200
|
||||
expect resp.http.x-be == "be"
|
||||
} -run
|
||||
|
||||
haproxy h1 -cli {
|
||||
send "publish backend be"
|
||||
expect ~ "Backend published."
|
||||
}
|
||||
|
||||
client c5 -connect ${h1_fe2S_sock} {
|
||||
# Static rule matching on republished backend
|
||||
txreq -hdr "x-target: be"
|
||||
rxresp
|
||||
expect resp.status == 200
|
||||
expect resp.http.x-be == "be"
|
||||
|
||||
# Dynamic rule matching on republished backend
|
||||
txreq -hdr "x-dyn: 1" -hdr "x-target: be"
|
||||
rxresp
|
||||
expect resp.status == 200
|
||||
expect resp.http.x-be == "be"
|
||||
} -run
|
||||
|
|
@ -156,7 +156,7 @@ build_aws_lc () {
|
|||
mkdir -p build
|
||||
cd build
|
||||
cmake -version
|
||||
cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=1 -DDISABLE_GO=1 -DDISABLE_PERL=1 \
|
||||
cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DBUILD_SHARED_LIBS=1 -DDISABLE_GO=1 -DDISABLE_PERL=1 \
|
||||
-DCMAKE_BUILD_WITH_INSTALL_RPATH=ON -DCMAKE_INSTALL_RPATH=${BUILDSSL_DESTDIR}/lib \
|
||||
-DBUILD_TESTING=0 -DCMAKE_INSTALL_PREFIX=${BUILDSSL_DESTDIR} ..
|
||||
make -j$(nproc)
|
||||
|
|
@ -184,7 +184,7 @@ build_aws_lc_fips () {
|
|||
mkdir -p build
|
||||
cd build
|
||||
cmake -version
|
||||
cmake -DCMAKE_BUILD_TYPE=Release -DFIPS=1 -DBUILD_SHARED_LIBS=1 \
|
||||
cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DFIPS=1 -DBUILD_SHARED_LIBS=1 \
|
||||
-DCMAKE_BUILD_WITH_INSTALL_RPATH=ON -DCMAKE_INSTALL_RPATH=${BUILDSSL_DESTDIR}/lib \
|
||||
-DBUILD_TESTING=0 -DCMAKE_INSTALL_PREFIX=${BUILDSSL_DESTDIR} ..
|
||||
make -j$(nproc)
|
||||
|
|
@ -196,10 +196,11 @@ build_aws_lc_fips () {
|
|||
|
||||
download_quictls () {
|
||||
if [ ! -d "${BUILDSSL_TMPDIR}/quictls" ]; then
|
||||
git clone --depth=1 ${QUICTLS_URL} ${BUILDSSL_TMPDIR}/quictls
|
||||
git clone -b "${QUICTLS_VERSION}" --depth=1 ${QUICTLS_URL} ${BUILDSSL_TMPDIR}/quictls
|
||||
else
|
||||
(
|
||||
cd ${BUILDSSL_TMPDIR}/quictls
|
||||
git checkout "${QUICTLS_VERSION}" || exit 1
|
||||
git pull
|
||||
)
|
||||
fi
|
||||
|
|
@ -221,7 +222,11 @@ build_quictls () {
|
|||
cp -r include/* ${BUILDSSL_DESTDIR}/include
|
||||
else
|
||||
./config shared no-tests ${QUICTLS_EXTRA_ARGS:-} --prefix="${BUILDSSL_DESTDIR}" --openssldir="${BUILDSSL_DESTDIR}" --libdir=lib -DPURIFY
|
||||
make -j$(nproc) build_sw
|
||||
if [ -z "${QUICTLS_VERSION##OpenSSL_1_1_1*}" ]; then
|
||||
make all
|
||||
else
|
||||
make -j$(nproc) build_sw
|
||||
fi
|
||||
make install_sw
|
||||
fi
|
||||
}
|
||||
|
|
@ -287,7 +292,7 @@ if [ ! -z ${AWS_LC_FIPS_VERSION+x} ]; then
|
|||
build_aws_lc_fips
|
||||
fi
|
||||
|
||||
if [ ! -z ${QUICTLS+x} ]; then
|
||||
if [ ! -z ${QUICTLS_VERSION+x} ]; then
|
||||
download_quictls
|
||||
build_quictls
|
||||
fi
|
||||
|
|
|
|||
|
|
@ -1,10 +1,12 @@
|
|||
#!/bin/sh
|
||||
|
||||
DESTDIR=${DESTDIR:-${PWD}/../vtest/}
|
||||
TMPDIR=${TMPDIR:-$(mktemp -d)}
|
||||
set -eux
|
||||
|
||||
curl -fsSL https://github.com/vtest/VTest2/archive/main.tar.gz -o VTest.tar.gz
|
||||
mkdir ../vtest
|
||||
tar xvf VTest.tar.gz -C ../vtest --strip-components=1
|
||||
curl -fsSL https://github.com/vtest/VTest2/archive/main.tar.gz -o "${TMPDIR}/VTest.tar.gz"
|
||||
mkdir -p "${TMPDIR}/vtest"
|
||||
tar xvf ${TMPDIR}/VTest.tar.gz -C "${TMPDIR}/vtest" --strip-components=1
|
||||
# Special flags due to: https://github.com/vtest/VTest/issues/12
|
||||
|
||||
# Note: do not use "make -C ../vtest", otherwise MAKEFLAGS contains "w"
|
||||
|
|
@ -13,7 +15,7 @@ tar xvf VTest.tar.gz -C ../vtest --strip-components=1
|
|||
# MFLAGS works on BSD but misses variable definitions on GNU Make.
|
||||
# Better just avoid the -C and do the cd ourselves then.
|
||||
|
||||
cd ../vtest
|
||||
cd "${TMPDIR}/vtest"
|
||||
|
||||
set +e
|
||||
CPUS=${CPUS:-$(nproc 2>/dev/null)}
|
||||
|
|
@ -28,3 +30,6 @@ if test -f /opt/homebrew/include/pcre2.h; then
|
|||
else
|
||||
make -j${CPUS} FLAGS="-O2 -s -Wall"
|
||||
fi
|
||||
|
||||
mkdir -p "${DESTDIR}"
|
||||
cp "${TMPDIR}/vtest/vtest" "${DESTDIR}"
|
||||
|
|
|
|||
149
src/activity.c
149
src/activity.c
|
|
@ -659,8 +659,20 @@ void activity_count_runtime(uint32_t run_time)
|
|||
if (!(_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)) {
|
||||
if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_ON ||
|
||||
((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AON &&
|
||||
swrate_avg(run_time, TIME_STATS_SAMPLES) >= up)))
|
||||
swrate_avg(run_time, TIME_STATS_SAMPLES) >= up))) {
|
||||
|
||||
if (profiling & HA_PROF_TASKS_LOCK)
|
||||
_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_TASK_PROFILING_L);
|
||||
else
|
||||
_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_TASK_PROFILING_L);
|
||||
|
||||
if (profiling & HA_PROF_TASKS_MEM)
|
||||
_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_TASK_PROFILING_M);
|
||||
else
|
||||
_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_TASK_PROFILING_M);
|
||||
|
||||
_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_TASK_PROFILING);
|
||||
}
|
||||
} else {
|
||||
if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_OFF ||
|
||||
((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AOFF &&
|
||||
|
|
@ -692,26 +704,41 @@ static int cfg_parse_prof_memory(char **args, int section_type, struct proxy *cu
|
|||
}
|
||||
#endif // USE_MEMORY_PROFILING
|
||||
|
||||
/* config parser for global "profiling.tasks", accepts "on" or "off" */
|
||||
/* config parser for global "profiling.tasks", accepts "on", "off", 'auto",
|
||||
* "lock", "no-lock", "memory", "no-memory".
|
||||
*/
|
||||
static int cfg_parse_prof_tasks(char **args, int section_type, struct proxy *curpx,
|
||||
const struct proxy *defpx, const char *file, int line,
|
||||
char **err)
|
||||
{
|
||||
if (too_many_args(1, args, err, NULL))
|
||||
return -1;
|
||||
int arg;
|
||||
|
||||
if (strcmp(args[1], "on") == 0) {
|
||||
profiling = (profiling & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_ON;
|
||||
HA_ATOMIC_STORE(&prof_task_start_ns, now_ns);
|
||||
for (arg = 1; *args[arg]; arg++) {
|
||||
if (strcmp(args[arg], "on") == 0) {
|
||||
profiling = (profiling & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_ON;
|
||||
HA_ATOMIC_STORE(&prof_task_start_ns, now_ns);
|
||||
}
|
||||
else if (strcmp(args[arg], "auto") == 0) {
|
||||
profiling = (profiling & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_AOFF;
|
||||
HA_ATOMIC_STORE(&prof_task_start_ns, now_ns);
|
||||
}
|
||||
else if (strcmp(args[arg], "off") == 0)
|
||||
profiling = (profiling & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_OFF;
|
||||
else if (strcmp(args[arg], "lock") == 0)
|
||||
profiling |= HA_PROF_TASKS_LOCK;
|
||||
else if (strcmp(args[arg], "no-lock") == 0)
|
||||
profiling &= ~HA_PROF_TASKS_LOCK;
|
||||
else if (strcmp(args[arg], "memory") == 0)
|
||||
profiling |= HA_PROF_TASKS_MEM;
|
||||
else if (strcmp(args[arg], "no-memory") == 0)
|
||||
profiling &= ~HA_PROF_TASKS_MEM;
|
||||
else
|
||||
break;
|
||||
}
|
||||
else if (strcmp(args[1], "auto") == 0) {
|
||||
profiling = (profiling & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_AOFF;
|
||||
HA_ATOMIC_STORE(&prof_task_start_ns, now_ns);
|
||||
}
|
||||
else if (strcmp(args[1], "off") == 0)
|
||||
profiling = (profiling & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_OFF;
|
||||
else {
|
||||
memprintf(err, "'%s' expects either 'on', 'auto', or 'off' but got '%s'.", args[0], args[1]);
|
||||
|
||||
/* either no arg or invalid arg */
|
||||
if (arg == 1 || *args[arg]) {
|
||||
memprintf(err, "'%s' expects a combination of either 'on', 'auto', 'off', 'lock', 'no-lock', 'memory', or 'no-memory', but got '%s'.", args[0], args[arg]);
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
|
|
@ -720,6 +747,8 @@ static int cfg_parse_prof_tasks(char **args, int section_type, struct proxy *cur
|
|||
/* parse a "set profiling" command. It always returns 1. */
|
||||
static int cli_parse_set_profiling(char **args, char *payload, struct appctx *appctx, void *private)
|
||||
{
|
||||
int arg;
|
||||
|
||||
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
|
||||
return 1;
|
||||
|
||||
|
|
@ -765,52 +794,66 @@ static int cli_parse_set_profiling(char **args, char *payload, struct appctx *ap
|
|||
if (strcmp(args[2], "tasks") != 0)
|
||||
return cli_err(appctx, "Expects either 'tasks' or 'memory'.\n");
|
||||
|
||||
if (strcmp(args[3], "on") == 0) {
|
||||
unsigned int old = profiling;
|
||||
int i;
|
||||
for (arg = 3; *args[arg]; arg++) {
|
||||
if (strcmp(args[arg], "on") == 0) {
|
||||
unsigned int old = profiling;
|
||||
int i;
|
||||
|
||||
while (!_HA_ATOMIC_CAS(&profiling, &old, (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_ON))
|
||||
;
|
||||
while (!_HA_ATOMIC_CAS(&profiling, &old, (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_ON))
|
||||
;
|
||||
|
||||
HA_ATOMIC_STORE(&prof_task_start_ns, now_ns);
|
||||
HA_ATOMIC_STORE(&prof_task_stop_ns, 0);
|
||||
HA_ATOMIC_STORE(&prof_task_start_ns, now_ns);
|
||||
HA_ATOMIC_STORE(&prof_task_stop_ns, 0);
|
||||
|
||||
/* also flush current profiling stats */
|
||||
for (i = 0; i < SCHED_ACT_HASH_BUCKETS; i++) {
|
||||
HA_ATOMIC_STORE(&sched_activity[i].calls, 0);
|
||||
HA_ATOMIC_STORE(&sched_activity[i].cpu_time, 0);
|
||||
HA_ATOMIC_STORE(&sched_activity[i].lat_time, 0);
|
||||
HA_ATOMIC_STORE(&sched_activity[i].lkw_time, 0);
|
||||
HA_ATOMIC_STORE(&sched_activity[i].lkd_time, 0);
|
||||
HA_ATOMIC_STORE(&sched_activity[i].mem_time, 0);
|
||||
HA_ATOMIC_STORE(&sched_activity[i].func, NULL);
|
||||
HA_ATOMIC_STORE(&sched_activity[i].caller, NULL);
|
||||
/* also flush current profiling stats */
|
||||
for (i = 0; i < SCHED_ACT_HASH_BUCKETS; i++) {
|
||||
HA_ATOMIC_STORE(&sched_activity[i].calls, 0);
|
||||
HA_ATOMIC_STORE(&sched_activity[i].cpu_time, 0);
|
||||
HA_ATOMIC_STORE(&sched_activity[i].lat_time, 0);
|
||||
HA_ATOMIC_STORE(&sched_activity[i].lkw_time, 0);
|
||||
HA_ATOMIC_STORE(&sched_activity[i].lkd_time, 0);
|
||||
HA_ATOMIC_STORE(&sched_activity[i].mem_time, 0);
|
||||
HA_ATOMIC_STORE(&sched_activity[i].func, NULL);
|
||||
HA_ATOMIC_STORE(&sched_activity[i].caller, NULL);
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (strcmp(args[3], "auto") == 0) {
|
||||
unsigned int old = profiling;
|
||||
unsigned int new;
|
||||
else if (strcmp(args[arg], "auto") == 0) {
|
||||
unsigned int old = profiling;
|
||||
unsigned int new;
|
||||
|
||||
do {
|
||||
if ((old & HA_PROF_TASKS_MASK) >= HA_PROF_TASKS_AON)
|
||||
new = (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_AON;
|
||||
else
|
||||
new = (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_AOFF;
|
||||
} while (!_HA_ATOMIC_CAS(&profiling, &old, new));
|
||||
do {
|
||||
if ((old & HA_PROF_TASKS_MASK) >= HA_PROF_TASKS_AON)
|
||||
new = (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_AON;
|
||||
else
|
||||
new = (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_AOFF;
|
||||
} while (!_HA_ATOMIC_CAS(&profiling, &old, new));
|
||||
|
||||
HA_ATOMIC_STORE(&prof_task_start_ns, now_ns);
|
||||
HA_ATOMIC_STORE(&prof_task_stop_ns, 0);
|
||||
}
|
||||
else if (strcmp(args[3], "off") == 0) {
|
||||
unsigned int old = profiling;
|
||||
while (!_HA_ATOMIC_CAS(&profiling, &old, (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_OFF))
|
||||
;
|
||||
HA_ATOMIC_STORE(&prof_task_start_ns, now_ns);
|
||||
HA_ATOMIC_STORE(&prof_task_stop_ns, 0);
|
||||
}
|
||||
else if (strcmp(args[arg], "off") == 0) {
|
||||
unsigned int old = profiling;
|
||||
while (!_HA_ATOMIC_CAS(&profiling, &old, (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_OFF))
|
||||
;
|
||||
|
||||
if (HA_ATOMIC_LOAD(&prof_task_start_ns))
|
||||
HA_ATOMIC_STORE(&prof_task_stop_ns, now_ns);
|
||||
if (HA_ATOMIC_LOAD(&prof_task_start_ns))
|
||||
HA_ATOMIC_STORE(&prof_task_stop_ns, now_ns);
|
||||
}
|
||||
else if (strcmp(args[arg], "lock") == 0)
|
||||
HA_ATOMIC_OR(&profiling, HA_PROF_TASKS_LOCK);
|
||||
else if (strcmp(args[arg], "no-lock") == 0)
|
||||
HA_ATOMIC_AND(&profiling, ~HA_PROF_TASKS_LOCK);
|
||||
else if (strcmp(args[arg], "memory") == 0)
|
||||
HA_ATOMIC_OR(&profiling, HA_PROF_TASKS_MEM);
|
||||
else if (strcmp(args[arg], "no-memory") == 0)
|
||||
HA_ATOMIC_AND(&profiling, ~HA_PROF_TASKS_MEM);
|
||||
else
|
||||
break; // unknown arg
|
||||
}
|
||||
else
|
||||
return cli_err(appctx, "Expects 'on', 'auto', or 'off'.\n");
|
||||
|
||||
/* either no arg or invalid one */
|
||||
if (arg == 3 || *args[arg])
|
||||
return cli_err(appctx, "Expects a combination of either 'on', 'auto', 'off', 'lock', 'no-lock', 'memory' or 'no-memory'.\n");
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
|
|
|||
13
src/applet.c
13
src/applet.c
|
|
@ -848,7 +848,12 @@ struct task *task_run_applet(struct task *t, void *context, unsigned int state)
|
|||
|
||||
input = applet_output_data(app);
|
||||
output = co_data(oc);
|
||||
app->applet->fct(app);
|
||||
|
||||
/* Don't call I/O handler if the applet was shut (release callback was
|
||||
* already called)
|
||||
*/
|
||||
if (!se_fl_test(app->sedesc, SE_FL_SHR | SE_FL_SHW))
|
||||
app->applet->fct(app);
|
||||
|
||||
TRACE_POINT(APPLET_EV_PROCESS, app);
|
||||
|
||||
|
|
@ -945,7 +950,11 @@ struct task *task_process_applet(struct task *t, void *context, unsigned int sta
|
|||
applet_need_more_data(app);
|
||||
applet_have_no_more_data(app);
|
||||
|
||||
app->applet->fct(app);
|
||||
/* Don't call I/O handler if the applet was shut (release callback was
|
||||
* already called)
|
||||
*/
|
||||
if (!applet_fl_test(app, APPCTX_FL_SHUTDOWN))
|
||||
app->applet->fct(app);
|
||||
|
||||
TRACE_POINT(APPLET_EV_PROCESS, app);
|
||||
|
||||
|
|
|
|||
|
|
@ -59,6 +59,7 @@
|
|||
#include <haproxy/task.h>
|
||||
#include <haproxy/ticks.h>
|
||||
#include <haproxy/time.h>
|
||||
#include <haproxy/tools.h>
|
||||
#include <haproxy/trace.h>
|
||||
|
||||
#define TRACE_SOURCE &trace_strm
|
||||
|
|
@ -576,9 +577,20 @@ struct server *get_server_rnd(struct stream *s, const struct server *avoid)
|
|||
/* compare the new server to the previous best choice and pick
|
||||
* the one with the least currently served requests.
|
||||
*/
|
||||
if (prev && prev != curr &&
|
||||
curr->served * prev->cur_eweight > prev->served * curr->cur_eweight)
|
||||
curr = prev;
|
||||
if (prev && prev != curr) {
|
||||
uint64_t wcurr = (uint64_t)curr->served * prev->cur_eweight;
|
||||
uint64_t wprev = (uint64_t)prev->served * curr->cur_eweight;
|
||||
|
||||
if (wcurr > wprev)
|
||||
curr = prev;
|
||||
else if (wcurr == wprev && curr->counters.shared.tg && prev->counters.shared.tg) {
|
||||
/* same load: pick the lowest weighted request rate */
|
||||
wcurr = read_freq_ctr_period_estimate(&curr->counters._sess_per_sec, MS_TO_TICKS(1000));
|
||||
wprev = read_freq_ctr_period_estimate(&prev->counters._sess_per_sec, MS_TO_TICKS(1000));
|
||||
if (wprev * curr->cur_eweight < wcurr * prev->cur_eweight)
|
||||
curr = prev;
|
||||
}
|
||||
}
|
||||
} while (--draws > 0);
|
||||
|
||||
/* if the selected server is full, pretend we have none so that we reach
|
||||
|
|
@ -823,7 +835,7 @@ int assign_server(struct stream *s)
|
|||
else if (srv != prev_srv) {
|
||||
if (s->be_tgcounters)
|
||||
_HA_ATOMIC_INC(&s->be_tgcounters->cum_lbconn);
|
||||
if (srv->counters.shared.tg[tgid - 1])
|
||||
if (srv->counters.shared.tg)
|
||||
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->cum_lbconn);
|
||||
}
|
||||
stream_set_srv_target(s, srv);
|
||||
|
|
@ -998,12 +1010,12 @@ int assign_server_and_queue(struct stream *s)
|
|||
s->txn->flags |= TX_CK_DOWN;
|
||||
}
|
||||
s->flags |= SF_REDISP;
|
||||
if (prev_srv->counters.shared.tg[tgid - 1])
|
||||
if (prev_srv->counters.shared.tg)
|
||||
_HA_ATOMIC_INC(&prev_srv->counters.shared.tg[tgid - 1]->redispatches);
|
||||
if (s->be_tgcounters)
|
||||
_HA_ATOMIC_INC(&s->be_tgcounters->redispatches);
|
||||
} else {
|
||||
if (prev_srv->counters.shared.tg[tgid - 1])
|
||||
if (prev_srv->counters.shared.tg)
|
||||
_HA_ATOMIC_INC(&prev_srv->counters.shared.tg[tgid - 1]->retries);
|
||||
if (s->be_tgcounters)
|
||||
_HA_ATOMIC_INC(&s->be_tgcounters->retries);
|
||||
|
|
@ -3044,6 +3056,27 @@ int be_downtime(struct proxy *px) {
|
|||
return ns_to_sec(now_ns) - px->last_change + px->down_time;
|
||||
}
|
||||
|
||||
/* Checks if <px> backend supports the addition of servers at runtime. Either a
|
||||
* backend or a defaults proxy are supported. If proxy is incompatible, <msg>
|
||||
* will be allocated to contain a textual explaination.
|
||||
*/
|
||||
int be_supports_dynamic_srv(struct proxy *px, char **msg)
|
||||
{
|
||||
if (px->lbprm.algo && !(px->lbprm.algo & BE_LB_PROP_DYN)) {
|
||||
memprintf(msg, "%s '%s' uses a non dynamic load balancing method",
|
||||
proxy_cap_str(px->cap), px->id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (px->mode == PR_MODE_SYSLOG) {
|
||||
memprintf(msg, "%s '%s' uses mode log",
|
||||
proxy_cap_str(px->cap), px->id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* This function returns a string containing the balancing
|
||||
* mode of the proxy in a format suitable for stats.
|
||||
|
|
|
|||
|
|
@ -2133,11 +2133,11 @@ enum act_return http_action_req_cache_use(struct act_rule *rule, struct proxy *p
|
|||
return ACT_RET_CONT;
|
||||
|
||||
if (px == strm_fe(s)) {
|
||||
if (px->fe_counters.shared.tg[tgid - 1])
|
||||
if (px->fe_counters.shared.tg)
|
||||
_HA_ATOMIC_INC(&px->fe_counters.shared.tg[tgid - 1]->p.http.cache_lookups);
|
||||
}
|
||||
else {
|
||||
if (px->be_counters.shared.tg[tgid - 1])
|
||||
if (px->be_counters.shared.tg)
|
||||
_HA_ATOMIC_INC(&px->be_counters.shared.tg[tgid - 1]->p.http.cache_lookups);
|
||||
}
|
||||
|
||||
|
|
@ -2226,11 +2226,11 @@ enum act_return http_action_req_cache_use(struct act_rule *rule, struct proxy *p
|
|||
should_send_notmodified_response(cache, htxbuf(&s->req.buf), res);
|
||||
|
||||
if (px == strm_fe(s)) {
|
||||
if (px->fe_counters.shared.tg[tgid - 1])
|
||||
if (px->fe_counters.shared.tg)
|
||||
_HA_ATOMIC_INC(&px->fe_counters.shared.tg[tgid - 1]->p.http.cache_hits);
|
||||
}
|
||||
else {
|
||||
if (px->be_counters.shared.tg[tgid - 1])
|
||||
if (px->be_counters.shared.tg)
|
||||
_HA_ATOMIC_INC(&px->be_counters.shared.tg[tgid - 1]->p.http.cache_hits);
|
||||
}
|
||||
return ACT_RET_CONT;
|
||||
|
|
|
|||
|
|
@ -1423,6 +1423,9 @@ static int cfg_parse_global_tune_opts(char **args, int section_type,
|
|||
|
||||
return 0;
|
||||
}
|
||||
else if (strcmp(args[0], "tune.defaults.purge") == 0) {
|
||||
global.tune.options |= GTUNE_PURGE_DEFAULTS;
|
||||
}
|
||||
else if (strcmp(args[0], "tune.pattern.cache-size") == 0) {
|
||||
if (*(args[1]) == 0) {
|
||||
memprintf(err, "'%s' expects a positive numeric value", args[0]);
|
||||
|
|
@ -1869,6 +1872,7 @@ static struct cfg_kw_list cfg_kws = {ILH, {
|
|||
{ CFG_GLOBAL, "tune.bufsize", cfg_parse_global_tune_opts },
|
||||
{ CFG_GLOBAL, "tune.chksize", cfg_parse_global_unsupported_opts },
|
||||
{ CFG_GLOBAL, "tune.comp.maxlevel", cfg_parse_global_tune_opts },
|
||||
{ CFG_GLOBAL, "tune.defaults.purge", cfg_parse_global_tune_opts },
|
||||
{ CFG_GLOBAL, "tune.disable-fast-forward", cfg_parse_global_tune_forward_opts },
|
||||
{ CFG_GLOBAL, "tune.disable-zero-copy-forwarding", cfg_parse_global_tune_forward_opts },
|
||||
{ CFG_GLOBAL, "tune.glitches.kill.cpu-usage", cfg_parse_global_tune_opts },
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ static const char *common_kw_list[] = {
|
|||
"server-state-file-name", "max-session-srv-conns", "capture",
|
||||
"retries", "http-request", "http-response", "http-after-response",
|
||||
"http-send-name-header", "block", "redirect", "use_backend",
|
||||
"use-server", "force-persist", "ignore-persist", "force-persist",
|
||||
"use-server", "force-persist", "ignore-persist",
|
||||
"stick-table", "stick", "stats", "option", "default_backend",
|
||||
"http-reuse", "monitor", "transparent", "maxconn", "backlog",
|
||||
"fullconn", "dispatch", "balance", "hash-type",
|
||||
|
|
@ -299,7 +299,6 @@ int cfg_parse_listen_match_option(const char *file, int linenum, int kwm,
|
|||
int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
|
||||
{
|
||||
static struct proxy *curr_defproxy = NULL;
|
||||
static struct proxy *last_defproxy = NULL;
|
||||
const char *err;
|
||||
int rc;
|
||||
int err_code = 0;
|
||||
|
|
@ -388,35 +387,49 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
|
|||
err_code |= ERR_ALERT | ERR_FATAL;
|
||||
}
|
||||
|
||||
if (*args[1] && rc & PR_CAP_DEF) {
|
||||
/* for default proxies, if another one has the same
|
||||
* name and was explicitly referenced, this is an error
|
||||
* that we must reject. E.g.
|
||||
* defaults def
|
||||
* backend bck from def
|
||||
* defaults def
|
||||
if (rc & PR_CAP_DEF) {
|
||||
/* If last defaults is unnamed, it will be made
|
||||
* invisible by the current newer section. It must be
|
||||
* freed unless it is still referenced by proxies.
|
||||
*/
|
||||
curproxy = proxy_find_by_name(args[1], PR_CAP_DEF, 0);
|
||||
if (curproxy && curproxy->flags & PR_FL_EXPLICIT_REF) {
|
||||
ha_alert("Parsing [%s:%d]: %s '%s' has the same name as another defaults section declared at"
|
||||
" %s:%d which was explicitly referenced hence cannot be replaced. Please remove or"
|
||||
" rename one of the offending defaults section.\n",
|
||||
file, linenum, proxy_cap_str(rc), args[1],
|
||||
curproxy->conf.file, curproxy->conf.line);
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
goto out;
|
||||
if (last_defproxy && last_defproxy->id[0] == '\0' &&
|
||||
!last_defproxy->conf.refcount) {
|
||||
defaults_px_destroy(last_defproxy);
|
||||
}
|
||||
last_defproxy = NULL;
|
||||
|
||||
/* if the other proxy exists, we don't need to keep it
|
||||
* since neither will support being explicitly referenced
|
||||
* so let's drop it from the index but keep a reference to
|
||||
* its location for error messages.
|
||||
*/
|
||||
if (curproxy) {
|
||||
file_prev = curproxy->conf.file;
|
||||
line_prev = curproxy->conf.line;
|
||||
proxy_unref_or_destroy_defaults(curproxy);
|
||||
curproxy = NULL;
|
||||
/* If current defaults is named, check collision with previous instances. */
|
||||
if (*args[1]) {
|
||||
curproxy = proxy_find_by_name(args[1], PR_CAP_DEF, 0);
|
||||
|
||||
/* for default proxies, if another one has the same
|
||||
* name and was explicitly referenced, this is an error
|
||||
* that we must reject. E.g.
|
||||
* defaults def
|
||||
* backend bck from def
|
||||
* defaults def
|
||||
*/
|
||||
if (curproxy && curproxy->flags & PR_FL_EXPLICIT_REF) {
|
||||
ha_alert("Parsing [%s:%d]: %s '%s' has the same name as another defaults section declared at"
|
||||
" %s:%d which was explicitly referenced hence cannot be replaced. Please remove or"
|
||||
" rename one of the offending defaults section.\n",
|
||||
file, linenum, proxy_cap_str(rc), args[1],
|
||||
curproxy->conf.file, curproxy->conf.line);
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* if the other proxy exists, we don't need to keep it
|
||||
* since neither will support being explicitly referenced
|
||||
* so let's drop it from the index but keep a reference to
|
||||
* its location for error messages.
|
||||
*/
|
||||
if (curproxy) {
|
||||
file_prev = curproxy->conf.file;
|
||||
line_prev = curproxy->conf.line;
|
||||
defaults_px_detach(curproxy);
|
||||
curproxy = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -488,87 +501,14 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
|
|||
curproxy->conf.file_prev = file_prev;
|
||||
curproxy->conf.line_prev = line_prev;
|
||||
|
||||
if (curr_defproxy && (!LIST_ISEMPTY(&curr_defproxy->http_req_rules) ||
|
||||
!LIST_ISEMPTY(&curr_defproxy->http_res_rules) ||
|
||||
!LIST_ISEMPTY(&curr_defproxy->http_after_res_rules) ||
|
||||
!LIST_ISEMPTY(&curr_defproxy->tcp_req.l4_rules) ||
|
||||
!LIST_ISEMPTY(&curr_defproxy->tcp_req.l5_rules) ||
|
||||
!LIST_ISEMPTY(&curr_defproxy->tcp_req.inspect_rules) ||
|
||||
!LIST_ISEMPTY(&curr_defproxy->tcp_rep.inspect_rules))) {
|
||||
/* If the current default proxy defines TCP/HTTP rules, the
|
||||
* current proxy will keep a reference on it. But some sanity
|
||||
* checks are performed first:
|
||||
*
|
||||
* - It cannot be used to init a defaults section
|
||||
* - It cannot be used to init a listen section
|
||||
* - It cannot be used to init backend and frontend sections at
|
||||
* same time. It can be used to init several sections of the
|
||||
* same type only.
|
||||
* - It cannot define L4/L5 TCP rules if it is used to init
|
||||
* backend sections.
|
||||
* - It cannot define 'tcp-response content' rules if it
|
||||
* is used to init frontend sections.
|
||||
*
|
||||
* If no error is found, refcount of the default proxy is incremented.
|
||||
*/
|
||||
|
||||
/* Note: Add tcpcheck_rules too if unresolve args become allowed in defaults section */
|
||||
if (rc & PR_CAP_DEF) {
|
||||
ha_alert("parsing [%s:%d]: a defaults section cannot inherit from a defaults section defining TCP/HTTP rules (defaults section at %s:%d).\n",
|
||||
file, linenum, curr_defproxy->conf.file, curr_defproxy->conf.line);
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
}
|
||||
else if ((rc & PR_CAP_LISTEN) == PR_CAP_LISTEN) {
|
||||
ha_alert("parsing [%s:%d]: a listen section cannot inherit from a defaults section defining TCP/HTTP rules.\n",
|
||||
file, linenum);
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
}
|
||||
else {
|
||||
char defcap = (curr_defproxy->cap & PR_CAP_LISTEN);
|
||||
|
||||
if ((defcap == PR_CAP_BE || defcap == PR_CAP_FE) && (rc & PR_CAP_LISTEN) != defcap) {
|
||||
ha_alert("parsing [%s:%d]: frontends and backends cannot inherit from the same defaults section"
|
||||
" if it defines TCP/HTTP rules (defaults section at %s:%d).\n",
|
||||
file, linenum, curr_defproxy->conf.file, curr_defproxy->conf.line);
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
}
|
||||
else if (!(rc & PR_CAP_FE) && (!LIST_ISEMPTY(&curr_defproxy->tcp_req.l4_rules) ||
|
||||
!LIST_ISEMPTY(&curr_defproxy->tcp_req.l5_rules))) {
|
||||
ha_alert("parsing [%s:%d]: a backend section cannot inherit from a defaults section defining"
|
||||
" 'tcp-request connection' or 'tcp-request session' rules (defaults section at %s:%d).\n",
|
||||
file, linenum, curr_defproxy->conf.file, curr_defproxy->conf.line);
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
}
|
||||
else if (!(rc & PR_CAP_BE) && !LIST_ISEMPTY(&curr_defproxy->tcp_rep.inspect_rules)) {
|
||||
ha_alert("parsing [%s:%d]: a frontend section cannot inherit from a defaults section defining"
|
||||
" 'tcp-response content' rules (defaults section at %s:%d).\n",
|
||||
file, linenum, curr_defproxy->conf.file, curr_defproxy->conf.line);
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
}
|
||||
else {
|
||||
curr_defproxy->cap = (curr_defproxy->cap & ~PR_CAP_LISTEN) | (rc & PR_CAP_LISTEN);
|
||||
proxy_ref_defaults(curproxy, curr_defproxy);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (curr_defproxy && (curr_defproxy->tcpcheck_rules.flags & TCPCHK_RULES_PROTO_CHK) &&
|
||||
(curproxy->cap & PR_CAP_LISTEN) == PR_CAP_BE) {
|
||||
/* If the current default proxy defines tcpcheck rules, the
|
||||
* current proxy will keep a reference on it. but only if the
|
||||
* current proxy has the backend capability.
|
||||
*/
|
||||
proxy_ref_defaults(curproxy, curr_defproxy);
|
||||
}
|
||||
|
||||
if ((rc & PR_CAP_BE) && curr_defproxy && (curr_defproxy->nb_req_cap || curr_defproxy->nb_rsp_cap)) {
|
||||
ha_alert("parsing [%s:%d]: backend or defaults sections cannot inherit from a defaults section defining"
|
||||
" capptures (defaults section at %s:%d).\n",
|
||||
file, linenum, curr_defproxy->conf.file, curr_defproxy->conf.line);
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
if (curr_defproxy) {
|
||||
err_code = proxy_ref_defaults(curproxy, curr_defproxy, &errmsg);
|
||||
if (err_code)
|
||||
ha_alert("parsing [%s:%d]: %s.\n", file, linenum, errmsg);
|
||||
}
|
||||
|
||||
if (rc & PR_CAP_DEF) {
|
||||
LIST_APPEND(&defaults_list, &curproxy->el);
|
||||
/* last and current proxies must be updated to this one */
|
||||
curr_defproxy = last_defproxy = curproxy;
|
||||
} else {
|
||||
|
|
@ -693,23 +633,32 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
|
|||
goto out;
|
||||
}
|
||||
else if (strcmp(args[0], "mode") == 0) { /* sets the proxy mode */
|
||||
enum pr_mode mode;
|
||||
if (alertif_too_many_args(1, file, linenum, args, &err_code))
|
||||
goto out;
|
||||
|
||||
if (strcmp(args[1], "http") == 0) curproxy->mode = PR_MODE_HTTP;
|
||||
else if (strcmp(args[1], "tcp") == 0) curproxy->mode = PR_MODE_TCP;
|
||||
else if (strcmp(args[1], "log") == 0 && (curproxy->cap & PR_CAP_BE)) curproxy->mode = PR_MODE_SYSLOG;
|
||||
else if (strcmp(args[1], "spop") == 0 && (curproxy->cap & PR_CAP_BE)) curproxy->mode = PR_MODE_SPOP;
|
||||
else if (strcmp(args[1], "health") == 0) {
|
||||
if (unlikely(strcmp(args[1], "health") == 0)) {
|
||||
ha_alert("parsing [%s:%d] : 'mode health' doesn't exist anymore. Please use 'http-request return status 200' instead.\n", file, linenum);
|
||||
err_code |= ERR_ALERT | ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
else {
|
||||
|
||||
mode = str_to_proxy_mode(args[1]);
|
||||
if (!mode) {
|
||||
ha_alert("parsing [%s:%d] : unknown proxy mode '%s'.\n", file, linenum, args[1]);
|
||||
err_code |= ERR_ALERT | ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
else if ((mode == PR_MODE_SYSLOG || mode == PR_MODE_SPOP) &&
|
||||
!(curproxy->cap & PR_CAP_BE)) {
|
||||
ha_alert("parsing [%s:%d] : mode %s is only applicable on proxies with backend capability.\n", file, linenum, proxy_mode_str(mode));
|
||||
err_code |= ERR_ALERT | ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
curproxy->mode = mode;
|
||||
if (curproxy->cap & PR_CAP_DEF)
|
||||
curproxy->flags |= PR_FL_DEF_EXPLICIT_MODE;
|
||||
}
|
||||
else if (strcmp(args[0], "id") == 0) {
|
||||
struct proxy *conflict;
|
||||
|
|
@ -1395,7 +1344,9 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
|
|||
where |= SMP_VAL_FE_HRQ_HDR;
|
||||
if (curproxy->cap & PR_CAP_BE)
|
||||
where |= SMP_VAL_BE_HRQ_HDR;
|
||||
err_code |= warnif_cond_conflicts(rule->cond, where, file, linenum);
|
||||
err_code |= warnif_cond_conflicts(rule->cond, where, &errmsg);
|
||||
if (err_code)
|
||||
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
|
||||
|
||||
LIST_APPEND(&curproxy->http_req_rules, &rule->list);
|
||||
}
|
||||
|
|
@ -1428,7 +1379,9 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
|
|||
where |= SMP_VAL_FE_HRS_HDR;
|
||||
if (curproxy->cap & PR_CAP_BE)
|
||||
where |= SMP_VAL_BE_HRS_HDR;
|
||||
err_code |= warnif_cond_conflicts(rule->cond, where, file, linenum);
|
||||
err_code |= warnif_cond_conflicts(rule->cond, where, &errmsg);
|
||||
if (err_code)
|
||||
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
|
||||
|
||||
LIST_APPEND(&curproxy->http_res_rules, &rule->list);
|
||||
}
|
||||
|
|
@ -1460,7 +1413,9 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
|
|||
where |= SMP_VAL_FE_HRS_HDR;
|
||||
if (curproxy->cap & PR_CAP_BE)
|
||||
where |= SMP_VAL_BE_HRS_HDR;
|
||||
err_code |= warnif_cond_conflicts(rule->cond, where, file, linenum);
|
||||
err_code |= warnif_cond_conflicts(rule->cond, where, &errmsg);
|
||||
if (err_code)
|
||||
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
|
||||
|
||||
LIST_APPEND(&curproxy->http_after_res_rules, &rule->list);
|
||||
}
|
||||
|
|
@ -1522,7 +1477,9 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
|
|||
where |= SMP_VAL_FE_HRQ_HDR;
|
||||
if (curproxy->cap & PR_CAP_BE)
|
||||
where |= SMP_VAL_BE_HRQ_HDR;
|
||||
err_code |= warnif_cond_conflicts(rule->cond, where, file, linenum);
|
||||
err_code |= warnif_cond_conflicts(rule->cond, where, &errmsg);
|
||||
if (err_code)
|
||||
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
|
||||
}
|
||||
else if (strcmp(args[0], "use_backend") == 0) {
|
||||
struct switching_rule *rule;
|
||||
|
|
@ -1550,7 +1507,9 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
|
|||
goto out;
|
||||
}
|
||||
|
||||
err_code |= warnif_cond_conflicts(cond, SMP_VAL_FE_SET_BCK, file, linenum);
|
||||
err_code |= warnif_cond_conflicts(cond, SMP_VAL_FE_SET_BCK, &errmsg);
|
||||
if (err_code)
|
||||
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
|
||||
}
|
||||
else if (*args[2]) {
|
||||
ha_alert("parsing [%s:%d] : unexpected keyword '%s' after switching rule, only 'if' and 'unless' are allowed.\n",
|
||||
|
|
@ -1611,7 +1570,9 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
|
|||
goto out;
|
||||
}
|
||||
|
||||
err_code |= warnif_cond_conflicts(cond, SMP_VAL_BE_SET_SRV, file, linenum);
|
||||
err_code |= warnif_cond_conflicts(cond, SMP_VAL_BE_SET_SRV, &errmsg);
|
||||
if (err_code)
|
||||
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
|
||||
|
||||
rule = calloc(1, sizeof(*rule));
|
||||
if (!rule)
|
||||
|
|
@ -1664,7 +1625,9 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
|
|||
/* note: BE_REQ_CNT is the first one after FE_SET_BCK, which is
|
||||
* where force-persist is applied.
|
||||
*/
|
||||
err_code |= warnif_cond_conflicts(cond, SMP_VAL_BE_REQ_CNT, file, linenum);
|
||||
err_code |= warnif_cond_conflicts(cond, SMP_VAL_BE_REQ_CNT, &errmsg);
|
||||
if (err_code)
|
||||
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
|
||||
|
||||
rule = calloc(1, sizeof(*rule));
|
||||
if (!rule) {
|
||||
|
|
@ -1828,9 +1791,11 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
|
|||
goto out;
|
||||
}
|
||||
if (flags & STK_ON_RSP)
|
||||
err_code |= warnif_cond_conflicts(cond, SMP_VAL_BE_STO_RUL, file, linenum);
|
||||
err_code |= warnif_cond_conflicts(cond, SMP_VAL_BE_STO_RUL, &errmsg);
|
||||
else
|
||||
err_code |= warnif_cond_conflicts(cond, SMP_VAL_BE_SET_SRV, file, linenum);
|
||||
err_code |= warnif_cond_conflicts(cond, SMP_VAL_BE_SET_SRV, &errmsg);
|
||||
if (err_code)
|
||||
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
|
||||
|
||||
rule = calloc(1, sizeof(*rule));
|
||||
if (!rule) {
|
||||
|
|
@ -1886,7 +1851,9 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
|
|||
where |= SMP_VAL_FE_HRQ_HDR;
|
||||
if (curproxy->cap & PR_CAP_BE)
|
||||
where |= SMP_VAL_BE_HRQ_HDR;
|
||||
err_code |= warnif_cond_conflicts(cond, where, file, linenum);
|
||||
err_code |= warnif_cond_conflicts(cond, where, &errmsg);
|
||||
if (err_code)
|
||||
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
|
||||
|
||||
rule = calloc(1, sizeof(*rule));
|
||||
if (!rule) {
|
||||
|
|
@ -1964,7 +1931,9 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
|
|||
where |= SMP_VAL_FE_HRQ_HDR;
|
||||
if (curproxy->cap & PR_CAP_BE)
|
||||
where |= SMP_VAL_BE_HRQ_HDR;
|
||||
err_code |= warnif_cond_conflicts(rule->cond, where, file, linenum);
|
||||
err_code |= warnif_cond_conflicts(rule->cond, where, &errmsg);
|
||||
if (err_code)
|
||||
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
|
||||
LIST_APPEND(&curproxy->uri_auth->http_req_rules, &rule->list);
|
||||
|
||||
} else if (strcmp(args[1], "auth") == 0) {
|
||||
|
|
|
|||
599
src/cfgparse-peers.c
Normal file
599
src/cfgparse-peers.c
Normal file
|
|
@ -0,0 +1,599 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
||||
|
||||
/*
|
||||
* Configuration parser for peers section
|
||||
*/
|
||||
|
||||
#define _GNU_SOURCE
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <errno.h>
|
||||
|
||||
#include <haproxy/api.h>
|
||||
#include <haproxy/cfgparse.h>
|
||||
#include <haproxy/errors.h>
|
||||
#include <haproxy/global.h>
|
||||
#include <haproxy/listener.h>
|
||||
#include <haproxy/log.h>
|
||||
#include <haproxy/peers.h>
|
||||
#include <haproxy/proxy.h>
|
||||
#include <haproxy/server.h>
|
||||
#include <haproxy/stick_table.h>
|
||||
#include <haproxy/tools.h>
|
||||
|
||||
/* Allocate and initialize the frontend of a "peers" section found in
|
||||
* file <file> at line <linenum> with <id> as ID.
|
||||
* Return 0 if succeeded, -1 if not.
|
||||
* Note that this function may be called from "default-server"
|
||||
* or "peer" lines.
|
||||
*/
|
||||
static int init_peers_frontend(const char *file, int linenum,
|
||||
const char *id, struct peers *peers)
|
||||
{
|
||||
struct proxy *p;
|
||||
char *errmsg = NULL;
|
||||
|
||||
if (peers->peers_fe) {
|
||||
p = peers->peers_fe;
|
||||
goto out;
|
||||
}
|
||||
|
||||
p = alloc_new_proxy(NULL, PR_CAP_FE | PR_CAP_BE, &errmsg);
|
||||
if (!p) {
|
||||
ha_alert("parsing [%s:%d] : %s\n", file, linenum, errmsg);
|
||||
ha_free(&errmsg);
|
||||
return -1;
|
||||
}
|
||||
|
||||
peers_setup_frontend(p);
|
||||
p->parent = peers;
|
||||
/* Finally store this frontend. */
|
||||
peers->peers_fe = p;
|
||||
|
||||
out:
|
||||
if (id && !p->id)
|
||||
p->id = strdup(id);
|
||||
drop_file_name(&p->conf.file);
|
||||
p->conf.args.file = p->conf.file = copy_file_name(file);
|
||||
if (linenum != -1)
|
||||
p->conf.args.line = p->conf.line = linenum;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Only change ->file, ->line and ->arg struct bind_conf member values
|
||||
* if already present.
|
||||
*/
|
||||
static struct bind_conf *bind_conf_uniq_alloc(struct proxy *p,
|
||||
const char *file, int line,
|
||||
const char *arg, struct xprt_ops *xprt)
|
||||
{
|
||||
struct bind_conf *bind_conf;
|
||||
|
||||
if (!LIST_ISEMPTY(&p->conf.bind)) {
|
||||
bind_conf = LIST_ELEM((&p->conf.bind)->n, typeof(bind_conf), by_fe);
|
||||
/*
|
||||
* We keep bind_conf->file and bind_conf->line unchanged
|
||||
* to make them available for error messages
|
||||
*/
|
||||
if (arg) {
|
||||
free(bind_conf->arg);
|
||||
bind_conf->arg = strdup(arg);
|
||||
}
|
||||
}
|
||||
else {
|
||||
bind_conf = bind_conf_alloc(p, file, line, arg, xprt);
|
||||
}
|
||||
|
||||
return bind_conf;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate a new struct peer parsed at line <linenum> in file <file>
|
||||
* to be added to <peers>.
|
||||
* Returns the new allocated structure if succeeded, NULL if not.
|
||||
*/
|
||||
static struct peer *cfg_peers_add_peer(struct peers *peers,
|
||||
const char *file, int linenum,
|
||||
const char *id, int local)
|
||||
{
|
||||
struct peer *p;
|
||||
|
||||
p = calloc(1, sizeof *p);
|
||||
if (!p) {
|
||||
ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* the peers are linked backwards first */
|
||||
peers->count++;
|
||||
p->peers = peers;
|
||||
p->next = peers->remote;
|
||||
peers->remote = p;
|
||||
p->conf.file = strdup(file);
|
||||
p->conf.line = linenum;
|
||||
p->last_change = ns_to_sec(now_ns);
|
||||
HA_SPIN_INIT(&p->lock);
|
||||
if (id)
|
||||
p->id = strdup(id);
|
||||
if (local) {
|
||||
p->local = 1;
|
||||
peers->local = p;
|
||||
}
|
||||
|
||||
return p;
|
||||
}
|
||||
|
||||
/*
|
||||
* Parse a line in a <peers> section.
|
||||
* Returns the error code, 0 if OK, or any combination of :
|
||||
* - ERR_ABORT: must abort ASAP
|
||||
* - ERR_FATAL: we can continue parsing but not start the service
|
||||
* - ERR_WARN: a warning has been emitted
|
||||
* - ERR_ALERT: an alert has been emitted
|
||||
* Only the two first ones can stop processing, the two others are just
|
||||
* indicators.
|
||||
*/
|
||||
int cfg_parse_peers(const char *file, int linenum, char **args, int kwm)
|
||||
{
|
||||
static struct peers *curpeers = NULL;
|
||||
static struct sockaddr_storage *bind_addr = NULL;
|
||||
static int nb_shards = 0;
|
||||
struct peer *newpeer = NULL;
|
||||
const char *err;
|
||||
struct bind_conf *bind_conf;
|
||||
int err_code = 0;
|
||||
char *errmsg = NULL;
|
||||
static int bind_line, peer_line;
|
||||
|
||||
if (strcmp(args[0], "bind") == 0 || strcmp(args[0], "default-bind") == 0) {
|
||||
int cur_arg;
|
||||
struct bind_conf *bind_conf;
|
||||
int ret;
|
||||
|
||||
cur_arg = 1;
|
||||
|
||||
if (init_peers_frontend(file, linenum, NULL, curpeers) != 0) {
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
bind_conf = bind_conf_uniq_alloc(curpeers->peers_fe, file, linenum,
|
||||
args[1], xprt_get(XPRT_RAW));
|
||||
if (!bind_conf) {
|
||||
ha_alert("parsing [%s:%d] : '%s %s' : cannot allocate memory.\n", file, linenum, args[0], args[1]);
|
||||
err_code |= ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
bind_conf->maxaccept = 1;
|
||||
bind_conf->accept = session_accept_fd;
|
||||
bind_conf->options |= BC_O_UNLIMITED; /* don't make the peers subject to global limits */
|
||||
|
||||
if (*args[0] == 'b') {
|
||||
struct listener *l;
|
||||
|
||||
if (peer_line) {
|
||||
ha_alert("parsing [%s:%d] : mixing \"peer\" and \"bind\" line is forbidden\n", file, linenum);
|
||||
err_code |= ERR_ALERT | ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!LIST_ISEMPTY(&bind_conf->listeners)) {
|
||||
ha_alert("parsing [%s:%d] : One listener per \"peers\" section is authorized but another is already configured at [%s:%d].\n", file, linenum, bind_conf->file, bind_conf->line);
|
||||
err_code |= ERR_FATAL;
|
||||
}
|
||||
|
||||
if (!str2listener(args[1], curpeers->peers_fe, bind_conf, file, linenum, &errmsg)) {
|
||||
if (errmsg && *errmsg) {
|
||||
indent_msg(&errmsg, 2);
|
||||
ha_alert("parsing [%s:%d] : '%s %s' : %s\n", file, linenum, args[0], args[1], errmsg);
|
||||
}
|
||||
else
|
||||
ha_alert("parsing [%s:%d] : '%s %s' : error encountered while parsing listening address %s.\n",
|
||||
file, linenum, args[0], args[1], args[1]);
|
||||
err_code |= ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Only one listener supported. Compare first listener
|
||||
* against the last one. It must be the same one.
|
||||
*/
|
||||
if (bind_conf->listeners.n != bind_conf->listeners.p) {
|
||||
ha_alert("parsing [%s:%d] : Only one listener per \"peers\" section is authorized. Multiple listening addresses or port range are not supported.\n", file, linenum);
|
||||
err_code |= ERR_ALERT | ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
/*
|
||||
* Newly allocated listener is at the end of the list
|
||||
*/
|
||||
l = LIST_ELEM(bind_conf->listeners.p, typeof(l), by_bind);
|
||||
bind_addr = &l->rx.addr;
|
||||
|
||||
global.maxsock++; /* for the listening socket */
|
||||
|
||||
bind_line = 1;
|
||||
if (cfg_peers->local) {
|
||||
/* Local peer already defined using "server" line has no
|
||||
* address yet, we should update its server's addr:port
|
||||
* settings
|
||||
*/
|
||||
newpeer = cfg_peers->local;
|
||||
BUG_ON(!newpeer->srv);
|
||||
newpeer->srv->addr = *bind_addr;
|
||||
newpeer->srv->svc_port = get_host_port(bind_addr);
|
||||
}
|
||||
else {
|
||||
/* This peer is local.
|
||||
* Note that we do not set the peer ID. This latter is initialized
|
||||
* when parsing "peer" or "server" line.
|
||||
*/
|
||||
newpeer = cfg_peers_add_peer(curpeers, file, linenum, NULL, 1);
|
||||
if (!newpeer) {
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
cur_arg++;
|
||||
}
|
||||
|
||||
ret = bind_parse_args_list(bind_conf, args, cur_arg, cursection, file, linenum);
|
||||
err_code |= ret;
|
||||
if (ret != 0)
|
||||
goto out;
|
||||
}
|
||||
else if (strcmp(args[0], "default-server") == 0) {
|
||||
if (init_peers_frontend(file, -1, NULL, curpeers) != 0) {
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
goto out;
|
||||
}
|
||||
err_code |= parse_server(file, linenum, args, curpeers->peers_fe, NULL,
|
||||
SRV_PARSE_DEFAULT_SERVER|SRV_PARSE_IN_PEER_SECTION|SRV_PARSE_INITIAL_RESOLVE);
|
||||
}
|
||||
else if (strcmp(args[0], "log") == 0) {
|
||||
if (init_peers_frontend(file, linenum, NULL, curpeers) != 0) {
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
goto out;
|
||||
}
|
||||
if (!parse_logger(args, &curpeers->peers_fe->loggers, (kwm == KWM_NO), file, linenum, &errmsg)) {
|
||||
ha_alert("parsing [%s:%d] : %s : %s\n", file, linenum, args[0], errmsg);
|
||||
err_code |= ERR_ALERT | ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
else if (strcmp(args[0], "peers") == 0) { /* new peers section */
|
||||
/* Initialize these static variables when entering a new "peers" section*/
|
||||
bind_line = peer_line = 0;
|
||||
bind_addr = NULL;
|
||||
if (!*args[1]) {
|
||||
ha_alert("parsing [%s:%d] : missing name for peers section.\n", file, linenum);
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
|
||||
err_code |= ERR_ABORT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = invalid_char(args[1]);
|
||||
if (err) {
|
||||
ha_alert("parsing [%s:%d] : character '%c' is not permitted in '%s' name '%s'.\n",
|
||||
file, linenum, *err, args[0], args[1]);
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (curpeers = cfg_peers; curpeers != NULL; curpeers = curpeers->next) {
|
||||
/*
|
||||
* If there are two proxies with the same name only following
|
||||
* combinations are allowed:
|
||||
*/
|
||||
if (strcmp(curpeers->id, args[1]) == 0) {
|
||||
ha_alert("Parsing [%s:%d]: peers section '%s' has the same name as another peers section declared at %s:%d.\n",
|
||||
file, linenum, args[1], curpeers->conf.file, curpeers->conf.line);
|
||||
err_code |= ERR_ALERT | ERR_FATAL;
|
||||
}
|
||||
}
|
||||
|
||||
if ((curpeers = calloc(1, sizeof(*curpeers))) == NULL) {
|
||||
ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
curpeers->next = cfg_peers;
|
||||
cfg_peers = curpeers;
|
||||
curpeers->conf.file = strdup(file);
|
||||
curpeers->conf.line = linenum;
|
||||
curpeers->last_change = ns_to_sec(now_ns);
|
||||
curpeers->id = strdup(args[1]);
|
||||
curpeers->disabled = 0;
|
||||
}
|
||||
else if (strcmp(args[0], "peer") == 0 ||
|
||||
strcmp(args[0], "server") == 0) { /* peer or server definition */
|
||||
struct server *prev_srv;
|
||||
int local_peer, peer;
|
||||
int parse_addr = 0;
|
||||
|
||||
peer = *args[0] == 'p';
|
||||
local_peer = strcmp(args[1], localpeer) == 0;
|
||||
/* The local peer may have already partially been parsed on a "bind" line. */
|
||||
if (*args[0] == 'p') {
|
||||
if (bind_line) {
|
||||
ha_alert("parsing [%s:%d] : mixing \"peer\" and \"bind\" line is forbidden\n", file, linenum);
|
||||
err_code |= ERR_ALERT | ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
peer_line = 1;
|
||||
}
|
||||
if (cfg_peers->local && !cfg_peers->local->id && local_peer) {
|
||||
/* The local peer has already been initialized on a "bind" line.
|
||||
* Let's use it and store its ID.
|
||||
*/
|
||||
newpeer = cfg_peers->local;
|
||||
newpeer->id = strdup(localpeer);
|
||||
}
|
||||
else {
|
||||
if (local_peer && cfg_peers->local) {
|
||||
ha_alert("parsing [%s:%d] : '%s %s' : local peer name already referenced at %s:%d. %s\n",
|
||||
file, linenum, args[0], args[1],
|
||||
curpeers->peers_fe->conf.file, curpeers->peers_fe->conf.line, cfg_peers->local->id);
|
||||
err_code |= ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
newpeer = cfg_peers_add_peer(curpeers, file, linenum, args[1], local_peer);
|
||||
if (!newpeer) {
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
/* Line number and peer ID are updated only if this peer is the local one. */
|
||||
if (init_peers_frontend(file,
|
||||
newpeer->local ? linenum: -1,
|
||||
newpeer->local ? newpeer->id : NULL,
|
||||
curpeers) != 0) {
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* This initializes curpeer->peers->peers_fe->srv.
|
||||
* The server address is parsed only if we are parsing a "peer" line,
|
||||
* or if we are parsing a "server" line and the current peer is not the local one.
|
||||
*/
|
||||
parse_addr = (peer || !local_peer) ? SRV_PARSE_PARSE_ADDR : 0;
|
||||
prev_srv = curpeers->peers_fe->srv;
|
||||
err_code |= parse_server(file, linenum, args, curpeers->peers_fe, NULL,
|
||||
SRV_PARSE_IN_PEER_SECTION|parse_addr|SRV_PARSE_INITIAL_RESOLVE);
|
||||
if (curpeers->peers_fe->srv == prev_srv) {
|
||||
/* parse_server didn't add a server:
|
||||
* Remove the newly allocated peer.
|
||||
*/
|
||||
struct peer *p;
|
||||
|
||||
/* while it is tolerated to have a "server" line without address, it isn't
|
||||
* the case for a "peer" line
|
||||
*/
|
||||
if (peer) {
|
||||
ha_warning("parsing [%s:%d] : '%s %s' : ignoring invalid peer definition (missing address:port)\n",
|
||||
file, linenum, args[0], args[1]);
|
||||
err_code |= ERR_WARN;
|
||||
}
|
||||
else {
|
||||
ha_diag_warning("parsing [%s:%d] : '%s %s' : ignoring server (not a local peer, valid address:port is expected)\n",
|
||||
file, linenum, args[0], args[1]);
|
||||
}
|
||||
|
||||
p = curpeers->remote;
|
||||
curpeers->remote = curpeers->remote->next;
|
||||
free(p->id);
|
||||
free(p);
|
||||
if (local_peer) {
|
||||
/* we only get there with incomplete "peer"
|
||||
* line for local peer (missing address):
|
||||
*
|
||||
* reset curpeers and curpeers fields
|
||||
* that are local peer related
|
||||
*/
|
||||
curpeers->local = NULL;
|
||||
ha_free(&curpeers->peers_fe->id);
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!parse_addr && bind_addr) {
|
||||
/* local peer declared using "server": has name but no
|
||||
* address: we use the known "bind" line addr settings
|
||||
* as implicit server's addr and port.
|
||||
*/
|
||||
curpeers->peers_fe->srv->addr = *bind_addr;
|
||||
curpeers->peers_fe->srv->svc_port = get_host_port(bind_addr);
|
||||
}
|
||||
|
||||
if (nb_shards && curpeers->peers_fe->srv->shard > nb_shards) {
|
||||
ha_warning("parsing [%s:%d] : '%s %s' : %d peer shard greater value than %d shards value is ignored.\n",
|
||||
file, linenum, args[0], args[1], curpeers->peers_fe->srv->shard, nb_shards);
|
||||
curpeers->peers_fe->srv->shard = 0;
|
||||
err_code |= ERR_WARN;
|
||||
}
|
||||
|
||||
if (curpeers->peers_fe->srv->init_addr_methods || curpeers->peers_fe->srv->resolvers_id ||
|
||||
curpeers->peers_fe->srv->do_check || curpeers->peers_fe->srv->do_agent) {
|
||||
ha_warning("parsing [%s:%d] : '%s %s' : init_addr, resolvers, check and agent are ignored for peers.\n", file, linenum, args[0], args[1]);
|
||||
err_code |= ERR_WARN;
|
||||
}
|
||||
|
||||
HA_SPIN_INIT(&newpeer->lock);
|
||||
|
||||
newpeer->srv = curpeers->peers_fe->srv;
|
||||
if (!newpeer->local)
|
||||
goto out;
|
||||
|
||||
/* The lines above are reserved to "peer" lines. */
|
||||
if (*args[0] == 's')
|
||||
goto out;
|
||||
|
||||
bind_conf = bind_conf_uniq_alloc(curpeers->peers_fe, file, linenum, args[2], xprt_get(XPRT_RAW));
|
||||
if (!bind_conf) {
|
||||
ha_alert("parsing [%s:%d] : '%s %s' : Cannot allocate memory.\n", file, linenum, args[0], args[1]);
|
||||
err_code |= ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
bind_conf->maxaccept = 1;
|
||||
bind_conf->accept = session_accept_fd;
|
||||
bind_conf->options |= BC_O_UNLIMITED; /* don't make the peers subject to global limits */
|
||||
|
||||
if (!LIST_ISEMPTY(&bind_conf->listeners)) {
|
||||
ha_alert("parsing [%s:%d] : One listener per \"peers\" section is authorized but another is already configured at [%s:%d].\n", file, linenum, bind_conf->file, bind_conf->line);
|
||||
err_code |= ERR_FATAL;
|
||||
}
|
||||
|
||||
if (!str2listener(args[2], curpeers->peers_fe, bind_conf, file, linenum, &errmsg)) {
|
||||
if (errmsg && *errmsg) {
|
||||
indent_msg(&errmsg, 2);
|
||||
ha_alert("parsing [%s:%d] : '%s %s' : %s\n", file, linenum, args[0], args[1], errmsg);
|
||||
}
|
||||
else
|
||||
ha_alert("parsing [%s:%d] : '%s %s' : error encountered while parsing listening address %s.\n",
|
||||
file, linenum, args[0], args[1], args[2]);
|
||||
err_code |= ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
global.maxsock++; /* for the listening socket */
|
||||
}
|
||||
else if (strcmp(args[0], "shards") == 0) {
|
||||
char *endptr;
|
||||
|
||||
if (!*args[1]) {
|
||||
ha_alert("parsing [%s:%d] : '%s' : missing value\n", file, linenum, args[0]);
|
||||
err_code |= ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
curpeers->nb_shards = strtol(args[1], &endptr, 10);
|
||||
if (*endptr != '\0') {
|
||||
ha_alert("parsing [%s:%d] : '%s' : expects an integer argument, found '%s'\n",
|
||||
file, linenum, args[0], args[1]);
|
||||
err_code |= ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!curpeers->nb_shards) {
|
||||
ha_alert("parsing [%s:%d] : '%s' : expects a strictly positive integer argument\n",
|
||||
file, linenum, args[0]);
|
||||
err_code |= ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
nb_shards = curpeers->nb_shards;
|
||||
}
|
||||
else if (strcmp(args[0], "table") == 0) {
|
||||
struct stktable *t, *other;
|
||||
char *id;
|
||||
size_t prefix_len;
|
||||
|
||||
/* Line number and peer ID are updated only if this peer is the local one. */
|
||||
if (init_peers_frontend(file, -1, NULL, curpeers) != 0) {
|
||||
err_code |= ERR_ALERT | ERR_ABORT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Build the stick-table name, concatenating the "peers" section name
|
||||
* followed by a '/' character and the table name argument.
|
||||
*/
|
||||
chunk_reset(&trash);
|
||||
if (!chunk_strcpy(&trash, curpeers->id)) {
|
||||
ha_alert("parsing [%s:%d]: '%s %s' : stick-table name too long.\n",
|
||||
file, linenum, args[0], args[1]);
|
||||
err_code |= ERR_ALERT | ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
prefix_len = trash.data;
|
||||
if (!chunk_memcat(&trash, "/", 1) || !chunk_strcat(&trash, args[1])) {
|
||||
ha_alert("parsing [%s:%d]: '%s %s' : stick-table name too long.\n",
|
||||
file, linenum, args[0], args[1]);
|
||||
err_code |= ERR_ALERT | ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
t = calloc(1, sizeof *t);
|
||||
id = strdup(trash.area);
|
||||
if (!t || !id) {
|
||||
ha_alert("parsing [%s:%d]: '%s %s' : memory allocation failed\n",
|
||||
file, linenum, args[0], args[1]);
|
||||
free(t);
|
||||
free(id);
|
||||
err_code |= ERR_ALERT | ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
other = stktable_find_by_name(trash.area);
|
||||
if (other) {
|
||||
ha_alert("parsing [%s:%d] : stick-table name '%s' conflicts with table declared in %s '%s' at %s:%d.\n",
|
||||
file, linenum, args[1],
|
||||
other->proxy ? proxy_cap_str(other->proxy->cap) : "peers",
|
||||
other->proxy ? other->id : other->peers.p->id,
|
||||
other->conf.file, other->conf.line);
|
||||
err_code |= ERR_ALERT | ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
||||
err_code |= parse_stick_table(file, linenum, args, t, id, id + prefix_len, curpeers);
|
||||
if (err_code & ERR_FATAL) {
|
||||
free(t);
|
||||
free(id);
|
||||
goto out;
|
||||
}
|
||||
|
||||
stktable_store_name(t);
|
||||
t->next = stktables_list;
|
||||
stktables_list = t;
|
||||
}
|
||||
else if (strcmp(args[0], "disabled") == 0) { /* disables this peers section */
|
||||
curpeers->disabled |= PR_FL_DISABLED;
|
||||
}
|
||||
else if (strcmp(args[0], "enabled") == 0) { /* enables this peers section (used to revert a disabled default) */
|
||||
curpeers->disabled = 0;
|
||||
}
|
||||
else if (*args[0] != 0) {
|
||||
struct peers_kw_list *pkwl;
|
||||
int index;
|
||||
int rc = -1;
|
||||
|
||||
list_for_each_entry(pkwl, &peers_keywords.list, list) {
|
||||
for (index = 0; pkwl->kw[index].kw != NULL; index++) {
|
||||
if (strcmp(pkwl->kw[index].kw, args[0]) == 0) {
|
||||
rc = pkwl->kw[index].parse(args, curpeers, file, linenum, &errmsg);
|
||||
if (rc < 0) {
|
||||
ha_alert("parsing [%s:%d] : %s\n", file, linenum, errmsg);
|
||||
err_code |= ERR_ALERT | ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
else if (rc > 0) {
|
||||
ha_warning("parsing [%s:%d] : %s\n", file, linenum, errmsg);
|
||||
err_code |= ERR_WARN;
|
||||
goto out;
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ha_alert("parsing [%s:%d] : unknown keyword '%s' in '%s' section\n", file, linenum, args[0], cursection);
|
||||
err_code |= ERR_ALERT | ERR_FATAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
free(errmsg);
|
||||
return err_code;
|
||||
}
|
||||
|
||||
REGISTER_CONFIG_SECTION("peers", cfg_parse_peers, NULL);
|
||||
|
|
@ -496,6 +496,36 @@ static int ssl_parse_global_keylog(char **args, int section_type, struct proxy *
|
|||
}
|
||||
#endif
|
||||
|
||||
/* Allow to explicitely disable certificate compression when set to "off" */
|
||||
#ifdef SSL_OP_NO_RX_CERTIFICATE_COMPRESSION
|
||||
static int ssl_parse_certificate_compression(char **args, int section_type, struct proxy *curpx,
|
||||
const struct proxy *defpx, const char *file, int line,
|
||||
char **err)
|
||||
{
|
||||
if (too_many_args(1, args, err, NULL))
|
||||
return -1;
|
||||
|
||||
if (strcmp(args[1], "auto") == 0)
|
||||
global_ssl.certificate_compression = 1;
|
||||
else if (strcmp(args[1], "off") == 0)
|
||||
global_ssl.certificate_compression = 0;
|
||||
else {
|
||||
memprintf(err, "'%s' expects either 'auto' or 'off' but got '%s'.", args[0], args[1]); return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
static int ssl_parse_certificate_compression(char **args, int section_type, struct proxy *curpx,
|
||||
const struct proxy *defpx, const char *file, int line,
|
||||
char **err)
|
||||
{
|
||||
memprintf(err, "'%s' is not supported by your TLS library. "
|
||||
"It is known to work only with OpenSSL >= 3.2.0.", args[0]);
|
||||
return -1;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* parse "ssl.force-private-cache".
|
||||
* Returns <0 on alert, >0 on warning, 0 on success.
|
||||
*/
|
||||
|
|
@ -943,7 +973,7 @@ static int ssl_bind_parse_ktls(char **args, int cur_arg, struct proxy *px, struc
|
|||
return ERR_ALERT | ERR_FATAL;
|
||||
}
|
||||
if (!experimental_directives_allowed) {
|
||||
memprintf(err, "'%s' directive is experimental, must be allowed via a global 'expose-experimental-directive'", args[cur_arg]);
|
||||
memprintf(err, "'%s' directive is experimental, must be allowed via a global 'expose-experimental-directives'", args[cur_arg]);
|
||||
return ERR_ALERT | ERR_FATAL;
|
||||
}
|
||||
if (!strcasecmp(args[cur_arg + 1], "on")) {
|
||||
|
|
@ -2020,7 +2050,7 @@ static int srv_parse_ktls(char **args, int *cur_arg, struct proxy *px, struct se
|
|||
}
|
||||
|
||||
if (!experimental_directives_allowed) {
|
||||
memprintf(err, "'%s' directive is experimental, must be allowed via a global 'expose-experimental-directive'", args[*cur_arg]);
|
||||
memprintf(err, "'%s' directive is experimental, must be allowed via a global 'expose-experimental-directives'", args[*cur_arg]);
|
||||
return ERR_ALERT | ERR_FATAL;
|
||||
}
|
||||
|
||||
|
|
@ -2759,6 +2789,7 @@ static struct cfg_kw_list cfg_kws = {ILH, {
|
|||
{ CFG_GLOBAL, "ssl-security-level", ssl_parse_security_level },
|
||||
{ CFG_GLOBAL, "ssl-skip-self-issued-ca", ssl_parse_skip_self_issued_ca },
|
||||
{ CFG_GLOBAL, "tune.ssl.cachesize", ssl_parse_global_int },
|
||||
{ CFG_GLOBAL, "tune.ssl.certificate-compression", ssl_parse_certificate_compression },
|
||||
{ CFG_GLOBAL, "tune.ssl.default-dh-param", ssl_parse_global_default_dh },
|
||||
{ CFG_GLOBAL, "tune.ssl.force-private-cache", ssl_parse_global_private_cache },
|
||||
{ CFG_GLOBAL, "tune.ssl.lifetime", ssl_parse_global_lifetime },
|
||||
|
|
|
|||
2297
src/cfgparse.c
2297
src/cfgparse.c
File diff suppressed because it is too large
Load diff
|
|
@ -513,7 +513,7 @@ void set_server_check_status(struct check *check, short status, const char *desc
|
|||
if ((!(check->state & CHK_ST_AGENT) ||
|
||||
(check->status >= HCHK_STATUS_L57DATA)) &&
|
||||
(check->health > 0)) {
|
||||
if (s->counters.shared.tg[tgid - 1])
|
||||
if (s->counters.shared.tg)
|
||||
_HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->failed_checks);
|
||||
report = 1;
|
||||
check->health--;
|
||||
|
|
@ -741,7 +741,7 @@ void __health_adjust(struct server *s, short status)
|
|||
HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
|
||||
|
||||
HA_ATOMIC_STORE(&s->consecutive_errors, 0);
|
||||
if (s->counters.shared.tg[tgid - 1])
|
||||
if (s->counters.shared.tg)
|
||||
_HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->failed_hana);
|
||||
|
||||
if (s->check.fastinter) {
|
||||
|
|
|
|||
15
src/cli.c
15
src/cli.c
|
|
@ -3372,8 +3372,14 @@ read_again:
|
|||
target_pid = s->pcli_next_pid;
|
||||
/* we can connect now */
|
||||
s->target = pcli_pid_to_server(target_pid);
|
||||
if (objt_server(s->target))
|
||||
s->sv_tgcounters = __objt_server(s->target)->counters.shared.tg[tgid - 1];
|
||||
if (objt_server(s->target)) {
|
||||
struct server *srv = __objt_server(s->target);
|
||||
|
||||
if (srv->counters.shared.tg)
|
||||
s->sv_tgcounters = srv->counters.shared.tg[tgid - 1];
|
||||
else
|
||||
s->sv_tgcounters = NULL;
|
||||
}
|
||||
|
||||
if (!s->target)
|
||||
goto server_disconnect;
|
||||
|
|
@ -3732,9 +3738,8 @@ int mworker_cli_attach_server(char **errmsg)
|
|||
error:
|
||||
|
||||
list_for_each_entry(child, &proc_list, list) {
|
||||
free((char *)child->srv->conf.file); /* cast because of const char * */
|
||||
free(child->srv->id);
|
||||
srv_free(&child->srv);
|
||||
srv_detach(child->srv);
|
||||
srv_drop(child->srv);
|
||||
}
|
||||
free(msg);
|
||||
|
||||
|
|
|
|||
|
|
@ -37,7 +37,7 @@ static void _counters_shared_drop(void *counters)
|
|||
if (!shared)
|
||||
return;
|
||||
|
||||
while (it < global.nbtgroups && shared->tg[it]) {
|
||||
while (it < global.nbtgroups && shared->tg && shared->tg[it]) {
|
||||
if (shared->flags & COUNTERS_SHARED_F_LOCAL) {
|
||||
/* memory was allocated using calloc(), simply free it */
|
||||
free(shared->tg[it]);
|
||||
|
|
@ -53,6 +53,7 @@ static void _counters_shared_drop(void *counters)
|
|||
}
|
||||
it += 1;
|
||||
}
|
||||
free(shared->tg);
|
||||
}
|
||||
|
||||
/* release a shared fe counters struct */
|
||||
|
|
@ -86,6 +87,14 @@ static int _counters_shared_prepare(struct counters_shared *shared,
|
|||
if (!guid->key || !shm_stats_file_hdr)
|
||||
shared->flags |= COUNTERS_SHARED_F_LOCAL;
|
||||
|
||||
if (!shared->tg) {
|
||||
shared->tg = calloc(global.nbtgroups, sizeof(*shared->tg));
|
||||
if (!shared->tg) {
|
||||
memprintf(errmsg, "couldn't allocate memory for shared counters");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
while (it < global.nbtgroups) {
|
||||
if (shared->flags & COUNTERS_SHARED_F_LOCAL) {
|
||||
size_t tg_size;
|
||||
|
|
|
|||
|
|
@ -686,7 +686,10 @@ int _cmp_cluster_avg_capa(const void *a, const void *b)
|
|||
{
|
||||
const struct ha_cpu_cluster *l = (const struct ha_cpu_cluster *)a;
|
||||
const struct ha_cpu_cluster *r = (const struct ha_cpu_cluster *)b;
|
||||
return r->capa - l->capa;
|
||||
|
||||
if (!r->nb_cores || !l->nb_cores)
|
||||
return r->nb_cores - l->nb_cores;
|
||||
return r->capa * l->nb_cores - l->capa * r->nb_cores;
|
||||
}
|
||||
|
||||
/* re-order a cluster array by cluster index only */
|
||||
|
|
@ -1669,7 +1672,7 @@ static int cpu_policy_performance(int policy, int tmin, int tmax, int gmin, int
|
|||
|
||||
capa = 0;
|
||||
for (cluster = 0; cluster < cpu_topo_maxcpus; cluster++) {
|
||||
if (capa && ha_cpu_clusters[cluster].capa * 10 < ha_cpu_clusters[cluster].nb_cpu * capa * 8) {
|
||||
if (capa && ha_cpu_clusters[cluster].capa * 10 < ha_cpu_clusters[cluster].nb_cores * capa * 8) {
|
||||
/* This cluster is made of cores delivering less than
|
||||
* 80% of the performance of those of the previous
|
||||
* cluster, previous one, we're not interested in
|
||||
|
|
@ -1680,8 +1683,8 @@ static int cpu_policy_performance(int policy, int tmin, int tmax, int gmin, int
|
|||
ha_cpu_topo[cpu].st |= HA_CPU_F_IGNORED;
|
||||
}
|
||||
}
|
||||
else if (ha_cpu_clusters[cluster].nb_cpu)
|
||||
capa = ha_cpu_clusters[cluster].capa / ha_cpu_clusters[cluster].nb_cpu;
|
||||
else if (ha_cpu_clusters[cluster].nb_cores)
|
||||
capa = ha_cpu_clusters[cluster].capa / ha_cpu_clusters[cluster].nb_cores;
|
||||
else
|
||||
capa = 0;
|
||||
}
|
||||
|
|
@ -1714,7 +1717,7 @@ static int cpu_policy_efficiency(int policy, int tmin, int tmax, int gmin, int g
|
|||
|
||||
capa = 0;
|
||||
for (cluster = cpu_topo_maxcpus - 1; cluster >= 0; cluster--) {
|
||||
if (capa && ha_cpu_clusters[cluster].capa * 8 >= ha_cpu_clusters[cluster].nb_cpu * capa * 10) {
|
||||
if (capa && ha_cpu_clusters[cluster].capa * 8 >= ha_cpu_clusters[cluster].nb_cores * capa * 10) {
|
||||
/* This cluster is made of cores each at last 25% faster
|
||||
* than those of the previous cluster, previous one, we're
|
||||
* not interested in using it.
|
||||
|
|
@ -1724,8 +1727,8 @@ static int cpu_policy_efficiency(int policy, int tmin, int tmax, int gmin, int g
|
|||
ha_cpu_topo[cpu].st |= HA_CPU_F_IGNORED;
|
||||
}
|
||||
}
|
||||
else if (ha_cpu_clusters[cluster].nb_cpu)
|
||||
capa = ha_cpu_clusters[cluster].capa / ha_cpu_clusters[cluster].nb_cpu;
|
||||
else if (ha_cpu_clusters[cluster].nb_cores)
|
||||
capa = ha_cpu_clusters[cluster].capa / ha_cpu_clusters[cluster].nb_cores;
|
||||
else
|
||||
capa = 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -605,7 +605,11 @@ void ha_task_dump(struct buffer *buf, const struct task *task, const char *pfx)
|
|||
chunk_appendf(buf, "%sCurrent executing a Lua HTTP service -- ", pfx);
|
||||
}
|
||||
|
||||
if (hlua && hlua->T) {
|
||||
/* only dump the Lua stack on panic because the approach is often
|
||||
* destructive and the running program might not recover from this
|
||||
* if called during warnings or "show threads".
|
||||
*/
|
||||
if (hlua && hlua->T && (get_tainted() & TAINTED_PANIC)) {
|
||||
chunk_appendf(buf, "stack traceback:\n ");
|
||||
append_prefixed_str(buf, hlua_traceback(hlua->T, "\n "), pfx, '\n', 0);
|
||||
}
|
||||
|
|
|
|||
32
src/h1_htx.c
32
src/h1_htx.c
|
|
@ -724,14 +724,42 @@ static size_t h1_parse_full_contig_chunks(struct h1m *h1m, struct htx **dsthtx,
|
|||
break;
|
||||
}
|
||||
else if (likely(end[ridx] == ';')) {
|
||||
int backslash = 0;
|
||||
int quote = 0;
|
||||
|
||||
/* chunk extension, ends at next CRLF */
|
||||
if (!++ridx)
|
||||
goto end_parsing;
|
||||
while (!HTTP_IS_CRLF(end[ridx])) {
|
||||
|
||||
/* The loop seeks the first CRLF or non-tab CTL char
|
||||
* and stops there. If a backslash/quote is active,
|
||||
* it's an error. If none, we assume it's the CRLF
|
||||
* and go back to the top of the loop checking for
|
||||
* CR then LF. This way CTLs, lone LF etc are handled
|
||||
* in the fallback path. This allows to protect
|
||||
* remotes against their own possibly non-compliant
|
||||
* chunk-ext parser which could mistakenly skip a
|
||||
* quoted CRLF. Chunk-ext are not used anyway, except
|
||||
* by attacks.
|
||||
*/
|
||||
while (!HTTP_IS_CTL(end[ridx]) || HTTP_IS_SPHT(end[ridx])) {
|
||||
if (backslash)
|
||||
backslash = 0; // escaped char
|
||||
else if (end[ridx] == '\\' && quote)
|
||||
backslash = 1;
|
||||
else if (end[ridx] == '\\') // backslash not permitted outside quotes
|
||||
goto parsing_error;
|
||||
else if (end[ridx] == '"') // begin/end of quoted-pair
|
||||
quote = !quote;
|
||||
if (!++ridx)
|
||||
goto end_parsing;
|
||||
}
|
||||
/* we have a CRLF now, loop above */
|
||||
|
||||
/* mismatched quotes / backslashes end here */
|
||||
if (quote || backslash)
|
||||
goto parsing_error;
|
||||
|
||||
/* CTLs (CRLF) fall to the common check */
|
||||
continue;
|
||||
}
|
||||
else {
|
||||
|
|
|
|||
105
src/haproxy.c
105
src/haproxy.c
|
|
@ -205,6 +205,7 @@ struct global global = {
|
|||
#endif
|
||||
/* by default allow clients which use a privileged port for TCP only */
|
||||
.clt_privileged_ports = HA_PROTO_TCP,
|
||||
.maxthrpertgroup = MAX_THREADS_PER_GROUP,
|
||||
/* others NULL OK */
|
||||
};
|
||||
|
||||
|
|
@ -603,6 +604,48 @@ void display_version()
|
|||
}
|
||||
}
|
||||
|
||||
/* compare a feature string, ignoring the first character (-/+)
|
||||
used for qsort */
|
||||
static int feat_cmp(const void *a, const void *b)
|
||||
{
|
||||
const struct ist *ia = a;
|
||||
const struct ist *ib = b;
|
||||
|
||||
struct ist sa = istadv(*ia, 1);
|
||||
struct ist sb = istadv(*ib, 1);
|
||||
|
||||
return istdiff(sa, sb);
|
||||
}
|
||||
|
||||
/* split the feature list into an allocated sorted array of ist
|
||||
the return ptr must be freed by the caller */
|
||||
static struct ist *split_feature_list()
|
||||
{
|
||||
struct ist *out;
|
||||
struct ist tmp = ist(build_features);
|
||||
|
||||
int n = 1; /* last element don't have a ' ' */
|
||||
int i = 0;
|
||||
|
||||
for (i = 0; build_features[i] != '\0'; i++) {
|
||||
if (build_features[i] == ' ')
|
||||
n++;
|
||||
}
|
||||
out = calloc(n + 1, sizeof(*out)); // last elem is NULL
|
||||
if (!out)
|
||||
goto end;
|
||||
|
||||
i = 0;
|
||||
while (tmp.len)
|
||||
out[i++] = istsplit(&tmp, ' ');
|
||||
|
||||
qsort(out, n, sizeof(struct ist), feat_cmp);
|
||||
|
||||
end:
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
/* display_mode:
|
||||
* 0 = short version (e.g., "3.3.1")
|
||||
* 1 = full version (e.g., "3.3.1-dev5-1bb975-71")
|
||||
|
|
@ -642,14 +685,23 @@ void display_version_plain(int display_mode)
|
|||
static void display_build_opts()
|
||||
{
|
||||
const char **opt;
|
||||
struct ist *feat_list = NULL, *tmp;
|
||||
|
||||
printf("Build options : %s"
|
||||
"\n\nFeature list : %s"
|
||||
"\n\nDefault settings :"
|
||||
feat_list = split_feature_list();
|
||||
|
||||
printf("Build options : %s", build_opts_string);
|
||||
printf("\n\nFeature list :");
|
||||
for (tmp = feat_list;tmp->ptr;tmp++)
|
||||
if (!isttest(istist(*tmp, ist("HAVE_WORKING_"))))
|
||||
printf(" %.*s", (int)tmp->len, tmp->ptr);
|
||||
printf("\nDetected feature list :");
|
||||
for (tmp = feat_list;tmp->ptr;tmp++)
|
||||
if (isttest(istist(*tmp, ist("HAVE_WORKING_"))))
|
||||
printf(" %.*s", (int)tmp->len, tmp->ptr);
|
||||
printf("\n\nDefault settings :"
|
||||
"\n bufsize = %d, maxrewrite = %d, maxpollevents = %d"
|
||||
"\n\n",
|
||||
build_opts_string,
|
||||
build_features, BUFSIZE, MAXREWRITE, MAX_POLL_EVENTS);
|
||||
BUFSIZE, MAXREWRITE, MAX_POLL_EVENTS);
|
||||
|
||||
for (opt = NULL; (opt = hap_get_next_build_opt(opt)); puts(*opt))
|
||||
;
|
||||
|
|
@ -668,6 +720,7 @@ static void display_build_opts()
|
|||
putchar('\n');
|
||||
list_filters(stdout);
|
||||
putchar('\n');
|
||||
ha_free(&feat_list);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -1435,11 +1488,15 @@ static void init_early(int argc, char **argv)
|
|||
len = strlen(progname);
|
||||
progname = strdup(progname);
|
||||
if (!progname) {
|
||||
ha_alert("Cannot allocate memory for log_tag.\n");
|
||||
ha_alert("Cannot allocate memory for progname.\n");
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
chunk_initlen(&global.log_tag, strdup(progname), len, len);
|
||||
if (b_orig(&global.log_tag) == NULL) {
|
||||
ha_alert("Cannot allocate memory for log_tag.\n");
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
}
|
||||
|
||||
/* handles program arguments. Very minimal parsing is performed, variables are
|
||||
|
|
@ -2098,8 +2155,20 @@ static void step_init_2(int argc, char** argv)
|
|||
struct pre_check_fct *prcf;
|
||||
const char *cc, *cflags, *opts;
|
||||
|
||||
/* destroy unreferenced defaults proxies */
|
||||
proxy_destroy_all_unref_defaults();
|
||||
/* Free last defaults if it is unnamed and unreferenced. */
|
||||
if (last_defproxy && last_defproxy->id[0] == '\0' &&
|
||||
!last_defproxy->conf.refcount) {
|
||||
defaults_px_destroy(last_defproxy);
|
||||
}
|
||||
last_defproxy = NULL; /* This variable is not used after parsing. */
|
||||
|
||||
if (global.tune.options & GTUNE_PURGE_DEFAULTS) {
|
||||
/* destroy unreferenced defaults proxies */
|
||||
defaults_px_destroy_all_unref();
|
||||
}
|
||||
else {
|
||||
defaults_px_ref_all();
|
||||
}
|
||||
|
||||
list_for_each_entry(prcf, &pre_check_list, list) {
|
||||
err_code |= prcf->fct();
|
||||
|
|
@ -2742,8 +2811,13 @@ void deinit(void)
|
|||
* they are respectively cleaned up in sink_deinit() and deinit_log_forward()
|
||||
*/
|
||||
|
||||
/* destroy all referenced defaults proxies */
|
||||
proxy_destroy_all_unref_defaults();
|
||||
/* If named defaults were preserved, ensure refcount is resetted. */
|
||||
if (!(global.tune.options & GTUNE_PURGE_DEFAULTS))
|
||||
defaults_px_unref_all();
|
||||
/* All proxies are removed now, so every defaults should also be freed
|
||||
* when their refcount reached zero.
|
||||
*/
|
||||
BUG_ON(!LIST_ISEMPTY(&defaults_list));
|
||||
|
||||
userlist_free(userlist);
|
||||
|
||||
|
|
@ -2901,9 +2975,11 @@ void run_poll_loop()
|
|||
if (thread_has_tasks())
|
||||
activity[tid].wake_tasks++;
|
||||
else {
|
||||
_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_SLEEPING);
|
||||
_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_NOTIFIED);
|
||||
__ha_barrier_atomic_store();
|
||||
unsigned int flags = _HA_ATOMIC_LOAD(&th_ctx->flags);
|
||||
|
||||
while (unlikely(!HA_ATOMIC_CAS(&th_ctx->flags, &flags, (flags | TH_FL_SLEEPING) & ~TH_FL_NOTIFIED)))
|
||||
__ha_cpu_relax();
|
||||
|
||||
if (thread_has_tasks()) {
|
||||
activity[tid].wake_tasks++;
|
||||
_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_SLEEPING);
|
||||
|
|
@ -3127,8 +3203,7 @@ void *run_thread_poll_loop(void *data)
|
|||
ptff->fct();
|
||||
|
||||
#ifdef USE_THREAD
|
||||
if (!_HA_ATOMIC_AND_FETCH(&ha_tgroup_info[ti->tgid-1].threads_enabled, ~ti->ltid_bit))
|
||||
_HA_ATOMIC_AND(&all_tgroups_mask, ~tg->tgid_bit);
|
||||
_HA_ATOMIC_AND(&ha_tgroup_info[ti->tgid-1].threads_enabled, ~ti->ltid_bit);
|
||||
_HA_ATOMIC_AND_FETCH(&tg_ctx->stopping_threads, ~ti->ltid_bit);
|
||||
if (tid > 0)
|
||||
pthread_exit(NULL);
|
||||
|
|
|
|||
13
src/hlua.c
13
src/hlua.c
|
|
@ -273,6 +273,7 @@ static const char *hlua_tostring_safe(lua_State *L, int index)
|
|||
break;
|
||||
default:
|
||||
/* error was caught */
|
||||
lua_pop(L, 1); // consume the lua object pushed on the stack since we ignore it
|
||||
return NULL;
|
||||
}
|
||||
return str;
|
||||
|
|
@ -323,6 +324,7 @@ static const char *hlua_pushvfstring_safe(lua_State *L, const char *fmt, va_list
|
|||
break;
|
||||
default:
|
||||
/* error was caught */
|
||||
lua_pop(L, 1); // consume the lua object pushed on the stack since we ignore it
|
||||
dst = NULL;
|
||||
}
|
||||
va_end(cpy_argp);
|
||||
|
|
@ -870,6 +872,7 @@ void hlua_unref(lua_State *L, int ref)
|
|||
__LJMP static int _hlua_traceback(lua_State *L)
|
||||
{
|
||||
lua_Debug *ar = lua_touserdata(L, 1);
|
||||
int ret;
|
||||
|
||||
/* Fill fields:
|
||||
* 'S': fills in the fields source, short_src, linedefined, lastlinedefined, and what;
|
||||
|
|
@ -877,7 +880,10 @@ __LJMP static int _hlua_traceback(lua_State *L)
|
|||
* 'n': fills in the field name and namewhat;
|
||||
* 't': fills in the field istailcall;
|
||||
*/
|
||||
return lua_getinfo(L, "Slnt", ar);
|
||||
ret = lua_getinfo(L, "Slnt", ar);
|
||||
if (!ret)
|
||||
WILL_LJMP(luaL_error(L, "unexpected"));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -896,10 +902,11 @@ const char *hlua_traceback(lua_State *L, const char* sep)
|
|||
lua_pushlightuserdata(L, &ar);
|
||||
|
||||
/* safe getinfo */
|
||||
switch (lua_pcall(L, 1, 1, 0)) {
|
||||
switch (lua_pcall(L, 1, 0, 0)) {
|
||||
case LUA_OK:
|
||||
break;
|
||||
default:
|
||||
lua_pop(L, 1); // consume the lua object pushed on the stack since we ignore it
|
||||
goto end; // abort
|
||||
}
|
||||
|
||||
|
|
@ -998,6 +1005,7 @@ static int hlua_pusherror(lua_State *L, const char *fmt, ...)
|
|||
case LUA_OK:
|
||||
break;
|
||||
default:
|
||||
lua_pop(L, 1); // consume the lua object pushed on the stack since we ignore it
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
|
|
@ -10188,6 +10196,7 @@ static int hlua_new_event_sub_safe(lua_State *L, struct event_hdl_sub *sub)
|
|||
return 1;
|
||||
default:
|
||||
/* error was caught */
|
||||
lua_pop(L, 1); // consume the lua object pushed on the stack since we ignore it
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2796,6 +2796,11 @@ static int _hlua_patref_add_bulk(lua_State *L, int status, lua_KContext ctx)
|
|||
int count = 0;
|
||||
int ret;
|
||||
|
||||
if (!lua_istable(L, 2)) {
|
||||
luaL_argerror(L, 2, "argument is expected to be a table");
|
||||
return 0; // not reached
|
||||
}
|
||||
|
||||
if ((ref->flags & HLUA_PATREF_FL_GEN) &&
|
||||
pat_ref_may_commit(ref->ptr, ref->curr_gen))
|
||||
curr_gen = ref->curr_gen;
|
||||
|
|
@ -2808,17 +2813,6 @@ static int _hlua_patref_add_bulk(lua_State *L, int status, lua_KContext ctx)
|
|||
const char *key;
|
||||
const char *value = NULL;
|
||||
|
||||
/* check if we may do something to try to prevent thread contention,
|
||||
* unless we run from body/init state where hlua_yieldk is no-op
|
||||
*/
|
||||
if (count > 100 && hlua_gethlua(L)) {
|
||||
/* let's yield and wait for being called again to continue where we left off */
|
||||
HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ref->ptr->lock);
|
||||
hlua_yieldk(L, 0, 0, _hlua_patref_add_bulk, TICK_ETERNITY, HLUA_CTRLYIELD); // continue
|
||||
return 0; // not reached
|
||||
|
||||
}
|
||||
|
||||
if (ref->ptr->flags & PAT_REF_SMP) {
|
||||
/* key:val table */
|
||||
luaL_checktype(L, -2, LUA_TSTRING);
|
||||
|
|
@ -2843,6 +2837,17 @@ static int _hlua_patref_add_bulk(lua_State *L, int status, lua_KContext ctx)
|
|||
/* removes 'value'; keeps 'key' for next iteration */
|
||||
lua_pop(L, 1);
|
||||
count += 1;
|
||||
|
||||
/* check if we may do something to try to prevent thread contention,
|
||||
* unless we run from body/init state where hlua_yieldk is no-op
|
||||
*/
|
||||
if (count > 100 && hlua_gethlua(L)) {
|
||||
/* let's yield and wait for being called again to continue where we left off */
|
||||
HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ref->ptr->lock);
|
||||
hlua_yieldk(L, 0, 0, _hlua_patref_add_bulk, TICK_ETERNITY, HLUA_CTRLYIELD); // continue
|
||||
return 0; // not reached
|
||||
|
||||
}
|
||||
}
|
||||
HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ref->ptr->lock);
|
||||
lua_pushboolean(L, 1);
|
||||
|
|
|
|||
|
|
@ -960,6 +960,12 @@ static enum act_parse_ret parse_http_req_capture(const char **args, int *orig_ar
|
|||
hdr->namelen = 0;
|
||||
hdr->len = len;
|
||||
hdr->pool = create_pool("caphdr", hdr->len + 1, MEM_F_SHARED);
|
||||
if (!hdr->pool) {
|
||||
memprintf(err, "out of memory");
|
||||
free(hdr);
|
||||
release_sample_expr(expr);
|
||||
return ACT_RET_PRS_ERR;
|
||||
}
|
||||
hdr->index = px->nb_req_cap++;
|
||||
|
||||
px->req_cap = hdr;
|
||||
|
|
@ -2005,6 +2011,8 @@ static enum act_parse_ret parse_http_set_map(const char **args, int *orig_arg, s
|
|||
}
|
||||
rule->action_ptr = http_action_set_map;
|
||||
rule->release_ptr = release_http_map;
|
||||
lf_expr_init(&rule->arg.map.key);
|
||||
lf_expr_init(&rule->arg.map.value);
|
||||
|
||||
cur_arg = *orig_arg;
|
||||
if (rule->action == 1 && (!*args[cur_arg] || !*args[cur_arg+1])) {
|
||||
|
|
@ -2040,7 +2048,6 @@ static enum act_parse_ret parse_http_set_map(const char **args, int *orig_arg, s
|
|||
}
|
||||
|
||||
/* key pattern */
|
||||
lf_expr_init(&rule->arg.map.key);
|
||||
if (!parse_logformat_string(args[cur_arg], px, &rule->arg.map.key, LOG_OPT_NONE, cap, err)) {
|
||||
free(rule->arg.map.ref);
|
||||
return ACT_RET_PRS_ERR;
|
||||
|
|
@ -2049,7 +2056,6 @@ static enum act_parse_ret parse_http_set_map(const char **args, int *orig_arg, s
|
|||
if (rule->action == 1) {
|
||||
/* value pattern for set-map only */
|
||||
cur_arg++;
|
||||
lf_expr_init(&rule->arg.map.value);
|
||||
if (!parse_logformat_string(args[cur_arg], px, &rule->arg.map.value, LOG_OPT_NONE, cap, err)) {
|
||||
free(rule->arg.map.ref);
|
||||
return ACT_RET_PRS_ERR;
|
||||
|
|
|
|||
|
|
@ -951,8 +951,14 @@ int httpclient_applet_init(struct appctx *appctx)
|
|||
|
||||
s = appctx_strm(appctx);
|
||||
s->target = target;
|
||||
if (objt_server(s->target))
|
||||
s->sv_tgcounters = __objt_server(s->target)->counters.shared.tg[tgid - 1];
|
||||
if (objt_server(s->target)) {
|
||||
struct server *srv = __objt_server(s->target);
|
||||
|
||||
if (srv->counters.shared.tg)
|
||||
s->sv_tgcounters = __objt_server(s->target)->counters.shared.tg[tgid - 1];
|
||||
else
|
||||
s->sv_tgcounters = NULL;
|
||||
}
|
||||
|
||||
/* set the "timeout server" */
|
||||
s->scb->ioto = hc->timeout_server;
|
||||
|
|
|
|||
|
|
@ -552,6 +552,32 @@ struct server *chash_get_next_server(struct proxy *p, struct server *srvtoavoid)
|
|||
return srv;
|
||||
}
|
||||
|
||||
/* Allocates and initializes lb nodes for server <srv>. Returns < 0 on error.
|
||||
* This is called by chash_init_server_tree() as well as from srv_alloc_lb()
|
||||
* for runtime addition.
|
||||
*/
|
||||
int chash_server_init(struct server *srv)
|
||||
{
|
||||
int node;
|
||||
|
||||
srv->lb_nodes = calloc(srv->lb_nodes_tot, sizeof(*srv->lb_nodes));
|
||||
if (!srv->lb_nodes)
|
||||
return -1;
|
||||
|
||||
srv->lb_server_key = chash_compute_server_key(srv);
|
||||
for (node = 0; node < srv->lb_nodes_tot; node++) {
|
||||
srv->lb_nodes[node].server = srv;
|
||||
srv->lb_nodes[node].node.key = chash_compute_node_key(srv, node);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Releases the allocated lb_nodes for this server */
|
||||
void chash_server_deinit(struct server *srv)
|
||||
{
|
||||
ha_free(&srv->lb_nodes);
|
||||
}
|
||||
|
||||
/* This function is responsible for building the active and backup trees for
|
||||
* consistent hashing. The servers receive an array of initialized nodes
|
||||
* with their assigned keys. It also sets p->lbprm.wdiv to the eweight to
|
||||
|
|
@ -562,11 +588,12 @@ int chash_init_server_tree(struct proxy *p)
|
|||
{
|
||||
struct server *srv;
|
||||
struct eb_root init_head = EB_ROOT;
|
||||
int node;
|
||||
|
||||
p->lbprm.set_server_status_up = chash_set_server_status_up;
|
||||
p->lbprm.set_server_status_down = chash_set_server_status_down;
|
||||
p->lbprm.update_server_eweight = chash_update_server_weight;
|
||||
p->lbprm.server_init = chash_server_init;
|
||||
p->lbprm.server_deinit = chash_server_deinit;
|
||||
p->lbprm.server_take_conn = NULL;
|
||||
p->lbprm.server_drop_conn = NULL;
|
||||
|
||||
|
|
@ -588,17 +615,11 @@ int chash_init_server_tree(struct proxy *p)
|
|||
srv->lb_tree = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.chash.bck : &p->lbprm.chash.act;
|
||||
srv->lb_nodes_tot = srv->uweight * BE_WEIGHT_SCALE;
|
||||
srv->lb_nodes_now = 0;
|
||||
srv->lb_nodes = calloc(srv->lb_nodes_tot,
|
||||
sizeof(*srv->lb_nodes));
|
||||
if (!srv->lb_nodes) {
|
||||
|
||||
if (chash_server_init(srv) < 0) {
|
||||
ha_alert("failed to allocate lb_nodes for server %s.\n", srv->id);
|
||||
return -1;
|
||||
}
|
||||
srv->lb_server_key = chash_compute_server_key(srv);
|
||||
for (node = 0; node < srv->lb_nodes_tot; node++) {
|
||||
srv->lb_nodes[node].server = srv;
|
||||
srv->lb_nodes[node].node.key = chash_compute_node_key(srv, node);
|
||||
}
|
||||
|
||||
if (srv_currently_usable(srv))
|
||||
chash_queue_dequeue_srv(srv);
|
||||
|
|
|
|||
|
|
@ -879,6 +879,11 @@ struct shard_info *shard_info_attach(struct receiver *rx, struct shard_info *si)
|
|||
return NULL;
|
||||
|
||||
si->ref = rx;
|
||||
si->members = calloc(global.nbtgroups, sizeof(*si->members));
|
||||
if (si->members == NULL) {
|
||||
free(si);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
rx->shard_info = si;
|
||||
|
|
@ -921,8 +926,10 @@ void shard_info_detach(struct receiver *rx)
|
|||
si->members[si->nbgroups] = NULL;
|
||||
si->ref = si->members[0];
|
||||
|
||||
if (!si->nbgroups)
|
||||
if (!si->nbgroups) {
|
||||
free(si->members);
|
||||
free(si);
|
||||
}
|
||||
}
|
||||
|
||||
/* clones listener <src> and returns the new one. All dynamically allocated
|
||||
|
|
@ -1110,7 +1117,7 @@ void listener_accept(struct listener *l)
|
|||
int max = 0;
|
||||
int it;
|
||||
|
||||
for (it = 0; (it < global.nbtgroups && p->fe_counters.shared.tg[it]); it++)
|
||||
for (it = 0; (it < global.nbtgroups && p->fe_counters.shared.tg && p->fe_counters.shared.tg[it]); it++)
|
||||
max += freq_ctr_remain(&p->fe_counters.shared.tg[it]->sess_per_sec, p->fe_sps_lim, 0);
|
||||
|
||||
if (unlikely(!max)) {
|
||||
|
|
@ -1749,7 +1756,8 @@ int bind_complete_thread_setup(struct bind_conf *bind_conf, int *err_code)
|
|||
struct listener *li, *new_li, *ref;
|
||||
struct thread_set new_ts;
|
||||
int shard, shards, todo, done, grp, dups;
|
||||
ulong mask, gmask, bit;
|
||||
ulong mask, bit;
|
||||
int nbgrps;
|
||||
int cfgerr = 0;
|
||||
char *err;
|
||||
|
||||
|
|
@ -1781,7 +1789,7 @@ int bind_complete_thread_setup(struct bind_conf *bind_conf, int *err_code)
|
|||
}
|
||||
}
|
||||
else if (shards == -2)
|
||||
shards = protocol_supports_flag(li->rx.proto, PROTO_F_REUSEPORT_SUPPORTED) ? my_popcountl(bind_conf->thread_set.grps) : 1;
|
||||
shards = protocol_supports_flag(li->rx.proto, PROTO_F_REUSEPORT_SUPPORTED) ? bind_conf->thread_set.nbgrps : 1;
|
||||
|
||||
/* no more shards than total threads */
|
||||
if (shards > todo)
|
||||
|
|
@ -1814,25 +1822,25 @@ int bind_complete_thread_setup(struct bind_conf *bind_conf, int *err_code)
|
|||
|
||||
/* take next unassigned bit */
|
||||
bit = (bind_conf->thread_set.rel[grp] & ~mask) & -(bind_conf->thread_set.rel[grp] & ~mask);
|
||||
if (!new_ts.rel[grp])
|
||||
new_ts.nbgrps++;
|
||||
new_ts.rel[grp] |= bit;
|
||||
mask |= bit;
|
||||
new_ts.grps |= 1UL << grp;
|
||||
|
||||
done += shards;
|
||||
};
|
||||
|
||||
BUG_ON(!new_ts.grps); // no more bits left unassigned
|
||||
BUG_ON(!new_ts.nbgrps); // no more group ?
|
||||
|
||||
/* Create all required listeners for all bound groups. If more than one group is
|
||||
* needed, the first receiver serves as a reference, and subsequent ones point to
|
||||
* it. We already have a listener available in new_li() so we only allocate a new
|
||||
* one if we're not on the last one. We count the remaining groups by copying their
|
||||
* mask into <gmask> and dropping the lowest bit at the end of the loop until there
|
||||
* is no more. Ah yes, it's not pretty :-/
|
||||
* one if we're not on the last one.
|
||||
*
|
||||
*/
|
||||
ref = new_li;
|
||||
gmask = new_ts.grps;
|
||||
for (dups = 0; gmask; dups++) {
|
||||
nbgrps = new_ts.nbgrps;
|
||||
for (dups = 0; nbgrps; dups++) {
|
||||
/* assign the first (and only) thread and group */
|
||||
new_li->rx.bind_thread = thread_set_nth_tmask(&new_ts, dups);
|
||||
new_li->rx.bind_tgroup = thread_set_nth_group(&new_ts, dups);
|
||||
|
|
@ -1849,8 +1857,8 @@ int bind_complete_thread_setup(struct bind_conf *bind_conf, int *err_code)
|
|||
new_li->rx.flags |= ref->rx.flags & RX_F_INHERITED_SOCK;
|
||||
}
|
||||
|
||||
gmask &= gmask - 1; // drop lowest bit
|
||||
if (gmask) {
|
||||
nbgrps--;
|
||||
if (nbgrps) {
|
||||
/* yet another listener expected in this shard, let's
|
||||
* chain it.
|
||||
*/
|
||||
|
|
@ -2665,7 +2673,7 @@ static int bind_parse_thread(char **args, int cur_arg, struct proxy *px, struct
|
|||
|
||||
l = LIST_NEXT(&conf->listeners, struct listener *, by_bind);
|
||||
if (l->rx.addr.ss_family == AF_CUST_RHTTP_SRV &&
|
||||
atleast2(conf->thread_set.grps)) {
|
||||
conf->thread_set.nbgrps >= 2) {
|
||||
memprintf(err, "'%s' : reverse HTTP bind cannot span multiple thread groups.", args[cur_arg]);
|
||||
return ERR_ALERT | ERR_FATAL;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6276,7 +6276,10 @@ int cfg_parse_log_forward(const char *file, int linenum, char **args, int kwm)
|
|||
|
||||
/* only consider options that are frontend oriented and log oriented, such options may be set
|
||||
* in px->options2 because px->options is already full of tcp/http oriented options
|
||||
* also, cfg_parse_listen_match_option() assumes global curproxy variable points to
|
||||
* currently evaluated proxy
|
||||
*/
|
||||
curproxy = cfg_log_forward;
|
||||
if (cfg_parse_listen_match_option(file, linenum, kwm, cfg_opts3, &err_code, args,
|
||||
PR_MODE_SYSLOG, PR_CAP_FE,
|
||||
&cfg_log_forward->options3, &cfg_log_forward->no_options3))
|
||||
|
|
|
|||
|
|
@ -2488,8 +2488,10 @@ static size_t h1_make_reqline(struct h1s *h1s, struct h1m *h1m, struct htx *htx,
|
|||
goto end;
|
||||
type = htx_get_blk_type(blk);
|
||||
sz = htx_get_blksz(blk);
|
||||
if (type == HTX_BLK_UNUSED)
|
||||
if (type == HTX_BLK_UNUSED) {
|
||||
htx_remove_blk(htx, blk);
|
||||
continue;
|
||||
}
|
||||
if (type != HTX_BLK_REQ_SL || sz > count)
|
||||
goto error;
|
||||
break;
|
||||
|
|
@ -2577,8 +2579,10 @@ static size_t h1_make_stline(struct h1s *h1s, struct h1m *h1m, struct htx *htx,
|
|||
type = htx_get_blk_type(blk);
|
||||
sz = htx_get_blksz(blk);
|
||||
|
||||
if (type == HTX_BLK_UNUSED)
|
||||
if (type == HTX_BLK_UNUSED) {
|
||||
htx_remove_blk(htx, blk);
|
||||
continue;
|
||||
}
|
||||
if (type != HTX_BLK_RES_SL || sz > count)
|
||||
goto error;
|
||||
break;
|
||||
|
|
|
|||
|
|
@ -462,6 +462,13 @@ static void qcs_close_remote(struct qcs *qcs)
|
|||
qcs->st = QC_SS_CLO;
|
||||
}
|
||||
|
||||
/* Cancel STOP_SENDING emission as it is now unneeded. */
|
||||
if (qcs->st == QC_SS_CLO && (qcs->flags & QC_SF_TO_STOP_SENDING)) {
|
||||
qcs->flags &= ~QC_SF_TO_STOP_SENDING;
|
||||
/* Remove from send_list. Necessary to ensure BUG_ON() below is not triggered. */
|
||||
LIST_DEL_INIT(&qcs->el_send);
|
||||
}
|
||||
|
||||
if (qcs_is_completed(qcs)) {
|
||||
BUG_ON(LIST_INLIST(&qcs->el_send));
|
||||
TRACE_STATE("add stream in purg_list", QMUX_EV_QCS_RECV, qcs->qcc->conn, qcs);
|
||||
|
|
|
|||
|
|
@ -652,8 +652,8 @@ static int sample_conv_tcp_win(const struct arg *arg_p, struct sample *smp, void
|
|||
/* Builds a binary fingerprint of the IP+TCP input contents that are supposed
|
||||
* to rely essentially on the client stack's settings. This can be used for
|
||||
* example to selectively block bad behaviors at one IP address without
|
||||
* blocking others. The resulting fingerprint is a binary block of 56 to 376
|
||||
* bytes long (56 being the fixed part and the rest depending on the provided
|
||||
* blocking others. The resulting fingerprint is a binary block of 64 to 384
|
||||
* bits long (64 being the fixed part and the rest depending on the provided
|
||||
* TCP extensions).
|
||||
*/
|
||||
static int sample_conv_ip_fp(const struct arg *arg_p, struct sample *smp, void *private)
|
||||
|
|
@ -668,6 +668,7 @@ static int sample_conv_ip_fp(const struct arg *arg_p, struct sample *smp, void *
|
|||
uchar tcpflags;
|
||||
uchar tcplen;
|
||||
uchar tcpws;
|
||||
uchar opts;
|
||||
ushort pktlen;
|
||||
ushort tcpwin;
|
||||
ushort tcpmss;
|
||||
|
|
@ -706,7 +707,7 @@ static int sample_conv_ip_fp(const struct arg *arg_p, struct sample *smp, void *
|
|||
if (smp->data.u.str.data < 40)
|
||||
return 0;
|
||||
|
||||
pktlen = read_n16(smp->data.u.str.area + 4);
|
||||
pktlen = 40 + read_n16(smp->data.u.str.area + 4);
|
||||
// extension/next proto => ext present if !tcp && !udp
|
||||
ipext = smp->data.u.str.area[6];
|
||||
ipext = ipext != 6 && ipext != 17;
|
||||
|
|
@ -719,8 +720,8 @@ static int sample_conv_ip_fp(const struct arg *arg_p, struct sample *smp, void *
|
|||
else
|
||||
return 0;
|
||||
|
||||
/* prepare trash to contain at least 7 bytes */
|
||||
trash->data = 7;
|
||||
/* prepare trash to contain at least 8 bytes */
|
||||
trash->data = 8;
|
||||
|
||||
/* store the TOS in the FP's first byte */
|
||||
trash->area[0] = iptos;
|
||||
|
|
@ -763,9 +764,11 @@ static int sample_conv_ip_fp(const struct arg *arg_p, struct sample *smp, void *
|
|||
(tcpflags >> 6 << 0); // CWR, ECE
|
||||
|
||||
tcpmss = tcpws = 0;
|
||||
opts = 0;
|
||||
ofs = 20;
|
||||
while (ofs < tcplen) {
|
||||
size_t next;
|
||||
uchar opt;
|
||||
|
||||
if (smp->data.u.str.area[ofs] == 0) // kind0=end of options
|
||||
break;
|
||||
|
|
@ -782,17 +785,24 @@ static int sample_conv_ip_fp(const struct arg *arg_p, struct sample *smp, void *
|
|||
break;
|
||||
|
||||
/* option is complete, take a copy of it */
|
||||
if (mode & 2) // mode & 2: append tcp.options_list
|
||||
trash->area[trash->data++] = smp->data.u.str.area[ofs];
|
||||
opt = smp->data.u.str.area[ofs];
|
||||
|
||||
if (smp->data.u.str.area[ofs] == 2 /* MSS */) {
|
||||
if (mode & 2) // mode & 2: append tcp.options_list
|
||||
trash->area[trash->data++] = opt;
|
||||
|
||||
if (opt == 2 /* MSS */) {
|
||||
tcpmss = read_n16(smp->data.u.str.area + ofs + 2);
|
||||
}
|
||||
else if (smp->data.u.str.area[ofs] == 3 /* WS */) {
|
||||
else if (opt == 3 /* WS */) {
|
||||
tcpws = (uchar)smp->data.u.str.area[ofs + 2];
|
||||
/* output from 1 to 15, thus 0=not found */
|
||||
tcpws = tcpws > 14 ? 15 : tcpws + 1;
|
||||
}
|
||||
|
||||
/* keep a presence mask of opts 2..8 and others */
|
||||
if (opt >= 2)
|
||||
opts |= 1 << (opt < 9 ? opt - 2 : 7);
|
||||
|
||||
ofs = next;
|
||||
}
|
||||
|
||||
|
|
@ -803,6 +813,9 @@ static int sample_conv_ip_fp(const struct arg *arg_p, struct sample *smp, void *
|
|||
write_n16(trash->area + 3, tcpwin);
|
||||
write_n16(trash->area + 5, tcpmss);
|
||||
|
||||
/* the the bit mask of present options */
|
||||
trash->area[7] = opts;
|
||||
|
||||
/* mode 4: append source IP address */
|
||||
if (mode & 4) {
|
||||
iplen = (ipver == 4) ? 4 : 16;
|
||||
|
|
|
|||
34
src/pool.c
34
src/pool.c
|
|
@ -302,7 +302,7 @@ static int mem_should_fail(const struct pool_head *pool)
|
|||
* registration struct. Use create_pool() instead which does it for free.
|
||||
* The alignment will be stored as-is in the registration.
|
||||
*/
|
||||
struct pool_head *create_pool_with_loc(const char *name, unsigned int size,
|
||||
struct pool_head *create_pool_with_loc(const char *name, ullong size,
|
||||
unsigned int align, unsigned int flags,
|
||||
const char *file, unsigned int line)
|
||||
{
|
||||
|
|
@ -335,7 +335,8 @@ struct pool_head *create_pool_from_reg(const char *name, struct pool_registratio
|
|||
{
|
||||
unsigned int extra_mark, extra_caller, extra;
|
||||
unsigned int flags = reg->flags;
|
||||
unsigned int size = reg->size;
|
||||
ullong reg_size = reg->size; // copy of the originally requested size
|
||||
ullong size = reg_size;
|
||||
unsigned int alignment = reg->align;
|
||||
struct pool_head *pool = NULL;
|
||||
struct pool_head *entry;
|
||||
|
|
@ -374,6 +375,9 @@ struct pool_head *create_pool_from_reg(const char *name, struct pool_registratio
|
|||
extra_caller = (pool_debugging & POOL_DBG_CALLER) ? POOL_EXTRA_CALLER : 0;
|
||||
extra = extra_mark + extra_caller;
|
||||
|
||||
if (size > 0xFFFFFFFFULL || (size + extra) > 0xFFFFFFFFULL || (uint)(size + extra) < (uint)reg_size)
|
||||
goto ovf;
|
||||
|
||||
if (!(pool_debugging & POOL_DBG_NO_CACHE)) {
|
||||
/* we'll store two lists there, we need the room for this. Let's
|
||||
* make sure it's always OK even when including the extra word
|
||||
|
|
@ -392,7 +396,7 @@ struct pool_head *create_pool_from_reg(const char *name, struct pool_registratio
|
|||
*/
|
||||
if (!(flags & MEM_F_EXACT)) {
|
||||
align = (pool_debugging & POOL_DBG_TAG) ? sizeof(void *) : 16;
|
||||
size = ((size + align - 1) & -align);
|
||||
size = ((size + align - 1) & -(ullong)align);
|
||||
}
|
||||
|
||||
if (pool_debugging & POOL_DBG_BACKUP) {
|
||||
|
|
@ -402,6 +406,9 @@ struct pool_head *create_pool_from_reg(const char *name, struct pool_registratio
|
|||
extra += size;
|
||||
}
|
||||
|
||||
if (size > 0xFFFFFFFFULL || (size + extra) > 0xFFFFFFFFULL || (uint)(size + extra) < (uint)reg_size)
|
||||
goto ovf;
|
||||
|
||||
/* TODO: thread: we do not lock pool list for now because all pools are
|
||||
* created during HAProxy startup (so before threads creation) */
|
||||
start = &pools;
|
||||
|
|
@ -496,6 +503,11 @@ struct pool_head *create_pool_from_reg(const char *name, struct pool_registratio
|
|||
|
||||
fail:
|
||||
return pool;
|
||||
ovf:
|
||||
ha_alert("Failed to create pool '%s' of size '%llu': overflow detected due to too large "
|
||||
"a configured size and/or configured pool options. Aborting.\n",
|
||||
name, reg_size);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Tries to allocate an object for the pool <pool> using the system's allocator
|
||||
|
|
@ -794,7 +806,8 @@ void pool_put_to_cache(struct pool_head *pool, void *ptr, const void *caller)
|
|||
if (unlikely(pool_cache_bytes > global.tune.pool_cache_size * 3 / 4)) {
|
||||
uint64_t mem_wait_start = 0;
|
||||
|
||||
if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING))
|
||||
if (unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_M)) ==
|
||||
(TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_M)))
|
||||
mem_wait_start = now_mono_time();
|
||||
|
||||
if (ph->count >= 16 + pool_cache_count / 8 + CONFIG_HAP_POOL_CLUSTER_SIZE)
|
||||
|
|
@ -957,7 +970,8 @@ void pool_gc(struct pool_head *pool_ctx)
|
|||
uint64_t mem_wait_start = 0;
|
||||
int isolated = thread_isolated();
|
||||
|
||||
if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING))
|
||||
if (unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_M)) ==
|
||||
(TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_M)))
|
||||
mem_wait_start = now_mono_time();
|
||||
|
||||
if (!isolated)
|
||||
|
|
@ -1019,7 +1033,8 @@ void *__pool_alloc(struct pool_head *pool, unsigned int flags)
|
|||
/* count allocation time only for cache misses */
|
||||
uint64_t mem_wait_start = 0;
|
||||
|
||||
if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING))
|
||||
if (unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_M)) ==
|
||||
(TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_M)))
|
||||
mem_wait_start = now_mono_time();
|
||||
|
||||
p = pool_alloc_nocache(pool, caller);
|
||||
|
|
@ -1097,7 +1112,8 @@ void __pool_free(struct pool_head *pool, void *ptr)
|
|||
global.tune.pool_cache_size < pool->size)) {
|
||||
uint64_t mem_wait_start = 0;
|
||||
|
||||
if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING))
|
||||
if (unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_M)) ==
|
||||
(TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_M)))
|
||||
mem_wait_start = now_mono_time();
|
||||
|
||||
pool_free_nocache(pool, ptr);
|
||||
|
|
@ -1428,7 +1444,7 @@ int dump_pools_info(struct appctx *appctx)
|
|||
struct pool_registration *reg;
|
||||
|
||||
list_for_each_entry(reg, &ctx->pool_info[i].entry->regs, list) {
|
||||
chunk_appendf(&trash, " > %-12s: size=%u flags=%#x align=%u", reg->name, reg->size, reg->flags, reg->align);
|
||||
chunk_appendf(&trash, " > %-12s: size=%llu flags=%#x align=%u", reg->name, reg->size, reg->flags, reg->align);
|
||||
if (reg->file && reg->line)
|
||||
chunk_appendf(&trash, " [%s:%u]", reg->file, reg->line);
|
||||
chunk_appendf(&trash, "\n");
|
||||
|
|
@ -1651,7 +1667,7 @@ void create_pool_callback(struct pool_head **ptr, char *name, struct pool_regist
|
|||
{
|
||||
*ptr = create_pool_from_reg(name, reg);
|
||||
if (!*ptr) {
|
||||
ha_alert("Failed to allocate pool '%s' of size %u : %s. Aborting.\n",
|
||||
ha_alert("Failed to allocate pool '%s' of size %llu : %s. Aborting.\n",
|
||||
name, reg->size, strerror(errno));
|
||||
exit(1);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -237,12 +237,15 @@ int send_fd_uxst(int fd, int send_fd)
|
|||
struct iovec iov;
|
||||
struct msghdr msghdr;
|
||||
|
||||
char cmsgbuf[CMSG_SPACE(sizeof(int))] = {0};
|
||||
char buf[CMSG_SPACE(sizeof(int))] = {0};
|
||||
char cmsgbuf[CMSG_SPACE(sizeof(int))];
|
||||
char buf[CMSG_SPACE(sizeof(int))];
|
||||
struct cmsghdr *cmsg = (void *)buf;
|
||||
|
||||
int *fdptr;
|
||||
|
||||
memset(cmsgbuf, 0, sizeof(cmsgbuf));
|
||||
memset(buf, 0, sizeof(buf));
|
||||
|
||||
iov.iov_base = iobuf;
|
||||
iov.iov_len = sizeof(iobuf);
|
||||
|
||||
|
|
|
|||
|
|
@ -1028,7 +1028,32 @@ static int tcp_get_info(struct connection *conn, long long int *info, int info_n
|
|||
|
||||
static void __proto_tcp_init(void)
|
||||
{
|
||||
#if defined(__linux__) && !defined(TCP_MD5SIG)
|
||||
#if defined(__linux__) && defined(TCP_MD5SIG)
|
||||
/* check if the setsockopt works to register a line in haproxy -vv */
|
||||
struct sockaddr_in *addr;
|
||||
int fd;
|
||||
struct tcp_md5sig md5 = {};
|
||||
|
||||
|
||||
addr = (struct sockaddr_in *)&md5.tcpm_addr;
|
||||
|
||||
addr->sin_family = AF_INET;
|
||||
addr->sin_port = 0;
|
||||
addr->sin_addr.s_addr = htonl(0x7F000001);
|
||||
|
||||
fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
|
||||
if (fd < 0) {
|
||||
goto end;
|
||||
}
|
||||
md5.tcpm_keylen = strlcpy2((char*)md5.tcpm_key, "foobar", sizeof(md5.tcpm_key));
|
||||
if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5)) < 0) {
|
||||
goto end;
|
||||
}
|
||||
hap_register_feature("HAVE_WORKING_TCP_MD5SIG");
|
||||
end:
|
||||
if (fd >= 0)
|
||||
close(fd);
|
||||
|
||||
hap_register_feature("HAVE_TCP_MD5SIG");
|
||||
#endif
|
||||
}
|
||||
|
|
|
|||
1872
src/proxy.c
1872
src/proxy.c
File diff suppressed because it is too large
Load diff
35
src/queue.c
35
src/queue.c
|
|
@ -105,11 +105,8 @@ unsigned int srv_dynamic_maxconn(const struct server *s)
|
|||
{
|
||||
unsigned int max;
|
||||
|
||||
if (s->proxy->beconn >= s->proxy->fullconn)
|
||||
/* no fullconn or proxy is full */
|
||||
max = s->maxconn;
|
||||
else if (s->minconn == s->maxconn)
|
||||
/* static limit */
|
||||
if (s->minconn == s->maxconn || s->proxy->beconn >= s->proxy->fullconn)
|
||||
/* static limit, or no fullconn or proxy is full */
|
||||
max = s->maxconn;
|
||||
else max = MAX(s->minconn,
|
||||
s->proxy->beconn * s->maxconn / s->proxy->fullconn);
|
||||
|
|
@ -386,11 +383,23 @@ int process_srv_queue(struct server *s)
|
|||
{
|
||||
struct server *ref = s->track ? s->track : s;
|
||||
struct proxy *p = s->proxy;
|
||||
uint64_t non_empty_tgids = all_tgroups_mask;
|
||||
long non_empty_tgids[(global.nbtgroups / LONGBITS) + 1];
|
||||
int maxconn;
|
||||
int done = 0;
|
||||
int px_ok;
|
||||
int cur_tgrp;
|
||||
int i = global.nbtgroups;
|
||||
int curgrpnb = i;
|
||||
|
||||
|
||||
while (i >= LONGBITS) {
|
||||
non_empty_tgids[(global.nbtgroups - i) / LONGBITS] = ULONG_MAX;
|
||||
i -= LONGBITS;
|
||||
}
|
||||
while (i > 0) {
|
||||
ha_bit_set(global.nbtgroups - i, non_empty_tgids);
|
||||
i--;
|
||||
}
|
||||
|
||||
/* if a server is not usable or backup and must not be used
|
||||
* to dequeue backend requests.
|
||||
|
|
@ -420,7 +429,7 @@ int process_srv_queue(struct server *s)
|
|||
* to our thread group, then we'll get one from a different one, to
|
||||
* be sure those actually get processed too.
|
||||
*/
|
||||
while (non_empty_tgids != 0
|
||||
while (curgrpnb != 0
|
||||
&& (done < global.tune.maxpollevents || !s->served) &&
|
||||
s->served < (maxconn = srv_dynamic_maxconn(s))) {
|
||||
int self_served;
|
||||
|
|
@ -431,8 +440,8 @@ int process_srv_queue(struct server *s)
|
|||
* from our own thread-group queue.
|
||||
*/
|
||||
self_served = _HA_ATOMIC_LOAD(&s->per_tgrp[tgid - 1].self_served) % (MAX_SELF_USE_QUEUE + 1);
|
||||
if ((self_served == MAX_SELF_USE_QUEUE && non_empty_tgids != (1UL << (tgid - 1))) ||
|
||||
!(non_empty_tgids & (1UL << (tgid - 1)))) {
|
||||
if ((self_served == MAX_SELF_USE_QUEUE && (curgrpnb > 1 || !ha_bit_test(tgid - 1, non_empty_tgids))) ||
|
||||
!ha_bit_test(tgid - 1, non_empty_tgids)) {
|
||||
unsigned int old_served, new_served;
|
||||
|
||||
/*
|
||||
|
|
@ -452,7 +461,7 @@ int process_srv_queue(struct server *s)
|
|||
*/
|
||||
while (new_served == tgid ||
|
||||
new_served == global.nbtgroups + 1 ||
|
||||
!(non_empty_tgids & (1UL << (new_served - 1)))) {
|
||||
!ha_bit_test(new_served - 1, non_empty_tgids)) {
|
||||
if (new_served == global.nbtgroups + 1)
|
||||
new_served = 1;
|
||||
else
|
||||
|
|
@ -468,7 +477,8 @@ int process_srv_queue(struct server *s)
|
|||
to_dequeue = MAX_SELF_USE_QUEUE - self_served;
|
||||
}
|
||||
if (HA_ATOMIC_XCHG(&s->per_tgrp[cur_tgrp - 1].dequeuing, 1)) {
|
||||
non_empty_tgids &= ~(1UL << (cur_tgrp - 1));
|
||||
ha_bit_clr(cur_tgrp - 1, non_empty_tgids);
|
||||
curgrpnb--;
|
||||
continue;
|
||||
}
|
||||
|
||||
|
|
@ -479,7 +489,8 @@ int process_srv_queue(struct server *s)
|
|||
* the served field, only if it is < maxconn.
|
||||
*/
|
||||
if (!pendconn_process_next_strm(s, p, px_ok, cur_tgrp)) {
|
||||
non_empty_tgids &= ~(1UL << (cur_tgrp - 1));
|
||||
ha_bit_clr(cur_tgrp - 1, non_empty_tgids);
|
||||
curgrpnb--;
|
||||
break;
|
||||
}
|
||||
to_dequeue--;
|
||||
|
|
|
|||
|
|
@ -363,11 +363,16 @@ static int quic_parse_ack_ecn_frame(struct quic_frame *frm, struct quic_conn *qc
|
|||
const unsigned char **pos, const unsigned char *end)
|
||||
{
|
||||
struct qf_ack *ack_frm = &frm->ack;
|
||||
/* TODO implement ECN advertising */
|
||||
uint64_t ect0, ect1, ecn_ce;
|
||||
|
||||
return quic_dec_int(&ack_frm->largest_ack, pos, end) &&
|
||||
quic_dec_int(&ack_frm->ack_delay, pos, end) &&
|
||||
quic_dec_int(&ack_frm->first_ack_range, pos, end) &&
|
||||
quic_dec_int(&ack_frm->ack_range_num, pos, end);
|
||||
quic_dec_int(&ack_frm->ack_delay, pos, end) &&
|
||||
quic_dec_int(&ack_frm->ack_range_num, pos, end) &&
|
||||
quic_dec_int(&ack_frm->first_ack_range, pos, end) &&
|
||||
quic_dec_int(&ect0, pos, end) &&
|
||||
quic_dec_int(&ect1, pos, end) &&
|
||||
quic_dec_int(&ecn_ce, pos, end);
|
||||
}
|
||||
|
||||
/* Encode a RESET_STREAM frame at <pos> buffer position.
|
||||
|
|
@ -1161,7 +1166,12 @@ int qc_parse_frm(struct quic_frame *frm, struct quic_rx_packet *pkt,
|
|||
goto leave;
|
||||
}
|
||||
|
||||
quic_dec_int(&frm->type, pos, end);
|
||||
if (!quic_dec_int(&frm->type, pos, end)) {
|
||||
TRACE_ERROR("malformed frame type", QUIC_EV_CONN_PRSFRM, qc);
|
||||
quic_set_connection_close(qc, quic_err_transport(QC_ERR_FRAME_ENCODING_ERROR));
|
||||
goto leave;
|
||||
}
|
||||
|
||||
if (!quic_frame_type_is_known(frm->type)) {
|
||||
/* RFC 9000 12.4. Frames and Frame Types
|
||||
*
|
||||
|
|
|
|||
|
|
@ -359,8 +359,7 @@ leave:
|
|||
}
|
||||
|
||||
/* Callback use to parse TLS messages for <ssl> TLS session. */
|
||||
void quic_tls_compat_msg_callback(struct connection *conn,
|
||||
int write_p, int version, int content_type,
|
||||
void quic_tls_compat_msg_callback(int write_p, int version, int content_type,
|
||||
const void *buf, size_t len, SSL *ssl)
|
||||
{
|
||||
unsigned int alert;
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue