mirror of
https://github.com/haproxy/haproxy.git
synced 2026-03-09 01:20:40 -04:00
Compare commits
292 commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9b3345237a | ||
|
|
2a0cf52cfc | ||
|
|
551e5f5fd4 | ||
|
|
2a2989bb23 | ||
|
|
cbebdb4ba8 | ||
|
|
9549b05b94 | ||
|
|
5e14904fef | ||
|
|
af6b9a0967 | ||
|
|
bfe5a2c3d7 | ||
|
|
b48c9a1465 | ||
|
|
fcfabd0d90 | ||
|
|
5d02d33ee1 | ||
|
|
1544842801 | ||
|
|
88bc2bdfc9 | ||
|
|
0087651128 | ||
|
|
65d3416da5 | ||
|
|
7d48e80da5 | ||
|
|
9b22f22858 | ||
|
|
25d6e65aae | ||
|
|
96286b2a84 | ||
|
|
306931dfb1 | ||
|
|
4791501011 | ||
|
|
e0728ebcf4 | ||
|
|
97a63835af | ||
|
|
50fb37e5fe | ||
|
|
cd9f159210 | ||
|
|
3eadf887f7 | ||
|
|
9f1e9ee0ed | ||
|
|
4939f18ff7 | ||
|
|
b2ba3c6662 | ||
|
|
7bfb66d2b1 | ||
|
|
7a474855b4 | ||
|
|
88765b69e0 | ||
|
|
9951f9cf85 | ||
|
|
e6a8ef5521 | ||
|
|
7fe1a92bb3 | ||
|
|
a779d0d23a | ||
|
|
0a02acecf3 | ||
|
|
cdcdc016cc | ||
|
|
54b614d2b5 | ||
|
|
e38b86e72c | ||
|
|
7315428615 | ||
|
|
b1441c6440 | ||
|
|
940e1820f6 | ||
|
|
9c7cf1c684 | ||
|
|
4120faf289 | ||
|
|
58830990d0 | ||
|
|
13c3445163 | ||
|
|
f41e684e9a | ||
|
|
e07a75c764 | ||
|
|
2f5030c847 | ||
|
|
712055f2f8 | ||
|
|
6145f52d9c | ||
|
|
f64aa036d8 | ||
|
|
f58b2698ce | ||
|
|
a7d1c59a92 | ||
|
|
98c8c5e16e | ||
|
|
5ddfbd4b03 | ||
|
|
053887cc98 | ||
|
|
7f725f0754 | ||
|
|
7bf3020952 | ||
|
|
ad1e00b2ac | ||
|
|
f521c2ce2d | ||
|
|
ee1f0527c6 | ||
|
|
f3127df74d | ||
|
|
ebbdfc5915 | ||
|
|
dd1990a97a | ||
|
|
20376c54e2 | ||
|
|
4bcfc09acf | ||
|
|
08623228a1 | ||
|
|
d166894fef | ||
|
|
dd55f2246e | ||
|
|
78549c66c5 | ||
|
|
5000f0b2ef | ||
|
|
00a106059e | ||
|
|
9db62d408a | ||
|
|
b604064980 | ||
|
|
9019a5db93 | ||
|
|
de0eddf512 | ||
|
|
a60e1fcf7f | ||
|
|
7ac47910a2 | ||
|
|
04a9f86a85 | ||
|
|
8dd22a62a4 | ||
|
|
95a9f472d2 | ||
|
|
56fc12d6fa | ||
|
|
2b463e9b1f | ||
|
|
9910af6117 | ||
|
|
fb5e280e0d | ||
|
|
709c3be845 | ||
|
|
44932b6c41 | ||
|
|
e67e36c9eb | ||
|
|
cad6e0b3da | ||
|
|
5af42fa342 | ||
|
|
89c75b0777 | ||
|
|
84837b6e70 | ||
|
|
ca5332a9c3 | ||
|
|
a9dc8e2587 | ||
|
|
05d73aa81c | ||
|
|
c528824094 | ||
|
|
868dd3e88b | ||
|
|
c2b5446292 | ||
|
|
bbd8492c22 | ||
|
|
bf363a7135 | ||
|
|
c44d6c6c71 | ||
|
|
dfa8907a3d | ||
|
|
bb3304c6af | ||
|
|
a5a053e612 | ||
|
|
5aa30847ae | ||
|
|
ca5c07b677 | ||
|
|
8d54cda0af | ||
|
|
e63722fed4 | ||
|
|
0f95e73032 | ||
|
|
5f26cf162c | ||
|
|
b8cb8e1a65 | ||
|
|
db360d466b | ||
|
|
92581043fb | ||
|
|
8927426f78 | ||
|
|
f71b2f4338 | ||
|
|
de5fc2f515 | ||
|
|
2b0fc33114 | ||
|
|
f5a182c7e7 | ||
|
|
028940725a | ||
|
|
41a71aec3d | ||
|
|
09bf116242 | ||
|
|
2b7849fd02 | ||
|
|
04a4d242c9 | ||
|
|
0bb686a72d | ||
|
|
b007b7aa04 | ||
|
|
c9d47804d1 | ||
|
|
2bf091e9da | ||
|
|
1dc20a630a | ||
|
|
6013f4baeb | ||
|
|
234ce775c3 | ||
|
|
5d3bca4b17 | ||
|
|
36b1fba871 | ||
|
|
b0240bcfaf | ||
|
|
c71ef2969b | ||
|
|
55e9c67381 | ||
|
|
194a67600e | ||
|
|
92e3635679 | ||
|
|
6ab86ca14c | ||
|
|
5a079d1811 | ||
|
|
c26c721312 | ||
|
|
27e1ec8ca9 | ||
|
|
3e6d030ce2 | ||
|
|
dfe1de4335 | ||
|
|
cfa30dea4e | ||
|
|
a324616cdb | ||
|
|
5737fc9518 | ||
|
|
9ecd0011c1 | ||
|
|
4c6ca0b471 | ||
|
|
dfc4085413 | ||
|
|
bd25c63067 | ||
|
|
ce912271db | ||
|
|
d89ec33a34 | ||
|
|
ee309bafcf | ||
|
|
f559c202fb | ||
|
|
4f27a72d19 | ||
|
|
0c6f2207fc | ||
|
|
783db96ccb | ||
|
|
a8887e55a0 | ||
|
|
e62e8de5a7 | ||
|
|
bc586b4138 | ||
|
|
8b27dfdfb0 | ||
|
|
36282ae348 | ||
|
|
f82ace414b | ||
|
|
53b7150357 | ||
|
|
5965a6e1d2 | ||
|
|
9ad9def126 | ||
|
|
fae478dae5 | ||
|
|
6bf450b7fe | ||
|
|
fc89ff76c7 | ||
|
|
806c8c830d | ||
|
|
1b1a0b3bae | ||
|
|
0aca25f725 | ||
|
|
9e17087aeb | ||
|
|
b248b1c021 | ||
|
|
829002d459 | ||
|
|
92307b5fec | ||
|
|
ccb075fa1b | ||
|
|
1b7843f1c1 | ||
|
|
8e0c2599b6 | ||
|
|
cda056b9f4 | ||
|
|
2f94f61c31 | ||
|
|
18a78956cb | ||
|
|
4c275c7d17 | ||
|
|
f3003d1508 | ||
|
|
747ff09818 | ||
|
|
1274c21a42 | ||
|
|
0016d45a9c | ||
|
|
e0d1cdff6a | ||
|
|
86df0e206e | ||
|
|
df8e05815c | ||
|
|
1d2490c5ae | ||
|
|
cb63e899d9 | ||
|
|
076ec9443c | ||
|
|
f5d03bbe13 | ||
|
|
23aeb72798 | ||
|
|
8031bf6e03 | ||
|
|
0fad24b5da | ||
|
|
1d1daff7c4 | ||
|
|
d8ff676592 | ||
|
|
ea3b1bb866 | ||
|
|
734a139c52 | ||
|
|
7098b4f93a | ||
|
|
d8f219b380 | ||
|
|
6342705cee | ||
|
|
2d6e9e15cd | ||
|
|
5689605c8e | ||
|
|
d71e2e73ea | ||
|
|
bb6cfbe754 | ||
|
|
e88b219331 | ||
|
|
f47b800ac3 | ||
|
|
d13164e105 | ||
|
|
b90b312a50 | ||
|
|
1592ed9854 | ||
|
|
f9b3319f48 | ||
|
|
aad212954f | ||
|
|
b26f0cc45a | ||
|
|
b3a44158fb | ||
|
|
8e16fd2cf1 | ||
|
|
4aa974f949 | ||
|
|
d80f0143c9 | ||
|
|
b65df062be | ||
|
|
a8f50cff7e | ||
|
|
c622ed23c8 | ||
|
|
f5f9c008b1 | ||
|
|
ea92b0ef01 | ||
|
|
2ac0d12790 | ||
|
|
c724693b95 | ||
|
|
e2631ee5f7 | ||
|
|
a7b2353cb3 | ||
|
|
3b45beb465 | ||
|
|
64c5d45a26 | ||
|
|
62239539bf | ||
|
|
91a5b67b25 | ||
|
|
ecffaa6d5a | ||
|
|
a1db464c3e | ||
|
|
5dff6e439d | ||
|
|
d7cdd2c7f4 | ||
|
|
5753c14e84 | ||
|
|
3115eb82a6 | ||
|
|
07195a1af4 | ||
|
|
a603811aac | ||
|
|
e152913327 | ||
|
|
7ac5088c50 | ||
|
|
817003aa31 | ||
|
|
dc6cf224dd | ||
|
|
87ea407cce | ||
|
|
a8bc83bea5 | ||
|
|
2c8ad11b73 | ||
|
|
2a07dc9c24 | ||
|
|
9dd7cf769e | ||
|
|
bf7a2808fc | ||
|
|
9766211cf0 | ||
|
|
9e023ae930 | ||
|
|
68e9fb73fd | ||
|
|
143f5a5c0d | ||
|
|
b6bdb2553b | ||
|
|
3edf600859 | ||
|
|
cddeea58cd | ||
|
|
3674afe8a0 | ||
|
|
2527d9dcd1 | ||
|
|
f26562bcb7 | ||
|
|
abc1947e19 | ||
|
|
02e6375017 | ||
|
|
da728aa0f6 | ||
|
|
23e8ed6ea6 | ||
|
|
fa094d0b61 | ||
|
|
869a997a68 | ||
|
|
48d9c90ff2 | ||
|
|
35d63cc3c7 | ||
|
|
bb36836d76 | ||
|
|
a79a67b52f | ||
|
|
a9df6947b4 | ||
|
|
3ca2a83fc0 | ||
|
|
cb3fd012cd | ||
|
|
bbab0ac4d0 | ||
|
|
6995fe60c3 | ||
|
|
0ea601127e | ||
|
|
0ebef67132 | ||
|
|
9b1faee4c9 | ||
|
|
d2ccc19fde | ||
|
|
f4cd1e74ba | ||
|
|
1a3252e956 | ||
|
|
e9e4821db5 | ||
|
|
4e7c07736a | ||
|
|
c267d24f57 | ||
|
|
a3e9a04435 | ||
|
|
be68ecc37d | ||
|
|
a66b4881d7 | ||
|
|
9e9083d0e2 |
187 changed files with 9535 additions and 3531 deletions
7
.github/matrix.py
vendored
7
.github/matrix.py
vendored
|
|
@ -275,11 +275,8 @@ def main(ref_name):
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
# macOS
|
# macOS on dev branches
|
||||||
|
if "haproxy-" not in ref_name:
|
||||||
if "haproxy-" in ref_name:
|
|
||||||
os = "macos-13" # stable branch
|
|
||||||
else:
|
|
||||||
os = "macos-26" # development branch
|
os = "macos-26" # development branch
|
||||||
|
|
||||||
TARGET = "osx"
|
TARGET = "osx"
|
||||||
|
|
|
||||||
3
.github/workflows/contrib.yml
vendored
3
.github/workflows/contrib.yml
vendored
|
|
@ -11,9 +11,6 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
- name: Compile admin/halog/halog
|
|
||||||
run: |
|
|
||||||
make admin/halog/halog
|
|
||||||
- name: Compile dev/flags/flags
|
- name: Compile dev/flags/flags
|
||||||
run: |
|
run: |
|
||||||
make dev/flags/flags
|
make dev/flags/flags
|
||||||
|
|
|
||||||
71
.github/workflows/quic-interop-aws-lc.yml
vendored
71
.github/workflows/quic-interop-aws-lc.yml
vendored
|
|
@ -11,7 +11,7 @@ on:
|
||||||
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
combined-build-and-run:
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||||
permissions:
|
permissions:
|
||||||
|
|
@ -21,84 +21,47 @@ jobs:
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
|
|
||||||
- name: Log in to the Container registry
|
- name: Update Docker to the latest
|
||||||
uses: docker/login-action@v3
|
uses: docker/setup-docker-action@v4
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Build and push Docker image
|
- name: Build Docker image
|
||||||
id: push
|
id: push
|
||||||
uses: docker/build-push-action@v5
|
uses: docker/build-push-action@v6
|
||||||
with:
|
with:
|
||||||
context: https://github.com/haproxytech/haproxy-qns.git
|
context: https://github.com/haproxytech/haproxy-qns.git
|
||||||
push: true
|
platforms: linux/amd64
|
||||||
build-args: |
|
build-args: |
|
||||||
SSLLIB=AWS-LC
|
SSLLIB=AWS-LC
|
||||||
tags: ghcr.io/${{ github.repository }}:aws-lc
|
tags: local:aws-lc
|
||||||
|
|
||||||
- name: Cleanup registry
|
|
||||||
uses: actions/delete-package-versions@v5
|
|
||||||
with:
|
|
||||||
owner: ${{ github.repository_owner }}
|
|
||||||
package-name: 'haproxy'
|
|
||||||
package-type: container
|
|
||||||
min-versions-to-keep: 1
|
|
||||||
delete-only-untagged-versions: 'true'
|
|
||||||
|
|
||||||
run:
|
|
||||||
needs: build
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
suite: [
|
|
||||||
{ client: chrome, tests: "http3" },
|
|
||||||
{ client: picoquic, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" },
|
|
||||||
{ client: quic-go, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" },
|
|
||||||
{ client: ngtcp2, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" }
|
|
||||||
]
|
|
||||||
fail-fast: false
|
|
||||||
|
|
||||||
name: ${{ matrix.suite.client }}
|
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
|
|
||||||
- name: Log in to the Container registry
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Install tshark
|
- name: Install tshark
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update
|
sudo apt-get update
|
||||||
sudo apt-get -y install tshark
|
sudo apt-get -y install tshark
|
||||||
|
|
||||||
- name: Pull image
|
|
||||||
run: |
|
|
||||||
docker pull ghcr.io/${{ github.repository }}:aws-lc
|
|
||||||
|
|
||||||
- name: Run
|
- name: Run
|
||||||
run: |
|
run: |
|
||||||
git clone https://github.com/quic-interop/quic-interop-runner
|
git clone https://github.com/quic-interop/quic-interop-runner
|
||||||
cd quic-interop-runner
|
cd quic-interop-runner
|
||||||
pip install -r requirements.txt --break-system-packages
|
pip install -r requirements.txt --break-system-packages
|
||||||
python run.py -j result.json -l logs -r haproxy=ghcr.io/${{ github.repository }}:aws-lc -t ${{ matrix.suite.tests }} -c ${{ matrix.suite.client }} -s haproxy
|
python run.py -j result.json -l logs-chrome -r haproxy=local:aws-lc -t "http3" -c chrome -s haproxy
|
||||||
|
python run.py -j result.json -l logs-picoquic -r haproxy=local:aws-lc -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" -c picoquic -s haproxy
|
||||||
|
python run.py -j result.json -l logs-quic-go -r haproxy=local:aws-lc -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" -c quic-go -s haproxy
|
||||||
|
python run.py -j result.json -l logs-ngtcp2 -r haproxy=local:aws-lc -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" -c ngtcp2 -s haproxy
|
||||||
|
|
||||||
- name: Delete succeeded logs
|
- name: Delete succeeded logs
|
||||||
if: failure()
|
if: failure()
|
||||||
run: |
|
run: |
|
||||||
cd quic-interop-runner/logs/haproxy_${{ matrix.suite.client }}
|
for client in chrome picoquic quic-go ngtcp2; do
|
||||||
|
pushd quic-interop-runner/logs-${client}/haproxy_${client}
|
||||||
cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
|
cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
|
||||||
|
popd
|
||||||
|
done
|
||||||
|
|
||||||
- name: Logs upload
|
- name: Logs upload
|
||||||
if: failure()
|
if: failure()
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: logs-${{ matrix.suite.client }}
|
name: logs
|
||||||
path: quic-interop-runner/logs/
|
path: quic-interop-runner/logs*/
|
||||||
retention-days: 6
|
retention-days: 6
|
||||||
|
|
|
||||||
67
.github/workflows/quic-interop-libressl.yml
vendored
67
.github/workflows/quic-interop-libressl.yml
vendored
|
|
@ -11,7 +11,7 @@ on:
|
||||||
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
combined-build-and-run:
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||||
permissions:
|
permissions:
|
||||||
|
|
@ -21,82 +21,45 @@ jobs:
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
|
|
||||||
- name: Log in to the Container registry
|
- name: Update Docker to the latest
|
||||||
uses: docker/login-action@v3
|
uses: docker/setup-docker-action@v4
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Build and push Docker image
|
- name: Build Docker image
|
||||||
id: push
|
id: push
|
||||||
uses: docker/build-push-action@v5
|
uses: docker/build-push-action@v6
|
||||||
with:
|
with:
|
||||||
context: https://github.com/haproxytech/haproxy-qns.git
|
context: https://github.com/haproxytech/haproxy-qns.git
|
||||||
push: true
|
platforms: linux/amd64
|
||||||
build-args: |
|
build-args: |
|
||||||
SSLLIB=LibreSSL
|
SSLLIB=LibreSSL
|
||||||
tags: ghcr.io/${{ github.repository }}:libressl
|
tags: local:libressl
|
||||||
|
|
||||||
- name: Cleanup registry
|
|
||||||
uses: actions/delete-package-versions@v5
|
|
||||||
with:
|
|
||||||
owner: ${{ github.repository_owner }}
|
|
||||||
package-name: 'haproxy'
|
|
||||||
package-type: container
|
|
||||||
min-versions-to-keep: 1
|
|
||||||
delete-only-untagged-versions: 'true'
|
|
||||||
|
|
||||||
run:
|
|
||||||
needs: build
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
suite: [
|
|
||||||
{ client: picoquic, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,v2" },
|
|
||||||
{ client: quic-go, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,transferloss,transfercorruption,v2" }
|
|
||||||
]
|
|
||||||
fail-fast: false
|
|
||||||
|
|
||||||
name: ${{ matrix.suite.client }}
|
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
|
|
||||||
- name: Log in to the Container registry
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Install tshark
|
- name: Install tshark
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update
|
sudo apt-get update
|
||||||
sudo apt-get -y install tshark
|
sudo apt-get -y install tshark
|
||||||
|
|
||||||
- name: Pull image
|
|
||||||
run: |
|
|
||||||
docker pull ghcr.io/${{ github.repository }}:libressl
|
|
||||||
|
|
||||||
- name: Run
|
- name: Run
|
||||||
run: |
|
run: |
|
||||||
git clone https://github.com/quic-interop/quic-interop-runner
|
git clone https://github.com/quic-interop/quic-interop-runner
|
||||||
cd quic-interop-runner
|
cd quic-interop-runner
|
||||||
pip install -r requirements.txt --break-system-packages
|
pip install -r requirements.txt --break-system-packages
|
||||||
python run.py -j result.json -l logs -r haproxy=ghcr.io/${{ github.repository }}:libressl -t ${{ matrix.suite.tests }} -c ${{ matrix.suite.client }} -s haproxy
|
python run.py -j result.json -l logs-picoquic -r haproxy=local:libressl -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,v2" -c picoquic -s haproxy
|
||||||
|
python run.py -j result.json -l logs-quic-go -r haproxy=local:libressl -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,transferloss,transfercorruption,v2" -c quic-go -s haproxy
|
||||||
|
|
||||||
- name: Delete succeeded logs
|
- name: Delete succeeded logs
|
||||||
if: failure()
|
if: failure()
|
||||||
run: |
|
run: |
|
||||||
cd quic-interop-runner/logs/haproxy_${{ matrix.suite.client }}
|
for client in picoquic quic-go; do
|
||||||
|
pushd quic-interop-runner/logs-${client}/haproxy_${client}
|
||||||
cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
|
cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
|
||||||
|
popd
|
||||||
|
done
|
||||||
|
|
||||||
- name: Logs upload
|
- name: Logs upload
|
||||||
if: failure()
|
if: failure()
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: logs-${{ matrix.suite.client }}
|
name: logs
|
||||||
path: quic-interop-runner/logs/
|
path: quic-interop-runner/logs*/
|
||||||
retention-days: 6
|
retention-days: 6
|
||||||
|
|
|
||||||
1
.github/workflows/windows.yml
vendored
1
.github/workflows/windows.yml
vendored
|
|
@ -18,6 +18,7 @@ jobs:
|
||||||
msys2:
|
msys2:
|
||||||
name: ${{ matrix.name }}
|
name: ${{ matrix.name }}
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
|
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
shell: msys2 {0}
|
shell: msys2 {0}
|
||||||
|
|
|
||||||
285
CHANGELOG
285
CHANGELOG
|
|
@ -1,6 +1,291 @@
|
||||||
ChangeLog :
|
ChangeLog :
|
||||||
===========
|
===========
|
||||||
|
|
||||||
|
2026/03/05 : 3.4-dev6
|
||||||
|
- CLEANUP: acme: remove duplicate includes
|
||||||
|
- BUG/MINOR: proxy: detect strdup error on server auto SNI
|
||||||
|
- BUG/MINOR: server: set auto SNI for dynamic servers
|
||||||
|
- BUG/MINOR: server: enable no-check-sni-auto for dynamic servers
|
||||||
|
- MINOR: haterm: provide -b and -c options (RSA key size, ECDSA curves)
|
||||||
|
- MINOR: haterm: add long options for QUIC and TCP "bind" settings
|
||||||
|
- BUG/MINOR: haterm: missing allocation check in copy_argv()
|
||||||
|
- BUG/MINOR: quic: fix counters used on BE side
|
||||||
|
- MINOR: quic: add BUG_ON() on half_open_conn counter access from BE
|
||||||
|
- BUG/MINOR: quic/h3: display QUIC/H3 backend module on HTML stats
|
||||||
|
- BUG/MINOR: acme: acme_ctx_destroy() leaks auth->dns
|
||||||
|
- BUG/MINOR: acme: wrong labels logic always memprintf errmsg
|
||||||
|
- MINOR: ssl: clarify error reporting for unsupported keywords
|
||||||
|
- BUG/MINOR: acme: fix incorrect number of arguments allowed in config
|
||||||
|
- CLEANUP: haterm: remove unreachable labels hstream_add_data()
|
||||||
|
- CLEANUP: haterm: avoid static analyzer warnings about rand() use
|
||||||
|
- CLEANUP: ssl: Remove a useless variable from ssl_gen_x509()
|
||||||
|
- CI: use the latest docker for QUIC Interop
|
||||||
|
- CI: remove redundant "halog" compilation
|
||||||
|
- CLENAUP: cfgparse: accept-invalid-http-* does not support "no"/"defaults"
|
||||||
|
- BUG/MEDIUM: spoe: Acquire context buffer in applet before consuming a frame
|
||||||
|
- MINOR: traces: always mark trace_source as thread-aligned
|
||||||
|
- MINOR: ncbmbuf: improve itbmap_next() code
|
||||||
|
- MINOR: proxy: improve code when checking server name conflicts
|
||||||
|
- MINOR: quic: add a new metric for ncbuf failures
|
||||||
|
- BUG/MINOR: haterm: cannot reset default "haterm" mode
|
||||||
|
- BUG/MEDIUM: cpu-topo: Distribute CPUs fairly across groups
|
||||||
|
- BUG/MINOR: quic: missing app ops init during backend 0-RTT sessions
|
||||||
|
- CLEANUP: ssl: remove outdated comments
|
||||||
|
- MINOR: mux-h2: also count glitches on invalid trailers
|
||||||
|
- MINOR: mux-h2: add a new setting, "tune.h2.log-errors" to tweak error logging
|
||||||
|
- BUG/MEDIUM: mux-h2: make sure to always report pending errors to the stream
|
||||||
|
- BUG/MINOR: server: adjust initialization order for dynamic servers
|
||||||
|
- CLEANUP: tree-wide: drop a few useless null-checks before free()
|
||||||
|
- CLEANUP: quic-stats: include counters from quic_stats
|
||||||
|
- REORG: stats/counters: move extra_counters to counters not stats
|
||||||
|
- CLEANUP: stats: drop stats.h / stats-t.h where not needed
|
||||||
|
- MEDIUM: counters: change the fill_stats() API to pass the module and extra_counters
|
||||||
|
- CLEANUP: counters: only retrieve zeroes for unallocated extra_counters
|
||||||
|
- MEDIUM: counters: add a dedicated storage for extra_counters in various structs
|
||||||
|
- MINOR: counters: store a tgroup step for extra_counters to access multiple tgroups
|
||||||
|
- MEDIUM: counters: store the number of thread groups accessing extra_counters
|
||||||
|
- MINOR: counters: add EXTRA_COUNTERS_BASE() to retrieve extra_counters base storage
|
||||||
|
- MEDIUM: counters: return aggregate extra counters in ->fill_stats()
|
||||||
|
- MEDIUM: counters: make EXTRA_COUNTERS_GET() consider tgid
|
||||||
|
- BUG/MINOR: call EXTRA_COUNTERS_FREE() before srv_free_params() in srv_drop()
|
||||||
|
- MINOR: promex: test applet resume in stress mode
|
||||||
|
- BUG/MINOR: promex: fix server iteration when last server is deleted
|
||||||
|
- BUG/MINOR: proxy: add dynamic backend into ID tree
|
||||||
|
- MINOR: proxy: convert proxy flags to uint
|
||||||
|
- MINOR: server: refactor srv_detach()
|
||||||
|
- MINOR: proxy: define a basic "del backend" CLI
|
||||||
|
- MINOR: proxy: define proxy watcher member
|
||||||
|
- MINOR: stats: protect proxy iteration via watcher
|
||||||
|
- MINOR: promex: use watcher to iterate over backend instances
|
||||||
|
- MINOR: lua: use watcher for proxies iterator
|
||||||
|
- MINOR: proxy: add refcount to proxies
|
||||||
|
- MINOR: proxy: rename default refcount to avoid confusion
|
||||||
|
- MINOR: server: take proxy refcount when deleting a server
|
||||||
|
- MINOR: lua: handle proxy refcount
|
||||||
|
- MINOR: proxy: prevent backend removal when unsupported
|
||||||
|
- MINOR: proxy: prevent deletion of backend referenced by config elements
|
||||||
|
- MINOR: proxy: prevent backend deletion if server still exists in it
|
||||||
|
- MINOR: server: mark backend removal as forbidden if QUIC was used
|
||||||
|
- MINOR: cli: implement wait on be-removable
|
||||||
|
- MINOR: proxy: add comment for defaults_px_ref/unref_all()
|
||||||
|
- MEDIUM: proxy: add lock for global accesses during proxy free
|
||||||
|
- MEDIUM: proxy: add lock for global accesses during default free
|
||||||
|
- MINOR: proxy: use atomic ops for default proxy refcount
|
||||||
|
- MEDIUM: proxy: implement backend deletion
|
||||||
|
- REGTESTS: add a test on "del backend"
|
||||||
|
- REGTESTS: complete "del backend" with unnamed defaults ref free
|
||||||
|
- BUG/MINOR: hlua: fix return with push nil on proxy check
|
||||||
|
- BUG/MEDIUM: stream: Handle TASK_WOKEN_RES as a stream event
|
||||||
|
- MINOR: quic: use signed char type for ALPN manipulation
|
||||||
|
- MINOR: quic/h3: reorganize stream reject after MUX closure
|
||||||
|
- MINOR: mux-quic: add function for ALPN to app-ops conversion
|
||||||
|
- MEDIUM: quic/mux-quic: adjust app-ops install
|
||||||
|
- MINOR: quic: use server cache for ALPN on BE side
|
||||||
|
- BUG/MEDIUM: hpack: correctly deal with too large decoded numbers
|
||||||
|
- BUG/MAJOR: qpack: unchecked length passed to huffman decoder
|
||||||
|
- BUG/MINOR: qpack: fix 1-byte OOB read in qpack_decode_fs_pfx()
|
||||||
|
- BUG/MINOR: quic: fix OOB read in preferred_address transport parameter
|
||||||
|
- BUG/MEDIUM: qpack: correctly deal with too large decoded numbers
|
||||||
|
- BUG/MINOR: hlua: Properly enable/disable line receives from HTTP applet
|
||||||
|
- BUG/MEDIUM: hlua: Fix end of request detection when retrieving payload
|
||||||
|
- BUG/MINOR: hlua: Properly enable/disable receives for TCP applets
|
||||||
|
- MINOR: htx: Add a function to retrieve the HTTP version from a start-line
|
||||||
|
- MINOR: h1-htx: Reports non-HTTP version via dedicated flags
|
||||||
|
- BUG/MINOR: h1-htx: Be sure that H1 response version starts by "HTTP/"
|
||||||
|
- MINOR: http-ana: Save the message version in the http_msg structure
|
||||||
|
- MEDIUM: http-fetch: Rework how HTTP message version is retrieved
|
||||||
|
- MEDIUM: http-ana: Use the version of the opposite side for internal messages
|
||||||
|
- DEBUG: stream: Display the currently running rule in stream dump
|
||||||
|
- MINOR: filters: Use filter API as far as poissible to break loops on filters
|
||||||
|
- MINOR: filters: Set last_entity when a filter fails on stream_start callback
|
||||||
|
- MINOR: stream: Display the currently running filter per channel in stream dump
|
||||||
|
- DOC: config: Use the right alias for %B
|
||||||
|
- BUG/MINOR: channel: Increase the stconn bytes_in value in channel_add_input()
|
||||||
|
- BUG/MINOR: sample: Fix sample to retrieve the number of bytes received and sent
|
||||||
|
- BUG/MINOR: http-ana: Increment scf bytes_out value if an haproxy error is sent
|
||||||
|
- BUG/MAJOR: fcgi: Fix param decoding by properly checking its size
|
||||||
|
- BUG/MAJOR: resolvers: Properly lowered the names found in DNS response
|
||||||
|
- BUG/MEDIUM: mux-fcgi: Use a safe loop to resume each stream eligible for sending
|
||||||
|
- MINOR: mux-fcgi: Use a dedicated function to resume streams eligible for sending
|
||||||
|
- CLEANUP: qpack: simplify length checks in qpack_decode_fs()
|
||||||
|
- MINOR: counters: Introduce COUNTERS_UPDATE_MAX()
|
||||||
|
- MINOR: listeners: Update the frequency counters separately when needed
|
||||||
|
- MINOR: proxies: Update beconn separately
|
||||||
|
- MINOR: stats: Add an option to disable the calculation of max counters
|
||||||
|
|
||||||
|
2026/02/19 : 3.4-dev5
|
||||||
|
- DOC: internals: addd mworker V3 internals
|
||||||
|
- BUG/MINOR: threads: Initialize maxthrpertgroup earlier.
|
||||||
|
- BUG/MEDIUM: threads: Differ checking the max threads per group number
|
||||||
|
- BUG/MINOR: startup: fix allocation error message of progname string
|
||||||
|
- BUG/MINOR: startup: handle a possible strdup() failure
|
||||||
|
- MINOR: cfgparse: validate defaults proxies separately
|
||||||
|
- MINOR: cfgparse: move proxy post-init in a dedicated function
|
||||||
|
- MINOR: proxy: refactor proxy inheritance of a defaults section
|
||||||
|
- MINOR: proxy: refactor mode parsing
|
||||||
|
- MINOR: backend: add function to check support for dynamic servers
|
||||||
|
- MINOR: proxy: define "add backend" handler
|
||||||
|
- MINOR: proxy: parse mode on dynamic backend creation
|
||||||
|
- MINOR: proxy: parse guid on dynamic backend creation
|
||||||
|
- MINOR: proxy: check default proxy compatibility on "add backend"
|
||||||
|
- MEDIUM: proxy: implement dynamic backend creation
|
||||||
|
- MINOR: proxy: assign dynamic proxy ID
|
||||||
|
- REGTESTS: add dynamic backend creation test
|
||||||
|
- BUG/MINOR: proxy: fix clang build error on "add backend" handler
|
||||||
|
- BUG/MINOR: proxy: fix null dereference in "add backend" handler
|
||||||
|
- MINOR: net_helper: extend the ip.fp output with an option presence mask
|
||||||
|
- BUG/MINOR: proxy: fix default ALPN bind settings
|
||||||
|
- CLEANUP: lb-chash: free lb_nodes from chash's deinit(), not global
|
||||||
|
- BUG/MEDIUM: lb-chash: always properly initialize lb_nodes with dynamic servers
|
||||||
|
- CLEANUP: haproxy: fix bad line wrapping in run_poll_loop()
|
||||||
|
- MINOR: activity: support setting/clearing lock/memory watching for task profiling
|
||||||
|
- MEDIUM: activity: apply and use new finegrained task profiling settings
|
||||||
|
- MINOR: activity: allow to switch per-task lock/memory profiling at runtime
|
||||||
|
- MINOR: startup: Add the SSL lib verify directory in haproxy -vv
|
||||||
|
- BUG/MINOR: ssl: SSL_CERT_DIR environment variable doesn't affect haproxy
|
||||||
|
- CLEANUP: initcall: adjust comments to INITCALL{0,1} macros
|
||||||
|
- DOC: proxy-proto: underline the packed attribute for struct pp2_tlv_ssl
|
||||||
|
- MINOR: queues: Check minconn first in srv_dynamic_maxconn()
|
||||||
|
- MINOR: servers: Call process_srv_queue() without lock when possible
|
||||||
|
- BUG/MINOR: quic: ensure handshake speed up is only run once per conn
|
||||||
|
- BUG/MAJOR: quic: reject invalid token
|
||||||
|
- BUG/MAJOR: quic: fix parsing frame type
|
||||||
|
- MINOR: ssl: Missing '\n' in error message
|
||||||
|
- MINOR: jwt: Convert an RSA JWK into an EVP_PKEY
|
||||||
|
- MINOR: jwt: Add new jwt_decrypt_jwk converter
|
||||||
|
- REGTESTS: jwt: Add new "jwt_decrypt_jwk" tests
|
||||||
|
- MINOR: startup: Add HAVE_WORKING_TCP_MD5SIG in haproxy -vv
|
||||||
|
- MINOR: startup: sort the feature list in haproxy -vv
|
||||||
|
- MINOR: startup: show the list of detected features at runtime with haproxy -vv
|
||||||
|
- SCRIPTS: build-vtest: allow to set a TMPDIR and a DESTDIR
|
||||||
|
- MINOR: filters: rework RESUME_FILTER_* macros as inline functions
|
||||||
|
- MINOR: filters: rework filter iteration for channel related callback functions
|
||||||
|
- MEDIUM: filters: use per-channel filter list when relevant
|
||||||
|
- DEV: gdb: add a utility to find the post-mortem address from a core
|
||||||
|
- BUG/MINOR: deviceatlas: add missing return on error in config parsers
|
||||||
|
- BUG/MINOR: deviceatlas: add NULL checks on strdup() results in config parsers
|
||||||
|
- BUG/MEDIUM: deviceatlas: fix resource leaks on init error paths
|
||||||
|
- BUG/MINOR: deviceatlas: fix off-by-one in da_haproxy_conv()
|
||||||
|
- BUG/MINOR: deviceatlas: fix cookie vlen using wrong length after extraction
|
||||||
|
- BUG/MINOR: deviceatlas: fix double-checked locking race in checkinst
|
||||||
|
- BUG/MINOR: deviceatlas: fix resource leak on hot-reload compile failure
|
||||||
|
- BUG/MINOR: deviceatlas: fix deinit to only finalize when initialized
|
||||||
|
- BUG/MINOR: deviceatlas: set cache_size on hot-reloaded atlas instance
|
||||||
|
- MINOR: deviceatlas: check getproptype return and remove pprop indirection
|
||||||
|
- MINOR: deviceatlas: increase DA_MAX_HEADERS and header buffer sizes
|
||||||
|
- MINOR: deviceatlas: define header_evidence_entry in dummy library header
|
||||||
|
- MINOR: deviceatlas: precompute maxhdrlen to skip oversized headers early
|
||||||
|
- CLEANUP: deviceatlas: add unlikely hints and minor code tidying
|
||||||
|
- DEV: gdb: use unsigned longs to display pools memory usage
|
||||||
|
- BUG/MINOR: ssl: lack crtlist_dup_ssl_conf() declaration
|
||||||
|
- BUG/MINOR: ssl: double-free on error path w/ ssl-f-use parser
|
||||||
|
- BUG/MINOR: ssl: fix leak in ssl-f-use parser upon error
|
||||||
|
- BUG/MINOR: ssl: clarify ssl-f-use errors in post-section parsing
|
||||||
|
- BUG/MINOR: ssl: error with ssl-f-use when no "crt"
|
||||||
|
- MEDIUM: backend: make "balance random" consider tg local req rate when loads are equal
|
||||||
|
- BUG/MAJOR: Revert "MEDIUM: mux-quic: add BUG_ON if sending on locally closed QCS"
|
||||||
|
- BUG/MEDIUM: h3: reject frontend CONNECT as currently not implemented
|
||||||
|
- MINOR: mux-quic: add BUG_ON_STRESS() when draining data on closed stream
|
||||||
|
- REGTESTS: fix quoting in feature cmd which prevents test execution
|
||||||
|
- BUG/MEDIUM: mux-h2/quic: Stop sending via fast-forward if stream is closed
|
||||||
|
- BUG/MEDIUM: mux-h1: Stop sending vi fast-forward for unexpected states
|
||||||
|
- BUG/MEDIUM: applet: Fix test on shut flags for legacy applets (v2)
|
||||||
|
- DEV: term-events: Fix hanshake events decoding
|
||||||
|
- BUG/MINOR: flt-trace: Properly compute length of the first DATA block
|
||||||
|
- MINOR: flt-trace: Add an option to limit the amount of data forwarded
|
||||||
|
- CLEANUP: compression: Remove unused static buffers
|
||||||
|
- BUG/MEDIUM: shctx: Use the next block when data exactly filled a block
|
||||||
|
- BUG/MINOR: http-ana: Stop to wait for body on client error/abort
|
||||||
|
- MINOR: stconn: Add missing SC_FL_NO_FASTFWD flag in sc_show_flags
|
||||||
|
- REORG: stconn: Move functions related to channel buffers to sc_strm.h
|
||||||
|
- BUG/MEDIUM: jwe: fix timing side-channel and dead code in JWE decryption
|
||||||
|
- MINOR: tree-wide: Use the buffer size instead of global setting when possible
|
||||||
|
- MINOR: buffers: Swap buffers of same size only
|
||||||
|
- BUG/MINOR: config: Check buffer pool creation for failures
|
||||||
|
- MEDIUM: cache: Don't rely on a chunk to store messages payload
|
||||||
|
- MEDIUM: stream: Limit number of synchronous send per stream wakeup
|
||||||
|
- MEDIUM: compression: Be sure to never compress more than a chunk at once
|
||||||
|
- MEDIUM: mux-h1/mux-h2/mux-fcgi/h3: Disable 0-copy for buffers of different size
|
||||||
|
- MEDIUM: applet: Disable 0-copy for buffers of different size
|
||||||
|
- MINOR: h1-htx: Disable 0-copy for buffers of different size
|
||||||
|
- MEDIUM: stream: Offer buffers of default size only
|
||||||
|
- BUG/MEDIUM: htx: Fix function used to change part of a block value when defrag
|
||||||
|
- MEDIUM: htx: Refactor transfer of htx blocks to merge DATA blocks if possible
|
||||||
|
- MEDIUM: htx: Refactor htx defragmentation to merge data blocks
|
||||||
|
- MEDIUM: htx: Improve detection of fragmented/unordered HTX messages
|
||||||
|
- MINOR: http-ana: Do a defrag on unaligned HTX message when waiting for payload
|
||||||
|
- MINOR: http-fetch: Use pointer to HTX DATA block when retrieving HTX body
|
||||||
|
- MEDIUM: dynbuf: Add a pool for large buffers with a configurable size
|
||||||
|
- MEDIUM: chunk: Add support for large chunks
|
||||||
|
- MEDIUM: stconn: Properly handle large buffers during a receive
|
||||||
|
- MEDIUM: sample: Get chunks with a size dependent on input data when necessary
|
||||||
|
- MEDIUM: http-fetch: Be able to use large chunks when necessary
|
||||||
|
- MINPR: htx: Get large chunk if necessary to perform a defrag
|
||||||
|
- MEDIUM: http-ana: Use a large buffer if necessary when waiting for body
|
||||||
|
- MINOR: dynbuf: Add helpers to know if a buffer is a default or a large buffer
|
||||||
|
- MINOR: config: reject configs using HTTP with large bufsize >= 256 MB
|
||||||
|
- CI: do not use ghcr.io for Quic Interop workflows
|
||||||
|
- BUG/MEDIUM: ssl: SSL backend sessions used after free
|
||||||
|
- CI: vtest: move the vtest2 URL to vinyl-cache.org
|
||||||
|
- CI: github: disable windows.yml by default on unofficials repo
|
||||||
|
- MEDIUM: Add connect/queue/tarpit timeouts to set-timeout
|
||||||
|
- CLEANUP: mux-h1: Remove unneeded null check
|
||||||
|
- DOC: remove openssl no-deprecated CI image
|
||||||
|
- BUG/MINOR: acme: fix X509_NAME leak when X509_set_issuer_name() fails
|
||||||
|
- BUG/MINOR: backend: check delay MUX before conn_prepare()
|
||||||
|
- OPTIM: backend: reduce contention when checking MUX init with ALPN
|
||||||
|
- DOC: configuration: add the ACME wiki page link
|
||||||
|
- MINOR: ssl/ckch: Move EVP_PKEY and cert code generation from acme
|
||||||
|
- MINOR: ssl/ckch: certificates generation from "load" "crt-store" directive
|
||||||
|
- MINOR: trace: add definitions for haterm streams
|
||||||
|
- MINOR: init: allow a fileless init mode
|
||||||
|
- MEDIUM: init: allow the redefinition of argv[] parsing function
|
||||||
|
- MINOR: stconn: stream instantiation from proxy callback
|
||||||
|
- MINOR: haterm: add haterm HTTP server
|
||||||
|
- MINOR: haterm: new "haterm" utility
|
||||||
|
- MINOR: haterm: increase thread-local pool size
|
||||||
|
- BUG/MEDIUM: stats-file: fix shm-stats-file recover when all process slots are full
|
||||||
|
- BUG/MINOR: stats-file: manipulate shm-stats-file heartbeat using unsigned int
|
||||||
|
- BUG/MEDIUM: stats-file: detect and fix inconsistent shared clock when resuming from shm-stats-file
|
||||||
|
- CI: github: only enable OS X on development branches
|
||||||
|
|
||||||
|
2026/02/04 : 3.4-dev4
|
||||||
|
- BUG/MEDIUM: hlua: fix invalid lua_pcall() usage in hlua_traceback()
|
||||||
|
- BUG/MINOR: hlua: consume error object if ignored after a failing lua_pcall()
|
||||||
|
- BUG/MINOR: promex: Detach promex from the server on error dump its metrics dump
|
||||||
|
- BUG/MEDIUM: mux-h1: Skip UNUSED htx block when formating the start line
|
||||||
|
- BUG/MINOR: proto_tcp: Properly report support for HAVE_TCP_MD5SIG feature
|
||||||
|
- BUG/MINOR: config: check capture pool creations for failures
|
||||||
|
- BUG/MINOR: stick-tables: abort startup on stk_ctr pool creation failure
|
||||||
|
- MEDIUM: pools: better check for size rounding overflow on registration
|
||||||
|
- DOC: reg-tests: update VTest upstream link in the starting guide
|
||||||
|
- BUG/MINOR: ssl: Properly manage alloc failures in SSL passphrase callback
|
||||||
|
- BUG/MINOR: ssl: Encrypted keys could not be loaded when given alongside certificate
|
||||||
|
- MINOR: ssl: display libssl errors on private key loading
|
||||||
|
- BUG/MAJOR: applet: Don't call I/O handler if the applet was shut
|
||||||
|
- MINOR: ssl: allow to disable certificate compression
|
||||||
|
- BUG/MINOR: ssl: fix error message of tune.ssl.certificate-compression
|
||||||
|
- DOC: config: mention some possible TLS versions restrictions for kTLS
|
||||||
|
- OPTIM: server: move queueslength in server struct
|
||||||
|
- OPTIM: proxy: separate queues fields from served
|
||||||
|
- OPTIM: server: get rid of the last use of _ha_barrier_full()
|
||||||
|
- DOC: config: mention that idle connection sharing is per thread-group
|
||||||
|
- MEDIUM: h1: strictly verify quoting in chunk extensions
|
||||||
|
- BUG/MINOR: config/ssl: fix spelling of "expose-experimental-directives"
|
||||||
|
- BUG/MEDIUM: ssl: fix msg callbacks on QUIC connections
|
||||||
|
- MEDIUM: ssl: remove connection from msg callback args
|
||||||
|
- MEDIUM: ssl: porting to X509_STORE_get1_objects() for OpenSSL 4.0
|
||||||
|
- REGTESTS: ssl: make reg-tests compatible with OpenSSL 4.0
|
||||||
|
- DOC: internals: cleanup few typos in master-worker documentation
|
||||||
|
- BUG/MEDIUM: applet: Fix test on shut flags for legacy applets
|
||||||
|
- MINOR: quic: Fix build with USE_QUIC_OPENSSL_COMPAT
|
||||||
|
- MEDIUM: tcpcheck: add post-80 option for mysql-check to support MySQL 8.x
|
||||||
|
- BUG/MEDIUM: threads: Atomically set TH_FL_SLEEPING and clr FL_NOTIFIED
|
||||||
|
- BUG/MINOR: cpu-topo: count cores not cpus to distinguish core types
|
||||||
|
- DOC: config: mention the limitation on server id range for consistent hash
|
||||||
|
- MEDIUM: backend: make "balance random" consider req rate when loads are equal
|
||||||
|
- BUG/MINOR: config: Fix setting of alt_proto
|
||||||
|
|
||||||
2026/01/22 : 3.4-dev3
|
2026/01/22 : 3.4-dev3
|
||||||
- BUILD: ssl: strchr definition changed in C23
|
- BUILD: ssl: strchr definition changed in C23
|
||||||
- BUILD: tools: memchr definition changed in C23
|
- BUILD: tools: memchr definition changed in C23
|
||||||
|
|
|
||||||
15
Makefile
15
Makefile
|
|
@ -956,6 +956,7 @@ endif # obsolete targets
|
||||||
endif # TARGET
|
endif # TARGET
|
||||||
|
|
||||||
OBJS =
|
OBJS =
|
||||||
|
HATERM_OBJS =
|
||||||
|
|
||||||
ifneq ($(EXTRA_OBJS),)
|
ifneq ($(EXTRA_OBJS),)
|
||||||
OBJS += $(EXTRA_OBJS)
|
OBJS += $(EXTRA_OBJS)
|
||||||
|
|
@ -1003,12 +1004,14 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
|
||||||
src/http_acl.o src/dict.o src/dgram.o src/pipe.o \
|
src/http_acl.o src/dict.o src/dgram.o src/pipe.o \
|
||||||
src/hpack-huff.o src/hpack-enc.o src/ebtree.o src/hash.o \
|
src/hpack-huff.o src/hpack-enc.o src/ebtree.o src/hash.o \
|
||||||
src/httpclient_cli.o src/version.o src/ncbmbuf.o src/ech.o \
|
src/httpclient_cli.o src/version.o src/ncbmbuf.o src/ech.o \
|
||||||
src/cfgparse-peers.o
|
src/cfgparse-peers.o src/haterm.o
|
||||||
|
|
||||||
ifneq ($(TRACE),)
|
ifneq ($(TRACE),)
|
||||||
OBJS += src/calltrace.o
|
OBJS += src/calltrace.o
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
HATERM_OBJS += $(OBJS) src/haterm_init.o
|
||||||
|
|
||||||
# Used only for forced dependency checking. May be cleared during development.
|
# Used only for forced dependency checking. May be cleared during development.
|
||||||
INCLUDES = $(wildcard include/*/*.h)
|
INCLUDES = $(wildcard include/*/*.h)
|
||||||
DEP = $(INCLUDES) .build_opts
|
DEP = $(INCLUDES) .build_opts
|
||||||
|
|
@ -1040,7 +1043,7 @@ IGNORE_OPTS=help install install-man install-doc install-bin \
|
||||||
uninstall clean tags cscope tar git-tar version update-version \
|
uninstall clean tags cscope tar git-tar version update-version \
|
||||||
opts reg-tests reg-tests-help unit-tests admin/halog/halog dev/flags/flags \
|
opts reg-tests reg-tests-help unit-tests admin/halog/halog dev/flags/flags \
|
||||||
dev/haring/haring dev/ncpu/ncpu dev/poll/poll dev/tcploop/tcploop \
|
dev/haring/haring dev/ncpu/ncpu dev/poll/poll dev/tcploop/tcploop \
|
||||||
dev/term_events/term_events
|
dev/term_events/term_events dev/gdb/pm-from-core
|
||||||
|
|
||||||
ifneq ($(TARGET),)
|
ifneq ($(TARGET),)
|
||||||
ifeq ($(filter $(firstword $(MAKECMDGOALS)),$(IGNORE_OPTS)),)
|
ifeq ($(filter $(firstword $(MAKECMDGOALS)),$(IGNORE_OPTS)),)
|
||||||
|
|
@ -1056,6 +1059,9 @@ endif # non-empty target
|
||||||
haproxy: $(OPTIONS_OBJS) $(OBJS)
|
haproxy: $(OPTIONS_OBJS) $(OBJS)
|
||||||
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
||||||
|
|
||||||
|
haterm: $(OPTIONS_OBJS) $(HATERM_OBJS)
|
||||||
|
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
||||||
|
|
||||||
objsize: haproxy
|
objsize: haproxy
|
||||||
$(Q)objdump -t $^|grep ' g '|grep -F '.text'|awk '{print $$5 FS $$6}'|sort
|
$(Q)objdump -t $^|grep ' g '|grep -F '.text'|awk '{print $$5 FS $$6}'|sort
|
||||||
|
|
||||||
|
|
@ -1071,6 +1077,9 @@ admin/dyncookie/dyncookie: admin/dyncookie/dyncookie.o
|
||||||
dev/flags/flags: dev/flags/flags.o
|
dev/flags/flags: dev/flags/flags.o
|
||||||
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
||||||
|
|
||||||
|
dev/gdb/pm-from-core: dev/gdb/pm-from-core.o
|
||||||
|
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
||||||
|
|
||||||
dev/haring/haring: dev/haring/haring.o
|
dev/haring/haring: dev/haring/haring.o
|
||||||
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
||||||
|
|
||||||
|
|
@ -1169,7 +1178,7 @@ distclean: clean
|
||||||
$(Q)rm -f admin/dyncookie/dyncookie
|
$(Q)rm -f admin/dyncookie/dyncookie
|
||||||
$(Q)rm -f dev/haring/haring dev/ncpu/ncpu{,.so} dev/poll/poll dev/tcploop/tcploop
|
$(Q)rm -f dev/haring/haring dev/ncpu/ncpu{,.so} dev/poll/poll dev/tcploop/tcploop
|
||||||
$(Q)rm -f dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
|
$(Q)rm -f dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
|
||||||
$(Q)rm -f dev/qpack/decode
|
$(Q)rm -f dev/qpack/decode dev/gdb/pm-from-core
|
||||||
|
|
||||||
tags:
|
tags:
|
||||||
$(Q)find src include \( -name '*.c' -o -name '*.h' \) -print0 | \
|
$(Q)find src include \( -name '*.c' -o -name '*.h' \) -print0 | \
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,6 @@
|
||||||
|
|
||||||
[](https://github.com/haproxy/haproxy/actions/workflows/musl.yml)
|
[](https://github.com/haproxy/haproxy/actions/workflows/musl.yml)
|
||||||
[](https://github.com/haproxy/haproxy/actions/workflows/aws-lc.yml)
|
[](https://github.com/haproxy/haproxy/actions/workflows/aws-lc.yml)
|
||||||
[](https://github.com/haproxy/haproxy/actions/workflows/openssl-nodeprecated.yml)
|
|
||||||
[](https://github.com/haproxy/haproxy/actions/workflows/illumos.yml)
|
[](https://github.com/haproxy/haproxy/actions/workflows/illumos.yml)
|
||||||
[](https://github.com/haproxy/haproxy/actions/workflows/netbsd.yml)
|
[](https://github.com/haproxy/haproxy/actions/workflows/netbsd.yml)
|
||||||
[](https://cirrus-ci.com/github/haproxy/haproxy/)
|
[](https://cirrus-ci.com/github/haproxy/haproxy/)
|
||||||
|
|
|
||||||
2
VERDATE
2
VERDATE
|
|
@ -1,2 +1,2 @@
|
||||||
$Format:%ci$
|
$Format:%ci$
|
||||||
2026/01/22
|
2026/03/05
|
||||||
|
|
|
||||||
2
VERSION
2
VERSION
|
|
@ -1 +1 @@
|
||||||
3.4-dev3
|
3.4-dev6
|
||||||
|
|
|
||||||
|
|
@ -31,6 +31,7 @@ static struct {
|
||||||
da_atlas_t atlas;
|
da_atlas_t atlas;
|
||||||
da_evidence_id_t useragentid;
|
da_evidence_id_t useragentid;
|
||||||
da_severity_t loglevel;
|
da_severity_t loglevel;
|
||||||
|
size_t maxhdrlen;
|
||||||
char separator;
|
char separator;
|
||||||
unsigned char daset:1;
|
unsigned char daset:1;
|
||||||
} global_deviceatlas = {
|
} global_deviceatlas = {
|
||||||
|
|
@ -42,6 +43,7 @@ static struct {
|
||||||
.atlasmap = NULL,
|
.atlasmap = NULL,
|
||||||
.atlasfd = -1,
|
.atlasfd = -1,
|
||||||
.useragentid = 0,
|
.useragentid = 0,
|
||||||
|
.maxhdrlen = 0,
|
||||||
.daset = 0,
|
.daset = 0,
|
||||||
.separator = '|',
|
.separator = '|',
|
||||||
};
|
};
|
||||||
|
|
@ -57,6 +59,10 @@ static int da_json_file(char **args, int section_type, struct proxy *curpx,
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
global_deviceatlas.jsonpath = strdup(args[1]);
|
global_deviceatlas.jsonpath = strdup(args[1]);
|
||||||
|
if (unlikely(global_deviceatlas.jsonpath == NULL)) {
|
||||||
|
memprintf(err, "deviceatlas json file : out of memory.\n");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -73,6 +79,7 @@ static int da_log_level(char **args, int section_type, struct proxy *curpx,
|
||||||
loglevel = atol(args[1]);
|
loglevel = atol(args[1]);
|
||||||
if (loglevel < 0 || loglevel > 3) {
|
if (loglevel < 0 || loglevel > 3) {
|
||||||
memprintf(err, "deviceatlas log level : expects a log level between 0 and 3, %s given.\n", args[1]);
|
memprintf(err, "deviceatlas log level : expects a log level between 0 and 3, %s given.\n", args[1]);
|
||||||
|
return -1;
|
||||||
} else {
|
} else {
|
||||||
global_deviceatlas.loglevel = (da_severity_t)loglevel;
|
global_deviceatlas.loglevel = (da_severity_t)loglevel;
|
||||||
}
|
}
|
||||||
|
|
@ -101,6 +108,10 @@ static int da_properties_cookie(char **args, int section_type, struct proxy *cur
|
||||||
return -1;
|
return -1;
|
||||||
} else {
|
} else {
|
||||||
global_deviceatlas.cookiename = strdup(args[1]);
|
global_deviceatlas.cookiename = strdup(args[1]);
|
||||||
|
if (unlikely(global_deviceatlas.cookiename == NULL)) {
|
||||||
|
memprintf(err, "deviceatlas cookie name : out of memory.\n");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
global_deviceatlas.cookienamelen = strlen(global_deviceatlas.cookiename);
|
global_deviceatlas.cookienamelen = strlen(global_deviceatlas.cookiename);
|
||||||
return 0;
|
return 0;
|
||||||
|
|
@ -119,6 +130,7 @@ static int da_cache_size(char **args, int section_type, struct proxy *curpx,
|
||||||
cachesize = atol(args[1]);
|
cachesize = atol(args[1]);
|
||||||
if (cachesize < 0 || cachesize > DA_CACHE_MAX) {
|
if (cachesize < 0 || cachesize > DA_CACHE_MAX) {
|
||||||
memprintf(err, "deviceatlas cache size : expects a cache size between 0 and %d, %s given.\n", DA_CACHE_MAX, args[1]);
|
memprintf(err, "deviceatlas cache size : expects a cache size between 0 and %d, %s given.\n", DA_CACHE_MAX, args[1]);
|
||||||
|
return -1;
|
||||||
} else {
|
} else {
|
||||||
#ifdef APINOCACHE
|
#ifdef APINOCACHE
|
||||||
fprintf(stdout, "deviceatlas cache size : no-op, its support is disabled.\n");
|
fprintf(stdout, "deviceatlas cache size : no-op, its support is disabled.\n");
|
||||||
|
|
@ -165,7 +177,7 @@ static int init_deviceatlas(void)
|
||||||
da_status_t status;
|
da_status_t status;
|
||||||
|
|
||||||
jsonp = fopen(global_deviceatlas.jsonpath, "r");
|
jsonp = fopen(global_deviceatlas.jsonpath, "r");
|
||||||
if (jsonp == 0) {
|
if (unlikely(jsonp == 0)) {
|
||||||
ha_alert("deviceatlas : '%s' json file has invalid path or is not readable.\n",
|
ha_alert("deviceatlas : '%s' json file has invalid path or is not readable.\n",
|
||||||
global_deviceatlas.jsonpath);
|
global_deviceatlas.jsonpath);
|
||||||
err_code |= ERR_ALERT | ERR_FATAL;
|
err_code |= ERR_ALERT | ERR_FATAL;
|
||||||
|
|
@ -177,9 +189,11 @@ static int init_deviceatlas(void)
|
||||||
status = da_atlas_compile(jsonp, da_haproxy_read, da_haproxy_seek,
|
status = da_atlas_compile(jsonp, da_haproxy_read, da_haproxy_seek,
|
||||||
&global_deviceatlas.atlasimgptr, &atlasimglen);
|
&global_deviceatlas.atlasimgptr, &atlasimglen);
|
||||||
fclose(jsonp);
|
fclose(jsonp);
|
||||||
if (status != DA_OK) {
|
if (unlikely(status != DA_OK)) {
|
||||||
ha_alert("deviceatlas : '%s' json file is invalid.\n",
|
ha_alert("deviceatlas : '%s' json file is invalid.\n",
|
||||||
global_deviceatlas.jsonpath);
|
global_deviceatlas.jsonpath);
|
||||||
|
free(global_deviceatlas.atlasimgptr);
|
||||||
|
da_fini();
|
||||||
err_code |= ERR_ALERT | ERR_FATAL;
|
err_code |= ERR_ALERT | ERR_FATAL;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
@ -187,8 +201,10 @@ static int init_deviceatlas(void)
|
||||||
status = da_atlas_open(&global_deviceatlas.atlas, extraprops,
|
status = da_atlas_open(&global_deviceatlas.atlas, extraprops,
|
||||||
global_deviceatlas.atlasimgptr, atlasimglen);
|
global_deviceatlas.atlasimgptr, atlasimglen);
|
||||||
|
|
||||||
if (status != DA_OK) {
|
if (unlikely(status != DA_OK)) {
|
||||||
ha_alert("deviceatlas : data could not be compiled.\n");
|
ha_alert("deviceatlas : data could not be compiled.\n");
|
||||||
|
free(global_deviceatlas.atlasimgptr);
|
||||||
|
da_fini();
|
||||||
err_code |= ERR_ALERT | ERR_FATAL;
|
err_code |= ERR_ALERT | ERR_FATAL;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
@ -197,11 +213,28 @@ static int init_deviceatlas(void)
|
||||||
|
|
||||||
if (global_deviceatlas.cookiename == 0) {
|
if (global_deviceatlas.cookiename == 0) {
|
||||||
global_deviceatlas.cookiename = strdup(DA_COOKIENAME_DEFAULT);
|
global_deviceatlas.cookiename = strdup(DA_COOKIENAME_DEFAULT);
|
||||||
|
if (unlikely(global_deviceatlas.cookiename == NULL)) {
|
||||||
|
ha_alert("deviceatlas : out of memory.\n");
|
||||||
|
da_atlas_close(&global_deviceatlas.atlas);
|
||||||
|
free(global_deviceatlas.atlasimgptr);
|
||||||
|
da_fini();
|
||||||
|
err_code |= ERR_ALERT | ERR_FATAL;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
global_deviceatlas.cookienamelen = strlen(global_deviceatlas.cookiename);
|
global_deviceatlas.cookienamelen = strlen(global_deviceatlas.cookiename);
|
||||||
}
|
}
|
||||||
|
|
||||||
global_deviceatlas.useragentid = da_atlas_header_evidence_id(&global_deviceatlas.atlas,
|
global_deviceatlas.useragentid = da_atlas_header_evidence_id(&global_deviceatlas.atlas,
|
||||||
"user-agent");
|
"user-agent");
|
||||||
|
{
|
||||||
|
size_t hi;
|
||||||
|
global_deviceatlas.maxhdrlen = 16;
|
||||||
|
for (hi = 0; hi < global_deviceatlas.atlas.header_evidence_count; hi++) {
|
||||||
|
size_t nl = strlen(global_deviceatlas.atlas.header_priorities[hi].name);
|
||||||
|
if (nl > global_deviceatlas.maxhdrlen)
|
||||||
|
global_deviceatlas.maxhdrlen = nl;
|
||||||
|
}
|
||||||
|
}
|
||||||
if ((global_deviceatlas.atlasfd = shm_open(ATLASMAPNM, O_RDWR, 0660)) != -1) {
|
if ((global_deviceatlas.atlasfd = shm_open(ATLASMAPNM, O_RDWR, 0660)) != -1) {
|
||||||
global_deviceatlas.atlasmap = mmap(NULL, ATLASTOKSZ, PROT_READ | PROT_WRITE, MAP_SHARED, global_deviceatlas.atlasfd, 0);
|
global_deviceatlas.atlasmap = mmap(NULL, ATLASTOKSZ, PROT_READ | PROT_WRITE, MAP_SHARED, global_deviceatlas.atlasfd, 0);
|
||||||
if (global_deviceatlas.atlasmap == MAP_FAILED) {
|
if (global_deviceatlas.atlasmap == MAP_FAILED) {
|
||||||
|
|
@ -231,15 +264,13 @@ static void deinit_deviceatlas(void)
|
||||||
free(global_deviceatlas.cookiename);
|
free(global_deviceatlas.cookiename);
|
||||||
da_atlas_close(&global_deviceatlas.atlas);
|
da_atlas_close(&global_deviceatlas.atlas);
|
||||||
free(global_deviceatlas.atlasimgptr);
|
free(global_deviceatlas.atlasimgptr);
|
||||||
|
da_fini();
|
||||||
}
|
}
|
||||||
|
|
||||||
if (global_deviceatlas.atlasfd != -1) {
|
if (global_deviceatlas.atlasfd != -1) {
|
||||||
munmap(global_deviceatlas.atlasmap, ATLASTOKSZ);
|
munmap(global_deviceatlas.atlasmap, ATLASTOKSZ);
|
||||||
close(global_deviceatlas.atlasfd);
|
close(global_deviceatlas.atlasfd);
|
||||||
shm_unlink(ATLASMAPNM);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
da_fini();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void da_haproxy_checkinst(void)
|
static void da_haproxy_checkinst(void)
|
||||||
|
|
@ -258,6 +289,10 @@ static void da_haproxy_checkinst(void)
|
||||||
da_property_decl_t extraprops[1] = {{NULL, 0}};
|
da_property_decl_t extraprops[1] = {{NULL, 0}};
|
||||||
#ifdef USE_THREAD
|
#ifdef USE_THREAD
|
||||||
HA_SPIN_LOCK(OTHER_LOCK, &dadwsch_lock);
|
HA_SPIN_LOCK(OTHER_LOCK, &dadwsch_lock);
|
||||||
|
if (base[0] == 0) {
|
||||||
|
HA_SPIN_UNLOCK(OTHER_LOCK, &dadwsch_lock);
|
||||||
|
return;
|
||||||
|
}
|
||||||
#endif
|
#endif
|
||||||
strlcpy2(atlasp, base + sizeof(char), sizeof(atlasp));
|
strlcpy2(atlasp, base + sizeof(char), sizeof(atlasp));
|
||||||
jsonp = fopen(atlasp, "r");
|
jsonp = fopen(atlasp, "r");
|
||||||
|
|
@ -275,10 +310,20 @@ static void da_haproxy_checkinst(void)
|
||||||
fclose(jsonp);
|
fclose(jsonp);
|
||||||
if (status == DA_OK) {
|
if (status == DA_OK) {
|
||||||
if (da_atlas_open(&inst, extraprops, cnew, atlassz) == DA_OK) {
|
if (da_atlas_open(&inst, extraprops, cnew, atlassz) == DA_OK) {
|
||||||
|
inst.config.cache_size = global_deviceatlas.cachesize;
|
||||||
da_atlas_close(&global_deviceatlas.atlas);
|
da_atlas_close(&global_deviceatlas.atlas);
|
||||||
free(global_deviceatlas.atlasimgptr);
|
free(global_deviceatlas.atlasimgptr);
|
||||||
global_deviceatlas.atlasimgptr = cnew;
|
global_deviceatlas.atlasimgptr = cnew;
|
||||||
global_deviceatlas.atlas = inst;
|
global_deviceatlas.atlas = inst;
|
||||||
|
{
|
||||||
|
size_t hi;
|
||||||
|
global_deviceatlas.maxhdrlen = 16;
|
||||||
|
for (hi = 0; hi < inst.header_evidence_count; hi++) {
|
||||||
|
size_t nl = strlen(inst.header_priorities[hi].name);
|
||||||
|
if (nl > global_deviceatlas.maxhdrlen)
|
||||||
|
global_deviceatlas.maxhdrlen = nl;
|
||||||
|
}
|
||||||
|
}
|
||||||
base[0] = 0;
|
base[0] = 0;
|
||||||
ha_notice("deviceatlas : new instance, data file date `%s`.\n",
|
ha_notice("deviceatlas : new instance, data file date `%s`.\n",
|
||||||
da_getdatacreationiso8601(&global_deviceatlas.atlas));
|
da_getdatacreationiso8601(&global_deviceatlas.atlas));
|
||||||
|
|
@ -286,6 +331,8 @@ static void da_haproxy_checkinst(void)
|
||||||
ha_alert("deviceatlas : instance update failed.\n");
|
ha_alert("deviceatlas : instance update failed.\n");
|
||||||
free(cnew);
|
free(cnew);
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
free(cnew);
|
||||||
}
|
}
|
||||||
#ifdef USE_THREAD
|
#ifdef USE_THREAD
|
||||||
HA_SPIN_UNLOCK(OTHER_LOCK, &dadwsch_lock);
|
HA_SPIN_UNLOCK(OTHER_LOCK, &dadwsch_lock);
|
||||||
|
|
@ -297,7 +344,7 @@ static void da_haproxy_checkinst(void)
|
||||||
static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_t *devinfo)
|
static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_t *devinfo)
|
||||||
{
|
{
|
||||||
struct buffer *tmp;
|
struct buffer *tmp;
|
||||||
da_propid_t prop, *pprop;
|
da_propid_t prop;
|
||||||
da_status_t status;
|
da_status_t status;
|
||||||
da_type_t proptype;
|
da_type_t proptype;
|
||||||
const char *propname;
|
const char *propname;
|
||||||
|
|
@ -317,13 +364,15 @@ static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_
|
||||||
chunk_appendf(tmp, "%c", global_deviceatlas.separator);
|
chunk_appendf(tmp, "%c", global_deviceatlas.separator);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
pprop = ∝
|
if (unlikely(da_atlas_getproptype(&global_deviceatlas.atlas, prop, &proptype) != DA_OK)) {
|
||||||
da_atlas_getproptype(&global_deviceatlas.atlas, *pprop, &proptype);
|
chunk_appendf(tmp, "%c", global_deviceatlas.separator);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
switch (proptype) {
|
switch (proptype) {
|
||||||
case DA_TYPE_BOOLEAN: {
|
case DA_TYPE_BOOLEAN: {
|
||||||
bool val;
|
bool val;
|
||||||
status = da_getpropboolean(devinfo, *pprop, &val);
|
status = da_getpropboolean(devinfo, prop, &val);
|
||||||
if (status == DA_OK) {
|
if (status == DA_OK) {
|
||||||
chunk_appendf(tmp, "%d", val);
|
chunk_appendf(tmp, "%d", val);
|
||||||
}
|
}
|
||||||
|
|
@ -332,7 +381,7 @@ static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_
|
||||||
case DA_TYPE_INTEGER:
|
case DA_TYPE_INTEGER:
|
||||||
case DA_TYPE_NUMBER: {
|
case DA_TYPE_NUMBER: {
|
||||||
long val;
|
long val;
|
||||||
status = da_getpropinteger(devinfo, *pprop, &val);
|
status = da_getpropinteger(devinfo, prop, &val);
|
||||||
if (status == DA_OK) {
|
if (status == DA_OK) {
|
||||||
chunk_appendf(tmp, "%ld", val);
|
chunk_appendf(tmp, "%ld", val);
|
||||||
}
|
}
|
||||||
|
|
@ -340,7 +389,7 @@ static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_
|
||||||
}
|
}
|
||||||
case DA_TYPE_STRING: {
|
case DA_TYPE_STRING: {
|
||||||
const char *val;
|
const char *val;
|
||||||
status = da_getpropstring(devinfo, *pprop, &val);
|
status = da_getpropstring(devinfo, prop, &val);
|
||||||
if (status == DA_OK) {
|
if (status == DA_OK) {
|
||||||
chunk_appendf(tmp, "%s", val);
|
chunk_appendf(tmp, "%s", val);
|
||||||
}
|
}
|
||||||
|
|
@ -371,29 +420,26 @@ static int da_haproxy_conv(const struct arg *args, struct sample *smp, void *pri
|
||||||
{
|
{
|
||||||
da_deviceinfo_t devinfo;
|
da_deviceinfo_t devinfo;
|
||||||
da_status_t status;
|
da_status_t status;
|
||||||
const char *useragent;
|
char useragentbuf[1024];
|
||||||
char useragentbuf[1024] = { 0 };
|
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
if (global_deviceatlas.daset == 0 || smp->data.u.str.data == 0) {
|
if (unlikely(global_deviceatlas.daset == 0) || smp->data.u.str.data == 0) {
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
da_haproxy_checkinst();
|
da_haproxy_checkinst();
|
||||||
|
|
||||||
i = smp->data.u.str.data > sizeof(useragentbuf) ? sizeof(useragentbuf) : smp->data.u.str.data;
|
i = smp->data.u.str.data > sizeof(useragentbuf) - 1 ? sizeof(useragentbuf) - 1 : smp->data.u.str.data;
|
||||||
memcpy(useragentbuf, smp->data.u.str.area, i - 1);
|
memcpy(useragentbuf, smp->data.u.str.area, i);
|
||||||
useragentbuf[i - 1] = 0;
|
useragentbuf[i] = 0;
|
||||||
|
|
||||||
useragent = (const char *)useragentbuf;
|
|
||||||
|
|
||||||
status = da_search(&global_deviceatlas.atlas, &devinfo,
|
status = da_search(&global_deviceatlas.atlas, &devinfo,
|
||||||
global_deviceatlas.useragentid, useragent, 0);
|
global_deviceatlas.useragentid, useragentbuf, 0);
|
||||||
|
|
||||||
return status != DA_OK ? 0 : da_haproxy(args, smp, &devinfo);
|
return status != DA_OK ? 0 : da_haproxy(args, smp, &devinfo);
|
||||||
}
|
}
|
||||||
|
|
||||||
#define DA_MAX_HEADERS 24
|
#define DA_MAX_HEADERS 32
|
||||||
|
|
||||||
static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const char *kw, void *private)
|
static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const char *kw, void *private)
|
||||||
{
|
{
|
||||||
|
|
@ -403,10 +449,10 @@ static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const ch
|
||||||
struct channel *chn;
|
struct channel *chn;
|
||||||
struct htx *htx;
|
struct htx *htx;
|
||||||
struct htx_blk *blk;
|
struct htx_blk *blk;
|
||||||
char vbuf[DA_MAX_HEADERS][1024] = {{ 0 }};
|
char vbuf[DA_MAX_HEADERS][1024];
|
||||||
int i, nbh = 0;
|
int i, nbh = 0;
|
||||||
|
|
||||||
if (global_deviceatlas.daset == 0) {
|
if (unlikely(global_deviceatlas.daset == 0)) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -414,18 +460,17 @@ static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const ch
|
||||||
|
|
||||||
chn = (smp->strm ? &smp->strm->req : NULL);
|
chn = (smp->strm ? &smp->strm->req : NULL);
|
||||||
htx = smp_prefetch_htx(smp, chn, NULL, 1);
|
htx = smp_prefetch_htx(smp, chn, NULL, 1);
|
||||||
if (!htx)
|
if (unlikely(!htx))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
i = 0;
|
|
||||||
for (blk = htx_get_first_blk(htx); nbh < DA_MAX_HEADERS && blk; blk = htx_get_next_blk(htx, blk)) {
|
for (blk = htx_get_first_blk(htx); nbh < DA_MAX_HEADERS && blk; blk = htx_get_next_blk(htx, blk)) {
|
||||||
size_t vlen;
|
size_t vlen;
|
||||||
char *pval;
|
char *pval;
|
||||||
da_evidence_id_t evid;
|
da_evidence_id_t evid;
|
||||||
enum htx_blk_type type;
|
enum htx_blk_type type;
|
||||||
struct ist n, v;
|
struct ist n, v;
|
||||||
char hbuf[24] = { 0 };
|
char hbuf[64];
|
||||||
char tval[1024] = { 0 };
|
char tval[1024];
|
||||||
|
|
||||||
type = htx_get_blk_type(blk);
|
type = htx_get_blk_type(blk);
|
||||||
|
|
||||||
|
|
@ -438,20 +483,18 @@ static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const ch
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* The HTTP headers used by the DeviceAtlas API are not longer */
|
if (n.len > global_deviceatlas.maxhdrlen || n.len >= sizeof(hbuf)) {
|
||||||
if (n.len >= sizeof(hbuf)) {
|
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
memcpy(hbuf, n.ptr, n.len);
|
memcpy(hbuf, n.ptr, n.len);
|
||||||
hbuf[n.len] = 0;
|
hbuf[n.len] = 0;
|
||||||
pval = v.ptr;
|
|
||||||
vlen = v.len;
|
|
||||||
evid = -1;
|
evid = -1;
|
||||||
i = v.len > sizeof(tval) - 1 ? sizeof(tval) - 1 : v.len;
|
i = v.len > sizeof(tval) - 1 ? sizeof(tval) - 1 : v.len;
|
||||||
memcpy(tval, v.ptr, i);
|
memcpy(tval, v.ptr, i);
|
||||||
tval[i] = 0;
|
tval[i] = 0;
|
||||||
pval = tval;
|
pval = tval;
|
||||||
|
vlen = i;
|
||||||
|
|
||||||
if (strcasecmp(hbuf, "Accept-Language") == 0) {
|
if (strcasecmp(hbuf, "Accept-Language") == 0) {
|
||||||
evid = da_atlas_accept_language_evidence_id(&global_deviceatlas.atlas);
|
evid = da_atlas_accept_language_evidence_id(&global_deviceatlas.atlas);
|
||||||
|
|
@ -469,7 +512,7 @@ static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const ch
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
vlen -= global_deviceatlas.cookienamelen - 1;
|
vlen = pl;
|
||||||
pval = p;
|
pval = p;
|
||||||
evid = da_atlas_clientprop_evidence_id(&global_deviceatlas.atlas);
|
evid = da_atlas_clientprop_evidence_id(&global_deviceatlas.atlas);
|
||||||
} else {
|
} else {
|
||||||
|
|
|
||||||
|
|
@ -141,6 +141,11 @@ enum {
|
||||||
DA_INITIAL_MEMORY_ESTIMATE = 1024 * 1024 * 14
|
DA_INITIAL_MEMORY_ESTIMATE = 1024 * 1024 * 14
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct header_evidence_entry {
|
||||||
|
const char *name;
|
||||||
|
da_evidence_id_t id;
|
||||||
|
};
|
||||||
|
|
||||||
struct da_config {
|
struct da_config {
|
||||||
unsigned int cache_size;
|
unsigned int cache_size;
|
||||||
unsigned int __reserved[15]; /* enough reserved keywords for future use */
|
unsigned int __reserved[15]; /* enough reserved keywords for future use */
|
||||||
|
|
|
||||||
|
|
@ -36,6 +36,7 @@
|
||||||
#include <haproxy/stats.h>
|
#include <haproxy/stats.h>
|
||||||
#include <haproxy/stconn.h>
|
#include <haproxy/stconn.h>
|
||||||
#include <haproxy/stream.h>
|
#include <haproxy/stream.h>
|
||||||
|
#include <haproxy/stress.h>
|
||||||
#include <haproxy/task.h>
|
#include <haproxy/task.h>
|
||||||
#include <haproxy/tools.h>
|
#include <haproxy/tools.h>
|
||||||
#include <haproxy/version.h>
|
#include <haproxy/version.h>
|
||||||
|
|
@ -82,6 +83,7 @@ struct promex_ctx {
|
||||||
unsigned field_num; /* current field number (ST_I_PX_* etc) */
|
unsigned field_num; /* current field number (ST_I_PX_* etc) */
|
||||||
unsigned mod_field_num; /* first field number of the current module (ST_I_PX_* etc) */
|
unsigned mod_field_num; /* first field number of the current module (ST_I_PX_* etc) */
|
||||||
int obj_state; /* current state among PROMEX_{FRONT|BACK|SRV|LI}_STATE_* */
|
int obj_state; /* current state among PROMEX_{FRONT|BACK|SRV|LI}_STATE_* */
|
||||||
|
struct watcher px_watch; /* watcher to automatically update next pointer */
|
||||||
struct watcher srv_watch; /* watcher to automatically update next pointer */
|
struct watcher srv_watch; /* watcher to automatically update next pointer */
|
||||||
struct list modules; /* list of promex modules to export */
|
struct list modules; /* list of promex modules to export */
|
||||||
struct eb_root filters; /* list of filters to apply on metrics name */
|
struct eb_root filters; /* list of filters to apply on metrics name */
|
||||||
|
|
@ -347,6 +349,10 @@ static int promex_dump_ts(struct appctx *appctx, struct ist prefix,
|
||||||
istcat(&n, prefix, PROMEX_MAX_NAME_LEN);
|
istcat(&n, prefix, PROMEX_MAX_NAME_LEN);
|
||||||
istcat(&n, name, PROMEX_MAX_NAME_LEN);
|
istcat(&n, name, PROMEX_MAX_NAME_LEN);
|
||||||
|
|
||||||
|
/* In stress mode, force yielding on each metric. */
|
||||||
|
if (STRESS_RUN1(istlen(*out), 0))
|
||||||
|
goto full;
|
||||||
|
|
||||||
if ((ctx->flags & PROMEX_FL_METRIC_HDR) &&
|
if ((ctx->flags & PROMEX_FL_METRIC_HDR) &&
|
||||||
!promex_dump_ts_header(n, desc, type, out, max))
|
!promex_dump_ts_header(n, desc, type, out, max))
|
||||||
goto full;
|
goto full;
|
||||||
|
|
@ -626,8 +632,6 @@ static int promex_dump_front_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
}
|
}
|
||||||
|
|
||||||
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
|
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
|
||||||
void *counters;
|
|
||||||
|
|
||||||
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_FE))
|
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_FE))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
|
@ -664,8 +668,7 @@ static int promex_dump_front_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_FE))
|
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_FE))
|
||||||
goto next_px2;
|
goto next_px2;
|
||||||
|
|
||||||
counters = EXTRA_COUNTERS_GET(px->extra_counters_fe, mod);
|
if (!mod->fill_stats(mod, px->extra_counters_fe, stats + ctx->field_num, &ctx->mod_field_num))
|
||||||
if (!mod->fill_stats(counters, stats + ctx->field_num, &ctx->mod_field_num))
|
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
val = stats[ctx->field_num + ctx->mod_field_num];
|
val = stats[ctx->field_num + ctx->mod_field_num];
|
||||||
|
|
@ -817,8 +820,6 @@ static int promex_dump_listener_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
}
|
}
|
||||||
|
|
||||||
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
|
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
|
||||||
void *counters;
|
|
||||||
|
|
||||||
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_LI))
|
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_LI))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
|
@ -864,8 +865,7 @@ static int promex_dump_listener_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
labels[lb_idx+1].name = ist("mod");
|
labels[lb_idx+1].name = ist("mod");
|
||||||
labels[lb_idx+1].value = ist2(mod->name, strlen(mod->name));
|
labels[lb_idx+1].value = ist2(mod->name, strlen(mod->name));
|
||||||
|
|
||||||
counters = EXTRA_COUNTERS_GET(li->extra_counters, mod);
|
if (!mod->fill_stats(mod, li->extra_counters, stats + ctx->field_num, &ctx->mod_field_num))
|
||||||
if (!mod->fill_stats(counters, stats + ctx->field_num, &ctx->mod_field_num))
|
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
val = stats[ctx->field_num + ctx->mod_field_num];
|
val = stats[ctx->field_num + ctx->mod_field_num];
|
||||||
|
|
@ -941,9 +941,6 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
if (promex_filter_metric(appctx, prefix, name))
|
if (promex_filter_metric(appctx, prefix, name))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (!px)
|
|
||||||
px = proxies_list;
|
|
||||||
|
|
||||||
while (px) {
|
while (px) {
|
||||||
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
||||||
unsigned int srv_state_count[PROMEX_SRV_STATE_COUNT] = { 0 };
|
unsigned int srv_state_count[PROMEX_SRV_STATE_COUNT] = { 0 };
|
||||||
|
|
@ -1098,9 +1095,16 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
&val, labels, &out, max))
|
&val, labels, &out, max))
|
||||||
goto full;
|
goto full;
|
||||||
next_px:
|
next_px:
|
||||||
px = px->next;
|
px = watcher_next(&ctx->px_watch, px->next);
|
||||||
}
|
}
|
||||||
|
watcher_detach(&ctx->px_watch);
|
||||||
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
||||||
|
|
||||||
|
/* Prepare a new iteration for the next stat column.
|
||||||
|
* Update ctx.p[0] via watcher.
|
||||||
|
*/
|
||||||
|
watcher_attach(&ctx->px_watch, proxies_list);
|
||||||
|
px = proxies_list;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Skip extra counters */
|
/* Skip extra counters */
|
||||||
|
|
@ -1113,8 +1117,6 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
}
|
}
|
||||||
|
|
||||||
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
|
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
|
||||||
void *counters;
|
|
||||||
|
|
||||||
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_BE))
|
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_BE))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
|
@ -1125,9 +1127,6 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
if (promex_filter_metric(appctx, prefix, name))
|
if (promex_filter_metric(appctx, prefix, name))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (!px)
|
|
||||||
px = proxies_list;
|
|
||||||
|
|
||||||
while (px) {
|
while (px) {
|
||||||
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
||||||
struct promex_metric metric;
|
struct promex_metric metric;
|
||||||
|
|
@ -1151,8 +1150,7 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
|
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
|
||||||
goto next_px2;
|
goto next_px2;
|
||||||
|
|
||||||
counters = EXTRA_COUNTERS_GET(px->extra_counters_be, mod);
|
if (!mod->fill_stats(mod, px->extra_counters_be, stats + ctx->field_num, &ctx->mod_field_num))
|
||||||
if (!mod->fill_stats(counters, stats + ctx->field_num, &ctx->mod_field_num))
|
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
val = stats[ctx->field_num + ctx->mod_field_num];
|
val = stats[ctx->field_num + ctx->mod_field_num];
|
||||||
|
|
@ -1163,25 +1161,39 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
goto full;
|
goto full;
|
||||||
|
|
||||||
next_px2:
|
next_px2:
|
||||||
px = px->next;
|
px = watcher_next(&ctx->px_watch, px->next);
|
||||||
}
|
}
|
||||||
|
watcher_detach(&ctx->px_watch);
|
||||||
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
||||||
|
|
||||||
|
/* Prepare a new iteration for the next stat column.
|
||||||
|
* Update ctx.p[0] via watcher.
|
||||||
|
*/
|
||||||
|
watcher_attach(&ctx->px_watch, proxies_list);
|
||||||
|
px = proxies_list;
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx->field_num += mod->stats_count;
|
ctx->field_num += mod->stats_count;
|
||||||
ctx->mod_field_num = 0;
|
ctx->mod_field_num = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
px = NULL;
|
|
||||||
mod = NULL;
|
|
||||||
|
|
||||||
end:
|
end:
|
||||||
|
if (ret) {
|
||||||
|
watcher_detach(&ctx->px_watch);
|
||||||
|
mod = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
if (out.len) {
|
if (out.len) {
|
||||||
if (!htx_add_data_atonce(htx, out))
|
if (!htx_add_data_atonce(htx, out)) {
|
||||||
|
watcher_detach(&ctx->px_watch);
|
||||||
return -1; /* Unexpected and unrecoverable error */
|
return -1; /* Unexpected and unrecoverable error */
|
||||||
}
|
}
|
||||||
/* Save pointers (0=current proxy, 1=current stats module) of the current context */
|
}
|
||||||
ctx->p[0] = px;
|
|
||||||
|
/* Save pointers of the current context for dump resumption :
|
||||||
|
* 0=current proxy, 1=current stats module
|
||||||
|
* Note that p[0] is already automatically updated via px_watch.
|
||||||
|
*/
|
||||||
ctx->p[1] = mod;
|
ctx->p[1] = mod;
|
||||||
return ret;
|
return ret;
|
||||||
full:
|
full:
|
||||||
|
|
@ -1223,9 +1235,6 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
if (promex_filter_metric(appctx, prefix, name))
|
if (promex_filter_metric(appctx, prefix, name))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (!px)
|
|
||||||
px = proxies_list;
|
|
||||||
|
|
||||||
while (px) {
|
while (px) {
|
||||||
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
||||||
enum promex_mt_type type;
|
enum promex_mt_type type;
|
||||||
|
|
@ -1245,17 +1254,12 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
|
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
|
||||||
goto next_px;
|
goto next_px;
|
||||||
|
|
||||||
if (!sv) {
|
|
||||||
watcher_attach(&ctx->srv_watch, px->srv);
|
|
||||||
sv = px->srv;
|
|
||||||
}
|
|
||||||
|
|
||||||
while (sv) {
|
while (sv) {
|
||||||
labels[lb_idx].name = ist("server");
|
labels[lb_idx].name = ist("server");
|
||||||
labels[lb_idx].value = ist2(sv->id, strlen(sv->id));
|
labels[lb_idx].value = ist2(sv->id, strlen(sv->id));
|
||||||
|
|
||||||
if (!stats_fill_sv_line(px, sv, 0, stats, ST_I_PX_MAX, &(ctx->field_num)))
|
if (!stats_fill_sv_line(px, sv, 0, stats, ST_I_PX_MAX, &(ctx->field_num)))
|
||||||
return -1;
|
goto error;
|
||||||
|
|
||||||
if ((ctx->flags & PROMEX_FL_NO_MAINT_SRV) && (sv->cur_admin & SRV_ADMF_MAINT))
|
if ((ctx->flags & PROMEX_FL_NO_MAINT_SRV) && (sv->cur_admin & SRV_ADMF_MAINT))
|
||||||
goto next_sv;
|
goto next_sv;
|
||||||
|
|
@ -1405,9 +1409,25 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
|
|
||||||
next_px:
|
next_px:
|
||||||
watcher_detach(&ctx->srv_watch);
|
watcher_detach(&ctx->srv_watch);
|
||||||
px = px->next;
|
px = watcher_next(&ctx->px_watch, px->next);
|
||||||
|
if (px) {
|
||||||
|
/* Update ctx.p[1] via watcher. */
|
||||||
|
watcher_attach(&ctx->srv_watch, px->srv);
|
||||||
|
sv = ctx->p[1];
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
watcher_detach(&ctx->px_watch);
|
||||||
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
||||||
|
|
||||||
|
/* Prepare a new iteration for the next stat column.
|
||||||
|
* Update ctx.p[0]/p[1] via px_watch/srv_watch.
|
||||||
|
*/
|
||||||
|
watcher_attach(&ctx->px_watch, proxies_list);
|
||||||
|
px = proxies_list;
|
||||||
|
if (likely(px)) {
|
||||||
|
watcher_attach(&ctx->srv_watch, px->srv);
|
||||||
|
sv = ctx->p[1];
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Skip extra counters */
|
/* Skip extra counters */
|
||||||
|
|
@ -1420,8 +1440,6 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
}
|
}
|
||||||
|
|
||||||
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
|
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
|
||||||
void *counters;
|
|
||||||
|
|
||||||
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_SRV))
|
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_SRV))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
|
@ -1432,9 +1450,6 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
if (promex_filter_metric(appctx, prefix, name))
|
if (promex_filter_metric(appctx, prefix, name))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (!px)
|
|
||||||
px = proxies_list;
|
|
||||||
|
|
||||||
while (px) {
|
while (px) {
|
||||||
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
||||||
struct promex_metric metric;
|
struct promex_metric metric;
|
||||||
|
|
@ -1455,11 +1470,6 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
|
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
|
||||||
goto next_px2;
|
goto next_px2;
|
||||||
|
|
||||||
if (!sv) {
|
|
||||||
watcher_attach(&ctx->srv_watch, px->srv);
|
|
||||||
sv = px->srv;
|
|
||||||
}
|
|
||||||
|
|
||||||
while (sv) {
|
while (sv) {
|
||||||
labels[lb_idx].name = ist("server");
|
labels[lb_idx].name = ist("server");
|
||||||
labels[lb_idx].value = ist2(sv->id, strlen(sv->id));
|
labels[lb_idx].value = ist2(sv->id, strlen(sv->id));
|
||||||
|
|
@ -1471,9 +1481,8 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
goto next_sv2;
|
goto next_sv2;
|
||||||
|
|
||||||
|
|
||||||
counters = EXTRA_COUNTERS_GET(sv->extra_counters, mod);
|
if (!mod->fill_stats(mod, sv->extra_counters, stats + ctx->field_num, &ctx->mod_field_num))
|
||||||
if (!mod->fill_stats(counters, stats + ctx->field_num, &ctx->mod_field_num))
|
goto error;
|
||||||
return -1;
|
|
||||||
|
|
||||||
val = stats[ctx->field_num + ctx->mod_field_num];
|
val = stats[ctx->field_num + ctx->mod_field_num];
|
||||||
metric.type = ((val.type == FN_GAUGE) ? PROMEX_MT_GAUGE : PROMEX_MT_COUNTER);
|
metric.type = ((val.type == FN_GAUGE) ? PROMEX_MT_GAUGE : PROMEX_MT_COUNTER);
|
||||||
|
|
@ -1488,33 +1497,57 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
|
|
||||||
next_px2:
|
next_px2:
|
||||||
watcher_detach(&ctx->srv_watch);
|
watcher_detach(&ctx->srv_watch);
|
||||||
px = px->next;
|
px = watcher_next(&ctx->px_watch, px->next);
|
||||||
|
if (px) {
|
||||||
|
/* Update ctx.p[1] via watcher. */
|
||||||
|
watcher_attach(&ctx->srv_watch, px->srv);
|
||||||
|
sv = ctx->p[1];
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
watcher_detach(&ctx->px_watch);
|
||||||
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
||||||
|
|
||||||
|
/* Prepare a new iteration for the next stat column.
|
||||||
|
* Update ctx.p[0]/p[1] via px_watch/srv_watch.
|
||||||
|
*/
|
||||||
|
watcher_attach(&ctx->px_watch, proxies_list);
|
||||||
|
px = proxies_list;
|
||||||
|
if (likely(px)) {
|
||||||
|
watcher_attach(&ctx->srv_watch, px->srv);
|
||||||
|
sv = ctx->p[1];
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx->field_num += mod->stats_count;
|
ctx->field_num += mod->stats_count;
|
||||||
ctx->mod_field_num = 0;
|
ctx->mod_field_num = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
px = NULL;
|
|
||||||
sv = NULL;
|
|
||||||
mod = NULL;
|
|
||||||
|
|
||||||
end:
|
end:
|
||||||
|
if (ret) {
|
||||||
|
watcher_detach(&ctx->px_watch);
|
||||||
|
watcher_detach(&ctx->srv_watch);
|
||||||
|
mod = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
if (out.len) {
|
if (out.len) {
|
||||||
if (!htx_add_data_atonce(htx, out))
|
if (!htx_add_data_atonce(htx, out))
|
||||||
return -1; /* Unexpected and unrecoverable error */
|
return -1; /* Unexpected and unrecoverable error */
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Save pointers (0=current proxy, 1=current server, 2=current stats module) of the current context */
|
/* Save pointers of the current context for dump resumption :
|
||||||
ctx->p[0] = px;
|
* 0=current proxy, 1=current server, 2=current stats module
|
||||||
ctx->p[1] = sv;
|
* Note that p[0]/p[1] are already automatically updated via px_watch/srv_watch.
|
||||||
|
*/
|
||||||
ctx->p[2] = mod;
|
ctx->p[2] = mod;
|
||||||
return ret;
|
return ret;
|
||||||
full:
|
full:
|
||||||
ret = 0;
|
ret = 0;
|
||||||
goto end;
|
goto end;
|
||||||
|
|
||||||
|
error:
|
||||||
|
watcher_detach(&ctx->px_watch);
|
||||||
|
watcher_detach(&ctx->srv_watch);
|
||||||
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Dump metrics of module <mod>. It returns 1 on success, 0 if <out> is full and
|
/* Dump metrics of module <mod>. It returns 1 on success, 0 if <out> is full and
|
||||||
|
|
@ -1735,6 +1768,11 @@ static int promex_dump_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
ctx->field_num = ST_I_PX_PXNAME;
|
ctx->field_num = ST_I_PX_PXNAME;
|
||||||
ctx->mod_field_num = 0;
|
ctx->mod_field_num = 0;
|
||||||
appctx->st1 = PROMEX_DUMPER_BACK;
|
appctx->st1 = PROMEX_DUMPER_BACK;
|
||||||
|
|
||||||
|
if (ctx->flags & PROMEX_FL_SCOPE_BACK) {
|
||||||
|
/* Update ctx.p[0] via watcher. */
|
||||||
|
watcher_attach(&ctx->px_watch, proxies_list);
|
||||||
|
}
|
||||||
__fallthrough;
|
__fallthrough;
|
||||||
|
|
||||||
case PROMEX_DUMPER_BACK:
|
case PROMEX_DUMPER_BACK:
|
||||||
|
|
@ -1752,6 +1790,15 @@ static int promex_dump_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
ctx->field_num = ST_I_PX_PXNAME;
|
ctx->field_num = ST_I_PX_PXNAME;
|
||||||
ctx->mod_field_num = 0;
|
ctx->mod_field_num = 0;
|
||||||
appctx->st1 = PROMEX_DUMPER_SRV;
|
appctx->st1 = PROMEX_DUMPER_SRV;
|
||||||
|
|
||||||
|
if (ctx->flags & PROMEX_FL_SCOPE_SERVER) {
|
||||||
|
/* Update ctx.p[0] via watcher. */
|
||||||
|
watcher_attach(&ctx->px_watch, proxies_list);
|
||||||
|
if (likely(proxies_list)) {
|
||||||
|
/* Update ctx.p[1] via watcher. */
|
||||||
|
watcher_attach(&ctx->srv_watch, proxies_list->srv);
|
||||||
|
}
|
||||||
|
}
|
||||||
__fallthrough;
|
__fallthrough;
|
||||||
|
|
||||||
case PROMEX_DUMPER_SRV:
|
case PROMEX_DUMPER_SRV:
|
||||||
|
|
@ -2029,6 +2076,7 @@ static int promex_appctx_init(struct appctx *appctx)
|
||||||
LIST_INIT(&ctx->modules);
|
LIST_INIT(&ctx->modules);
|
||||||
ctx->filters = EB_ROOT;
|
ctx->filters = EB_ROOT;
|
||||||
appctx->st0 = PROMEX_ST_INIT;
|
appctx->st0 = PROMEX_ST_INIT;
|
||||||
|
watcher_init(&ctx->px_watch, &ctx->p[0], offsetof(struct proxy, watcher_list));
|
||||||
watcher_init(&ctx->srv_watch, &ctx->p[1], offsetof(struct server, watcher_list));
|
watcher_init(&ctx->srv_watch, &ctx->p[1], offsetof(struct server, watcher_list));
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
@ -2043,6 +2091,11 @@ static void promex_appctx_release(struct appctx *appctx)
|
||||||
struct promex_metric_filter *flt;
|
struct promex_metric_filter *flt;
|
||||||
struct eb32_node *node, *next;
|
struct eb32_node *node, *next;
|
||||||
|
|
||||||
|
if (appctx->st1 == PROMEX_DUMPER_BACK ||
|
||||||
|
appctx->st1 == PROMEX_DUMPER_SRV) {
|
||||||
|
watcher_detach(&ctx->px_watch);
|
||||||
|
}
|
||||||
|
|
||||||
if (appctx->st1 == PROMEX_DUMPER_SRV)
|
if (appctx->st1 == PROMEX_DUMPER_SRV)
|
||||||
watcher_detach(&ctx->srv_watch);
|
watcher_detach(&ctx->srv_watch);
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,11 +1,10 @@
|
||||||
#!/bin/bash
|
#!/bin/sh
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
export VERBOSE=1
|
export VERBOSE=1
|
||||||
export TIMEOUT=90
|
export TIMEOUT=90
|
||||||
export MASTER_SOCKET=${MASTER_SOCKET:-/var/run/haproxy-master.sock}
|
export MASTER_SOCKET="${MASTER_SOCKET:-/var/run/haproxy-master.sock}"
|
||||||
export RET=
|
|
||||||
|
|
||||||
alert() {
|
alert() {
|
||||||
if [ "$VERBOSE" -ge "1" ]; then
|
if [ "$VERBOSE" -ge "1" ]; then
|
||||||
|
|
@ -15,32 +14,38 @@ alert() {
|
||||||
|
|
||||||
|
|
||||||
reload() {
|
reload() {
|
||||||
while read -r line; do
|
if [ -S "$MASTER_SOCKET" ]; then
|
||||||
|
socat_addr="UNIX-CONNECT:${MASTER_SOCKET}"
|
||||||
if [ "$line" = "Success=0" ]; then
|
|
||||||
RET=1
|
|
||||||
elif [ "$line" = "Success=1" ]; then
|
|
||||||
RET=0
|
|
||||||
elif [ "$line" = "Another reload is still in progress." ]; then
|
|
||||||
alert "$line"
|
|
||||||
elif [ "$line" = "--" ]; then
|
|
||||||
continue;
|
|
||||||
else
|
else
|
||||||
if [ "$RET" = 1 ] && [ "$VERBOSE" = "2" ]; then
|
case "$MASTER_SOCKET" in
|
||||||
echo "$line" >&2
|
*:[0-9]*)
|
||||||
elif [ "$VERBOSE" = "3" ]; then
|
socat_addr="TCP:${MASTER_SOCKET}"
|
||||||
echo "$line" >&2
|
;;
|
||||||
fi
|
*)
|
||||||
fi
|
alert "Invalid master socket address '${MASTER_SOCKET}': expected a UNIX socket file or <host>:<port>"
|
||||||
|
|
||||||
done < <(echo "reload" | socat -t"${TIMEOUT}" "${MASTER_SOCKET}" -)
|
|
||||||
|
|
||||||
if [ -z "$RET" ]; then
|
|
||||||
alert "Couldn't finish the reload before the timeout (${TIMEOUT})."
|
|
||||||
return 1
|
return 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
fi
|
fi
|
||||||
|
|
||||||
return "$RET"
|
echo "reload" | socat -t"${TIMEOUT}" "$socat_addr" - | {
|
||||||
|
read -r status || { alert "No status received (connection error or timeout after ${TIMEOUT}s)."; exit 1; }
|
||||||
|
case "$status" in
|
||||||
|
"Success=1") ret=0 ;;
|
||||||
|
"Success=0") ret=1 ;;
|
||||||
|
*) alert "Unexpected response: '$status'"; exit 1 ;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
read -r _ # consume "--"
|
||||||
|
|
||||||
|
if [ "$VERBOSE" -ge 3 ] || { [ "$ret" = 1 ] && [ "$VERBOSE" -ge 2 ]; }; then
|
||||||
|
cat >&2
|
||||||
|
else
|
||||||
|
cat >/dev/null
|
||||||
|
fi
|
||||||
|
|
||||||
|
exit "$ret"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
usage() {
|
usage() {
|
||||||
|
|
@ -52,12 +57,12 @@ usage() {
|
||||||
echo " EXPERIMENTAL script!"
|
echo " EXPERIMENTAL script!"
|
||||||
echo ""
|
echo ""
|
||||||
echo "Options:"
|
echo "Options:"
|
||||||
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${MASTER_SOCKET})"
|
echo " -S, --master-socket <addr> Unix socket path or <host>:<port> (default: ${MASTER_SOCKET})"
|
||||||
echo " -d, --debug Debug mode, set -x"
|
echo " -d, --debug Debug mode, set -x"
|
||||||
echo " -t, --timeout Timeout (socat -t) (default: ${TIMEOUT})"
|
echo " -t, --timeout Timeout (socat -t) (default: ${TIMEOUT})"
|
||||||
echo " -s, --silent Silent mode (no output)"
|
echo " -s, --silent Silent mode (no output)"
|
||||||
echo " -v, --verbose Verbose output (output from haproxy on failure)"
|
echo " -v, --verbose Verbose output (output from haproxy on failure)"
|
||||||
echo " -vv Even more verbose output (output from haproxy on success and failure)"
|
echo " -vv --verbose=all Very verbose output (output from haproxy on success and failure)"
|
||||||
echo " -h, --help This help"
|
echo " -h, --help This help"
|
||||||
echo ""
|
echo ""
|
||||||
echo "Examples:"
|
echo "Examples:"
|
||||||
|
|
@ -84,7 +89,7 @@ main() {
|
||||||
VERBOSE=2
|
VERBOSE=2
|
||||||
shift
|
shift
|
||||||
;;
|
;;
|
||||||
-vv|--verbose)
|
-vv|--verbose=all)
|
||||||
VERBOSE=3
|
VERBOSE=3
|
||||||
shift
|
shift
|
||||||
;;
|
;;
|
||||||
|
|
|
||||||
141
dev/gdb/pm-from-core.c
Normal file
141
dev/gdb/pm-from-core.c
Normal file
|
|
@ -0,0 +1,141 @@
|
||||||
|
/*
|
||||||
|
* Find the post-mortem offset from a core dump
|
||||||
|
*
|
||||||
|
* Copyright (C) 2026 Willy Tarreau <w@1wt.eu>
|
||||||
|
*
|
||||||
|
* Permission is hereby granted, free of charge, to any person obtaining
|
||||||
|
* a copy of this software and associated documentation files (the
|
||||||
|
* "Software"), to deal in the Software without restriction, including
|
||||||
|
* without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
* distribute, sublicense, and/or sell copies of the Software, and to
|
||||||
|
* permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
* the following conditions:
|
||||||
|
*
|
||||||
|
* The above copyright notice and this permission notice shall be
|
||||||
|
* included in all copies or substantial portions of the Software.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||||
|
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||||
|
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||||
|
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||||
|
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||||
|
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||||
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||||
|
* OTHER DEALINGS IN THE SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* Note: builds with no option under glibc, and can be built as a minimal
|
||||||
|
* uploadable static executable using nolibc as well:
|
||||||
|
gcc -o pm-from-core -nostdinc -nostdlib -s -Os -static -fno-ident \
|
||||||
|
-fno-exceptions -fno-asynchronous-unwind-tables -fno-unwind-tables \
|
||||||
|
-Wl,--gc-sections,--orphan-handling=discard,-znoseparate-code \
|
||||||
|
-I /path/to/nolibc-sysroot/include pm-from-core.c
|
||||||
|
*/
|
||||||
|
#define _GNU_SOURCE
|
||||||
|
#include <sys/mman.h>
|
||||||
|
#include <sys/stat.h>
|
||||||
|
#include <elf.h>
|
||||||
|
#include <fcntl.h>
|
||||||
|
#include <stdint.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <string.h>
|
||||||
|
|
||||||
|
#if defined(__GLIBC__)
|
||||||
|
# define my_memmem memmem
|
||||||
|
#else
|
||||||
|
void *my_memmem(const void *haystack, size_t haystacklen,
|
||||||
|
const void *needle, size_t needlelen)
|
||||||
|
{
|
||||||
|
while (haystacklen >= needlelen) {
|
||||||
|
if (!memcmp(haystack, needle, needlelen))
|
||||||
|
return (void*)haystack;
|
||||||
|
haystack++;
|
||||||
|
haystacklen--;
|
||||||
|
}
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define MAGIC "POST-MORTEM STARTS HERE+7654321\0"
|
||||||
|
|
||||||
|
int main(int argc, char **argv)
|
||||||
|
{
|
||||||
|
Elf64_Ehdr *ehdr;
|
||||||
|
Elf64_Phdr *phdr;
|
||||||
|
struct stat st;
|
||||||
|
uint8_t *mem;
|
||||||
|
int i, fd;
|
||||||
|
|
||||||
|
if (argc < 2) {
|
||||||
|
printf("Usage: %s <core_file>\n", argv[0]);
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
fd = open(argv[1], O_RDONLY);
|
||||||
|
|
||||||
|
/* Let's just map the core dump as an ELF header */
|
||||||
|
fstat(fd, &st);
|
||||||
|
mem = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
|
||||||
|
if (mem == MAP_FAILED) {
|
||||||
|
perror("mmap()");
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* get the program headers */
|
||||||
|
ehdr = (Elf64_Ehdr *)mem;
|
||||||
|
|
||||||
|
/* check that it's really a core. Should be "\x7fELF" */
|
||||||
|
if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) {
|
||||||
|
fprintf(stderr, "ELF magic not found.\n");
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (ehdr->e_ident[EI_CLASS] != ELFCLASS64) {
|
||||||
|
fprintf(stderr, "Only 64-bit ELF supported.\n");
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (ehdr->e_type != ET_CORE) {
|
||||||
|
fprintf(stderr, "ELF type %d, not a core dump.\n", ehdr->e_type);
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* OK we can safely go with program headers */
|
||||||
|
phdr = (Elf64_Phdr *)(mem + ehdr->e_phoff);
|
||||||
|
|
||||||
|
for (i = 0; i < ehdr->e_phnum; i++) {
|
||||||
|
uint64_t size = phdr[i].p_filesz;
|
||||||
|
uint64_t offset = phdr[i].p_offset;
|
||||||
|
uint64_t vaddr = phdr[i].p_vaddr;
|
||||||
|
uint64_t found_ofs;
|
||||||
|
uint8_t *found;
|
||||||
|
|
||||||
|
if (phdr[i].p_type != PT_LOAD)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
//printf("Scanning segment %d...\n", ehdr->e_phnum);
|
||||||
|
//printf("\r%-5d: off=%lx va=%lx sz=%lx ", i, (long)offset, (long)vaddr, (long)size);
|
||||||
|
if (!size)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
if (size >= 1048576) // don't scan large segments
|
||||||
|
continue;
|
||||||
|
|
||||||
|
found = my_memmem(mem + offset, size, MAGIC, sizeof(MAGIC) - 1);
|
||||||
|
if (!found)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
found_ofs = found - (mem + offset);
|
||||||
|
|
||||||
|
printf("Found post-mortem magic in segment %d:\n", i);
|
||||||
|
printf(" Core File Offset: 0x%lx (0x%lx + 0x%lx)\n", offset + found_ofs, offset, found_ofs);
|
||||||
|
printf(" Runtime VAddr: 0x%lx (0x%lx + 0x%lx)\n", vaddr + found_ofs, vaddr, found_ofs);
|
||||||
|
printf(" Segment Size: 0x%lx\n", size);
|
||||||
|
printf("\nIn gdb, copy-paste this line:\n\n pm_init 0x%lx\n\n", vaddr + found_ofs);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
//printf("\r%75s\n", "\r");
|
||||||
|
printf("post-mortem magic not found\n");
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
@ -14,8 +14,8 @@ define pools_dump
|
||||||
set $idx=$idx + 1
|
set $idx=$idx + 1
|
||||||
end
|
end
|
||||||
|
|
||||||
set $mem = $total * $e->size
|
set $mem = (unsigned long)$total * $e->size
|
||||||
printf "list=%#lx pool_head=%p name=%s size=%u alloc=%u used=%u mem=%u\n", $p, $e, $e->name, $e->size, $total, $used, $mem
|
printf "list=%#lx pool_head=%p name=%s size=%u alloc=%u used=%u mem=%lu\n", $p, $e, $e->name, $e->size, $total, $used, $mem
|
||||||
set $p = *(void **)$p
|
set $p = *(void **)$p
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
|
||||||
|
|
@ -30,8 +30,8 @@ static const char *tevt_fd_types[16] = {
|
||||||
};
|
};
|
||||||
|
|
||||||
static const char *tevt_hs_types[16] = {
|
static const char *tevt_hs_types[16] = {
|
||||||
[ 0] = "-", [ 1] = "-", [ 2] = "-", [ 3] = "rcv_err",
|
[ 0] = "-", [ 1] = "-", [ 2] = "-", [ 3] = "-",
|
||||||
[ 4] = "snd_err", [ 5] = "-", [ 6] = "-", [ 7] = "-",
|
[ 4] = "snd_err", [ 5] = "truncated_shutr", [ 6] = "truncated_rcv_err", [ 7] = "-",
|
||||||
[ 8] = "-", [ 9] = "-", [10] = "-", [11] = "-",
|
[ 8] = "-", [ 9] = "-", [10] = "-", [11] = "-",
|
||||||
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
|
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
|
||||||
};
|
};
|
||||||
|
|
|
||||||
|
|
@ -3,7 +3,7 @@
|
||||||
Configuration Manual
|
Configuration Manual
|
||||||
----------------------
|
----------------------
|
||||||
version 3.4
|
version 3.4
|
||||||
2026/01/22
|
2026/03/05
|
||||||
|
|
||||||
|
|
||||||
This document covers the configuration language as implemented in the version
|
This document covers the configuration language as implemented in the version
|
||||||
|
|
@ -1869,6 +1869,7 @@ The following keywords are supported in the "global" section :
|
||||||
- tune.buffers.limit
|
- tune.buffers.limit
|
||||||
- tune.buffers.reserve
|
- tune.buffers.reserve
|
||||||
- tune.bufsize
|
- tune.bufsize
|
||||||
|
- tune.bufsize.large
|
||||||
- tune.bufsize.small
|
- tune.bufsize.small
|
||||||
- tune.comp.maxlevel
|
- tune.comp.maxlevel
|
||||||
- tune.defaults.purge
|
- tune.defaults.purge
|
||||||
|
|
@ -1983,6 +1984,7 @@ The following keywords are supported in the "global" section :
|
||||||
- tune.ssl.cachesize
|
- tune.ssl.cachesize
|
||||||
- tune.ssl.capture-buffer-size
|
- tune.ssl.capture-buffer-size
|
||||||
- tune.ssl.capture-cipherlist-size (deprecated)
|
- tune.ssl.capture-cipherlist-size (deprecated)
|
||||||
|
- tune.ssl.certificate-compression
|
||||||
- tune.ssl.default-dh-param
|
- tune.ssl.default-dh-param
|
||||||
- tune.ssl.force-private-cache
|
- tune.ssl.force-private-cache
|
||||||
- tune.ssl.hard-maxrecord
|
- tune.ssl.hard-maxrecord
|
||||||
|
|
@ -3602,6 +3604,11 @@ ssl-skip-self-issued-ca
|
||||||
certificates. It's useless for BoringSSL, .issuer is ignored because ocsp
|
certificates. It's useless for BoringSSL, .issuer is ignored because ocsp
|
||||||
bits does not need it. Requires at least OpenSSL 1.0.2.
|
bits does not need it. Requires at least OpenSSL 1.0.2.
|
||||||
|
|
||||||
|
stats calculate-max-counters [on|off]
|
||||||
|
Activates or deactivates the calculation of stats max counters. If you
|
||||||
|
don't need them, deactivating them may increase performances a bit.
|
||||||
|
The default is on.
|
||||||
|
|
||||||
stats maxconn <connections>
|
stats maxconn <connections>
|
||||||
By default, the stats socket is limited to 10 concurrent connections. It is
|
By default, the stats socket is limited to 10 concurrent connections. It is
|
||||||
possible to change this value with "stats maxconn".
|
possible to change this value with "stats maxconn".
|
||||||
|
|
@ -4004,7 +4011,7 @@ profiling.memory { on | off }
|
||||||
use in production. The same may be achieved at run time on the CLI using the
|
use in production. The same may be achieved at run time on the CLI using the
|
||||||
"set profiling memory" command, please consult the management manual.
|
"set profiling memory" command, please consult the management manual.
|
||||||
|
|
||||||
profiling.tasks { auto | on | off }
|
profiling.tasks { auto | on | off | lock | no-lock | memory | no-memory }*
|
||||||
Enables ('on') or disables ('off') per-task CPU profiling. When set to 'auto'
|
Enables ('on') or disables ('off') per-task CPU profiling. When set to 'auto'
|
||||||
the profiling automatically turns on a thread when it starts to suffer from
|
the profiling automatically turns on a thread when it starts to suffer from
|
||||||
an average latency of 1000 microseconds or higher as reported in the
|
an average latency of 1000 microseconds or higher as reported in the
|
||||||
|
|
@ -4015,6 +4022,18 @@ profiling.tasks { auto | on | off }
|
||||||
systems, containers, or virtual machines, or when the system swaps (which
|
systems, containers, or virtual machines, or when the system swaps (which
|
||||||
must absolutely never happen on a load balancer).
|
must absolutely never happen on a load balancer).
|
||||||
|
|
||||||
|
When task profiling is enabled, HAProxy can also collect the time each task
|
||||||
|
spends with a lock held or waiting for a lock, as well as the time spent
|
||||||
|
waiting for a memory allocation to succeed in case of a pool cache miss. This
|
||||||
|
can sometimes help understand certain causes of latency. For this, the extra
|
||||||
|
keywords "lock" (to enable lock time collection), "no-lock" (to disable it),
|
||||||
|
"memory" (to enable memory allocation time collection) or "no-memory" (to
|
||||||
|
disable it) may additionally be passed. By default they are not enabled since
|
||||||
|
they can have a non-negligible CPU impact on highly loaded systems (3-10%).
|
||||||
|
Note that the overhead is only taken when profiling is effectively running,
|
||||||
|
so that when running in "auto" mode, it will only appear when HAProxy decides
|
||||||
|
to turn it on.
|
||||||
|
|
||||||
CPU profiling per task can be very convenient to report where the time is
|
CPU profiling per task can be very convenient to report where the time is
|
||||||
spent and which requests have what effect on which other request. Enabling
|
spent and which requests have what effect on which other request. Enabling
|
||||||
it will typically affect the overall's performance by less than 1%, thus it
|
it will typically affect the overall's performance by less than 1%, thus it
|
||||||
|
|
@ -4105,6 +4124,17 @@ tune.bufsize <size>
|
||||||
value set using this parameter will automatically be rounded up to the next
|
value set using this parameter will automatically be rounded up to the next
|
||||||
multiple of 8 on 32-bit machines and 16 on 64-bit machines.
|
multiple of 8 on 32-bit machines and 16 on 64-bit machines.
|
||||||
|
|
||||||
|
tune.bufsize.large <size>
|
||||||
|
Sets the size in bytes for large buffers. By default, support for large
|
||||||
|
buffers is not enabled, it must explicitly be enabled by setting this value.
|
||||||
|
|
||||||
|
These buffers are designed to be used in some specific contexts where more
|
||||||
|
data must be bufferized without changing the size of regular buffers. The
|
||||||
|
large buffers are not implicitly used.
|
||||||
|
|
||||||
|
Note that when large buffers are configured, three special large buffers will
|
||||||
|
be allocated for each thread during startup for internal usage.
|
||||||
|
|
||||||
tune.bufsize.small <size>
|
tune.bufsize.small <size>
|
||||||
Sets the size in bytes for small buffers. The defaults value is 1024.
|
Sets the size in bytes for small buffers. The defaults value is 1024.
|
||||||
|
|
||||||
|
|
@ -4453,6 +4483,16 @@ tune.h2.initial-window-size <number>
|
||||||
specific settings tune.h2.fe.initial-window-size and
|
specific settings tune.h2.fe.initial-window-size and
|
||||||
tune.h2.be.initial-window-size.
|
tune.h2.be.initial-window-size.
|
||||||
|
|
||||||
|
tune.h2.log-errors { none | connection | stream }
|
||||||
|
Sets the level of errors in the H2 demultiplexer that will generate a log.
|
||||||
|
The default is "stream", which means that any decoding error encountered in
|
||||||
|
the demultiplexer will lead to the emission of a log. The "connection" value
|
||||||
|
indicates that only logs that result in invalidating the connection will
|
||||||
|
produce a log. Finally, "none" indicates that no decoding error will produce
|
||||||
|
any log. It is recommended to set at least "connection" in order to detect
|
||||||
|
protocol anomalies, even if this means temporarily switching to "none" during
|
||||||
|
difficult periods.
|
||||||
|
|
||||||
tune.h2.max-concurrent-streams <number>
|
tune.h2.max-concurrent-streams <number>
|
||||||
Sets the default HTTP/2 maximum number of concurrent streams per connection
|
Sets the default HTTP/2 maximum number of concurrent streams per connection
|
||||||
(i.e. the number of outstanding requests on a single connection). This value
|
(i.e. the number of outstanding requests on a single connection). This value
|
||||||
|
|
@ -4526,7 +4566,11 @@ tune.idle-pool.shared { on | off }
|
||||||
disabling this option without setting a conservative value on "pool-low-conn"
|
disabling this option without setting a conservative value on "pool-low-conn"
|
||||||
for all servers relying on connection reuse to achieve a high performance
|
for all servers relying on connection reuse to achieve a high performance
|
||||||
level, otherwise connections might be closed very often as the thread count
|
level, otherwise connections might be closed very often as the thread count
|
||||||
increases.
|
increases. Note that in any case, connections are only shared between threads
|
||||||
|
of the same thread group. This means that systems with many NUMA nodes may
|
||||||
|
show slightly more persistent connections while machines with unified caches
|
||||||
|
and many CPU cores per node may experience higher CPU usage. In the latter
|
||||||
|
case, the "max-thread-per-group" tunable may be used to improve the behavior.
|
||||||
|
|
||||||
tune.idletimer <timeout>
|
tune.idletimer <timeout>
|
||||||
Sets the duration after which HAProxy will consider that an empty buffer is
|
Sets the duration after which HAProxy will consider that an empty buffer is
|
||||||
|
|
@ -5310,6 +5354,22 @@ tune.ssl.capture-cipherlist-size <number> (deprecated)
|
||||||
formats. If the value is 0 (default value) the capture is disabled,
|
formats. If the value is 0 (default value) the capture is disabled,
|
||||||
otherwise a buffer is allocated for each SSL/TLS connection.
|
otherwise a buffer is allocated for each SSL/TLS connection.
|
||||||
|
|
||||||
|
tune.ssl.certificate-compression { auto | off }
|
||||||
|
This setting allows to configure the certificate compression support which is
|
||||||
|
an extension (RFC 8879) to TLS 1.3.
|
||||||
|
|
||||||
|
When set to "auto" it uses the default value of the TLS library.
|
||||||
|
|
||||||
|
With "off" it tries to explicitly disable the support of the feature.
|
||||||
|
HAProxy won't try to send compressed certificates anymore nor accept
|
||||||
|
compressed certificates.
|
||||||
|
|
||||||
|
Configures both backend and frontend sides.
|
||||||
|
|
||||||
|
This keyword is supported by OpenSSL >= 3.2.0.
|
||||||
|
|
||||||
|
The default value is auto.
|
||||||
|
|
||||||
tune.ssl.default-dh-param <number>
|
tune.ssl.default-dh-param <number>
|
||||||
Sets the maximum size of the Diffie-Hellman parameters used for generating
|
Sets the maximum size of the Diffie-Hellman parameters used for generating
|
||||||
the ephemeral/temporary Diffie-Hellman key in case of DHE key exchange. The
|
the ephemeral/temporary Diffie-Hellman key in case of DHE key exchange. The
|
||||||
|
|
@ -6262,8 +6322,16 @@ balance url_param <param> [check_post]
|
||||||
will take away N-1 of the highest loaded servers at the
|
will take away N-1 of the highest loaded servers at the
|
||||||
expense of performance. With very high values, the algorithm
|
expense of performance. With very high values, the algorithm
|
||||||
will converge towards the leastconn's result but much slower.
|
will converge towards the leastconn's result but much slower.
|
||||||
|
In addition, for large server farms with very low loads (or
|
||||||
|
perfect balance), comparing loads will often lead to a tie,
|
||||||
|
so in case of equal loads between all measured servers, their
|
||||||
|
request rate over the last second are compared, which allows
|
||||||
|
to better balance server usage over time in the same spirit
|
||||||
|
as roundrobin does, and smooth consistent hash unfairness.
|
||||||
The default value is 2, which generally shows very good
|
The default value is 2, which generally shows very good
|
||||||
distribution and performance. This algorithm is also known as
|
distribution and performance. For large farms with low loads
|
||||||
|
(less than a few requests per second per server), it may help
|
||||||
|
to raise it to 3 or even 4. This algorithm is also known as
|
||||||
the Power of Two Random Choices and is described here :
|
the Power of Two Random Choices and is described here :
|
||||||
http://www.eecs.harvard.edu/~michaelm/postscripts/handbook2001.pdf
|
http://www.eecs.harvard.edu/~michaelm/postscripts/handbook2001.pdf
|
||||||
|
|
||||||
|
|
@ -6867,12 +6935,16 @@ compression offload
|
||||||
|
|
||||||
See also : "compression type", "compression algo", "compression direction"
|
See also : "compression type", "compression algo", "compression direction"
|
||||||
|
|
||||||
compression direction <direction>
|
compression direction <direction> (deprecated)
|
||||||
Makes haproxy able to compress both requests and responses.
|
Makes haproxy able to compress both requests and responses.
|
||||||
Valid values are "request", to compress only requests, "response", to
|
Valid values are "request", to compress only requests, "response", to
|
||||||
compress only responses, or "both", when you want to compress both.
|
compress only responses, or "both", when you want to compress both.
|
||||||
The default value is "response".
|
The default value is "response".
|
||||||
|
|
||||||
|
This directive is only relevant when legacy "filter compression" was
|
||||||
|
enabled, as with explicit comp-req and comp-res filters compression
|
||||||
|
direction is redundant.
|
||||||
|
|
||||||
May be used in the following contexts: http
|
May be used in the following contexts: http
|
||||||
|
|
||||||
See also : "compression type", "compression algo", "compression offload"
|
See also : "compression type", "compression algo", "compression offload"
|
||||||
|
|
@ -9254,6 +9326,9 @@ mode { tcp|http|log|spop }
|
||||||
processing and switching will be possible. This is the mode which
|
processing and switching will be possible. This is the mode which
|
||||||
brings HAProxy most of its value.
|
brings HAProxy most of its value.
|
||||||
|
|
||||||
|
haterm The frontend will work in haterm HTTP benchmark mode. This is
|
||||||
|
not supported by backends. See doc/haterm.txt for details.
|
||||||
|
|
||||||
log When used in a backend section, it will turn the backend into a
|
log When used in a backend section, it will turn the backend into a
|
||||||
log backend. Such backend can be used as a log destination for
|
log backend. Such backend can be used as a log destination for
|
||||||
any "log" directive by using the "backend@<name>" syntax. Log
|
any "log" directive by using the "backend@<name>" syntax. Log
|
||||||
|
|
@ -10800,7 +10875,7 @@ no option logasap
|
||||||
logging.
|
logging.
|
||||||
|
|
||||||
|
|
||||||
option mysql-check [ user <username> [ { post-41 | pre-41 } ] ]
|
option mysql-check [ user <username> [ { post-41 | pre-41 | post-80 } ] ]
|
||||||
Use MySQL health checks for server testing
|
Use MySQL health checks for server testing
|
||||||
|
|
||||||
May be used in the following contexts: tcp
|
May be used in the following contexts: tcp
|
||||||
|
|
@ -10813,6 +10888,12 @@ option mysql-check [ user <username> [ { post-41 | pre-41 } ] ]
|
||||||
server.
|
server.
|
||||||
post-41 Send post v4.1 client compatible checks (the default)
|
post-41 Send post v4.1 client compatible checks (the default)
|
||||||
pre-41 Send pre v4.1 client compatible checks
|
pre-41 Send pre v4.1 client compatible checks
|
||||||
|
post-80 Send post v8.0 client compatible checks with CLIENT_PLUGIN_AUTH
|
||||||
|
capability set and mysql_native_password as the authentication
|
||||||
|
plugin. Use this option when connecting to MySQL 8.0+ servers
|
||||||
|
where the health check user is created with mysql_native_password
|
||||||
|
authentication. Example:
|
||||||
|
CREATE USER 'haproxy'@'%' IDENTIFIED WITH mysql_native_password BY '';
|
||||||
|
|
||||||
If you specify a username, the check consists of sending two MySQL packet,
|
If you specify a username, the check consists of sending two MySQL packet,
|
||||||
one Client Authentication packet, and one QUIT packet, to correctly close
|
one Client Authentication packet, and one QUIT packet, to correctly close
|
||||||
|
|
@ -16298,20 +16379,24 @@ set-status <status> [reason <str>]
|
||||||
http-response set-status 503 reason "Slow Down".
|
http-response set-status 503 reason "Slow Down".
|
||||||
|
|
||||||
|
|
||||||
set-timeout { client | server | tunnel } { <timeout> | <expr> }
|
set-timeout { client | connect | queue | server | tarpit | tunnel } { <timeout> | <expr> }
|
||||||
Usable in: QUIC Ini| TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
|
Usable in: QUIC Ini| TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
|
||||||
- | - | - | - | - | X | X | -
|
- | - | - | - | - | X | X | -
|
||||||
|
|
||||||
This action overrides the specified "client", "server" or "tunnel" timeout
|
This action overrides the specified "client", "connect", "queue", "server",
|
||||||
for the current stream only. The timeout can be specified in milliseconds or
|
"tarpit" or "tunnel" timeout for the current stream only. Changing one timeout
|
||||||
with any other unit if the number is suffixed by the unit as explained at the
|
does not influence any other timeouts, even if they are inherited from each
|
||||||
top of this document. It is also possible to write an expression which must
|
other during configuration parsing (see last example). The timeout can be
|
||||||
return a number interpreted as a timeout in milliseconds.
|
specified in milliseconds or with any other unit if the number is suffixed by
|
||||||
|
the unit as explained at the top of this document. It is also possible to
|
||||||
|
write an expression which must return a number interpreted as a timeout in
|
||||||
|
milliseconds.
|
||||||
|
|
||||||
Note that the server/tunnel timeouts are only relevant on the backend side
|
Note that the connect, queue, server and tunnel timeouts are only relevant on
|
||||||
and thus this rule is only available for the proxies with backend
|
the backend side and thus this rule is only available for the proxies with
|
||||||
capabilities. Likewise, client timeout is only relevant for frontend side.
|
backend capabilities. Likewise, client timeout is only relevant for frontend
|
||||||
Also the timeout value must be non-null to obtain the expected results.
|
side. Tarpit timeout is available to both sides. The timeout value must be
|
||||||
|
non-null to obtain the expected results.
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
http-request set-timeout tunnel 5s
|
http-request set-timeout tunnel 5s
|
||||||
|
|
@ -16321,6 +16406,18 @@ set-timeout { client | server | tunnel } { <timeout> | <expr> }
|
||||||
http-response set-timeout tunnel 5s
|
http-response set-timeout tunnel 5s
|
||||||
http-response set-timeout server res.hdr(X-Refresh-Seconds),mul(1000)
|
http-response set-timeout server res.hdr(X-Refresh-Seconds),mul(1000)
|
||||||
|
|
||||||
|
Example:
|
||||||
|
defaults
|
||||||
|
# This will set both tarpit and queue timeout to 5s as they are not
|
||||||
|
# defined
|
||||||
|
timeout connect 5s
|
||||||
|
timeout client 30s
|
||||||
|
timeout server 30s
|
||||||
|
|
||||||
|
listen foo
|
||||||
|
# This will only change the connect timeout to 10s without affecting
|
||||||
|
# queue or tarpit timeouts
|
||||||
|
http-request set-timeout connect 10s
|
||||||
|
|
||||||
set-tos <tos> (deprecated)
|
set-tos <tos> (deprecated)
|
||||||
This is an alias for "set-fc-tos" (which should be used instead).
|
This is an alias for "set-fc-tos" (which should be used instead).
|
||||||
|
|
@ -16544,7 +16641,7 @@ use-service <service-name>
|
||||||
http-request use-service prometheus-exporter if { path /metrics }
|
http-request use-service prometheus-exporter if { path /metrics }
|
||||||
|
|
||||||
|
|
||||||
wait-for-body time <time> [ at-least <bytes> ]
|
wait-for-body time <time> [ at-least <bytes> ] [use-large-buffer]
|
||||||
Usable in: QUIC Ini| TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
|
Usable in: QUIC Ini| TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
|
||||||
- | - | - | - | - | X | X | -
|
- | - | - | - | - | X | X | -
|
||||||
|
|
||||||
|
|
@ -16562,6 +16659,10 @@ wait-for-body time <time> [ at-least <bytes> ]
|
||||||
happens first, this timeout will not occur even if the full body has
|
happens first, this timeout will not occur even if the full body has
|
||||||
not yet been received.
|
not yet been received.
|
||||||
|
|
||||||
|
"use-large-buffer" option may be set to allocate a large buffer if regular
|
||||||
|
one is too small to store the message body. To be used, "tune.bufsize.large"
|
||||||
|
global option must be defined.
|
||||||
|
|
||||||
This action may be used as a replacement for "option http-buffer-request".
|
This action may be used as a replacement for "option http-buffer-request".
|
||||||
|
|
||||||
Arguments :
|
Arguments :
|
||||||
|
|
@ -16576,7 +16677,7 @@ wait-for-body time <time> [ at-least <bytes> ]
|
||||||
Example:
|
Example:
|
||||||
http-request wait-for-body time 1s at-least 1k if METH_POST
|
http-request wait-for-body time 1s at-least 1k if METH_POST
|
||||||
|
|
||||||
See also : "option http-buffer-request"
|
See also : "option http-buffer-request" and "tune.bufsize.large"
|
||||||
|
|
||||||
|
|
||||||
wait-for-handshake
|
wait-for-handshake
|
||||||
|
|
@ -17203,7 +17304,9 @@ interface <interface>
|
||||||
ktls <on|off> [ EXPERIMENTAL ]
|
ktls <on|off> [ EXPERIMENTAL ]
|
||||||
Enables or disables ktls for those sockets. If enabled, kTLS will be used
|
Enables or disables ktls for those sockets. If enabled, kTLS will be used
|
||||||
if the kernel supports it and the cipher is compatible. This is only
|
if the kernel supports it and the cipher is compatible. This is only
|
||||||
available on Linux kernel 4.17 and above.
|
available on Linux kernel 4.17 and above. Please note that some network
|
||||||
|
drivers and/or TLS stacks might restrict kTLS usage to TLS v1.2 only. See
|
||||||
|
also "force-tlsv12".
|
||||||
|
|
||||||
label <label>
|
label <label>
|
||||||
Sets an optional label for these sockets. It could be used to group sockets by
|
Sets an optional label for these sockets. It could be used to group sockets by
|
||||||
|
|
@ -18306,7 +18409,10 @@ hash-key <key>
|
||||||
|
|
||||||
id The node keys will be derived from the server's numeric
|
id The node keys will be derived from the server's numeric
|
||||||
identifier as set from "id" or which defaults to its position
|
identifier as set from "id" or which defaults to its position
|
||||||
in the server list.
|
in the server list. This is the default. Note that only the 28
|
||||||
|
lowest bits of the ID will be used (i.e. (id % 268435456)), so
|
||||||
|
better only use values comprised between 1 and this value to
|
||||||
|
avoid overlap.
|
||||||
|
|
||||||
addr The node keys will be derived from the server's address, when
|
addr The node keys will be derived from the server's address, when
|
||||||
available, or else fall back on "id".
|
available, or else fall back on "id".
|
||||||
|
|
@ -18318,7 +18424,9 @@ hash-key <key>
|
||||||
HAProxy processes are balancing traffic to the same set of servers. If the
|
HAProxy processes are balancing traffic to the same set of servers. If the
|
||||||
server order of each process is different (because, for example, DNS records
|
server order of each process is different (because, for example, DNS records
|
||||||
were resolved in different orders) then this will allow each independent
|
were resolved in different orders) then this will allow each independent
|
||||||
HAProxy processes to agree on routing decisions.
|
HAProxy processes to agree on routing decisions. Note: "balance random" also
|
||||||
|
uses "hash-type consistent", and the quality of the distribution will depend
|
||||||
|
on the quality of the keys.
|
||||||
|
|
||||||
id <value>
|
id <value>
|
||||||
May be used in the following contexts: tcp, http, log
|
May be used in the following contexts: tcp, http, log
|
||||||
|
|
@ -18463,9 +18571,10 @@ See also: "option tcp-check", "option httpchk"
|
||||||
ktls <on|off> [ EXPERIMENTAL ]
|
ktls <on|off> [ EXPERIMENTAL ]
|
||||||
May be used in the following contexts: tcp, http, log, peers, ring
|
May be used in the following contexts: tcp, http, log, peers, ring
|
||||||
|
|
||||||
Enables or disables ktls for those sockets. If enabled, kTLS will be used
|
Enables or disables ktls for those sockets. If enabled, kTLS will be used if
|
||||||
if the kernel supports it and the cipher is compatible.
|
the kernel supports it and the cipher is compatible. This is only available
|
||||||
This is only available on Linux.
|
on Linux 4.17 and above. Please note that some network drivers and/or TLS
|
||||||
|
stacks might restrict kTLS usage to TLS v1.2 only. See also "force-tlsv12".
|
||||||
|
|
||||||
log-bufsize <bufsize>
|
log-bufsize <bufsize>
|
||||||
May be used in the following contexts: log
|
May be used in the following contexts: log
|
||||||
|
|
@ -20552,6 +20661,7 @@ ip.ver binary integer
|
||||||
ipmask(mask4[,mask6]) address address
|
ipmask(mask4[,mask6]) address address
|
||||||
json([input-code]) string string
|
json([input-code]) string string
|
||||||
json_query(json_path[,output_type]) string _outtype_
|
json_query(json_path[,output_type]) string _outtype_
|
||||||
|
jwt_decrypt_jwk(<jwk>) string binary
|
||||||
jwt_decrypt_cert(<cert>) string binary
|
jwt_decrypt_cert(<cert>) string binary
|
||||||
jwt_decrypt_secret(<secret>) string binary
|
jwt_decrypt_secret(<secret>) string binary
|
||||||
jwt_header_query([json_path[,output_type]]) string string
|
jwt_header_query([json_path[,output_type]]) string string
|
||||||
|
|
@ -21196,9 +21306,9 @@ ip.fp([<mode>])
|
||||||
can be used to distinguish between multiple apparently identical hosts. The
|
can be used to distinguish between multiple apparently identical hosts. The
|
||||||
real-world use case is to refine the identification of misbehaving hosts
|
real-world use case is to refine the identification of misbehaving hosts
|
||||||
between a shared IP address to avoid blocking legitimate users when only one
|
between a shared IP address to avoid blocking legitimate users when only one
|
||||||
is misbehaving and needs to be blocked. The converter builds a 7-byte binary
|
is misbehaving and needs to be blocked. The converter builds an 8-byte minimum
|
||||||
block based on the input. The bytes of the fingerprint are arranged like
|
binary block based on the input. The bytes of the fingerprint are arranged
|
||||||
this:
|
like this:
|
||||||
- byte 0: IP TOS field (see ip.tos)
|
- byte 0: IP TOS field (see ip.tos)
|
||||||
- byte 1:
|
- byte 1:
|
||||||
- bit 7: IPv6 (1) / IPv4 (0)
|
- bit 7: IPv6 (1) / IPv4 (0)
|
||||||
|
|
@ -21213,10 +21323,13 @@ ip.fp([<mode>])
|
||||||
- bits 3..0: TCP window scaling + 1 (1..15) / 0 (no WS advertised)
|
- bits 3..0: TCP window scaling + 1 (1..15) / 0 (no WS advertised)
|
||||||
- byte 3..4: tcp.win
|
- byte 3..4: tcp.win
|
||||||
- byte 5..6: tcp.options.mss, or zero if absent
|
- byte 5..6: tcp.options.mss, or zero if absent
|
||||||
|
- byte 7: 1 bit per present TCP option, with options 2 to 8 being mapped to
|
||||||
|
bits 0..6 respectively, and bit 7 indicating the presence of any
|
||||||
|
option from 9 to 255.
|
||||||
|
|
||||||
The <mode> argument permits to append more information to the fingerprint. By
|
The <mode> argument permits to append more information to the fingerprint. By
|
||||||
default, when the <mode> argument is not set or is zero, the fingerprint is
|
default, when the <mode> argument is not set or is zero, the fingerprint is
|
||||||
solely made of the 7 bytes described above. If <mode> is specified as another
|
solely made of the 8 bytes described above. If <mode> is specified as another
|
||||||
value, it then corresponds to the sum of the following values, and the
|
value, it then corresponds to the sum of the following values, and the
|
||||||
respective components will be concatenated to the fingerprint, in the order
|
respective components will be concatenated to the fingerprint, in the order
|
||||||
below:
|
below:
|
||||||
|
|
@ -21226,7 +21339,7 @@ ip.fp([<mode>])
|
||||||
- 4: the source IP address is appended to the fingerprint, which adds
|
- 4: the source IP address is appended to the fingerprint, which adds
|
||||||
4 bytes for IPv4 and 16 for IPv6.
|
4 bytes for IPv4 and 16 for IPv6.
|
||||||
|
|
||||||
Example: make a 12..24 bytes fingerprint using the base FP, the TTL and the
|
Example: make a 13..25 bytes fingerprint using the base FP, the TTL and the
|
||||||
source address (1+4=5):
|
source address (1+4=5):
|
||||||
|
|
||||||
frontend test
|
frontend test
|
||||||
|
|
@ -21402,6 +21515,44 @@ jwt_decrypt_cert(<cert>)
|
||||||
http-request set-var(txn.bearer) http_auth_bearer
|
http-request set-var(txn.bearer) http_auth_bearer
|
||||||
http-request set-header X-Decrypted %[var(txn.bearer),jwt_decrypt_cert("/foo/bar.pem")]
|
http-request set-header X-Decrypted %[var(txn.bearer),jwt_decrypt_cert("/foo/bar.pem")]
|
||||||
|
|
||||||
|
jwt_decrypt_jwk(<jwk>)
|
||||||
|
Performs a signature validation of a JSON Web Token following the JSON Web
|
||||||
|
Encryption format (see RFC 7516) given in input and return its content
|
||||||
|
decrypted thanks to the provided JSON Web Key (RFC7517).
|
||||||
|
The <jwk> parameter must be a valid JWK of type 'oct' or 'RSA' ('kty' field
|
||||||
|
of the JSON key) that can be provided either as a string or via a variable.
|
||||||
|
|
||||||
|
The only tokens managed yet are the ones using the Compact Serialization
|
||||||
|
format (five dot-separated base64-url encoded strings).
|
||||||
|
|
||||||
|
This converter can be used to decode token that have a symmetric-type
|
||||||
|
algorithm ("alg" field of the JOSE header) among the following: A128KW,
|
||||||
|
A192KW, A256KW, A128GCMKW, A192GCMKW, A256GCMKW, dir. In this case, we expect
|
||||||
|
the provided JWK to be of the 'oct' type. Please note that the A128KW and
|
||||||
|
A192KW algorithms are not available on AWS-LC and decryption will not work.
|
||||||
|
This converter also manages tokens that have an algorithm ("alg" field of
|
||||||
|
the JOSE header) among the following: RSA1_5, RSA-OAEP or RSA-OAEP-256. In
|
||||||
|
such a case an 'RSA' type JWK representing a private key must be provided.
|
||||||
|
|
||||||
|
The JWE token must be provided base64url-encoded and the output will be
|
||||||
|
provided "raw". If an error happens during token parsing, signature
|
||||||
|
verification or content decryption, an empty string will be returned.
|
||||||
|
|
||||||
|
Because of the way quotes, commas and double quotes are treated in the
|
||||||
|
configuration, the contents of the JWK must be properly escaped for this
|
||||||
|
converter to work properly (see section 2.2 for more information).
|
||||||
|
|
||||||
|
Example:
|
||||||
|
# Get a JWT from the authorization header, put its decrypted content in an
|
||||||
|
# HTTP header
|
||||||
|
http-request set-var(txn.bearer) http_auth_bearer
|
||||||
|
http-request set-header X-Decrypted %[var(txn.bearer),jwt_decrypt_secret(\'{\"kty\":\"oct\",\"k\":\"wAsgsg\"}\')
|
||||||
|
|
||||||
|
# or via a variable
|
||||||
|
http-request set-var(txn.bearer) http_auth_bearer
|
||||||
|
http-request set-var(txn.jwk) str(\'{\"kty\":\"oct\",\"k\":\"Q-NFLlghQ\"}\')
|
||||||
|
http-request set-header X-Decrypted %[var(txn.bearer),jwt_decrypt_jwk(txn.jwk)
|
||||||
|
|
||||||
jwt_decrypt_secret(<secret>)
|
jwt_decrypt_secret(<secret>)
|
||||||
Performs a signature validation of a JSON Web Token following the JSON Web
|
Performs a signature validation of a JSON Web Token following the JSON Web
|
||||||
Encryption format (see RFC 7516) given in input and return its content
|
Encryption format (see RFC 7516) given in input and return its content
|
||||||
|
|
@ -23715,12 +23866,18 @@ bc_src_port integer
|
||||||
bc_srv_queue integer
|
bc_srv_queue integer
|
||||||
be_id integer
|
be_id integer
|
||||||
be_name string
|
be_name string
|
||||||
|
be_connect_timeout integer
|
||||||
|
be_queue_timeout integer
|
||||||
be_server_timeout integer
|
be_server_timeout integer
|
||||||
|
be_tarpit_timeout integer
|
||||||
be_tunnel_timeout integer
|
be_tunnel_timeout integer
|
||||||
bytes_in integer
|
bytes_in integer
|
||||||
bytes_out integer
|
bytes_out integer
|
||||||
|
cur_connect_timeout integer
|
||||||
cur_client_timeout integer
|
cur_client_timeout integer
|
||||||
|
cur_queue_timeout integer
|
||||||
cur_server_timeout integer
|
cur_server_timeout integer
|
||||||
|
cur_tarpit_timeout integer
|
||||||
cur_tunnel_timeout integer
|
cur_tunnel_timeout integer
|
||||||
dst ip
|
dst ip
|
||||||
dst_conn integer
|
dst_conn integer
|
||||||
|
|
@ -23754,6 +23911,7 @@ fc_src ip
|
||||||
fc_src_is_local boolean
|
fc_src_is_local boolean
|
||||||
fc_src_port integer
|
fc_src_port integer
|
||||||
fc_unacked integer
|
fc_unacked integer
|
||||||
|
fe_tarpit_timeout integer
|
||||||
fe_client_timeout integer
|
fe_client_timeout integer
|
||||||
fe_defbe string
|
fe_defbe string
|
||||||
fe_id integer
|
fe_id integer
|
||||||
|
|
@ -24048,6 +24206,11 @@ be_id : integer
|
||||||
used in a frontend and no backend was used, it returns the current
|
used in a frontend and no backend was used, it returns the current
|
||||||
frontend's id. It can also be used in a tcp-check or an http-check ruleset.
|
frontend's id. It can also be used in a tcp-check or an http-check ruleset.
|
||||||
|
|
||||||
|
be_connect_timeout : integer
|
||||||
|
Returns the configuration value in millisecond for the connect timeout of the
|
||||||
|
current backend. This timeout can be overwritten by a "set-timeout" rule. See
|
||||||
|
also the "cur_connect_timeout".
|
||||||
|
|
||||||
be_name : string
|
be_name : string
|
||||||
Returns a string containing the current backend's name. It can be used in
|
Returns a string containing the current backend's name. It can be used in
|
||||||
frontends with responses to check which backend processed the request. If
|
frontends with responses to check which backend processed the request. If
|
||||||
|
|
@ -24055,11 +24218,21 @@ be_name : string
|
||||||
frontend's name. It can also be used in a tcp-check or an http-check
|
frontend's name. It can also be used in a tcp-check or an http-check
|
||||||
ruleset.
|
ruleset.
|
||||||
|
|
||||||
|
be_queue_timeout : integer
|
||||||
|
Returns the configuration value in millisecond for the queue timeout of the
|
||||||
|
current backend. This timeout can be overwritten by a "set-timeout" rule. See
|
||||||
|
also the "cur_queue_timeout".
|
||||||
|
|
||||||
be_server_timeout : integer
|
be_server_timeout : integer
|
||||||
Returns the configuration value in millisecond for the server timeout of the
|
Returns the configuration value in millisecond for the server timeout of the
|
||||||
current backend. This timeout can be overwritten by a "set-timeout" rule. See
|
current backend. This timeout can be overwritten by a "set-timeout" rule. See
|
||||||
also the "cur_server_timeout".
|
also the "cur_server_timeout".
|
||||||
|
|
||||||
|
be_tarpit_timeout : integer
|
||||||
|
Returns the configuration value in millisecond for the tarpit timeout of the
|
||||||
|
current backend. This timeout can be overwritten by a "set-timeout" rule. See
|
||||||
|
also the "cur_tarpit_timeout".
|
||||||
|
|
||||||
be_tunnel_timeout : integer
|
be_tunnel_timeout : integer
|
||||||
Returns the configuration value in millisecond for the tunnel timeout of the
|
Returns the configuration value in millisecond for the tunnel timeout of the
|
||||||
current backend. This timeout can be overwritten by a "set-timeout" rule. See
|
current backend. This timeout can be overwritten by a "set-timeout" rule. See
|
||||||
|
|
@ -24071,16 +24244,32 @@ bytes_in : integer
|
||||||
bytes_out : integer
|
bytes_out : integer
|
||||||
See "res.bytes_in".
|
See "res.bytes_in".
|
||||||
|
|
||||||
|
cur_connect_timeout : integer
|
||||||
|
Returns the currently applied connect timeout in millisecond for the stream.
|
||||||
|
In the default case, this will be equal to be_connect_timeout unless a
|
||||||
|
"set-timeout" rule has been applied. See also "be_connect_timeout".
|
||||||
|
|
||||||
cur_client_timeout : integer
|
cur_client_timeout : integer
|
||||||
Returns the currently applied client timeout in millisecond for the stream.
|
Returns the currently applied client timeout in millisecond for the stream.
|
||||||
In the default case, this will be equal to fe_client_timeout unless a
|
In the default case, this will be equal to fe_client_timeout unless a
|
||||||
"set-timeout" rule has been applied. See also "fe_client_timeout".
|
"set-timeout" rule has been applied. See also "fe_client_timeout".
|
||||||
|
|
||||||
|
cur_queue_timeout : integer
|
||||||
|
Returns the currently applied queue timeout in millisecond for the stream.
|
||||||
|
In the default case, this will be equal to be_queue_timeout unless a
|
||||||
|
"set-timeout" rule has been applied. See also "be_queue_timeout".
|
||||||
|
|
||||||
cur_server_timeout : integer
|
cur_server_timeout : integer
|
||||||
Returns the currently applied server timeout in millisecond for the stream.
|
Returns the currently applied server timeout in millisecond for the stream.
|
||||||
In the default case, this will be equal to be_server_timeout unless a
|
In the default case, this will be equal to be_server_timeout unless a
|
||||||
"set-timeout" rule has been applied. See also "be_server_timeout".
|
"set-timeout" rule has been applied. See also "be_server_timeout".
|
||||||
|
|
||||||
|
cur_tarpit_timeout : integer
|
||||||
|
Returns the currently applied tarpit timeout in millisecond for the stream.
|
||||||
|
In the default case, this will be equal to fe_tarpit_timeout/be_tarpit_timeout
|
||||||
|
unless a "set-timeout" rule has been applied. See also "fe_tarpit_timeout"
|
||||||
|
and "be_tarpit_timeout".
|
||||||
|
|
||||||
cur_tunnel_timeout : integer
|
cur_tunnel_timeout : integer
|
||||||
Returns the currently applied tunnel timeout in millisecond for the stream.
|
Returns the currently applied tunnel timeout in millisecond for the stream.
|
||||||
In the default case, this will be equal to be_tunnel_timeout unless a
|
In the default case, this will be equal to be_tunnel_timeout unless a
|
||||||
|
|
@ -24465,6 +24654,10 @@ fe_name : string
|
||||||
backends to check from which frontend it was called, or to stick all users
|
backends to check from which frontend it was called, or to stick all users
|
||||||
coming via a same frontend to the same server.
|
coming via a same frontend to the same server.
|
||||||
|
|
||||||
|
fe_tarpit_timeout : integer
|
||||||
|
Returns the configuration value in millisecond for the tarpit timeout of the
|
||||||
|
current frontend. This timeout can be overwritten by a "set-timeout" rule.
|
||||||
|
|
||||||
req.bytes_in : integer
|
req.bytes_in : integer
|
||||||
This returns the number of bytes received from the client. The value
|
This returns the number of bytes received from the client. The value
|
||||||
corresponds to what was received by HAProxy, including some headers and some
|
corresponds to what was received by HAProxy, including some headers and some
|
||||||
|
|
@ -26723,9 +26916,10 @@ capture.req.uri : string
|
||||||
allocated.
|
allocated.
|
||||||
|
|
||||||
capture.req.ver : string
|
capture.req.ver : string
|
||||||
This extracts the request's HTTP version and returns either "HTTP/1.0" or
|
This extracts the request's HTTP version and returns it with the format
|
||||||
"HTTP/1.1". Unlike "req.ver", it can be used in both request, response, and
|
"HTTP/<major>.<minor>". It can be used in both request, response, and logs
|
||||||
logs because it relies on a persistent flag.
|
because it relies on a persistent information. If the request version is not
|
||||||
|
valid, this sample fetch fails.
|
||||||
|
|
||||||
capture.res.hdr(<idx>) : string
|
capture.res.hdr(<idx>) : string
|
||||||
This extracts the content of the header captured by the "capture response
|
This extracts the content of the header captured by the "capture response
|
||||||
|
|
@ -26734,9 +26928,10 @@ capture.res.hdr(<idx>) : string
|
||||||
See also: "capture response header"
|
See also: "capture response header"
|
||||||
|
|
||||||
capture.res.ver : string
|
capture.res.ver : string
|
||||||
This extracts the response's HTTP version and returns either "HTTP/1.0" or
|
This extracts the response's HTTP version and returns it with the format
|
||||||
"HTTP/1.1". Unlike "res.ver", it can be used in logs because it relies on a
|
"HTTP/<major>.<minor>". It can be used in logs because it relies on a
|
||||||
persistent flag.
|
persistent information. If the response version is not valid, this sample
|
||||||
|
fetch fails.
|
||||||
|
|
||||||
cookie([<name>]) : string (deprecated)
|
cookie([<name>]) : string (deprecated)
|
||||||
This extracts the last occurrence of the cookie name <name> on a "Cookie"
|
This extracts the last occurrence of the cookie name <name> on a "Cookie"
|
||||||
|
|
@ -27087,16 +27282,14 @@ req.timer.tq : integer
|
||||||
|
|
||||||
req.ver : string
|
req.ver : string
|
||||||
req_ver : string (deprecated)
|
req_ver : string (deprecated)
|
||||||
Returns the version string from the HTTP request, for example "1.1". This can
|
Returns the version string from the HTTP request, with the format
|
||||||
be useful for ACL. For logs use the "%HV" logformat alias. Some predefined
|
"<major>.<minor>". This can be useful for ACL. Some predefined ACL already
|
||||||
ACL already check for versions 1.0 and 1.1.
|
check for common versions. It can be used in both request, response, and
|
||||||
|
logs because it relies on a persistent information. If the request version is
|
||||||
|
not valid, this sample fetch fails.
|
||||||
|
|
||||||
Common values are "1.0", "1.1", "2.0" or "3.0".
|
Common values are "1.0", "1.1", "2.0" or "3.0".
|
||||||
|
|
||||||
In the case of http/2 and http/3, the value is not extracted from the HTTP
|
|
||||||
version in the request line but is determined by the negotiated protocol
|
|
||||||
version.
|
|
||||||
|
|
||||||
ACL derivatives :
|
ACL derivatives :
|
||||||
req.ver : exact string match
|
req.ver : exact string match
|
||||||
|
|
||||||
|
|
@ -27300,8 +27493,9 @@ res.timer.hdr : integer
|
||||||
|
|
||||||
res.ver : string
|
res.ver : string
|
||||||
resp_ver : string (deprecated)
|
resp_ver : string (deprecated)
|
||||||
Returns the version string from the HTTP response, for example "1.1". This
|
Returns the version string from the HTTP response, with the format
|
||||||
can be useful for logs, but is mostly there for ACL.
|
"<major>.<minor>". This can be useful for logs, but is mostly there for
|
||||||
|
ACL. If the response version is not valid, this sample fetch fails.
|
||||||
|
|
||||||
It may be used in tcp-check based expect rules.
|
It may be used in tcp-check based expect rules.
|
||||||
|
|
||||||
|
|
@ -28561,7 +28755,7 @@ Please refer to the table below for currently defined aliases :
|
||||||
| Others |
|
| Others |
|
||||||
+---+------+------------------------------------------------------+---------+
|
+---+------+------------------------------------------------------+---------+
|
||||||
| | %B | bytes_read (from server to client) | numeric |
|
| | %B | bytes_read (from server to client) | numeric |
|
||||||
| | | %[req.bytes_in] | |
|
| | | %[res.bytes_in] | |
|
||||||
+---+------+------------------------------------------------------+---------+
|
+---+------+------------------------------------------------------+---------+
|
||||||
| H | %CC | captured_request_cookie | string |
|
| H | %CC | captured_request_cookie | string |
|
||||||
+---+------+------------------------------------------------------+---------+
|
+---+------+------------------------------------------------------+---------+
|
||||||
|
|
@ -29681,7 +29875,7 @@ See also : "filter"
|
||||||
9.1. Trace
|
9.1. Trace
|
||||||
----------
|
----------
|
||||||
|
|
||||||
filter trace [name <name>] [random-forwarding] [hexdump]
|
filter trace [name <name>] [random-forwarding] [max-fwd <max>] [hexdump]
|
||||||
|
|
||||||
Arguments:
|
Arguments:
|
||||||
<name> is an arbitrary name that will be reported in
|
<name> is an arbitrary name that will be reported in
|
||||||
|
|
@ -29694,6 +29888,11 @@ filter trace [name <name>] [random-forwarding] [hexdump]
|
||||||
data. With this parameter, it only forwards a random
|
data. With this parameter, it only forwards a random
|
||||||
amount of the parsed data.
|
amount of the parsed data.
|
||||||
|
|
||||||
|
<max> is the maximum amount of data that can be forwarded at
|
||||||
|
a time. "max-fwd" option can be combined with the
|
||||||
|
random forwarding. <max> must be a positive integer.
|
||||||
|
0 means there is no limit.
|
||||||
|
|
||||||
<hexdump> dumps all forwarded data to the server and the client.
|
<hexdump> dumps all forwarded data to the server and the client.
|
||||||
|
|
||||||
This filter can be used as a base to develop new filters. It defines all
|
This filter can be used as a base to develop new filters. It defines all
|
||||||
|
|
@ -29709,7 +29908,21 @@ a server by adding some latencies in the processing.
|
||||||
9.2. HTTP compression
|
9.2. HTTP compression
|
||||||
---------------------
|
---------------------
|
||||||
|
|
||||||
filter compression
|
filter comp-req
|
||||||
|
|
||||||
|
Enables filter that explicitly tries to compress HTTP requests according to
|
||||||
|
"compression" settings. Implicitly sets "compression direction request".
|
||||||
|
|
||||||
|
filter comp-res
|
||||||
|
|
||||||
|
Enables filter that explicitly tries to compress HTTP responses according to
|
||||||
|
"compression" settings. Implicitly sets "compression direction response"
|
||||||
|
|
||||||
|
filter compression (deprecated)
|
||||||
|
|
||||||
|
Alias for backward compatibility purposes that is functionally equivalent to
|
||||||
|
enabling both "comp-req" and "comp-res" filter. "compression" keyword must be
|
||||||
|
used to configure appropriate behavior:
|
||||||
|
|
||||||
The HTTP compression has been moved in a filter in HAProxy 1.7. "compression"
|
The HTTP compression has been moved in a filter in HAProxy 1.7. "compression"
|
||||||
keyword must still be used to enable and configure the HTTP compression. And
|
keyword must still be used to enable and configure the HTTP compression. And
|
||||||
|
|
@ -31684,8 +31897,8 @@ ocsp-update [ off | on ]
|
||||||
|
|
||||||
jwt [ off | on ]
|
jwt [ off | on ]
|
||||||
Allow for this certificate to be used for JWT validation or decryption via
|
Allow for this certificate to be used for JWT validation or decryption via
|
||||||
the "jwt_verify_cert" or "jwt_decrypt_cert" converters when set to 'on'. Its
|
the "jwt_verify_cert", "jwt_decrypt_cert" or "jwt_decrypt" converters when
|
||||||
value defaults to 'off'.
|
set to 'on'. Its value defaults to 'off'.
|
||||||
|
|
||||||
When set to 'on' for a given certificate, the CLI command "del ssl cert" will
|
When set to 'on' for a given certificate, the CLI command "del ssl cert" will
|
||||||
not work. In order to be deleted, a certificate must not be used, either for
|
not work. In order to be deleted, a certificate must not be used, either for
|
||||||
|
|
@ -31694,6 +31907,30 @@ jwt [ off | on ]
|
||||||
This option can be changed during runtime via the "add ssl jwt" and "del ssl
|
This option can be changed during runtime via the "add ssl jwt" and "del ssl
|
||||||
jwt" CLI commands. See also "show ssl jwt" CLI command.
|
jwt" CLI commands. See also "show ssl jwt" CLI command.
|
||||||
|
|
||||||
|
generate-dummy [ off | on ]
|
||||||
|
Allow the generation of a private key and its self-signed certificate at
|
||||||
|
parsing time when set to 'on'. This may be useful if one does not have a
|
||||||
|
certificate at disposal during testing phase for instance. In this case,
|
||||||
|
"keytype", "bits" and "curves" may be used to customize the private key. When
|
||||||
|
not used, the default value is 'off'. (also see "keytype", "bits" and
|
||||||
|
"curves").
|
||||||
|
|
||||||
|
keytype [ RSA | ECDSA ]
|
||||||
|
Allow the selection of the private key type used to generate at parsing time
|
||||||
|
a self-signed certificate. This is the case if "generate-dummy" is set to 'on'
|
||||||
|
for this certificate. When not used, the default is 'RSA'.
|
||||||
|
(also see "generate-dummy").
|
||||||
|
|
||||||
|
bits <number>
|
||||||
|
Configure the number of bits to generate an RSA self-signed certificate when
|
||||||
|
"generate-dummy" is set to 'on' for this self-signed certificate and "keytype"
|
||||||
|
is set to 'RSA'. When not used, the default is 2048. (also see
|
||||||
|
"generate-dummy").
|
||||||
|
|
||||||
|
curves <string>
|
||||||
|
Configure the curves when "generate-dummy" is set to 'on' and "keytype" is
|
||||||
|
set to 'ECDSA' for this self-signed certificate. The default is 'P-384'.
|
||||||
|
|
||||||
12.8. ACME
|
12.8. ACME
|
||||||
----------
|
----------
|
||||||
|
|
||||||
|
|
@ -31706,6 +31943,9 @@ The ACME section allows to configure HAProxy as an ACMEv2 client. This feature
|
||||||
is experimental meaning that "expose-experimental-directives" must be in the
|
is experimental meaning that "expose-experimental-directives" must be in the
|
||||||
global section so this can be used.
|
global section so this can be used.
|
||||||
|
|
||||||
|
A guide is available on the HAProxy wiki
|
||||||
|
https://github.com/haproxy/wiki/wiki/ACME:--native-haproxy
|
||||||
|
|
||||||
Current limitations as of 3.3:
|
Current limitations as of 3.3:
|
||||||
- The feature is limited to the HTTP-01 or DNS-01 challenges for now. HTTP-01
|
- The feature is limited to the HTTP-01 or DNS-01 challenges for now. HTTP-01
|
||||||
is completely handled by HAProxy, but DNS-01 needs either the dataplaneAPI or
|
is completely handled by HAProxy, but DNS-01 needs either the dataplaneAPI or
|
||||||
|
|
|
||||||
140
doc/haterm.txt
Normal file
140
doc/haterm.txt
Normal file
|
|
@ -0,0 +1,140 @@
|
||||||
|
------
|
||||||
|
HATerm
|
||||||
|
------
|
||||||
|
HAProxy's dummy HTTP
|
||||||
|
server for benchmarks
|
||||||
|
|
||||||
|
1. Background
|
||||||
|
-------------
|
||||||
|
|
||||||
|
HATerm is a dummy HTTP server that leverages the flexible and scalable
|
||||||
|
architecture of HAProxy to ease benchmarking of HTTP agents in all versions of
|
||||||
|
HTTP currently supported by HAProxy (HTTP/1, HTTP/2, HTTP/3), and both in clear
|
||||||
|
and TLS / QUIC. It follows the same principle as its ancestor HTTPTerm [1],
|
||||||
|
consisting in producing HTTP responses entirely configured by the request
|
||||||
|
parameters (size, response time, status etc). It also preserves the spirit
|
||||||
|
of HTTPTerm which does not require any configuration beyond an optional listening
|
||||||
|
address and a port number, though it also supports advanced configurations with
|
||||||
|
the full spectrum of HAProxy features for specific testing. The goal remains
|
||||||
|
to make it almost as fast as the original HTTPTerm so that it can become a
|
||||||
|
de-facto replacement, with a compatible command line and request parameters
|
||||||
|
that will not change users' habits.
|
||||||
|
|
||||||
|
[1] https://github.com/wtarreau/httpterm
|
||||||
|
|
||||||
|
|
||||||
|
2. Compilation
|
||||||
|
--------------
|
||||||
|
|
||||||
|
HATerm may be compiled in the same way as HAProxy but with "haterm" as Makefile
|
||||||
|
target to provide on the "make" command line as follows:
|
||||||
|
|
||||||
|
$ make -j $(nproc) TARGET=linux-glibc haterm
|
||||||
|
|
||||||
|
HATerm supports HTTPS/SSL/TCP:
|
||||||
|
|
||||||
|
$ make TARGET=linux-glibc USE_OPENSSL=1
|
||||||
|
|
||||||
|
It also supports QUIC:
|
||||||
|
|
||||||
|
$ make -j $(nproc) TARGET=linux-glibc USE_OPENSSL=1 USE_QUIC=1 haterm
|
||||||
|
|
||||||
|
Technically speaking, it uses the regular HAProxy source and object code with a
|
||||||
|
different command line parser. As such, all build options supported by HAProxy
|
||||||
|
also apply to HATerm. See INSTALL for more details about how to compile them.
|
||||||
|
|
||||||
|
|
||||||
|
3. Execution
|
||||||
|
------------
|
||||||
|
|
||||||
|
HATerm is a very easy to use HTTP server with support for all the HTTP
|
||||||
|
versions. It displays its usage when run without argument or wrong arguments:
|
||||||
|
|
||||||
|
$ ./haterm
|
||||||
|
Usage : haterm -L [<ip>]:<clear port>[:<TCP&QUIC SSL port>] [-L...]* [opts]
|
||||||
|
where <opts> may be any combination of:
|
||||||
|
-G <line> : multiple option; append <line> to the "global" section
|
||||||
|
-F <line> : multiple option; append <line> to the "frontend" section
|
||||||
|
-T <line> : multiple option; append <line> to the "traces" section
|
||||||
|
-C : dump the configuration and exit
|
||||||
|
-D : goes daemon
|
||||||
|
-b <keysize> : RSA key size in bits (ex: "2048", "4096"...)
|
||||||
|
-c <curves> : ECSDA curves (ex: "P-256", "P-384"...)
|
||||||
|
-v : shows version
|
||||||
|
-d : enable the traces for all http protocols
|
||||||
|
--quic-bind-opts <opts> : append options to QUIC "bind" lines
|
||||||
|
--tcp-bind-opts <opts> : append options to TCP "bind" lines
|
||||||
|
|
||||||
|
|
||||||
|
Arguments -G, -F, -T permit to append one or multiple lines at the end of their
|
||||||
|
respective sections. A tab character ('\t') is prepended at the beginning of
|
||||||
|
the argument, and a line feed ('\n') is appended at the end. It is also
|
||||||
|
possible to insert multiple lines at once using escape sequences '\n' and '\t'
|
||||||
|
inside the string argument.
|
||||||
|
|
||||||
|
As HAProxy, HATerm may listen on several TCP/UDP addresses which can be
|
||||||
|
provided by multiple "-L" options. To be functional, it needs at least one
|
||||||
|
correct "-L" option to be set.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
$ ./haterm -L 127.0.0.1:8888 # listen on 127.0.0.1:8888 TCP address
|
||||||
|
|
||||||
|
$ ./haterm -L 127.0.0.1:8888:8889 # listen on 127.0.0.1:8888 TCP address,
|
||||||
|
# 127.0.0.1:8889 SSL/TCP address,
|
||||||
|
# and 127.0.0.1:8889 QUIC/UDP address
|
||||||
|
|
||||||
|
$ ./haterm -L 127.0.0.1:8888:8889 -L [::1]:8888:8889
|
||||||
|
|
||||||
|
With USE_QUIC_OPENSSL_COMPAT support, the user must configure a global
|
||||||
|
section as for HAProxy. HATerm sets internally its configuration in.
|
||||||
|
memory as this is done by HAProxy from configuration files:
|
||||||
|
|
||||||
|
$ ./haterm -L 127.0.0.1:8888:8889
|
||||||
|
[NOTICE] (1371578) : haproxy version is 3.4-dev4-ba5eab-28
|
||||||
|
[NOTICE] (1371578) : path to executable is ./haterm
|
||||||
|
[ALERT] (1371578) : Binding [haterm cfgfile:12] for frontend
|
||||||
|
___haterm_frontend___: this SSL library does not
|
||||||
|
support the QUIC protocol. A limited compatibility
|
||||||
|
layer may be enabled using the "limited-quic" global
|
||||||
|
option if desired.
|
||||||
|
|
||||||
|
Such an alert may be fixed with "-G' option:
|
||||||
|
|
||||||
|
$ ./haterm -L 127.0.0.1:8888:8889 -G "limited-quic"
|
||||||
|
|
||||||
|
|
||||||
|
When the SSL support is not compiled in, the second port is ignored. This is
|
||||||
|
also the case for the QUIC support.
|
||||||
|
|
||||||
|
HATerm adjusts its responses depending on the requests it receives. An empty
|
||||||
|
query string provides the information about how the URIs are understood by
|
||||||
|
HATerm:
|
||||||
|
|
||||||
|
$ curl http://127.0.0.1:8888/?
|
||||||
|
HAProxy's dummy HTTP server for benchmarks - version 3.4-dev4.
|
||||||
|
All integer argument values are in the form [digits]*[kmgr] (r=random(0..1))
|
||||||
|
The following arguments are supported to override the default objects :
|
||||||
|
- /?s=<size> return <size> bytes.
|
||||||
|
E.g. /?s=20k
|
||||||
|
- /?r=<retcode> present <retcode> as the HTTP return code.
|
||||||
|
E.g. /?r=404
|
||||||
|
- /?c=<cache> set the return as not cacheable if <1.
|
||||||
|
E.g. /?c=0
|
||||||
|
- /?A=<req-after> drain the request body after sending the response.
|
||||||
|
E.g. /?A=1
|
||||||
|
- /?C=<close> force the response to use close if >0.
|
||||||
|
E.g. /?C=1
|
||||||
|
- /?K=<keep-alive> force the response to use keep-alive if >0.
|
||||||
|
E.g. /?K=1
|
||||||
|
- /?t=<time> wait <time> milliseconds before responding.
|
||||||
|
E.g. /?t=500
|
||||||
|
- /?k=<enable> Enable transfer encoding chunked with only one chunk
|
||||||
|
if >0.
|
||||||
|
- /?R=<enable> Enable sending random data if >0.
|
||||||
|
|
||||||
|
Note that those arguments may be cumulated on one line separated by a set of
|
||||||
|
delimiters among [&?,;/] :
|
||||||
|
- GET /?s=20k&c=1&t=700&K=30r HTTP/1.0
|
||||||
|
- GET /?r=500?s=0?c=0?t=1000 HTTP/1.0
|
||||||
|
|
||||||
|
|
@ -11,7 +11,7 @@ default init, this was controversial but fedora and archlinux already uses it.
|
||||||
At this time HAProxy still had a multi-process model, and the way haproxy is
|
At this time HAProxy still had a multi-process model, and the way haproxy is
|
||||||
working was incompatible with the daemon mode.
|
working was incompatible with the daemon mode.
|
||||||
|
|
||||||
Systemd is compatible with traditionnal forking services, but somehow HAProxy
|
Systemd is compatible with traditional forking services, but somehow HAProxy
|
||||||
is different. To work correctly, systemd needs a main PID, this is the PID of
|
is different. To work correctly, systemd needs a main PID, this is the PID of
|
||||||
the process that systemd will supervises.
|
the process that systemd will supervises.
|
||||||
|
|
||||||
|
|
@ -45,7 +45,7 @@ However the wrapper suffered from several problems:
|
||||||
|
|
||||||
### mworker V1
|
### mworker V1
|
||||||
|
|
||||||
HAProxy 1.8 got ride of the wrapper which was replaced by the master worker
|
HAProxy 1.8 got rid of the wrapper which was replaced by the master worker
|
||||||
mode. This first version was basically a reintegration of the wrapper features
|
mode. This first version was basically a reintegration of the wrapper features
|
||||||
within HAProxy. HAProxy is launched with the -W flag, read the configuration and
|
within HAProxy. HAProxy is launched with the -W flag, read the configuration and
|
||||||
then fork. In mworker mode, the master is usually launched as a root process,
|
then fork. In mworker mode, the master is usually launched as a root process,
|
||||||
|
|
@ -86,7 +86,7 @@ retrieved automatically.
|
||||||
The master is supervising the workers, when a current worker (not a previous one
|
The master is supervising the workers, when a current worker (not a previous one
|
||||||
from before the reload) is exiting without being asked for a reload, the master
|
from before the reload) is exiting without being asked for a reload, the master
|
||||||
will emit an "exit-on-failure" error and will kill every workers with a SIGTERM
|
will emit an "exit-on-failure" error and will kill every workers with a SIGTERM
|
||||||
and exits with the same error code than the failed master, this behavior can be
|
and exits with the same error code than the failed worker, this behavior can be
|
||||||
changed by using the "no exit-on-failure" option in the global section.
|
changed by using the "no exit-on-failure" option in the global section.
|
||||||
|
|
||||||
While the master is supervising the workers using the wait() function, the
|
While the master is supervising the workers using the wait() function, the
|
||||||
|
|
@ -186,8 +186,8 @@ number that can be found in HAPROXY_PROCESSES. With this change the stats socket
|
||||||
in the configuration is less useful and everything can be done from the master
|
in the configuration is less useful and everything can be done from the master
|
||||||
CLI.
|
CLI.
|
||||||
|
|
||||||
With 2.7, the reload mecanism of the master CLI evolved, with previous versions,
|
With 2.7, the reload mechanism of the master CLI evolved, with previous versions,
|
||||||
this mecanism was asynchronous, so once the `reload` command was received, the
|
this mechanism was asynchronous, so once the `reload` command was received, the
|
||||||
master would reload, the active master CLI connection was closed, and there was
|
master would reload, the active master CLI connection was closed, and there was
|
||||||
no way to return a status as a response to the `reload` command. To achieve a
|
no way to return a status as a response to the `reload` command. To achieve a
|
||||||
synchronous reload, a dedicated sockpair is used, one side uses a master CLI
|
synchronous reload, a dedicated sockpair is used, one side uses a master CLI
|
||||||
|
|
@ -208,3 +208,38 @@ starts with -st to achieve a hard stop on the previous worker.
|
||||||
Version 3.0 got rid of the libsystemd dependencies for sd_notify() after the
|
Version 3.0 got rid of the libsystemd dependencies for sd_notify() after the
|
||||||
events of xz/openssh, the function is now implemented directly in haproxy in
|
events of xz/openssh, the function is now implemented directly in haproxy in
|
||||||
src/systemd.c.
|
src/systemd.c.
|
||||||
|
|
||||||
|
### mworker V3
|
||||||
|
|
||||||
|
This version was implemented with HAProxy 3.1, the goal was to stop parsing and
|
||||||
|
applying the configuration in the master process.
|
||||||
|
|
||||||
|
One of the caveats of the previous implementation was that the parser could take
|
||||||
|
a lot of time, and the master process would be stuck in the parser instead of
|
||||||
|
handling its polling loop, signals etc. Some parts of the configuration parsing
|
||||||
|
could also be less reliable with third-party code (EXTRA_OBJS), it could, for
|
||||||
|
example, allow opening FDs and not closing them before the reload which
|
||||||
|
would crash the master after a few reloads.
|
||||||
|
|
||||||
|
The startup of the master-worker was reorganized this way:
|
||||||
|
|
||||||
|
- the "discovery" mode, which is a lighter configuration parsing step, only
|
||||||
|
applies the configuration which needs to be effective for the master process.
|
||||||
|
For example, "master-worker", "mworker-max-reloads" and less than 20 other
|
||||||
|
keywords that are identified by KWF_DISCOVERY in the code. It is really fast
|
||||||
|
as it doesn't need all the configuration to be applied in the master process.
|
||||||
|
|
||||||
|
- the master will then fork a worker, with a PROC_O_INIT flag. This worker has
|
||||||
|
a temporary sockpair connected to the master CLI. Once the worker is forked,
|
||||||
|
the master initializes its configuration and starts its polling loop.
|
||||||
|
|
||||||
|
- The newly forked worker will try to parse the configuration, which could
|
||||||
|
result in a failure (exit 1), or any bad error code. In case of success, the
|
||||||
|
worker will send a "READY" message to the master CLI then close this FD. At
|
||||||
|
this step everything was initialized and the worker can enter its polling
|
||||||
|
loop.
|
||||||
|
|
||||||
|
- The master then waits for the worker, it could:
|
||||||
|
* receive the READY message over the mCLI, resulting in a successful loading
|
||||||
|
of haproxy
|
||||||
|
* receive a SIGCHLD, meaning the worker exited and couldn't load
|
||||||
|
|
|
||||||
|
|
@ -1725,6 +1725,30 @@ add acl [@<ver>] <acl> <pattern>
|
||||||
This command cannot be used if the reference <acl> is a name also used with
|
This command cannot be used if the reference <acl> is a name also used with
|
||||||
a map. In this case, the "add map" command must be used instead.
|
a map. In this case, the "add map" command must be used instead.
|
||||||
|
|
||||||
|
add backend <name> from <defproxy> [mode <mode>] [guid <guid>] [ EXPERIMENTAL ]
|
||||||
|
Instantiate a new backend proxy with the name <name>.
|
||||||
|
|
||||||
|
Only TCP or HTTP proxies can be created. All of the settings are inherited
|
||||||
|
from <defproxy> default proxy instance. By default, it is mandatory to
|
||||||
|
specify the backend mode via the argument of the same name, unless <defproxy>
|
||||||
|
already defines it explicitly. It is also possible to use an optional GUID
|
||||||
|
argument if wanted.
|
||||||
|
|
||||||
|
Servers can be added via the command "add server". The backend is initialized
|
||||||
|
in the unpublished state. Once considered ready for traffic, use "publish
|
||||||
|
backend" to expose the newly created instance.
|
||||||
|
|
||||||
|
All named default proxies can be used, given that they validate the same
|
||||||
|
inheritance rules applied during configuration parsing. There is some
|
||||||
|
exceptions though, for example when the mode is neither TCP nor HTTP. Another
|
||||||
|
exception is that it is not yet possible to use a default proxy which
|
||||||
|
reference custom HTTP errors, for example via the errorfiles or http-rules
|
||||||
|
keywords.
|
||||||
|
|
||||||
|
This command is restricted and can only be issued on sockets configured for
|
||||||
|
level "admin". Moreover, this feature is still considered in development so it
|
||||||
|
also requires experimental mode (see "experimental-mode on").
|
||||||
|
|
||||||
add map [@<ver>] <map> <key> <value>
|
add map [@<ver>] <map> <key> <value>
|
||||||
add map [@<ver>] <map> <payload>
|
add map [@<ver>] <map> <payload>
|
||||||
Add an entry into the map <map> to associate the value <value> to the key
|
Add an entry into the map <map> to associate the value <value> to the key
|
||||||
|
|
@ -2100,6 +2124,30 @@ del acl <acl> [<key>|#<ref>]
|
||||||
listing the content of the acl. Note that if the reference <acl> is a name and
|
listing the content of the acl. Note that if the reference <acl> is a name and
|
||||||
is shared with a map, the entry will be also deleted in the map.
|
is shared with a map, the entry will be also deleted in the map.
|
||||||
|
|
||||||
|
del backend <name>
|
||||||
|
Removes the backend proxy with the name <name>.
|
||||||
|
|
||||||
|
This operation is only possible for TCP or HTTP proxies. To succeed, the
|
||||||
|
backend instance must have been first unpublished. Also, all of its servers
|
||||||
|
must first be removed (via "del server" CLI). Finally, no stream must still
|
||||||
|
be attached to the backend instance.
|
||||||
|
|
||||||
|
There is additional restrictions which prevent backend removal. First, a
|
||||||
|
backend cannot be removed if it is explicitely referenced by config elements,
|
||||||
|
for example via a use_backend rule or in sample expressions. Some proxies
|
||||||
|
options are also incompatible with runtime deletion. Currently, this is the
|
||||||
|
case when deprecated dispatch or option transparent are used. Also, a backend
|
||||||
|
cannot be removed if there is a stick-table declared in it. Finally, it is
|
||||||
|
impossible for now to remove a backend if QUIC servers were present in it.
|
||||||
|
|
||||||
|
It can be useful to use "wait be-removable" prior to this command to check
|
||||||
|
for the aformentioned requisites. This also provides a methode to wait for
|
||||||
|
the final closure of the streams attached to the target backend.
|
||||||
|
|
||||||
|
This command is restricted and can only be issued on sockets configured for
|
||||||
|
level "admin". Moreover, this feature is still considered in development so it
|
||||||
|
also requires experimental mode (see "experimental-mode on").
|
||||||
|
|
||||||
del map <map> [<key>|#<ref>]
|
del map <map> [<key>|#<ref>]
|
||||||
Delete all the map entries from the map <map> corresponding to the key <key>.
|
Delete all the map entries from the map <map> corresponding to the key <key>.
|
||||||
<map> is the #<id> or the <name> returned by "show map". If the <ref> is used,
|
<map> is the #<id> or the <name> returned by "show map". If the <ref> is used,
|
||||||
|
|
@ -2534,7 +2582,8 @@ set maxconn global <maxconn>
|
||||||
delayed until the threshold is reached. A value of zero restores the initial
|
delayed until the threshold is reached. A value of zero restores the initial
|
||||||
setting.
|
setting.
|
||||||
|
|
||||||
set profiling { tasks | memory } { auto | on | off }
|
set profiling memory { on | off }
|
||||||
|
set profiling tasks { auto | on | off | lock | no-lock | memory | no-memory }
|
||||||
Enables or disables CPU or memory profiling for the indicated subsystem. This
|
Enables or disables CPU or memory profiling for the indicated subsystem. This
|
||||||
is equivalent to setting or clearing the "profiling" settings in the "global"
|
is equivalent to setting or clearing the "profiling" settings in the "global"
|
||||||
section of the configuration file. Please also see "show profiling". Note
|
section of the configuration file. Please also see "show profiling". Note
|
||||||
|
|
@ -2544,6 +2593,13 @@ set profiling { tasks | memory } { auto | on | off }
|
||||||
on the linux-glibc target), and requires USE_MEMORY_PROFILING to be set at
|
on the linux-glibc target), and requires USE_MEMORY_PROFILING to be set at
|
||||||
compile time.
|
compile time.
|
||||||
|
|
||||||
|
. For tasks profiling, it is possible to enable or disable the collection of
|
||||||
|
per-task lock and memory timings at runtime, but the change is only taken
|
||||||
|
into account next time the profiler switches from off/auto to on (either
|
||||||
|
automatically or manually). Thus when using "no-lock" to disable per-task
|
||||||
|
lock profiling and save CPU cycles, it is recommended to flip the task
|
||||||
|
profiling off then on to commit the change.
|
||||||
|
|
||||||
set rate-limit connections global <value>
|
set rate-limit connections global <value>
|
||||||
Change the process-wide connection rate limit, which is set by the global
|
Change the process-wide connection rate limit, which is set by the global
|
||||||
'maxconnrate' setting. A value of zero disables the limitation. This limit
|
'maxconnrate' setting. A value of zero disables the limitation. This limit
|
||||||
|
|
@ -4494,6 +4550,13 @@ wait { -h | <delay> } [<condition> [<args>...]]
|
||||||
specified condition to be satisfied, to unrecoverably fail, or to remain
|
specified condition to be satisfied, to unrecoverably fail, or to remain
|
||||||
unsatisfied for the whole <delay> duration. The supported conditions are:
|
unsatisfied for the whole <delay> duration. The supported conditions are:
|
||||||
|
|
||||||
|
- be-removable <proxy> : this will wait for the specified proxy backend to be
|
||||||
|
removable by the "del backend" command. Some conditions will never be
|
||||||
|
accepted (e.g. backend not yet unpublished or with servers in it) and will
|
||||||
|
cause the report of a specific error message indicating what condition is
|
||||||
|
not met. If everything is OK before the delay, a success is returned and
|
||||||
|
the operation is terminated.
|
||||||
|
|
||||||
- srv-removable <proxy>/<server> : this will wait for the specified server to
|
- srv-removable <proxy>/<server> : this will wait for the specified server to
|
||||||
be removable by the "del server" command, i.e. be in maintenance and no
|
be removable by the "del server" command, i.e. be in maintenance and no
|
||||||
longer have any connection on it (neither active or idle). Some conditions
|
longer have any connection on it (neither active or idle). Some conditions
|
||||||
|
|
|
||||||
|
|
@ -627,7 +627,10 @@ For the type PP2_TYPE_SSL, the value is itself a defined like this :
|
||||||
uint8_t client;
|
uint8_t client;
|
||||||
uint32_t verify;
|
uint32_t verify;
|
||||||
struct pp2_tlv sub_tlv[0];
|
struct pp2_tlv sub_tlv[0];
|
||||||
};
|
} __attribute__((packed));
|
||||||
|
|
||||||
|
Note the "packed" attribute which indicates that each field starts immediately
|
||||||
|
after the previous one (i.e. without type-specific alignment nor padding).
|
||||||
|
|
||||||
The <verify> field will be zero if the client presented a certificate
|
The <verify> field will be zero if the client presented a certificate
|
||||||
and it was successfully verified, and non-zero otherwise.
|
and it was successfully verified, and non-zero otherwise.
|
||||||
|
|
|
||||||
|
|
@ -24,7 +24,7 @@ vtest installation
|
||||||
------------------------
|
------------------------
|
||||||
|
|
||||||
To use vtest you will have to download and compile the recent vtest
|
To use vtest you will have to download and compile the recent vtest
|
||||||
sources found at https://github.com/vtest/VTest.
|
sources found at https://github.com/vtest/VTest2.
|
||||||
|
|
||||||
To compile vtest:
|
To compile vtest:
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -102,7 +102,10 @@ enum act_name {
|
||||||
|
|
||||||
/* Timeout name valid for a set-timeout rule */
|
/* Timeout name valid for a set-timeout rule */
|
||||||
enum act_timeout_name {
|
enum act_timeout_name {
|
||||||
|
ACT_TIMEOUT_CONNECT,
|
||||||
ACT_TIMEOUT_SERVER,
|
ACT_TIMEOUT_SERVER,
|
||||||
|
ACT_TIMEOUT_QUEUE,
|
||||||
|
ACT_TIMEOUT_TARPIT,
|
||||||
ACT_TIMEOUT_TUNNEL,
|
ACT_TIMEOUT_TUNNEL,
|
||||||
ACT_TIMEOUT_CLIENT,
|
ACT_TIMEOUT_CLIENT,
|
||||||
};
|
};
|
||||||
|
|
|
||||||
|
|
@ -33,6 +33,8 @@
|
||||||
#define HA_PROF_TASKS_MASK 0x00000003 /* per-task CPU profiling mask */
|
#define HA_PROF_TASKS_MASK 0x00000003 /* per-task CPU profiling mask */
|
||||||
|
|
||||||
#define HA_PROF_MEMORY 0x00000004 /* memory profiling */
|
#define HA_PROF_MEMORY 0x00000004 /* memory profiling */
|
||||||
|
#define HA_PROF_TASKS_MEM 0x00000008 /* per-task CPU profiling with memory */
|
||||||
|
#define HA_PROF_TASKS_LOCK 0x00000010 /* per-task CPU profiling with locks */
|
||||||
|
|
||||||
|
|
||||||
#ifdef USE_MEMORY_PROFILING
|
#ifdef USE_MEMORY_PROFILING
|
||||||
|
|
|
||||||
|
|
@ -192,6 +192,7 @@ struct lbprm {
|
||||||
void (*server_requeue)(struct server *); /* function used to place the server where it must be */
|
void (*server_requeue)(struct server *); /* function used to place the server where it must be */
|
||||||
void (*proxy_deinit)(struct proxy *); /* to be called when we're destroying the proxy */
|
void (*proxy_deinit)(struct proxy *); /* to be called when we're destroying the proxy */
|
||||||
void (*server_deinit)(struct server *); /* to be called when we're destroying the server */
|
void (*server_deinit)(struct server *); /* to be called when we're destroying the server */
|
||||||
|
int (*server_init)(struct server *); /* initialize a freshly added server (runtime); <0=fail. */
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* _HAPROXY_BACKEND_T_H */
|
#endif /* _HAPROXY_BACKEND_T_H */
|
||||||
|
|
|
||||||
|
|
@ -69,6 +69,7 @@ int backend_parse_balance(const char **args, char **err, struct proxy *curproxy)
|
||||||
int tcp_persist_rdp_cookie(struct stream *s, struct channel *req, int an_bit);
|
int tcp_persist_rdp_cookie(struct stream *s, struct channel *req, int an_bit);
|
||||||
|
|
||||||
int be_downtime(struct proxy *px);
|
int be_downtime(struct proxy *px);
|
||||||
|
int be_supports_dynamic_srv(struct proxy *px, char **msg);
|
||||||
void recount_servers(struct proxy *px);
|
void recount_servers(struct proxy *px);
|
||||||
void update_backend_weight(struct proxy *px);
|
void update_backend_weight(struct proxy *px);
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -24,6 +24,7 @@
|
||||||
|
|
||||||
#include <haproxy/api-t.h>
|
#include <haproxy/api-t.h>
|
||||||
#include <haproxy/buf-t.h>
|
#include <haproxy/buf-t.h>
|
||||||
|
#include <haproxy/filters-t.h>
|
||||||
#include <haproxy/show_flags-t.h>
|
#include <haproxy/show_flags-t.h>
|
||||||
|
|
||||||
/* The CF_* macros designate Channel Flags, which may be ORed in the bit field
|
/* The CF_* macros designate Channel Flags, which may be ORed in the bit field
|
||||||
|
|
@ -205,6 +206,7 @@ struct channel {
|
||||||
unsigned char xfer_large; /* number of consecutive large xfers */
|
unsigned char xfer_large; /* number of consecutive large xfers */
|
||||||
unsigned char xfer_small; /* number of consecutive small xfers */
|
unsigned char xfer_small; /* number of consecutive small xfers */
|
||||||
int analyse_exp; /* expiration date for current analysers (if set) */
|
int analyse_exp; /* expiration date for current analysers (if set) */
|
||||||
|
struct chn_flt flt; /* current state of filters active on this channel */
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -376,6 +376,7 @@ static inline void channel_add_input(struct channel *chn, unsigned int len)
|
||||||
c_adv(chn, fwd);
|
c_adv(chn, fwd);
|
||||||
}
|
}
|
||||||
/* notify that some data was read */
|
/* notify that some data was read */
|
||||||
|
chn_prod(chn)->bytes_in += len;
|
||||||
chn->flags |= CF_READ_EVENT;
|
chn->flags |= CF_READ_EVENT;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -787,8 +788,12 @@ static inline int channel_recv_max(const struct channel *chn)
|
||||||
*/
|
*/
|
||||||
static inline size_t channel_data_limit(const struct channel *chn)
|
static inline size_t channel_data_limit(const struct channel *chn)
|
||||||
{
|
{
|
||||||
size_t max = (global.tune.bufsize - global.tune.maxrewrite);
|
|
||||||
|
|
||||||
|
size_t max;
|
||||||
|
|
||||||
|
if (!c_size(chn))
|
||||||
|
return 0;
|
||||||
|
max = (c_size(chn) - global.tune.maxrewrite);
|
||||||
if (IS_HTX_STRM(chn_strm(chn)))
|
if (IS_HTX_STRM(chn_strm(chn)))
|
||||||
max -= HTX_BUF_OVERHEAD;
|
max -= HTX_BUF_OVERHEAD;
|
||||||
return max;
|
return max;
|
||||||
|
|
|
||||||
|
|
@ -32,6 +32,7 @@
|
||||||
|
|
||||||
|
|
||||||
extern struct pool_head *pool_head_trash;
|
extern struct pool_head *pool_head_trash;
|
||||||
|
extern struct pool_head *pool_head_large_trash;
|
||||||
|
|
||||||
/* function prototypes */
|
/* function prototypes */
|
||||||
|
|
||||||
|
|
@ -46,6 +47,9 @@ int chunk_asciiencode(struct buffer *dst, struct buffer *src, char qc);
|
||||||
int chunk_strcmp(const struct buffer *chk, const char *str);
|
int chunk_strcmp(const struct buffer *chk, const char *str);
|
||||||
int chunk_strcasecmp(const struct buffer *chk, const char *str);
|
int chunk_strcasecmp(const struct buffer *chk, const char *str);
|
||||||
struct buffer *get_trash_chunk(void);
|
struct buffer *get_trash_chunk(void);
|
||||||
|
struct buffer *get_large_trash_chunk(void);
|
||||||
|
struct buffer *get_trash_chunk_sz(size_t size);
|
||||||
|
struct buffer *get_larger_trash_chunk(struct buffer *chunk);
|
||||||
int init_trash_buffers(int first);
|
int init_trash_buffers(int first);
|
||||||
|
|
||||||
static inline void chunk_reset(struct buffer *chk)
|
static inline void chunk_reset(struct buffer *chk)
|
||||||
|
|
@ -106,12 +110,53 @@ static forceinline struct buffer *alloc_trash_chunk(void)
|
||||||
return chunk;
|
return chunk;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Allocate a large trash chunk from the reentrant pool. The buffer starts at
|
||||||
|
* the end of the chunk. This chunk must be freed using free_trash_chunk(). This
|
||||||
|
* call may fail and the caller is responsible for checking that the returned
|
||||||
|
* pointer is not NULL.
|
||||||
|
*/
|
||||||
|
static forceinline struct buffer *alloc_large_trash_chunk(void)
|
||||||
|
{
|
||||||
|
struct buffer *chunk;
|
||||||
|
|
||||||
|
if (!pool_head_large_trash)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
chunk = pool_alloc(pool_head_large_trash);
|
||||||
|
if (chunk) {
|
||||||
|
char *buf = (char *)chunk + sizeof(struct buffer);
|
||||||
|
*buf = 0;
|
||||||
|
chunk_init(chunk, buf,
|
||||||
|
pool_head_large_trash->size - sizeof(struct buffer));
|
||||||
|
}
|
||||||
|
return chunk;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Allocate a trash chunk accordingly to the requested size. This chunk must be
|
||||||
|
* freed using free_trash_chunk(). This call may fail and the caller is
|
||||||
|
* responsible for checking that the returned pointer is not NULL.
|
||||||
|
*/
|
||||||
|
static forceinline struct buffer *alloc_trash_chunk_sz(size_t size)
|
||||||
|
{
|
||||||
|
if (likely(size <= pool_head_trash->size))
|
||||||
|
return alloc_trash_chunk();
|
||||||
|
else if (pool_head_large_trash && size <= pool_head_large_trash->size)
|
||||||
|
return alloc_large_trash_chunk();
|
||||||
|
else
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* free a trash chunk allocated by alloc_trash_chunk(). NOP on NULL.
|
* free a trash chunk allocated by alloc_trash_chunk(). NOP on NULL.
|
||||||
*/
|
*/
|
||||||
static forceinline void free_trash_chunk(struct buffer *chunk)
|
static forceinline void free_trash_chunk(struct buffer *chunk)
|
||||||
{
|
{
|
||||||
|
if (likely(chunk && chunk->size == pool_head_trash->size - sizeof(struct buffer)))
|
||||||
pool_free(pool_head_trash, chunk);
|
pool_free(pool_head_trash, chunk);
|
||||||
|
else
|
||||||
|
pool_free(pool_head_large_trash, chunk);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* copies chunk <src> into <chk>. Returns 0 in case of failure. */
|
/* copies chunk <src> into <chk>. Returns 0 in case of failure. */
|
||||||
|
|
|
||||||
|
|
@ -100,6 +100,7 @@ enum cli_wait_err {
|
||||||
enum cli_wait_cond {
|
enum cli_wait_cond {
|
||||||
CLI_WAIT_COND_NONE, // no condition to wait on
|
CLI_WAIT_COND_NONE, // no condition to wait on
|
||||||
CLI_WAIT_COND_SRV_UNUSED,// wait for server to become unused
|
CLI_WAIT_COND_SRV_UNUSED,// wait for server to become unused
|
||||||
|
CLI_WAIT_COND_BE_UNUSED, // wait for backend to become unused
|
||||||
};
|
};
|
||||||
|
|
||||||
struct cli_wait_ctx {
|
struct cli_wait_ctx {
|
||||||
|
|
|
||||||
|
|
@ -185,6 +185,29 @@ struct be_counters {
|
||||||
} p; /* protocol-specific stats */
|
} p; /* protocol-specific stats */
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/* extra counters that are registered at boot by various modules */
|
||||||
|
enum counters_type {
|
||||||
|
COUNTERS_FE = 0,
|
||||||
|
COUNTERS_BE,
|
||||||
|
COUNTERS_SV,
|
||||||
|
COUNTERS_LI,
|
||||||
|
COUNTERS_RSLV,
|
||||||
|
|
||||||
|
COUNTERS_OFF_END /* must always be last */
|
||||||
|
};
|
||||||
|
|
||||||
|
struct extra_counters {
|
||||||
|
char **datap; /* points to pointer to heap containing counters allocated in a linear fashion */
|
||||||
|
size_t size; /* size of allocated data */
|
||||||
|
size_t tgrp_step; /* distance in words between two datap for consecutive tgroups, 0 for single */
|
||||||
|
uint nbtgrp; /* number of thread groups accessing these counters */
|
||||||
|
enum counters_type type; /* type of object containing the counters */
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
#define EXTRA_COUNTERS(name) \
|
||||||
|
struct extra_counters *name
|
||||||
|
|
||||||
#endif /* _HAPROXY_COUNTERS_T_H */
|
#endif /* _HAPROXY_COUNTERS_T_H */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
||||||
|
|
@ -26,6 +26,9 @@
|
||||||
|
|
||||||
#include <haproxy/counters-t.h>
|
#include <haproxy/counters-t.h>
|
||||||
#include <haproxy/guid-t.h>
|
#include <haproxy/guid-t.h>
|
||||||
|
#include <haproxy/global.h>
|
||||||
|
|
||||||
|
extern THREAD_LOCAL void *trash_counters;
|
||||||
|
|
||||||
int counters_fe_shared_prepare(struct fe_counters_shared *counters, const struct guid_node *guid, char **errmsg);
|
int counters_fe_shared_prepare(struct fe_counters_shared *counters, const struct guid_node *guid, char **errmsg);
|
||||||
int counters_be_shared_prepare(struct be_counters_shared *counters, const struct guid_node *guid, char **errmsg);
|
int counters_be_shared_prepare(struct be_counters_shared *counters, const struct guid_node *guid, char **errmsg);
|
||||||
|
|
@ -101,4 +104,106 @@ void counters_be_shared_drop(struct be_counters_shared *counters);
|
||||||
__ret; \
|
__ret; \
|
||||||
})
|
})
|
||||||
|
|
||||||
|
#define COUNTERS_UPDATE_MAX(counter, count) \
|
||||||
|
do { \
|
||||||
|
if (!(global.tune.options & GTUNE_NO_MAX_COUNTER)) \
|
||||||
|
HA_ATOMIC_UPDATE_MAX(counter, count); \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
/* Manipulation of extra_counters, for boot-time registrable modules */
|
||||||
|
/* retrieve the base storage of extra counters (first tgroup if any) */
|
||||||
|
#define EXTRA_COUNTERS_BASE(counters, mod) \
|
||||||
|
(likely(counters) ? \
|
||||||
|
((void *)(*(counters)->datap + (mod)->counters_off[(counters)->type])) : \
|
||||||
|
(trash_counters))
|
||||||
|
|
||||||
|
/* retrieve the pointer to the extra counters storage for module <mod> for the
|
||||||
|
* current TGID.
|
||||||
|
*/
|
||||||
|
#define EXTRA_COUNTERS_GET(counters, mod) \
|
||||||
|
(likely(counters) ? \
|
||||||
|
((void *)(counters)->datap[(counters)->tgrp_step * (tgid - 1)] + \
|
||||||
|
(mod)->counters_off[(counters)->type]) : \
|
||||||
|
(trash_counters))
|
||||||
|
|
||||||
|
#define EXTRA_COUNTERS_REGISTER(counters, ctype, alloc_failed_label, storage, step) \
|
||||||
|
do { \
|
||||||
|
typeof(*counters) _ctr; \
|
||||||
|
_ctr = calloc(1, sizeof(*_ctr)); \
|
||||||
|
if (!_ctr) \
|
||||||
|
goto alloc_failed_label; \
|
||||||
|
_ctr->type = (ctype); \
|
||||||
|
_ctr->tgrp_step = (step); \
|
||||||
|
_ctr->datap = (storage); \
|
||||||
|
*(counters) = _ctr; \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
#define EXTRA_COUNTERS_ADD(mod, counters, new_counters, csize) \
|
||||||
|
do { \
|
||||||
|
typeof(counters) _ctr = (counters); \
|
||||||
|
(mod)->counters_off[_ctr->type] = _ctr->size; \
|
||||||
|
_ctr->size += (csize); \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
#define EXTRA_COUNTERS_ALLOC(counters, alloc_failed_label, nbtg) \
|
||||||
|
do { \
|
||||||
|
typeof(counters) _ctr = (counters); \
|
||||||
|
char **datap = _ctr->datap; \
|
||||||
|
uint tgrp; \
|
||||||
|
_ctr->nbtgrp = _ctr->tgrp_step ? (nbtg) : 1; \
|
||||||
|
for (tgrp = 0; tgrp < _ctr->nbtgrp; tgrp++) { \
|
||||||
|
*datap = malloc((_ctr)->size); \
|
||||||
|
if (!*_ctr->datap) \
|
||||||
|
goto alloc_failed_label; \
|
||||||
|
datap += _ctr->tgrp_step; \
|
||||||
|
} \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
#define EXTRA_COUNTERS_INIT(counters, mod, init_counters, init_counters_size) \
|
||||||
|
do { \
|
||||||
|
typeof(counters) _ctr = (counters); \
|
||||||
|
char **datap = _ctr->datap; \
|
||||||
|
uint tgrp; \
|
||||||
|
for (tgrp = 0; tgrp < _ctr->nbtgrp; tgrp++) { \
|
||||||
|
memcpy(*datap + mod->counters_off[_ctr->type], \
|
||||||
|
(init_counters), (init_counters_size)); \
|
||||||
|
datap += _ctr->tgrp_step; \
|
||||||
|
} \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
#define EXTRA_COUNTERS_FREE(counters) \
|
||||||
|
do { \
|
||||||
|
typeof(counters) _ctr = (counters); \
|
||||||
|
if (_ctr) { \
|
||||||
|
char **datap = _ctr->datap; \
|
||||||
|
uint tgrp; \
|
||||||
|
for (tgrp = 0; tgrp < _ctr->nbtgrp; tgrp++) { \
|
||||||
|
ha_free(datap); \
|
||||||
|
datap += _ctr->tgrp_step; \
|
||||||
|
} \
|
||||||
|
free(_ctr); \
|
||||||
|
} \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
/* aggregate all values of <metricp> over the thread groups handled by
|
||||||
|
* <counters>. <metricp> MUST correspond to an entry of the first tgrp of
|
||||||
|
* <counters>. The number of groups and the step are found in <counters>. The
|
||||||
|
* type of the return value is the same as <metricp>, and must be a scalar so
|
||||||
|
* that values are summed before being returned.
|
||||||
|
*/
|
||||||
|
#define EXTRA_COUNTERS_AGGR(counters, metricp) \
|
||||||
|
({ \
|
||||||
|
typeof(counters) _ctr = (counters); \
|
||||||
|
typeof(metricp) *valp, _ret = 0; \
|
||||||
|
if (_ctr) { \
|
||||||
|
size_t ofs = (char *)&metricp - _ctr->datap[0]; \
|
||||||
|
uint tgrp; \
|
||||||
|
for (tgrp = 0; tgrp < _ctr->nbtgrp; tgrp++) { \
|
||||||
|
valp = (typeof(valp))(_ctr->datap[tgrp * (counters)->tgrp_step] + ofs); \
|
||||||
|
_ret += HA_ATOMIC_LOAD(valp); \
|
||||||
|
} \
|
||||||
|
} \
|
||||||
|
_ret; \
|
||||||
|
})
|
||||||
|
|
||||||
#endif /* _HAPROXY_COUNTERS_H */
|
#endif /* _HAPROXY_COUNTERS_H */
|
||||||
|
|
|
||||||
|
|
@ -24,12 +24,12 @@
|
||||||
|
|
||||||
#include <import/ebtree-t.h>
|
#include <import/ebtree-t.h>
|
||||||
|
|
||||||
#include <haproxy/connection-t.h>
|
|
||||||
#include <haproxy/buf-t.h>
|
#include <haproxy/buf-t.h>
|
||||||
|
#include <haproxy/connection-t.h>
|
||||||
|
#include <haproxy/counters-t.h>
|
||||||
#include <haproxy/dgram-t.h>
|
#include <haproxy/dgram-t.h>
|
||||||
#include <haproxy/dns_ring-t.h>
|
#include <haproxy/dns_ring-t.h>
|
||||||
#include <haproxy/obj_type-t.h>
|
#include <haproxy/obj_type-t.h>
|
||||||
#include <haproxy/stats-t.h>
|
|
||||||
#include <haproxy/task-t.h>
|
#include <haproxy/task-t.h>
|
||||||
#include <haproxy/thread.h>
|
#include <haproxy/thread.h>
|
||||||
|
|
||||||
|
|
@ -152,6 +152,7 @@ struct dns_nameserver {
|
||||||
struct dns_stream_server *stream; /* used for tcp dns */
|
struct dns_stream_server *stream; /* used for tcp dns */
|
||||||
|
|
||||||
EXTRA_COUNTERS(extra_counters);
|
EXTRA_COUNTERS(extra_counters);
|
||||||
|
char *extra_counters_storage; /* storage used for extra_counters above */
|
||||||
struct dns_counters *counters;
|
struct dns_counters *counters;
|
||||||
|
|
||||||
struct list list; /* nameserver chained list */
|
struct list list; /* nameserver chained list */
|
||||||
|
|
|
||||||
|
|
@ -36,6 +36,7 @@
|
||||||
#include <haproxy/pool.h>
|
#include <haproxy/pool.h>
|
||||||
|
|
||||||
extern struct pool_head *pool_head_buffer;
|
extern struct pool_head *pool_head_buffer;
|
||||||
|
extern struct pool_head *pool_head_large_buffer;
|
||||||
|
|
||||||
int init_buffer(void);
|
int init_buffer(void);
|
||||||
void buffer_dump(FILE *o, struct buffer *b, int from, int to);
|
void buffer_dump(FILE *o, struct buffer *b, int from, int to);
|
||||||
|
|
@ -53,6 +54,30 @@ static inline int buffer_almost_full(const struct buffer *buf)
|
||||||
return b_almost_full(buf);
|
return b_almost_full(buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Return 1 if <sz> is the default buffer size */
|
||||||
|
static inline int b_is_default_sz(size_t sz)
|
||||||
|
{
|
||||||
|
return (sz == pool_head_buffer->size);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Return 1 if <sz> is the size of a large buffer (alwoys false is large buffers are not configured) */
|
||||||
|
static inline int b_is_large_sz(size_t sz)
|
||||||
|
{
|
||||||
|
return (pool_head_large_buffer && sz == pool_head_large_buffer->size);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Return 1 if <bug> is a default buffer */
|
||||||
|
static inline int b_is_default(struct buffer *buf)
|
||||||
|
{
|
||||||
|
return b_is_default_sz(b_size(buf));
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Return 1 if <buf> is a large buffer (alwoys 0 is large buffers are not configured) */
|
||||||
|
static inline int b_is_large(struct buffer *buf)
|
||||||
|
{
|
||||||
|
return b_is_large_sz(b_size(buf));
|
||||||
|
}
|
||||||
|
|
||||||
/**************************************************/
|
/**************************************************/
|
||||||
/* Functions below are used for buffer allocation */
|
/* Functions below are used for buffer allocation */
|
||||||
/**************************************************/
|
/**************************************************/
|
||||||
|
|
@ -136,13 +161,18 @@ static inline char *__b_get_emergency_buf(void)
|
||||||
#define __b_free(_buf) \
|
#define __b_free(_buf) \
|
||||||
do { \
|
do { \
|
||||||
char *area = (_buf)->area; \
|
char *area = (_buf)->area; \
|
||||||
|
size_t sz = (_buf)->size; \
|
||||||
\
|
\
|
||||||
/* let's first clear the area to save an occasional "show sess all" \
|
/* let's first clear the area to save an occasional "show sess all" \
|
||||||
* glancing over our shoulder from getting a dangling pointer. \
|
* glancing over our shoulder from getting a dangling pointer. \
|
||||||
*/ \
|
*/ \
|
||||||
*(_buf) = BUF_NULL; \
|
*(_buf) = BUF_NULL; \
|
||||||
__ha_barrier_store(); \
|
__ha_barrier_store(); \
|
||||||
if (th_ctx->emergency_bufs_left < global.tune.reserved_bufs) \
|
/* if enabled, large buffers are always strictly greater \
|
||||||
|
* than the default buffers */ \
|
||||||
|
if (unlikely(b_is_large_sz(sz))) \
|
||||||
|
pool_free(pool_head_large_buffer, area); \
|
||||||
|
else if (th_ctx->emergency_bufs_left < global.tune.reserved_bufs) \
|
||||||
th_ctx->emergency_bufs[th_ctx->emergency_bufs_left++] = area; \
|
th_ctx->emergency_bufs[th_ctx->emergency_bufs_left++] = area; \
|
||||||
else \
|
else \
|
||||||
pool_free(pool_head_buffer, area); \
|
pool_free(pool_head_buffer, area); \
|
||||||
|
|
|
||||||
|
|
@ -232,22 +232,28 @@ struct filter {
|
||||||
* 0: request channel, 1: response channel */
|
* 0: request channel, 1: response channel */
|
||||||
unsigned int pre_analyzers; /* bit field indicating analyzers to pre-process */
|
unsigned int pre_analyzers; /* bit field indicating analyzers to pre-process */
|
||||||
unsigned int post_analyzers; /* bit field indicating analyzers to post-process */
|
unsigned int post_analyzers; /* bit field indicating analyzers to post-process */
|
||||||
struct list list; /* Next filter for the same proxy/stream */
|
struct list list; /* Filter list for the stream */
|
||||||
|
/* req_list and res_list are exactly equivalent, except the order may differ */
|
||||||
|
struct list req_list; /* Filter list for request channel */
|
||||||
|
struct list res_list; /* Filter list for response channel */
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Structure reprensenting the "global" state of filters attached to a stream.
|
* Structure reprensenting the "global" state of filters attached to a stream.
|
||||||
|
* Doesn't hold much information, as the channel themselves hold chn_flt struct
|
||||||
|
* which contains the per-channel members.
|
||||||
*/
|
*/
|
||||||
struct strm_flt {
|
struct strm_flt {
|
||||||
struct list filters; /* List of filters attached to a stream */
|
struct list filters; /* List of filters attached to a stream */
|
||||||
struct filter *current[2]; /* From which filter resume processing, for a specific channel.
|
|
||||||
* This is used for resumable callbacks only,
|
|
||||||
* If NULL, we start from the first filter.
|
|
||||||
* 0: request channel, 1: response channel */
|
|
||||||
unsigned short flags; /* STRM_FL_* */
|
unsigned short flags; /* STRM_FL_* */
|
||||||
unsigned char nb_req_data_filters; /* Number of data filters registered on the request channel */
|
};
|
||||||
unsigned char nb_rsp_data_filters; /* Number of data filters registered on the response channel */
|
|
||||||
unsigned long long offset[2];
|
/* structure holding filter state for some members that are channel oriented */
|
||||||
|
struct chn_flt {
|
||||||
|
struct list filters; /* List of filters attached to a channel */
|
||||||
|
struct filter *current; /* From which filter resume processing, for a specific channel. */
|
||||||
|
unsigned char nb_data_filters; /* Number of data filters registered on channel */
|
||||||
|
unsigned long long offset;
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* _HAPROXY_FILTERS_T_H */
|
#endif /* _HAPROXY_FILTERS_T_H */
|
||||||
|
|
|
||||||
|
|
@ -28,7 +28,9 @@
|
||||||
#include <haproxy/stream-t.h>
|
#include <haproxy/stream-t.h>
|
||||||
|
|
||||||
extern const char *trace_flt_id;
|
extern const char *trace_flt_id;
|
||||||
extern const char *http_comp_flt_id;
|
extern const char *http_comp_req_flt_id;
|
||||||
|
extern const char *http_comp_res_flt_id;
|
||||||
|
|
||||||
extern const char *cache_store_flt_id;
|
extern const char *cache_store_flt_id;
|
||||||
extern const char *spoe_filter_id;
|
extern const char *spoe_filter_id;
|
||||||
extern const char *fcgi_flt_id;
|
extern const char *fcgi_flt_id;
|
||||||
|
|
@ -40,13 +42,13 @@ extern const char *fcgi_flt_id;
|
||||||
/* Useful macros to access per-channel values. It can be safely used inside
|
/* Useful macros to access per-channel values. It can be safely used inside
|
||||||
* filters. */
|
* filters. */
|
||||||
#define CHN_IDX(chn) (((chn)->flags & CF_ISRESP) == CF_ISRESP)
|
#define CHN_IDX(chn) (((chn)->flags & CF_ISRESP) == CF_ISRESP)
|
||||||
#define FLT_STRM_OFF(s, chn) (strm_flt(s)->offset[CHN_IDX(chn)])
|
#define FLT_STRM_OFF(s, chn) (chn->flt.offset)
|
||||||
#define FLT_OFF(flt, chn) ((flt)->offset[CHN_IDX(chn)])
|
#define FLT_OFF(flt, chn) ((flt)->offset[CHN_IDX(chn)])
|
||||||
|
|
||||||
#define HAS_FILTERS(strm) ((strm)->strm_flt.flags & STRM_FLT_FL_HAS_FILTERS)
|
#define HAS_FILTERS(strm) ((strm)->strm_flt.flags & STRM_FLT_FL_HAS_FILTERS)
|
||||||
|
|
||||||
#define HAS_REQ_DATA_FILTERS(strm) ((strm)->strm_flt.nb_req_data_filters != 0)
|
#define HAS_REQ_DATA_FILTERS(strm) ((strm)->req.flt.nb_data_filters != 0)
|
||||||
#define HAS_RSP_DATA_FILTERS(strm) ((strm)->strm_flt.nb_rsp_data_filters != 0)
|
#define HAS_RSP_DATA_FILTERS(strm) ((strm)->res.flt.nb_data_filters != 0)
|
||||||
#define HAS_DATA_FILTERS(strm, chn) (((chn)->flags & CF_ISRESP) ? HAS_RSP_DATA_FILTERS(strm) : HAS_REQ_DATA_FILTERS(strm))
|
#define HAS_DATA_FILTERS(strm, chn) (((chn)->flags & CF_ISRESP) ? HAS_RSP_DATA_FILTERS(strm) : HAS_REQ_DATA_FILTERS(strm))
|
||||||
|
|
||||||
#define IS_REQ_DATA_FILTER(flt) ((flt)->flags & FLT_FL_IS_REQ_DATA_FILTER)
|
#define IS_REQ_DATA_FILTER(flt) ((flt)->flags & FLT_FL_IS_REQ_DATA_FILTER)
|
||||||
|
|
@ -137,14 +139,11 @@ static inline void
|
||||||
register_data_filter(struct stream *s, struct channel *chn, struct filter *filter)
|
register_data_filter(struct stream *s, struct channel *chn, struct filter *filter)
|
||||||
{
|
{
|
||||||
if (!IS_DATA_FILTER(filter, chn)) {
|
if (!IS_DATA_FILTER(filter, chn)) {
|
||||||
if (chn->flags & CF_ISRESP) {
|
if (chn->flags & CF_ISRESP)
|
||||||
filter->flags |= FLT_FL_IS_RSP_DATA_FILTER;
|
filter->flags |= FLT_FL_IS_RSP_DATA_FILTER;
|
||||||
strm_flt(s)->nb_rsp_data_filters++;
|
else
|
||||||
}
|
|
||||||
else {
|
|
||||||
filter->flags |= FLT_FL_IS_REQ_DATA_FILTER;
|
filter->flags |= FLT_FL_IS_REQ_DATA_FILTER;
|
||||||
strm_flt(s)->nb_req_data_filters++;
|
chn->flt.nb_data_filters++;
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -153,16 +152,64 @@ static inline void
|
||||||
unregister_data_filter(struct stream *s, struct channel *chn, struct filter *filter)
|
unregister_data_filter(struct stream *s, struct channel *chn, struct filter *filter)
|
||||||
{
|
{
|
||||||
if (IS_DATA_FILTER(filter, chn)) {
|
if (IS_DATA_FILTER(filter, chn)) {
|
||||||
if (chn->flags & CF_ISRESP) {
|
if (chn->flags & CF_ISRESP)
|
||||||
filter->flags &= ~FLT_FL_IS_RSP_DATA_FILTER;
|
filter->flags &= ~FLT_FL_IS_RSP_DATA_FILTER;
|
||||||
strm_flt(s)->nb_rsp_data_filters--;
|
else
|
||||||
|
filter->flags &= ~FLT_FL_IS_REQ_DATA_FILTER;
|
||||||
|
chn->flt.nb_data_filters--;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* flt_list_start() and flt_list_next() can be used to iterate over the list of filters
|
||||||
|
* for a given <strm> and <chn> combination. It will automatically choose the proper
|
||||||
|
* list to iterate from depending on the context.
|
||||||
|
*
|
||||||
|
* flt_list_start() has to be called exactly once to get the first value from the list
|
||||||
|
* to get the following values, use flt_list_next() until NULL is returned.
|
||||||
|
*
|
||||||
|
* Example:
|
||||||
|
*
|
||||||
|
* struct filter *filter;
|
||||||
|
*
|
||||||
|
* for (filter = flt_list_start(stream, channel); filter;
|
||||||
|
* filter = flt_list_next(stream, channel, filter)) {
|
||||||
|
* ...
|
||||||
|
* }
|
||||||
|
*/
|
||||||
|
static inline struct filter *flt_list_start(struct stream *strm, struct channel *chn)
|
||||||
|
{
|
||||||
|
struct filter *filter;
|
||||||
|
|
||||||
|
if (chn->flags & CF_ISRESP) {
|
||||||
|
filter = LIST_NEXT(&chn->flt.filters, struct filter *, res_list);
|
||||||
|
if (&filter->res_list == &chn->flt.filters)
|
||||||
|
filter = NULL; /* empty list */
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
filter->flags &= ~FLT_FL_IS_REQ_DATA_FILTER;
|
filter = LIST_NEXT(&chn->flt.filters, struct filter *, req_list);
|
||||||
strm_flt(s)->nb_req_data_filters--;
|
if (&filter->req_list == &chn->flt.filters)
|
||||||
|
filter = NULL; /* empty list */
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return filter;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline struct filter *flt_list_next(struct stream *strm, struct channel *chn,
|
||||||
|
struct filter *filter)
|
||||||
|
{
|
||||||
|
if (chn->flags & CF_ISRESP) {
|
||||||
|
filter = LIST_NEXT(&filter->res_list, struct filter *, res_list);
|
||||||
|
if (&filter->res_list == &chn->flt.filters)
|
||||||
|
filter = NULL; /* end of list */
|
||||||
}
|
}
|
||||||
|
else {
|
||||||
|
filter = LIST_NEXT(&filter->req_list, struct filter *, req_list);
|
||||||
|
if (&filter->req_list == &chn->flt.filters)
|
||||||
|
filter = NULL; /* end of list */
|
||||||
|
}
|
||||||
|
|
||||||
|
return filter;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* This function must be called when a filter alter payload data. It updates
|
/* This function must be called when a filter alter payload data. It updates
|
||||||
|
|
@ -177,7 +224,8 @@ flt_update_offsets(struct filter *filter, struct channel *chn, int len)
|
||||||
struct stream *s = chn_strm(chn);
|
struct stream *s = chn_strm(chn);
|
||||||
struct filter *f;
|
struct filter *f;
|
||||||
|
|
||||||
list_for_each_entry(f, &strm_flt(s)->filters, list) {
|
for (f = flt_list_start(s, chn); f;
|
||||||
|
f = flt_list_next(s, chn, f)) {
|
||||||
if (f == filter)
|
if (f == filter)
|
||||||
break;
|
break;
|
||||||
FLT_OFF(f, chn) += len;
|
FLT_OFF(f, chn) += len;
|
||||||
|
|
|
||||||
|
|
@ -86,6 +86,7 @@
|
||||||
#define GTUNE_LISTENER_MQ_OPT (1<<28)
|
#define GTUNE_LISTENER_MQ_OPT (1<<28)
|
||||||
#define GTUNE_LISTENER_MQ_ANY (GTUNE_LISTENER_MQ_FAIR | GTUNE_LISTENER_MQ_OPT)
|
#define GTUNE_LISTENER_MQ_ANY (GTUNE_LISTENER_MQ_FAIR | GTUNE_LISTENER_MQ_OPT)
|
||||||
#define GTUNE_NO_KTLS (1<<29)
|
#define GTUNE_NO_KTLS (1<<29)
|
||||||
|
#define GTUNE_NO_MAX_COUNTER (1<<30)
|
||||||
|
|
||||||
/* subsystem-specific debugging options for tune.debug */
|
/* subsystem-specific debugging options for tune.debug */
|
||||||
#define GDBG_CPU_AFFINITY (1U<< 0)
|
#define GDBG_CPU_AFFINITY (1U<< 0)
|
||||||
|
|
@ -179,6 +180,7 @@ struct global {
|
||||||
uint recv_enough; /* how many input bytes at once are "enough" */
|
uint recv_enough; /* how many input bytes at once are "enough" */
|
||||||
uint bufsize; /* buffer size in bytes, defaults to BUFSIZE */
|
uint bufsize; /* buffer size in bytes, defaults to BUFSIZE */
|
||||||
uint bufsize_small;/* small buffer size in bytes */
|
uint bufsize_small;/* small buffer size in bytes */
|
||||||
|
uint bufsize_large;/* large buffer size in bytes */
|
||||||
int maxrewrite; /* buffer max rewrite size in bytes, defaults to MAXREWRITE */
|
int maxrewrite; /* buffer max rewrite size in bytes, defaults to MAXREWRITE */
|
||||||
int reserved_bufs; /* how many buffers can only be allocated for response */
|
int reserved_bufs; /* how many buffers can only be allocated for response */
|
||||||
int buf_limit; /* if not null, how many total buffers may only be allocated */
|
int buf_limit; /* if not null, how many total buffers may only be allocated */
|
||||||
|
|
|
||||||
|
|
@ -24,6 +24,7 @@
|
||||||
|
|
||||||
#include <haproxy/api-t.h>
|
#include <haproxy/api-t.h>
|
||||||
#include <haproxy/global-t.h>
|
#include <haproxy/global-t.h>
|
||||||
|
#include <haproxy/cfgparse.h>
|
||||||
|
|
||||||
extern struct global global;
|
extern struct global global;
|
||||||
extern int pid; /* current process id */
|
extern int pid; /* current process id */
|
||||||
|
|
@ -54,6 +55,8 @@ extern char **old_argv;
|
||||||
extern const char *old_unixsocket;
|
extern const char *old_unixsocket;
|
||||||
extern int daemon_fd[2];
|
extern int daemon_fd[2];
|
||||||
extern int devnullfd;
|
extern int devnullfd;
|
||||||
|
extern int fileless_mode;
|
||||||
|
extern struct cfgfile fileless_cfg;
|
||||||
|
|
||||||
struct proxy;
|
struct proxy;
|
||||||
struct server;
|
struct server;
|
||||||
|
|
|
||||||
|
|
@ -99,7 +99,7 @@ enum h1m_state {
|
||||||
#define H1_MF_TE_CHUNKED 0x00010000 // T-E "chunked"
|
#define H1_MF_TE_CHUNKED 0x00010000 // T-E "chunked"
|
||||||
#define H1_MF_TE_OTHER 0x00020000 // T-E other than supported ones found (only "chunked" is supported for now)
|
#define H1_MF_TE_OTHER 0x00020000 // T-E other than supported ones found (only "chunked" is supported for now)
|
||||||
#define H1_MF_UPG_H2C 0x00040000 // "h2c" or "h2" used as upgrade token
|
#define H1_MF_UPG_H2C 0x00040000 // "h2c" or "h2" used as upgrade token
|
||||||
|
#define H1_MF_NOT_HTTP 0x00080000 // Not an HTTP message (e.g "RTSP", only possible if invalid message are accepted)
|
||||||
/* Mask to use to reset H1M flags when we restart headers parsing.
|
/* Mask to use to reset H1M flags when we restart headers parsing.
|
||||||
*
|
*
|
||||||
* WARNING: Don't forget to update it if a new flag must be preserved when
|
* WARNING: Don't forget to update it if a new flag must be preserved when
|
||||||
|
|
@ -263,6 +263,8 @@ static inline int h1_parse_chunk_size(const struct buffer *buf, int start, int s
|
||||||
const char *ptr_old = ptr;
|
const char *ptr_old = ptr;
|
||||||
const char *end = b_wrap(buf);
|
const char *end = b_wrap(buf);
|
||||||
uint64_t chunk = 0;
|
uint64_t chunk = 0;
|
||||||
|
int backslash = 0;
|
||||||
|
int quote = 0;
|
||||||
|
|
||||||
stop -= start; // bytes left
|
stop -= start; // bytes left
|
||||||
start = stop; // bytes to transfer
|
start = stop; // bytes to transfer
|
||||||
|
|
@ -327,13 +329,37 @@ static inline int h1_parse_chunk_size(const struct buffer *buf, int start, int s
|
||||||
if (--stop == 0)
|
if (--stop == 0)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
while (!HTTP_IS_CRLF(*ptr)) {
|
/* The loop seeks the first CRLF or non-tab CTL char
|
||||||
|
* and stops there. If a backslash/quote is active,
|
||||||
|
* it's an error. If none, we assume it's the CRLF
|
||||||
|
* and go back to the top of the loop checking for
|
||||||
|
* CR then LF. This way CTLs, lone LF etc are handled
|
||||||
|
* in the fallback path. This allows to protect
|
||||||
|
* remotes against their own possibly non-compliant
|
||||||
|
* chunk-ext parser which could mistakenly skip a
|
||||||
|
* quoted CRLF. Chunk-ext are not used anyway, except
|
||||||
|
* by attacks.
|
||||||
|
*/
|
||||||
|
while (!HTTP_IS_CTL(*ptr) || HTTP_IS_SPHT(*ptr)) {
|
||||||
|
if (backslash)
|
||||||
|
backslash = 0; // escaped char
|
||||||
|
else if (*ptr == '\\' && quote)
|
||||||
|
backslash = 1;
|
||||||
|
else if (*ptr == '\\') // backslash not permitted outside quotes
|
||||||
|
goto error;
|
||||||
|
else if (*ptr == '"') // begin/end of quoted-pair
|
||||||
|
quote = !quote;
|
||||||
if (++ptr >= end)
|
if (++ptr >= end)
|
||||||
ptr = b_orig(buf);
|
ptr = b_orig(buf);
|
||||||
if (--stop == 0)
|
if (--stop == 0)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
/* we have a CRLF now, loop above */
|
|
||||||
|
/* mismatched quotes / backslashes end here */
|
||||||
|
if (quote || backslash)
|
||||||
|
goto error;
|
||||||
|
|
||||||
|
/* CTLs (CRLF) fall to the common check */
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
|
|
|
||||||
|
|
@ -222,6 +222,7 @@ struct hlua_proxy_list {
|
||||||
};
|
};
|
||||||
|
|
||||||
struct hlua_proxy_list_iterator_context {
|
struct hlua_proxy_list_iterator_context {
|
||||||
|
struct watcher px_watch; /* watcher to automatically update next pointer on backend deletion */
|
||||||
struct proxy *next;
|
struct proxy *next;
|
||||||
char capabilities;
|
char capabilities;
|
||||||
};
|
};
|
||||||
|
|
|
||||||
36
include/haproxy/hstream-t.h
Normal file
36
include/haproxy/hstream-t.h
Normal file
|
|
@ -0,0 +1,36 @@
|
||||||
|
#ifndef _HAPROXY_HSTREAM_T_H
|
||||||
|
#define _HAPROXY_HSTREAM_T_H
|
||||||
|
|
||||||
|
#include <haproxy/dynbuf-t.h>
|
||||||
|
#include <haproxy/http-t.h>
|
||||||
|
#include <haproxy/obj_type-t.h>
|
||||||
|
|
||||||
|
/* hastream stream */
|
||||||
|
struct hstream {
|
||||||
|
enum obj_type obj_type;
|
||||||
|
struct session *sess;
|
||||||
|
|
||||||
|
struct stconn *sc;
|
||||||
|
struct task *task;
|
||||||
|
|
||||||
|
struct buffer req;
|
||||||
|
struct buffer res;
|
||||||
|
unsigned long long to_write; /* #of response data bytes to write after headers */
|
||||||
|
struct buffer_wait buf_wait; /* Wait list for buffer allocation */
|
||||||
|
|
||||||
|
int flags;
|
||||||
|
|
||||||
|
int ka; /* .0: keep-alive .1: forced .2: http/1.1, .3: was_reused */
|
||||||
|
int req_cache;
|
||||||
|
unsigned long long req_size; /* values passed in the URI to override the server's */
|
||||||
|
unsigned long long req_body; /* remaining body to be consumed from the request */
|
||||||
|
int req_code;
|
||||||
|
int res_wait; /* time to wait before replying in ms */
|
||||||
|
int res_time;
|
||||||
|
int req_chunked;
|
||||||
|
int req_random;
|
||||||
|
int req_after_res; /* Drain the request body after having sent the response */
|
||||||
|
enum http_meth_t req_meth;
|
||||||
|
};
|
||||||
|
|
||||||
|
#endif /* _HAPROXY_HSTREAM_T_H */
|
||||||
12
include/haproxy/hstream.h
Normal file
12
include/haproxy/hstream.h
Normal file
|
|
@ -0,0 +1,12 @@
|
||||||
|
#ifndef _HAPROXY_HSTREAM_H
|
||||||
|
#define _HAPROXY_HSTREAM_H
|
||||||
|
|
||||||
|
#include <haproxy/cfgparse.h>
|
||||||
|
#include <haproxy/hstream-t.h>
|
||||||
|
|
||||||
|
struct task *sc_hstream_io_cb(struct task *t, void *ctx, unsigned int state);
|
||||||
|
int hstream_wake(struct stconn *sc);
|
||||||
|
void hstream_shutdown(struct stconn *sc);
|
||||||
|
void *hstream_new(struct session *sess, struct stconn *sc, struct buffer *input);
|
||||||
|
|
||||||
|
#endif /* _HAPROXY_HSTREAM_H */
|
||||||
|
|
@ -228,7 +228,8 @@ enum h1_state {
|
||||||
*/
|
*/
|
||||||
struct http_msg {
|
struct http_msg {
|
||||||
enum h1_state msg_state; /* where we are in the current message parsing */
|
enum h1_state msg_state; /* where we are in the current message parsing */
|
||||||
/* 3 bytes unused here */
|
unsigned char vsn; /* HTTP version, 4 bits per digit */
|
||||||
|
/* 2 bytes unused here */
|
||||||
unsigned int flags; /* flags describing the message (HTTP version, ...) */
|
unsigned int flags; /* flags describing the message (HTTP version, ...) */
|
||||||
struct channel *chn; /* pointer to the channel transporting the message */
|
struct channel *chn; /* pointer to the channel transporting the message */
|
||||||
};
|
};
|
||||||
|
|
|
||||||
|
|
@ -49,7 +49,7 @@ int http_req_replace_stline(int action, const char *replace, int len,
|
||||||
int http_res_set_status(unsigned int status, struct ist reason, struct stream *s);
|
int http_res_set_status(unsigned int status, struct ist reason, struct stream *s);
|
||||||
void http_check_request_for_cacheability(struct stream *s, struct channel *req);
|
void http_check_request_for_cacheability(struct stream *s, struct channel *req);
|
||||||
void http_check_response_for_cacheability(struct stream *s, struct channel *res);
|
void http_check_response_for_cacheability(struct stream *s, struct channel *res);
|
||||||
enum rule_result http_wait_for_msg_body(struct stream *s, struct channel *chn, unsigned int time, unsigned int bytes);
|
enum rule_result http_wait_for_msg_body(struct stream *s, struct channel *chn, unsigned int time, unsigned int bytes, unsigned int large_buffer);
|
||||||
void http_perform_server_redirect(struct stream *s, struct stconn *sc);
|
void http_perform_server_redirect(struct stream *s, struct stconn *sc);
|
||||||
void http_server_error(struct stream *s, struct stconn *sc, int err, int finst, struct http_reply *msg);
|
void http_server_error(struct stream *s, struct stconn *sc, int err, int finst, struct http_reply *msg);
|
||||||
void http_reply_and_close(struct stream *s, short status, struct http_reply *msg);
|
void http_reply_and_close(struct stream *s, short status, struct http_reply *msg);
|
||||||
|
|
|
||||||
|
|
@ -141,6 +141,7 @@
|
||||||
#define HTX_SL_F_NORMALIZED_URI 0x00000800 /* The received URI is normalized (an implicit absolute-uri form) */
|
#define HTX_SL_F_NORMALIZED_URI 0x00000800 /* The received URI is normalized (an implicit absolute-uri form) */
|
||||||
#define HTX_SL_F_CONN_UPG 0x00001000 /* The message contains "connection: upgrade" header */
|
#define HTX_SL_F_CONN_UPG 0x00001000 /* The message contains "connection: upgrade" header */
|
||||||
#define HTX_SL_F_BODYLESS_RESP 0x00002000 /* The response to this message is bodyloess (only for reqyest) */
|
#define HTX_SL_F_BODYLESS_RESP 0x00002000 /* The response to this message is bodyloess (only for reqyest) */
|
||||||
|
#define HTX_SL_F_NOT_HTTP 0x00004000 /* Not an HTTP message (e.g "RTSP", only possible if invalid message are accepted) */
|
||||||
|
|
||||||
/* This function is used to report flags in debugging tools. Please reflect
|
/* This function is used to report flags in debugging tools. Please reflect
|
||||||
* below any single-bit flag addition above in the same order via the
|
* below any single-bit flag addition above in the same order via the
|
||||||
|
|
@ -177,7 +178,7 @@ static forceinline char *hsl_show_flags(char *buf, size_t len, const char *delim
|
||||||
#define HTX_FL_PARSING_ERROR 0x00000001 /* Set when a parsing error occurred */
|
#define HTX_FL_PARSING_ERROR 0x00000001 /* Set when a parsing error occurred */
|
||||||
#define HTX_FL_PROCESSING_ERROR 0x00000002 /* Set when a processing error occurred */
|
#define HTX_FL_PROCESSING_ERROR 0x00000002 /* Set when a processing error occurred */
|
||||||
#define HTX_FL_FRAGMENTED 0x00000004 /* Set when the HTX buffer is fragmented */
|
#define HTX_FL_FRAGMENTED 0x00000004 /* Set when the HTX buffer is fragmented */
|
||||||
/* 0x00000008 unused */
|
#define HTX_FL_UNORDERED 0x00000008 /* Set when the HTX buffer are not ordered */
|
||||||
#define HTX_FL_EOM 0x00000010 /* Set when end-of-message is reached from the HTTP point of view
|
#define HTX_FL_EOM 0x00000010 /* Set when end-of-message is reached from the HTTP point of view
|
||||||
* (at worst, on the EOM block is missing)
|
* (at worst, on the EOM block is missing)
|
||||||
*/
|
*/
|
||||||
|
|
@ -192,7 +193,7 @@ static forceinline char *htx_show_flags(char *buf, size_t len, const char *delim
|
||||||
_(0);
|
_(0);
|
||||||
/* flags */
|
/* flags */
|
||||||
_(HTX_FL_PARSING_ERROR, _(HTX_FL_PROCESSING_ERROR,
|
_(HTX_FL_PARSING_ERROR, _(HTX_FL_PROCESSING_ERROR,
|
||||||
_(HTX_FL_FRAGMENTED, _(HTX_FL_EOM))));
|
_(HTX_FL_FRAGMENTED, _(HTX_FL_UNORDERED, _(HTX_FL_EOM)))));
|
||||||
/* epilogue */
|
/* epilogue */
|
||||||
_(~0U);
|
_(~0U);
|
||||||
return buf;
|
return buf;
|
||||||
|
|
|
||||||
|
|
@ -98,6 +98,11 @@ static inline struct ist htx_sl_p3(const struct htx_sl *sl)
|
||||||
return ist2(HTX_SL_P3_PTR(sl), HTX_SL_P3_LEN(sl));
|
return ist2(HTX_SL_P3_PTR(sl), HTX_SL_P3_LEN(sl));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline struct ist htx_sl_vsn(const struct htx_sl *sl)
|
||||||
|
{
|
||||||
|
return ((sl->flags & HTX_SL_F_IS_RESP) ? htx_sl_p1(sl) : htx_sl_p3(sl));
|
||||||
|
}
|
||||||
|
|
||||||
static inline struct ist htx_sl_req_meth(const struct htx_sl *sl)
|
static inline struct ist htx_sl_req_meth(const struct htx_sl *sl)
|
||||||
{
|
{
|
||||||
return htx_sl_p1(sl);
|
return htx_sl_p1(sl);
|
||||||
|
|
@ -474,11 +479,12 @@ static inline struct htx_sl *htx_add_stline(struct htx *htx, enum htx_blk_type t
|
||||||
static inline struct htx_blk *htx_add_header(struct htx *htx, const struct ist name,
|
static inline struct htx_blk *htx_add_header(struct htx *htx, const struct ist name,
|
||||||
const struct ist value)
|
const struct ist value)
|
||||||
{
|
{
|
||||||
struct htx_blk *blk;
|
struct htx_blk *blk, *tailblk;
|
||||||
|
|
||||||
if (name.len > 255 || value.len > 1048575)
|
if (name.len > 255 || value.len > 1048575)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
|
tailblk = htx_get_tail_blk(htx);
|
||||||
blk = htx_add_blk(htx, HTX_BLK_HDR, name.len + value.len);
|
blk = htx_add_blk(htx, HTX_BLK_HDR, name.len + value.len);
|
||||||
if (!blk)
|
if (!blk)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
@ -486,6 +492,8 @@ static inline struct htx_blk *htx_add_header(struct htx *htx, const struct ist n
|
||||||
blk->info += (value.len << 8) + name.len;
|
blk->info += (value.len << 8) + name.len;
|
||||||
ist2bin_lc(htx_get_blk_ptr(htx, blk), name);
|
ist2bin_lc(htx_get_blk_ptr(htx, blk), name);
|
||||||
memcpy(htx_get_blk_ptr(htx, blk) + name.len, value.ptr, value.len);
|
memcpy(htx_get_blk_ptr(htx, blk) + name.len, value.ptr, value.len);
|
||||||
|
if (tailblk && htx_get_blk_type(tailblk) >= HTX_BLK_EOH)
|
||||||
|
htx->flags |= HTX_FL_UNORDERED;
|
||||||
return blk;
|
return blk;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -495,11 +503,12 @@ static inline struct htx_blk *htx_add_header(struct htx *htx, const struct ist n
|
||||||
static inline struct htx_blk *htx_add_trailer(struct htx *htx, const struct ist name,
|
static inline struct htx_blk *htx_add_trailer(struct htx *htx, const struct ist name,
|
||||||
const struct ist value)
|
const struct ist value)
|
||||||
{
|
{
|
||||||
struct htx_blk *blk;
|
struct htx_blk *blk, *tailblk;
|
||||||
|
|
||||||
if (name.len > 255 || value.len > 1048575)
|
if (name.len > 255 || value.len > 1048575)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
|
tailblk = htx_get_tail_blk(htx);
|
||||||
blk = htx_add_blk(htx, HTX_BLK_TLR, name.len + value.len);
|
blk = htx_add_blk(htx, HTX_BLK_TLR, name.len + value.len);
|
||||||
if (!blk)
|
if (!blk)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
@ -507,6 +516,8 @@ static inline struct htx_blk *htx_add_trailer(struct htx *htx, const struct ist
|
||||||
blk->info += (value.len << 8) + name.len;
|
blk->info += (value.len << 8) + name.len;
|
||||||
ist2bin_lc(htx_get_blk_ptr(htx, blk), name);
|
ist2bin_lc(htx_get_blk_ptr(htx, blk), name);
|
||||||
memcpy(htx_get_blk_ptr(htx, blk) + name.len, value.ptr, value.len);
|
memcpy(htx_get_blk_ptr(htx, blk) + name.len, value.ptr, value.len);
|
||||||
|
if (tailblk && htx_get_blk_type(tailblk) >= HTX_BLK_EOT)
|
||||||
|
htx->flags |= HTX_FL_UNORDERED;
|
||||||
return blk;
|
return blk;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -147,14 +147,14 @@ __attribute__((constructor)) static void __initcb_##linenum() \
|
||||||
#define _DECLARE_INITCALL(...) \
|
#define _DECLARE_INITCALL(...) \
|
||||||
__DECLARE_INITCALL(__VA_ARGS__)
|
__DECLARE_INITCALL(__VA_ARGS__)
|
||||||
|
|
||||||
/* This requires that function <function> is called with pointer argument
|
/* This requires that function <function> is called without arguments
|
||||||
* <argument> during init stage <stage> which must be one of init_stage.
|
* during init stage <stage> which must be one of init_stage.
|
||||||
*/
|
*/
|
||||||
#define INITCALL0(stage, function) \
|
#define INITCALL0(stage, function) \
|
||||||
_DECLARE_INITCALL(stage, __LINE__, function, 0, 0, 0)
|
_DECLARE_INITCALL(stage, __LINE__, function, 0, 0, 0)
|
||||||
|
|
||||||
/* This requires that function <function> is called with pointer argument
|
/* This requires that function <function> is called with pointer argument
|
||||||
* <argument> during init stage <stage> which must be one of init_stage.
|
* <arg1> during init stage <stage> which must be one of init_stage.
|
||||||
*/
|
*/
|
||||||
#define INITCALL1(stage, function, arg1) \
|
#define INITCALL1(stage, function, arg1) \
|
||||||
_DECLARE_INITCALL(stage, __LINE__, function, arg1, 0, 0)
|
_DECLARE_INITCALL(stage, __LINE__, function, arg1, 0, 0)
|
||||||
|
|
|
||||||
|
|
@ -28,13 +28,13 @@
|
||||||
#include <import/ebtree-t.h>
|
#include <import/ebtree-t.h>
|
||||||
|
|
||||||
#include <haproxy/api-t.h>
|
#include <haproxy/api-t.h>
|
||||||
|
#include <haproxy/counters-t.h>
|
||||||
#include <haproxy/guid-t.h>
|
#include <haproxy/guid-t.h>
|
||||||
#include <haproxy/obj_type-t.h>
|
#include <haproxy/obj_type-t.h>
|
||||||
#include <haproxy/quic_cc-t.h>
|
#include <haproxy/quic_cc-t.h>
|
||||||
#include <haproxy/quic_sock-t.h>
|
#include <haproxy/quic_sock-t.h>
|
||||||
#include <haproxy/quic_tp-t.h>
|
#include <haproxy/quic_tp-t.h>
|
||||||
#include <haproxy/receiver-t.h>
|
#include <haproxy/receiver-t.h>
|
||||||
#include <haproxy/stats-t.h>
|
|
||||||
#include <haproxy/thread.h>
|
#include <haproxy/thread.h>
|
||||||
|
|
||||||
/* Some pointer types reference below */
|
/* Some pointer types reference below */
|
||||||
|
|
@ -263,6 +263,7 @@ struct listener {
|
||||||
|
|
||||||
struct li_per_thread *per_thr; /* per-thread fields (one per thread in the group) */
|
struct li_per_thread *per_thr; /* per-thread fields (one per thread in the group) */
|
||||||
|
|
||||||
|
char *extra_counters_storage; /* storage for extra_counters */
|
||||||
EXTRA_COUNTERS(extra_counters);
|
EXTRA_COUNTERS(extra_counters);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -200,6 +200,8 @@ enum qcc_app_ops_close_side {
|
||||||
|
|
||||||
/* QUIC application layer operations */
|
/* QUIC application layer operations */
|
||||||
struct qcc_app_ops {
|
struct qcc_app_ops {
|
||||||
|
const char *alpn;
|
||||||
|
|
||||||
/* Initialize <qcc> connection app context. */
|
/* Initialize <qcc> connection app context. */
|
||||||
int (*init)(struct qcc *qcc);
|
int (*init)(struct qcc *qcc);
|
||||||
/* Finish connection initialization if prelude required. */
|
/* Finish connection initialization if prelude required. */
|
||||||
|
|
@ -232,6 +234,9 @@ struct qcc_app_ops {
|
||||||
void (*inc_err_cnt)(void *ctx, int err_code);
|
void (*inc_err_cnt)(void *ctx, int err_code);
|
||||||
/* Set QCC error code as suspicious activity has been detected. */
|
/* Set QCC error code as suspicious activity has been detected. */
|
||||||
void (*report_susp)(void *ctx);
|
void (*report_susp)(void *ctx);
|
||||||
|
|
||||||
|
/* Free function to close a stream after MUX layer shutdown. */
|
||||||
|
int (*strm_reject)(struct list *out, uint64_t id);
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* USE_QUIC */
|
#endif /* USE_QUIC */
|
||||||
|
|
|
||||||
|
|
@ -12,6 +12,9 @@
|
||||||
#include <haproxy/mux_quic-t.h>
|
#include <haproxy/mux_quic-t.h>
|
||||||
#include <haproxy/stconn.h>
|
#include <haproxy/stconn.h>
|
||||||
|
|
||||||
|
#include <haproxy/h3.h>
|
||||||
|
#include <haproxy/hq_interop.h>
|
||||||
|
|
||||||
#define qcc_report_glitch(qcc, inc, ...) ({ \
|
#define qcc_report_glitch(qcc, inc, ...) ({ \
|
||||||
COUNT_GLITCH(__VA_ARGS__); \
|
COUNT_GLITCH(__VA_ARGS__); \
|
||||||
_qcc_report_glitch(qcc, inc); \
|
_qcc_report_glitch(qcc, inc); \
|
||||||
|
|
@ -88,7 +91,7 @@ static inline char *qcs_st_to_str(enum qcs_state st)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int qcc_install_app_ops(struct qcc *qcc, const struct qcc_app_ops *app_ops);
|
int qcc_install_app_ops(struct qcc *qcc);
|
||||||
|
|
||||||
/* Register <qcs> stream for http-request timeout. If the stream is not yet
|
/* Register <qcs> stream for http-request timeout. If the stream is not yet
|
||||||
* attached in the configured delay, qcc timeout task will be triggered. This
|
* attached in the configured delay, qcc timeout task will be triggered. This
|
||||||
|
|
@ -115,6 +118,16 @@ void qcc_show_quic(struct qcc *qcc);
|
||||||
|
|
||||||
void qcc_wakeup(struct qcc *qcc);
|
void qcc_wakeup(struct qcc *qcc);
|
||||||
|
|
||||||
|
static inline const struct qcc_app_ops *quic_alpn_to_app_ops(const char *alpn, int alpn_len)
|
||||||
|
{
|
||||||
|
if (alpn_len >= 2 && memcmp(alpn, "h3", 2) == 0)
|
||||||
|
return &h3_ops;
|
||||||
|
else if (alpn_len >= 10 && memcmp(alpn, "hq-interop", 10) == 0)
|
||||||
|
return &hq_interop_ops;
|
||||||
|
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
#endif /* USE_QUIC */
|
#endif /* USE_QUIC */
|
||||||
|
|
||||||
#endif /* _HAPROXY_MUX_QUIC_H */
|
#endif /* _HAPROXY_MUX_QUIC_H */
|
||||||
|
|
|
||||||
|
|
@ -46,6 +46,7 @@ enum obj_type {
|
||||||
#ifdef USE_QUIC
|
#ifdef USE_QUIC
|
||||||
OBJ_TYPE_DGRAM, /* object is a struct quic_dgram */
|
OBJ_TYPE_DGRAM, /* object is a struct quic_dgram */
|
||||||
#endif
|
#endif
|
||||||
|
OBJ_TYPE_HATERM, /* object is a struct hstream */
|
||||||
OBJ_TYPE_ENTRIES /* last one : number of entries */
|
OBJ_TYPE_ENTRIES /* last one : number of entries */
|
||||||
} __attribute__((packed)) ;
|
} __attribute__((packed)) ;
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -26,6 +26,7 @@
|
||||||
#include <haproxy/applet-t.h>
|
#include <haproxy/applet-t.h>
|
||||||
#include <haproxy/check-t.h>
|
#include <haproxy/check-t.h>
|
||||||
#include <haproxy/connection-t.h>
|
#include <haproxy/connection-t.h>
|
||||||
|
#include <haproxy/hstream-t.h>
|
||||||
#include <haproxy/listener-t.h>
|
#include <haproxy/listener-t.h>
|
||||||
#include <haproxy/obj_type-t.h>
|
#include <haproxy/obj_type-t.h>
|
||||||
#include <haproxy/pool.h>
|
#include <haproxy/pool.h>
|
||||||
|
|
@ -189,6 +190,19 @@ static inline struct check *objt_check(enum obj_type *t)
|
||||||
return __objt_check(t);
|
return __objt_check(t);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline struct hstream *__objt_hstream(enum obj_type *t)
|
||||||
|
{
|
||||||
|
return container_of(t, struct hstream, obj_type);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline struct hstream *objt_hstream(enum obj_type *t)
|
||||||
|
{
|
||||||
|
if (!t || *t != OBJ_TYPE_HATERM)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
return __objt_hstream(t);
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef USE_QUIC
|
#ifdef USE_QUIC
|
||||||
static inline struct quic_dgram *__objt_dgram(enum obj_type *t)
|
static inline struct quic_dgram *__objt_dgram(enum obj_type *t)
|
||||||
{
|
{
|
||||||
|
|
|
||||||
|
|
@ -380,6 +380,14 @@ static inline unsigned long ERR_peek_error_func(const char **func)
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#if (HA_OPENSSL_VERSION_NUMBER >= 0x40000000L) && !defined(OPENSSL_IS_AWSLC) && !defined(LIBRESSL_VERSION_NUMBER) && !defined(USE_OPENSSL_WOLFSSL)
|
||||||
|
# define X509_STORE_getX_objects(x) X509_STORE_get1_objects(x)
|
||||||
|
# define sk_X509_OBJECT_popX_free(x, y) sk_X509_OBJECT_pop_free(x,y)
|
||||||
|
#else
|
||||||
|
# define X509_STORE_getX_objects(x) X509_STORE_get0_objects(x)
|
||||||
|
# define sk_X509_OBJECT_popX_free(x, y) ({})
|
||||||
|
#endif
|
||||||
|
|
||||||
#if (HA_OPENSSL_VERSION_NUMBER >= 0x1010000fL) || (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER >= 0x2070200fL)
|
#if (HA_OPENSSL_VERSION_NUMBER >= 0x1010000fL) || (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER >= 0x2070200fL)
|
||||||
#define __OPENSSL_110_CONST__ const
|
#define __OPENSSL_110_CONST__ const
|
||||||
#else
|
#else
|
||||||
|
|
|
||||||
|
|
@ -72,8 +72,8 @@ struct pool_registration {
|
||||||
struct list list; /* link element */
|
struct list list; /* link element */
|
||||||
const char *name; /* name of the pool */
|
const char *name; /* name of the pool */
|
||||||
const char *file; /* where the pool is declared */
|
const char *file; /* where the pool is declared */
|
||||||
|
ullong size; /* expected object size */
|
||||||
unsigned int line; /* line in the file where the pool is declared, 0 if none */
|
unsigned int line; /* line in the file where the pool is declared, 0 if none */
|
||||||
unsigned int size; /* expected object size */
|
|
||||||
unsigned int flags; /* MEM_F_* */
|
unsigned int flags; /* MEM_F_* */
|
||||||
unsigned int type_align; /* type-imposed alignment; 0=unspecified */
|
unsigned int type_align; /* type-imposed alignment; 0=unspecified */
|
||||||
unsigned int align; /* expected alignment; 0=unspecified */
|
unsigned int align; /* expected alignment; 0=unspecified */
|
||||||
|
|
|
||||||
|
|
@ -183,7 +183,7 @@ unsigned long long pool_total_allocated(void);
|
||||||
unsigned long long pool_total_used(void);
|
unsigned long long pool_total_used(void);
|
||||||
void pool_flush(struct pool_head *pool);
|
void pool_flush(struct pool_head *pool);
|
||||||
void pool_gc(struct pool_head *pool_ctx);
|
void pool_gc(struct pool_head *pool_ctx);
|
||||||
struct pool_head *create_pool_with_loc(const char *name, unsigned int size, unsigned int align,
|
struct pool_head *create_pool_with_loc(const char *name, ullong size, unsigned int align,
|
||||||
unsigned int flags, const char *file, unsigned int line);
|
unsigned int flags, const char *file, unsigned int line);
|
||||||
struct pool_head *create_pool_from_reg(const char *name, struct pool_registration *reg);
|
struct pool_head *create_pool_from_reg(const char *name, struct pool_registration *reg);
|
||||||
void create_pool_callback(struct pool_head **ptr, char *name, struct pool_registration *reg);
|
void create_pool_callback(struct pool_head **ptr, char *name, struct pool_registration *reg);
|
||||||
|
|
|
||||||
|
|
@ -38,7 +38,6 @@
|
||||||
#include <haproxy/obj_type-t.h>
|
#include <haproxy/obj_type-t.h>
|
||||||
#include <haproxy/queue-t.h>
|
#include <haproxy/queue-t.h>
|
||||||
#include <haproxy/server-t.h>
|
#include <haproxy/server-t.h>
|
||||||
#include <haproxy/stats-t.h>
|
|
||||||
#include <haproxy/tcpcheck-t.h>
|
#include <haproxy/tcpcheck-t.h>
|
||||||
#include <haproxy/thread-t.h>
|
#include <haproxy/thread-t.h>
|
||||||
#include <haproxy/tools-t.h>
|
#include <haproxy/tools-t.h>
|
||||||
|
|
@ -240,14 +239,16 @@ enum PR_SRV_STATE_FILE {
|
||||||
#define PR_RE_JUNK_REQUEST 0x00020000 /* We received an incomplete or garbage response */
|
#define PR_RE_JUNK_REQUEST 0x00020000 /* We received an incomplete or garbage response */
|
||||||
|
|
||||||
/* Proxy flags */
|
/* Proxy flags */
|
||||||
#define PR_FL_DISABLED 0x01 /* The proxy was disabled in the configuration (not at runtime) */
|
#define PR_FL_DISABLED 0x00000001 /* The proxy was disabled in the configuration (not at runtime) */
|
||||||
#define PR_FL_STOPPED 0x02 /* The proxy was stopped */
|
#define PR_FL_STOPPED 0x00000002 /* The proxy was stopped */
|
||||||
#define PR_FL_READY 0x04 /* The proxy is ready to be used (initialized and configured) */
|
#define PR_FL_DEF_EXPLICIT_MODE 0x00000004 /* Proxy mode is explicitely defined - only used for defaults instance */
|
||||||
#define PR_FL_EXPLICIT_REF 0x08 /* The default proxy is explicitly referenced by another proxy */
|
#define PR_FL_EXPLICIT_REF 0x00000008 /* The default proxy is explicitly referenced by another proxy */
|
||||||
#define PR_FL_IMPLICIT_REF 0x10 /* The default proxy is implicitly referenced by another proxy */
|
#define PR_FL_IMPLICIT_REF 0x00000010 /* The default proxy is implicitly referenced by another proxy */
|
||||||
#define PR_FL_PAUSED 0x20 /* The proxy was paused at run time (reversible) */
|
#define PR_FL_PAUSED 0x00000020 /* The proxy was paused at run time (reversible) */
|
||||||
#define PR_FL_CHECKED 0x40 /* The proxy configuration was fully checked (including postparsing checks) */
|
#define PR_FL_CHECKED 0x00000040 /* The proxy configuration was fully checked (including postparsing checks) */
|
||||||
#define PR_FL_BE_UNPUBLISHED 0x80 /* The proxy cannot be targetted by content switching rules */
|
#define PR_FL_BE_UNPUBLISHED 0x00000080 /* The proxy cannot be targetted by content switching rules */
|
||||||
|
#define PR_FL_DELETED 0x00000100 /* Proxy has been deleted and must be manipulated with care */
|
||||||
|
#define PR_FL_NON_PURGEABLE 0x00000200 /* Proxy referenced by config elements which prevent its runtime removal. */
|
||||||
|
|
||||||
struct stream;
|
struct stream;
|
||||||
|
|
||||||
|
|
@ -293,7 +294,8 @@ struct error_snapshot {
|
||||||
struct server *srv; /* server associated with the error (or NULL) */
|
struct server *srv; /* server associated with the error (or NULL) */
|
||||||
/* @64 */
|
/* @64 */
|
||||||
unsigned int ev_id; /* event number (counter incremented for each capture) */
|
unsigned int ev_id; /* event number (counter incremented for each capture) */
|
||||||
/* @68: 4 bytes hole here */
|
unsigned int buf_size; /* buffer size */
|
||||||
|
|
||||||
struct sockaddr_storage src; /* client's address */
|
struct sockaddr_storage src; /* client's address */
|
||||||
|
|
||||||
/**** protocol-specific part ****/
|
/**** protocol-specific part ****/
|
||||||
|
|
@ -305,13 +307,16 @@ struct error_snapshot {
|
||||||
struct proxy_per_tgroup {
|
struct proxy_per_tgroup {
|
||||||
struct queue queue;
|
struct queue queue;
|
||||||
struct lbprm_per_tgrp lbprm;
|
struct lbprm_per_tgrp lbprm;
|
||||||
|
char *extra_counters_fe_storage; /* storage for extra_counters_fe */
|
||||||
|
char *extra_counters_be_storage; /* storage for extra_counters_be */
|
||||||
} THREAD_ALIGNED();
|
} THREAD_ALIGNED();
|
||||||
|
|
||||||
struct proxy {
|
struct proxy {
|
||||||
enum obj_type obj_type; /* object type == OBJ_TYPE_PROXY */
|
enum obj_type obj_type; /* object type == OBJ_TYPE_PROXY */
|
||||||
char flags; /* bit field PR_FL_* */
|
|
||||||
enum pr_mode mode; /* mode = PR_MODE_TCP, PR_MODE_HTTP, ... */
|
enum pr_mode mode; /* mode = PR_MODE_TCP, PR_MODE_HTTP, ... */
|
||||||
char cap; /* supported capabilities (PR_CAP_*) */
|
char cap; /* supported capabilities (PR_CAP_*) */
|
||||||
|
/* 1 byte hole */
|
||||||
|
unsigned int flags; /* bit field PR_FL_* */
|
||||||
int to_log; /* things to be logged (LW_*), special value LW_LOGSTEPS == follow log-steps */
|
int to_log; /* things to be logged (LW_*), special value LW_LOGSTEPS == follow log-steps */
|
||||||
unsigned long last_change; /* internal use only: last time the proxy state was changed */
|
unsigned long last_change; /* internal use only: last time the proxy state was changed */
|
||||||
|
|
||||||
|
|
@ -412,6 +417,7 @@ struct proxy {
|
||||||
int redispatch_after; /* number of retries before redispatch */
|
int redispatch_after; /* number of retries before redispatch */
|
||||||
unsigned down_time; /* total time the proxy was down */
|
unsigned down_time; /* total time the proxy was down */
|
||||||
int (*accept)(struct stream *s); /* application layer's accept() */
|
int (*accept)(struct stream *s); /* application layer's accept() */
|
||||||
|
void *(*stream_new_from_sc)(struct session *sess, struct stconn *sc, struct buffer *in); /* stream instantiation callback for mux stream connector */
|
||||||
struct conn_src conn_src; /* connection source settings */
|
struct conn_src conn_src; /* connection source settings */
|
||||||
enum obj_type *default_target; /* default target to use for accepted streams or NULL */
|
enum obj_type *default_target; /* default target to use for accepted streams or NULL */
|
||||||
struct proxy *next;
|
struct proxy *next;
|
||||||
|
|
@ -474,7 +480,7 @@ struct proxy {
|
||||||
struct log_steps log_steps; /* bitfield of log origins where log should be generated during request handling */
|
struct log_steps log_steps; /* bitfield of log origins where log should be generated during request handling */
|
||||||
const char *file_prev; /* file of the previous instance found with the same name, or NULL */
|
const char *file_prev; /* file of the previous instance found with the same name, or NULL */
|
||||||
int line_prev; /* line of the previous instance found with the same name, or 0 */
|
int line_prev; /* line of the previous instance found with the same name, or 0 */
|
||||||
unsigned int refcount; /* refcount on this proxy (only used for default proxy for now) */
|
unsigned int def_ref; /* default proxy only refcount */
|
||||||
} conf; /* config information */
|
} conf; /* config information */
|
||||||
struct http_ext *http_ext; /* http ext options */
|
struct http_ext *http_ext; /* http ext options */
|
||||||
struct ceb_root *used_server_addr; /* list of server addresses in use */
|
struct ceb_root *used_server_addr; /* list of server addresses in use */
|
||||||
|
|
@ -503,15 +509,23 @@ struct proxy {
|
||||||
struct list filter_configs; /* list of the filters that are declared on this proxy */
|
struct list filter_configs; /* list of the filters that are declared on this proxy */
|
||||||
|
|
||||||
struct guid_node guid; /* GUID global tree node */
|
struct guid_node guid; /* GUID global tree node */
|
||||||
|
struct mt_list watcher_list; /* list of elems which currently references this proxy instance (currently only used with backends) */
|
||||||
|
uint refcount; /* refcount to keep proxy from being deleted during runtime */
|
||||||
|
|
||||||
EXTRA_COUNTERS(extra_counters_fe);
|
EXTRA_COUNTERS(extra_counters_fe);
|
||||||
EXTRA_COUNTERS(extra_counters_be);
|
EXTRA_COUNTERS(extra_counters_be);
|
||||||
|
|
||||||
THREAD_ALIGN();
|
THREAD_ALIGN();
|
||||||
unsigned int queueslength; /* Sum of the length of each queue */
|
/* these ones change all the time */
|
||||||
int served; /* # of active sessions currently being served */
|
int served; /* # of active sessions currently being served */
|
||||||
int totpend; /* total number of pending connections on this instance (for stats) */
|
|
||||||
unsigned int feconn, beconn; /* # of active frontend and backends streams */
|
unsigned int feconn, beconn; /* # of active frontend and backends streams */
|
||||||
|
|
||||||
|
THREAD_ALIGN();
|
||||||
|
/* these ones are only changed when queues are involved, but checked
|
||||||
|
* all the time.
|
||||||
|
*/
|
||||||
|
unsigned int queueslength; /* Sum of the length of each queue */
|
||||||
|
int totpend; /* total number of pending connections on this instance (for stats) */
|
||||||
};
|
};
|
||||||
|
|
||||||
struct switching_rule {
|
struct switching_rule {
|
||||||
|
|
|
||||||
|
|
@ -26,6 +26,7 @@
|
||||||
|
|
||||||
#include <haproxy/api.h>
|
#include <haproxy/api.h>
|
||||||
#include <haproxy/applet-t.h>
|
#include <haproxy/applet-t.h>
|
||||||
|
#include <haproxy/counters.h>
|
||||||
#include <haproxy/freq_ctr.h>
|
#include <haproxy/freq_ctr.h>
|
||||||
#include <haproxy/list.h>
|
#include <haproxy/list.h>
|
||||||
#include <haproxy/listener-t.h>
|
#include <haproxy/listener-t.h>
|
||||||
|
|
@ -41,6 +42,8 @@ extern unsigned int error_snapshot_id; /* global ID assigned to each error then
|
||||||
extern struct ceb_root *proxy_by_name; /* tree of proxies sorted by name */
|
extern struct ceb_root *proxy_by_name; /* tree of proxies sorted by name */
|
||||||
extern struct list defaults_list; /* all defaults proxies list */
|
extern struct list defaults_list; /* all defaults proxies list */
|
||||||
|
|
||||||
|
extern unsigned int dynpx_next_id;
|
||||||
|
|
||||||
extern const struct cfg_opt cfg_opts[];
|
extern const struct cfg_opt cfg_opts[];
|
||||||
extern const struct cfg_opt cfg_opts2[];
|
extern const struct cfg_opt cfg_opts2[];
|
||||||
extern const struct cfg_opt cfg_opts3[];
|
extern const struct cfg_opt cfg_opts3[];
|
||||||
|
|
@ -56,9 +59,10 @@ void stop_proxy(struct proxy *p);
|
||||||
int stream_set_backend(struct stream *s, struct proxy *be);
|
int stream_set_backend(struct stream *s, struct proxy *be);
|
||||||
|
|
||||||
void deinit_proxy(struct proxy *p);
|
void deinit_proxy(struct proxy *p);
|
||||||
void free_proxy(struct proxy *p);
|
void proxy_drop(struct proxy *p);
|
||||||
const char *proxy_cap_str(int cap);
|
const char *proxy_cap_str(int cap);
|
||||||
const char *proxy_mode_str(int mode);
|
const char *proxy_mode_str(int mode);
|
||||||
|
enum pr_mode str_to_proxy_mode(const char *mode);
|
||||||
const char *proxy_find_best_option(const char *word, const char **extra);
|
const char *proxy_find_best_option(const char *word, const char **extra);
|
||||||
uint proxy_get_next_id(uint from);
|
uint proxy_get_next_id(uint from);
|
||||||
void proxy_store_name(struct proxy *px);
|
void proxy_store_name(struct proxy *px);
|
||||||
|
|
@ -74,12 +78,12 @@ void defaults_px_destroy_all_unref(void);
|
||||||
void defaults_px_detach(struct proxy *px);
|
void defaults_px_detach(struct proxy *px);
|
||||||
void defaults_px_ref_all(void);
|
void defaults_px_ref_all(void);
|
||||||
void defaults_px_unref_all(void);
|
void defaults_px_unref_all(void);
|
||||||
|
int proxy_ref_defaults(struct proxy *px, struct proxy *defpx, char **errmsg);
|
||||||
void proxy_ref_defaults(struct proxy *px, struct proxy *defpx);
|
|
||||||
void proxy_unref_defaults(struct proxy *px);
|
void proxy_unref_defaults(struct proxy *px);
|
||||||
int setup_new_proxy(struct proxy *px, const char *name, unsigned int cap, char **errmsg);
|
int setup_new_proxy(struct proxy *px, const char *name, unsigned int cap, char **errmsg);
|
||||||
struct proxy *alloc_new_proxy(const char *name, unsigned int cap,
|
struct proxy *alloc_new_proxy(const char *name, unsigned int cap,
|
||||||
char **errmsg);
|
char **errmsg);
|
||||||
|
void proxy_take(struct proxy *px);
|
||||||
struct proxy *parse_new_proxy(const char *name, unsigned int cap,
|
struct proxy *parse_new_proxy(const char *name, unsigned int cap,
|
||||||
const char *file, int linenum,
|
const char *file, int linenum,
|
||||||
const struct proxy *defproxy);
|
const struct proxy *defproxy);
|
||||||
|
|
@ -97,6 +101,9 @@ int resolve_stick_rule(struct proxy *curproxy, struct sticking_rule *mrule);
|
||||||
void free_stick_rules(struct list *rules);
|
void free_stick_rules(struct list *rules);
|
||||||
void free_server_rules(struct list *srules);
|
void free_server_rules(struct list *srules);
|
||||||
int proxy_init_per_thr(struct proxy *px);
|
int proxy_init_per_thr(struct proxy *px);
|
||||||
|
int proxy_finalize(struct proxy *px, int *err_code);
|
||||||
|
|
||||||
|
int be_check_for_deletion(const char *bename, struct proxy **pb, const char **pm);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This function returns a string containing the type of the proxy in a format
|
* This function returns a string containing the type of the proxy in a format
|
||||||
|
|
@ -175,7 +182,7 @@ static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
|
||||||
}
|
}
|
||||||
if (l && l->counters && l->counters->shared.tg)
|
if (l && l->counters && l->counters->shared.tg)
|
||||||
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_conn);
|
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_conn);
|
||||||
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.cps_max,
|
COUNTERS_UPDATE_MAX(&fe->fe_counters.cps_max,
|
||||||
update_freq_ctr(&fe->fe_counters._conn_per_sec, 1));
|
update_freq_ctr(&fe->fe_counters._conn_per_sec, 1));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -188,7 +195,7 @@ static inline void proxy_inc_fe_sess_ctr(struct listener *l, struct proxy *fe)
|
||||||
}
|
}
|
||||||
if (l && l->counters && l->counters->shared.tg)
|
if (l && l->counters && l->counters->shared.tg)
|
||||||
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess);
|
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess);
|
||||||
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.sps_max,
|
COUNTERS_UPDATE_MAX(&fe->fe_counters.sps_max,
|
||||||
update_freq_ctr(&fe->fe_counters._sess_per_sec, 1));
|
update_freq_ctr(&fe->fe_counters._sess_per_sec, 1));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -215,7 +222,7 @@ static inline void proxy_inc_be_ctr(struct proxy *be)
|
||||||
_HA_ATOMIC_INC(&be->be_counters.shared.tg[tgid - 1]->cum_sess);
|
_HA_ATOMIC_INC(&be->be_counters.shared.tg[tgid - 1]->cum_sess);
|
||||||
update_freq_ctr(&be->be_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
|
update_freq_ctr(&be->be_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
|
||||||
}
|
}
|
||||||
HA_ATOMIC_UPDATE_MAX(&be->be_counters.sps_max,
|
COUNTERS_UPDATE_MAX(&be->be_counters.sps_max,
|
||||||
update_freq_ctr(&be->be_counters._sess_per_sec, 1));
|
update_freq_ctr(&be->be_counters._sess_per_sec, 1));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -235,7 +242,7 @@ static inline void proxy_inc_fe_req_ctr(struct listener *l, struct proxy *fe,
|
||||||
}
|
}
|
||||||
if (l && l->counters && l->counters->shared.tg)
|
if (l && l->counters && l->counters->shared.tg)
|
||||||
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
|
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
|
||||||
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.p.http.rps_max,
|
COUNTERS_UPDATE_MAX(&fe->fe_counters.p.http.rps_max,
|
||||||
update_freq_ctr(&fe->fe_counters.p.http._req_per_sec, 1));
|
update_freq_ctr(&fe->fe_counters.p.http._req_per_sec, 1));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -13,6 +13,7 @@ int qcs_http_handle_standalone_fin(struct qcs *qcs);
|
||||||
|
|
||||||
size_t qcs_http_snd_buf(struct qcs *qcs, struct buffer *buf, size_t count,
|
size_t qcs_http_snd_buf(struct qcs *qcs, struct buffer *buf, size_t count,
|
||||||
char *fin);
|
char *fin);
|
||||||
|
size_t qcs_http_reset_buf(struct qcs *qcs, struct buffer *buf, size_t count);
|
||||||
|
|
||||||
#endif /* USE_QUIC */
|
#endif /* USE_QUIC */
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -38,7 +38,6 @@
|
||||||
#include <haproxy/quic_cc-t.h>
|
#include <haproxy/quic_cc-t.h>
|
||||||
#include <haproxy/quic_frame-t.h>
|
#include <haproxy/quic_frame-t.h>
|
||||||
#include <haproxy/quic_openssl_compat-t.h>
|
#include <haproxy/quic_openssl_compat-t.h>
|
||||||
#include <haproxy/quic_stats-t.h>
|
|
||||||
#include <haproxy/quic_tls-t.h>
|
#include <haproxy/quic_tls-t.h>
|
||||||
#include <haproxy/quic_tp-t.h>
|
#include <haproxy/quic_tp-t.h>
|
||||||
#include <haproxy/show_flags-t.h>
|
#include <haproxy/show_flags-t.h>
|
||||||
|
|
@ -401,6 +400,8 @@ struct quic_conn {
|
||||||
|
|
||||||
struct eb_root streams_by_id; /* qc_stream_desc tree */
|
struct eb_root streams_by_id; /* qc_stream_desc tree */
|
||||||
|
|
||||||
|
const char *alpn;
|
||||||
|
|
||||||
/* MUX */
|
/* MUX */
|
||||||
struct qcc *qcc;
|
struct qcc *qcc;
|
||||||
struct task *timer_task;
|
struct task *timer_task;
|
||||||
|
|
@ -409,7 +410,9 @@ struct quic_conn {
|
||||||
/* Handshake expiration date */
|
/* Handshake expiration date */
|
||||||
unsigned int hs_expire;
|
unsigned int hs_expire;
|
||||||
|
|
||||||
const struct qcc_app_ops *app_ops;
|
/* Callback to close any stream after MUX closure - set by the MUX itself */
|
||||||
|
int (*strm_reject)(struct list *out, uint64_t stream_id);
|
||||||
|
|
||||||
/* Proxy counters */
|
/* Proxy counters */
|
||||||
struct quic_counters *prx_counters;
|
struct quic_counters *prx_counters;
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -30,6 +30,7 @@
|
||||||
#include <import/eb64tree.h>
|
#include <import/eb64tree.h>
|
||||||
#include <import/ebmbtree.h>
|
#include <import/ebmbtree.h>
|
||||||
|
|
||||||
|
#include <haproxy/counters.h>
|
||||||
#include <haproxy/chunk.h>
|
#include <haproxy/chunk.h>
|
||||||
#include <haproxy/dynbuf.h>
|
#include <haproxy/dynbuf.h>
|
||||||
#include <haproxy/ncbmbuf.h>
|
#include <haproxy/ncbmbuf.h>
|
||||||
|
|
@ -83,7 +84,7 @@ void qc_check_close_on_released_mux(struct quic_conn *qc);
|
||||||
int quic_stateless_reset_token_cpy(unsigned char *pos, size_t len,
|
int quic_stateless_reset_token_cpy(unsigned char *pos, size_t len,
|
||||||
const unsigned char *salt, size_t saltlen);
|
const unsigned char *salt, size_t saltlen);
|
||||||
int quic_reuse_srv_params(struct quic_conn *qc,
|
int quic_reuse_srv_params(struct quic_conn *qc,
|
||||||
const unsigned char *alpn,
|
const char *alpn,
|
||||||
const struct quic_early_transport_params *etps);
|
const struct quic_early_transport_params *etps);
|
||||||
|
|
||||||
/* Returns true if <qc> is used on the backed side (as a client). */
|
/* Returns true if <qc> is used on the backed side (as a client). */
|
||||||
|
|
@ -193,13 +194,17 @@ static inline void *qc_counters(enum obj_type *o, const struct stats_module *m)
|
||||||
p = l ? l->bind_conf->frontend :
|
p = l ? l->bind_conf->frontend :
|
||||||
s ? s->proxy : NULL;
|
s ? s->proxy : NULL;
|
||||||
|
|
||||||
return p ? EXTRA_COUNTERS_GET(p->extra_counters_fe, m) : NULL;
|
if (l && p)
|
||||||
|
return EXTRA_COUNTERS_GET(p->extra_counters_fe, m);
|
||||||
|
else if (s && p)
|
||||||
|
return EXTRA_COUNTERS_GET(p->extra_counters_be, m);
|
||||||
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
void chunk_frm_appendf(struct buffer *buf, const struct quic_frame *frm);
|
void chunk_frm_appendf(struct buffer *buf, const struct quic_frame *frm);
|
||||||
void quic_set_connection_close(struct quic_conn *qc, const struct quic_err err);
|
void quic_set_connection_close(struct quic_conn *qc, const struct quic_err err);
|
||||||
void quic_set_tls_alert(struct quic_conn *qc, int alert);
|
void quic_set_tls_alert(struct quic_conn *qc, int alert);
|
||||||
int quic_set_app_ops(struct quic_conn *qc, const unsigned char *alpn, size_t alpn_len);
|
int qc_register_alpn(struct quic_conn *qc, const char *alpn, int alpn_len);
|
||||||
int qc_check_dcid(struct quic_conn *qc, unsigned char *dcid, size_t dcid_len);
|
int qc_check_dcid(struct quic_conn *qc, unsigned char *dcid, size_t dcid_len);
|
||||||
|
|
||||||
void qc_notify_err(struct quic_conn *qc);
|
void qc_notify_err(struct quic_conn *qc);
|
||||||
|
|
|
||||||
|
|
@ -20,8 +20,7 @@
|
||||||
#define QUIC_OPENSSL_COMPAT_CLIENT_APPLICATION "CLIENT_TRAFFIC_SECRET_0"
|
#define QUIC_OPENSSL_COMPAT_CLIENT_APPLICATION "CLIENT_TRAFFIC_SECRET_0"
|
||||||
#define QUIC_OPENSSL_COMPAT_SERVER_APPLICATION "SERVER_TRAFFIC_SECRET_0"
|
#define QUIC_OPENSSL_COMPAT_SERVER_APPLICATION "SERVER_TRAFFIC_SECRET_0"
|
||||||
|
|
||||||
void quic_tls_compat_msg_callback(struct connection *conn,
|
void quic_tls_compat_msg_callback(int write_p, int version, int content_type,
|
||||||
int write_p, int version, int content_type,
|
|
||||||
const void *buf, size_t len, SSL *ssl);
|
const void *buf, size_t len, SSL *ssl);
|
||||||
int quic_tls_compat_init(struct bind_conf *bind_conf, SSL_CTX *ctx);
|
int quic_tls_compat_init(struct bind_conf *bind_conf, SSL_CTX *ctx);
|
||||||
void quic_tls_compat_keylog_callback(const SSL *ssl, const char *line);
|
void quic_tls_compat_keylog_callback(const SSL *ssl, const char *line);
|
||||||
|
|
|
||||||
|
|
@ -6,8 +6,6 @@
|
||||||
#error "Must define USE_OPENSSL"
|
#error "Must define USE_OPENSSL"
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
extern struct stats_module quic_stats_module;
|
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
QUIC_ST_RXBUF_FULL,
|
QUIC_ST_RXBUF_FULL,
|
||||||
QUIC_ST_DROPPED_PACKET,
|
QUIC_ST_DROPPED_PACKET,
|
||||||
|
|
@ -52,6 +50,7 @@ enum {
|
||||||
QUIC_ST_STREAM_DATA_BLOCKED,
|
QUIC_ST_STREAM_DATA_BLOCKED,
|
||||||
QUIC_ST_STREAMS_BLOCKED_BIDI,
|
QUIC_ST_STREAMS_BLOCKED_BIDI,
|
||||||
QUIC_ST_STREAMS_BLOCKED_UNI,
|
QUIC_ST_STREAMS_BLOCKED_UNI,
|
||||||
|
QUIC_ST_NCBUF_GAP_LIMIT,
|
||||||
QUIC_STATS_COUNT /* must be the last */
|
QUIC_STATS_COUNT /* must be the last */
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
@ -99,6 +98,7 @@ struct quic_counters {
|
||||||
long long stream_data_blocked; /* total number of times STREAM_DATA_BLOCKED frame was received */
|
long long stream_data_blocked; /* total number of times STREAM_DATA_BLOCKED frame was received */
|
||||||
long long streams_blocked_bidi; /* total number of times STREAMS_BLOCKED_BIDI frame was received */
|
long long streams_blocked_bidi; /* total number of times STREAMS_BLOCKED_BIDI frame was received */
|
||||||
long long streams_blocked_uni; /* total number of times STREAMS_BLOCKED_UNI frame was received */
|
long long streams_blocked_uni; /* total number of times STREAMS_BLOCKED_UNI frame was received */
|
||||||
|
long long ncbuf_gap_limit; /* total number of times we failed to add data to ncbuf due to gap size limit */
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* USE_QUIC */
|
#endif /* USE_QUIC */
|
||||||
|
|
|
||||||
|
|
@ -7,7 +7,9 @@
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#include <haproxy/quic_stats-t.h>
|
#include <haproxy/quic_stats-t.h>
|
||||||
|
#include <haproxy/stats-t.h>
|
||||||
|
|
||||||
|
extern struct stats_module quic_stats_module;
|
||||||
void quic_stats_transp_err_count_inc(struct quic_counters *ctrs, int error_code);
|
void quic_stats_transp_err_count_inc(struct quic_counters *ctrs, int error_code);
|
||||||
|
|
||||||
#endif /* USE_QUIC */
|
#endif /* USE_QUIC */
|
||||||
|
|
|
||||||
|
|
@ -27,7 +27,6 @@
|
||||||
#include <haproxy/connection-t.h>
|
#include <haproxy/connection-t.h>
|
||||||
#include <haproxy/dns-t.h>
|
#include <haproxy/dns-t.h>
|
||||||
#include <haproxy/obj_type-t.h>
|
#include <haproxy/obj_type-t.h>
|
||||||
#include <haproxy/stats-t.h>
|
|
||||||
#include <haproxy/task-t.h>
|
#include <haproxy/task-t.h>
|
||||||
#include <haproxy/thread.h>
|
#include <haproxy/thread.h>
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -38,7 +38,7 @@ void sc_update_tx(struct stconn *sc);
|
||||||
|
|
||||||
struct task *sc_conn_io_cb(struct task *t, void *ctx, unsigned int state);
|
struct task *sc_conn_io_cb(struct task *t, void *ctx, unsigned int state);
|
||||||
int sc_conn_sync_recv(struct stconn *sc);
|
int sc_conn_sync_recv(struct stconn *sc);
|
||||||
void sc_conn_sync_send(struct stconn *sc);
|
int sc_conn_sync_send(struct stconn *sc);
|
||||||
|
|
||||||
int sc_applet_sync_recv(struct stconn *sc);
|
int sc_applet_sync_recv(struct stconn *sc);
|
||||||
void sc_applet_sync_send(struct stconn *sc);
|
void sc_applet_sync_send(struct stconn *sc);
|
||||||
|
|
@ -74,6 +74,70 @@ static inline struct buffer *sc_ob(const struct stconn *sc)
|
||||||
{
|
{
|
||||||
return &sc_oc(sc)->buf;
|
return &sc_oc(sc)->buf;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/* The application layer tells the stream connector that it just got the input
|
||||||
|
* buffer it was waiting for. A read activity is reported. The SC_FL_HAVE_BUFF
|
||||||
|
* flag is set and held until sc_used_buff() is called to indicate it was
|
||||||
|
* used.
|
||||||
|
*/
|
||||||
|
static inline void sc_have_buff(struct stconn *sc)
|
||||||
|
{
|
||||||
|
if (sc->flags & SC_FL_NEED_BUFF) {
|
||||||
|
sc->flags &= ~SC_FL_NEED_BUFF;
|
||||||
|
sc->flags |= SC_FL_HAVE_BUFF;
|
||||||
|
sc_ep_report_read_activity(sc);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* The stream connector failed to get an input buffer and is waiting for it.
|
||||||
|
* It indicates a willingness to deliver data to the buffer that will have to
|
||||||
|
* be retried. As such, callers will often automatically clear SE_FL_HAVE_NO_DATA
|
||||||
|
* to be called again as soon as SC_FL_NEED_BUFF is cleared.
|
||||||
|
*/
|
||||||
|
static inline void sc_need_buff(struct stconn *sc)
|
||||||
|
{
|
||||||
|
sc->flags |= SC_FL_NEED_BUFF;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* The stream connector indicates that it has successfully allocated the buffer
|
||||||
|
* it was previously waiting for so it drops the SC_FL_HAVE_BUFF bit.
|
||||||
|
*/
|
||||||
|
static inline void sc_used_buff(struct stconn *sc)
|
||||||
|
{
|
||||||
|
sc->flags &= ~SC_FL_HAVE_BUFF;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Tell a stream connector some room was made in the input buffer and any
|
||||||
|
* failed attempt to inject data into it may be tried again. This is usually
|
||||||
|
* called after a successful transfer of buffer contents to the other side.
|
||||||
|
* A read activity is reported.
|
||||||
|
*/
|
||||||
|
static inline void sc_have_room(struct stconn *sc)
|
||||||
|
{
|
||||||
|
if (sc->flags & SC_FL_NEED_ROOM) {
|
||||||
|
sc->flags &= ~SC_FL_NEED_ROOM;
|
||||||
|
sc->room_needed = 0;
|
||||||
|
sc_ep_report_read_activity(sc);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* The stream connector announces it failed to put data into the input buffer
|
||||||
|
* by lack of room. Since it indicates a willingness to deliver data to the
|
||||||
|
* buffer that will have to be retried. Usually the caller will also clear
|
||||||
|
* SE_FL_HAVE_NO_DATA to be called again as soon as SC_FL_NEED_ROOM is cleared.
|
||||||
|
*
|
||||||
|
* The caller is responsible to specified the amount of free space required to
|
||||||
|
* progress. It must take care to not exceed the buffer size.
|
||||||
|
*/
|
||||||
|
static inline void sc_need_room(struct stconn *sc, ssize_t room_needed)
|
||||||
|
{
|
||||||
|
sc->flags |= SC_FL_NEED_ROOM;
|
||||||
|
BUG_ON_HOT(room_needed > (ssize_t)c_size(sc_ic(sc)));
|
||||||
|
sc->room_needed = room_needed;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
/* returns the stream's task associated to this stream connector */
|
/* returns the stream's task associated to this stream connector */
|
||||||
static inline struct task *sc_strm_task(const struct stconn *sc)
|
static inline struct task *sc_strm_task(const struct stconn *sc)
|
||||||
{
|
{
|
||||||
|
|
@ -344,10 +408,15 @@ static inline int sc_sync_recv(struct stconn *sc)
|
||||||
/* Perform a synchronous send using the right version, depending the endpoing is
|
/* Perform a synchronous send using the right version, depending the endpoing is
|
||||||
* a connection or an applet.
|
* a connection or an applet.
|
||||||
*/
|
*/
|
||||||
static inline void sc_sync_send(struct stconn *sc)
|
static inline int sc_sync_send(struct stconn *sc, unsigned cnt)
|
||||||
{
|
{
|
||||||
if (sc_ep_test(sc, SE_FL_T_MUX))
|
if (!sc_ep_test(sc, SE_FL_T_MUX))
|
||||||
sc_conn_sync_send(sc);
|
return 0;
|
||||||
|
if (cnt >= 2 && co_data(sc_oc(sc))) {
|
||||||
|
task_wakeup(__sc_strm(sc)->task, TASK_WOKEN_MSG);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
return sc_conn_sync_send(sc);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Combines both sc_update_rx() and sc_update_tx() at once */
|
/* Combines both sc_update_rx() and sc_update_tx() at once */
|
||||||
|
|
|
||||||
|
|
@ -286,6 +286,7 @@ struct srv_per_tgroup {
|
||||||
struct queue queue; /* pending connections */
|
struct queue queue; /* pending connections */
|
||||||
struct server *server; /* pointer to the corresponding server */
|
struct server *server; /* pointer to the corresponding server */
|
||||||
struct eb32_node lb_node; /* node used for tree-based load balancing */
|
struct eb32_node lb_node; /* node used for tree-based load balancing */
|
||||||
|
char *extra_counters_storage; /* storage for extra_counters */
|
||||||
struct server *next_full; /* next server in the temporary full list */
|
struct server *next_full; /* next server in the temporary full list */
|
||||||
unsigned int last_other_tgrp_served; /* Last other tgrp we dequeued from */
|
unsigned int last_other_tgrp_served; /* Last other tgrp we dequeued from */
|
||||||
unsigned int self_served; /* Number of connection we dequeued from our own queue */
|
unsigned int self_served; /* Number of connection we dequeued from our own queue */
|
||||||
|
|
@ -383,7 +384,6 @@ struct server {
|
||||||
unsigned next_eweight; /* next pending eweight to commit */
|
unsigned next_eweight; /* next pending eweight to commit */
|
||||||
unsigned cumulative_weight; /* weight of servers prior to this one in the same group, for chash balancing */
|
unsigned cumulative_weight; /* weight of servers prior to this one in the same group, for chash balancing */
|
||||||
int maxqueue; /* maximum number of pending connections allowed */
|
int maxqueue; /* maximum number of pending connections allowed */
|
||||||
unsigned int queueslength; /* Sum of the length of each queue */
|
|
||||||
int shard; /* shard (in peers protocol context only) */
|
int shard; /* shard (in peers protocol context only) */
|
||||||
int log_bufsize; /* implicit ring bufsize (for log server only - in log backend) */
|
int log_bufsize; /* implicit ring bufsize (for log server only - in log backend) */
|
||||||
|
|
||||||
|
|
@ -406,6 +406,7 @@ struct server {
|
||||||
unsigned int max_used_conns; /* Max number of used connections (the counter is reset at each connection purges */
|
unsigned int max_used_conns; /* Max number of used connections (the counter is reset at each connection purges */
|
||||||
unsigned int est_need_conns; /* Estimate on the number of needed connections (max of curr and previous max_used) */
|
unsigned int est_need_conns; /* Estimate on the number of needed connections (max of curr and previous max_used) */
|
||||||
unsigned int curr_sess_idle_conns; /* Current number of idle connections attached to a session instead of idle/safe trees. */
|
unsigned int curr_sess_idle_conns; /* Current number of idle connections attached to a session instead of idle/safe trees. */
|
||||||
|
unsigned int queueslength; /* Sum of the length of each queue */
|
||||||
|
|
||||||
/* elements only used during boot, do not perturb and plug the hole */
|
/* elements only used during boot, do not perturb and plug the hole */
|
||||||
struct guid_node guid; /* GUID global tree node */
|
struct guid_node guid; /* GUID global tree node */
|
||||||
|
|
|
||||||
|
|
@ -29,6 +29,7 @@
|
||||||
#include <haproxy/api.h>
|
#include <haproxy/api.h>
|
||||||
#include <haproxy/applet-t.h>
|
#include <haproxy/applet-t.h>
|
||||||
#include <haproxy/arg-t.h>
|
#include <haproxy/arg-t.h>
|
||||||
|
#include <haproxy/counters.h>
|
||||||
#include <haproxy/freq_ctr.h>
|
#include <haproxy/freq_ctr.h>
|
||||||
#include <haproxy/proxy-t.h>
|
#include <haproxy/proxy-t.h>
|
||||||
#include <haproxy/resolvers-t.h>
|
#include <haproxy/resolvers-t.h>
|
||||||
|
|
@ -55,6 +56,7 @@ int srv_update_addr(struct server *s, void *ip, int ip_sin_family, struct server
|
||||||
struct sample_expr *_parse_srv_expr(char *expr, struct arg_list *args_px,
|
struct sample_expr *_parse_srv_expr(char *expr, struct arg_list *args_px,
|
||||||
const char *file, int linenum, char **err);
|
const char *file, int linenum, char **err);
|
||||||
int server_parse_exprs(struct server *srv, struct proxy *px, char **err);
|
int server_parse_exprs(struct server *srv, struct proxy *px, char **err);
|
||||||
|
int srv_configure_auto_sni(struct server *srv, int *err_code, char **err);
|
||||||
int server_set_inetaddr(struct server *s, const struct server_inetaddr *inetaddr, struct server_inetaddr_updater updater, struct buffer *msg);
|
int server_set_inetaddr(struct server *s, const struct server_inetaddr *inetaddr, struct server_inetaddr_updater updater, struct buffer *msg);
|
||||||
int server_set_inetaddr_warn(struct server *s, const struct server_inetaddr *inetaddr, struct server_inetaddr_updater updater);
|
int server_set_inetaddr_warn(struct server *s, const struct server_inetaddr *inetaddr, struct server_inetaddr_updater updater);
|
||||||
void server_get_inetaddr(struct server *s, struct server_inetaddr *inetaddr);
|
void server_get_inetaddr(struct server *s, struct server_inetaddr *inetaddr);
|
||||||
|
|
@ -211,7 +213,7 @@ static inline void srv_inc_sess_ctr(struct server *s)
|
||||||
_HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->cum_sess);
|
_HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->cum_sess);
|
||||||
update_freq_ctr(&s->counters.shared.tg[tgid - 1]->sess_per_sec, 1);
|
update_freq_ctr(&s->counters.shared.tg[tgid - 1]->sess_per_sec, 1);
|
||||||
}
|
}
|
||||||
HA_ATOMIC_UPDATE_MAX(&s->counters.sps_max,
|
COUNTERS_UPDATE_MAX(&s->counters.sps_max,
|
||||||
update_freq_ctr(&s->counters._sess_per_sec, 1));
|
update_freq_ctr(&s->counters._sess_per_sec, 1));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -349,28 +351,26 @@ static inline int srv_is_transparent(const struct server *srv)
|
||||||
(srv->flags & SRV_F_MAPPORTS);
|
(srv->flags & SRV_F_MAPPORTS);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Detach server from proxy list. It is supported to call this
|
/* Detach <srv> server from its parent proxy list.
|
||||||
* even if the server is not yet in the list
|
*
|
||||||
* Must be called under thread isolation or when it is safe to assume
|
* Must be called under thread isolation.
|
||||||
* that the parent proxy doesn't is not skimming through the server list
|
|
||||||
*/
|
*/
|
||||||
static inline void srv_detach(struct server *srv)
|
static inline void srv_detach(struct server *srv)
|
||||||
{
|
{
|
||||||
struct proxy *px = srv->proxy;
|
struct proxy *px = srv->proxy;
|
||||||
|
|
||||||
if (px->srv == srv)
|
|
||||||
px->srv = srv->next;
|
|
||||||
else {
|
|
||||||
struct server *prev;
|
struct server *prev;
|
||||||
|
|
||||||
|
if (px->srv == srv) {
|
||||||
|
px->srv = srv->next;
|
||||||
|
}
|
||||||
|
else {
|
||||||
for (prev = px->srv; prev && prev->next != srv; prev = prev->next)
|
for (prev = px->srv; prev && prev->next != srv; prev = prev->next)
|
||||||
;
|
;
|
||||||
|
BUG_ON(!prev); /* Server instance not found in proxy list ? */
|
||||||
BUG_ON(!prev);
|
|
||||||
|
|
||||||
prev->next = srv->next;
|
prev->next = srv->next;
|
||||||
}
|
}
|
||||||
/* reset the proxy's ready_srv if it was this one */
|
|
||||||
|
/* Reset the proxy's ready_srv if it was this one. */
|
||||||
HA_ATOMIC_CAS(&px->ready_srv, &srv, NULL);
|
HA_ATOMIC_CAS(&px->ready_srv, &srv, NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -73,6 +73,14 @@ struct ckch_conf {
|
||||||
char *id;
|
char *id;
|
||||||
char **domains;
|
char **domains;
|
||||||
} acme;
|
} acme;
|
||||||
|
struct {
|
||||||
|
struct {
|
||||||
|
char *type; /* "RSA" or "ECSDA" */
|
||||||
|
int bits; /* bits for RSA */
|
||||||
|
char *curves; /* NID of curves for ECDSA*/
|
||||||
|
} key;
|
||||||
|
int on;
|
||||||
|
} gencrt;
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
||||||
|
|
@ -80,6 +80,7 @@ void ssl_store_delete_cafile_entry(struct cafile_entry *ca_e);
|
||||||
int ssl_store_load_ca_from_buf(struct cafile_entry *ca_e, char *cert_buf, int append);
|
int ssl_store_load_ca_from_buf(struct cafile_entry *ca_e, char *cert_buf, int append);
|
||||||
int ssl_store_load_locations_file(char *path, int create_if_none, enum cafile_type type);
|
int ssl_store_load_locations_file(char *path, int create_if_none, enum cafile_type type);
|
||||||
int __ssl_store_load_locations_file(char *path, int create_if_none, enum cafile_type type, int shuterror);
|
int __ssl_store_load_locations_file(char *path, int create_if_none, enum cafile_type type, int shuterror);
|
||||||
|
const char *ha_default_cert_dir();
|
||||||
|
|
||||||
extern struct cert_exts cert_exts[];
|
extern struct cert_exts cert_exts[];
|
||||||
extern int (*ssl_commit_crlfile_cb)(const char *path, X509_STORE *ctx, char **err);
|
extern int (*ssl_commit_crlfile_cb)(const char *path, X509_STORE *ctx, char **err);
|
||||||
|
|
|
||||||
|
|
@ -28,6 +28,7 @@
|
||||||
|
|
||||||
/* crt-list entry functions */
|
/* crt-list entry functions */
|
||||||
void ssl_sock_free_ssl_conf(struct ssl_bind_conf *conf);
|
void ssl_sock_free_ssl_conf(struct ssl_bind_conf *conf);
|
||||||
|
struct ssl_bind_conf *crtlist_dup_ssl_conf(struct ssl_bind_conf *src);
|
||||||
char **crtlist_dup_filters(char **args, int fcount);
|
char **crtlist_dup_filters(char **args, int fcount);
|
||||||
void crtlist_free_filters(char **args);
|
void crtlist_free_filters(char **args);
|
||||||
void crtlist_entry_free(struct crtlist_entry *entry);
|
void crtlist_entry_free(struct crtlist_entry *entry);
|
||||||
|
|
|
||||||
|
|
@ -32,6 +32,8 @@ int ssl_sock_set_generated_cert(SSL_CTX *ctx, unsigned int key, struct bind_conf
|
||||||
unsigned int ssl_sock_generated_cert_key(const void *data, size_t len);
|
unsigned int ssl_sock_generated_cert_key(const void *data, size_t len);
|
||||||
int ssl_sock_gencert_load_ca(struct bind_conf *bind_conf);
|
int ssl_sock_gencert_load_ca(struct bind_conf *bind_conf);
|
||||||
void ssl_sock_gencert_free_ca(struct bind_conf *bind_conf);
|
void ssl_sock_gencert_free_ca(struct bind_conf *bind_conf);
|
||||||
|
EVP_PKEY *ssl_gen_EVP_PKEY(int keytype, int curves, int bits, char **errmsg);
|
||||||
|
X509 *ssl_gen_x509(EVP_PKEY *pkey);
|
||||||
|
|
||||||
#endif /* USE_OPENSSL */
|
#endif /* USE_OPENSSL */
|
||||||
#endif /* _HAPROXY_SSL_GENCERT_H */
|
#endif /* _HAPROXY_SSL_GENCERT_H */
|
||||||
|
|
|
||||||
|
|
@ -194,7 +194,7 @@ struct issuer_chain {
|
||||||
|
|
||||||
struct connection;
|
struct connection;
|
||||||
|
|
||||||
typedef void (*ssl_sock_msg_callback_func)(struct connection *conn,
|
typedef void (*ssl_sock_msg_callback_func)(
|
||||||
int write_p, int version, int content_type,
|
int write_p, int version, int content_type,
|
||||||
const void *buf, size_t len, SSL *ssl);
|
const void *buf, size_t len, SSL *ssl);
|
||||||
|
|
||||||
|
|
@ -338,6 +338,8 @@ struct global_ssl {
|
||||||
int renegotiate; /* Renegotiate mode (SSL_RENEGOTIATE_ flag) */
|
int renegotiate; /* Renegotiate mode (SSL_RENEGOTIATE_ flag) */
|
||||||
char **passphrase_cmd;
|
char **passphrase_cmd;
|
||||||
int passphrase_cmd_args_cnt;
|
int passphrase_cmd_args_cnt;
|
||||||
|
|
||||||
|
unsigned int certificate_compression:1; /* allow to explicitely disable certificate compression */
|
||||||
};
|
};
|
||||||
|
|
||||||
/* The order here matters for picking a default context,
|
/* The order here matters for picking a default context,
|
||||||
|
|
@ -361,6 +363,7 @@ struct passphrase_cb_data {
|
||||||
const char *path;
|
const char *path;
|
||||||
struct ckch_data *ckch_data;
|
struct ckch_data *ckch_data;
|
||||||
int passphrase_idx;
|
int passphrase_idx;
|
||||||
|
int callback_called;
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* USE_OPENSSL */
|
#endif /* USE_OPENSSL */
|
||||||
|
|
|
||||||
|
|
@ -25,12 +25,13 @@
|
||||||
|
|
||||||
|
|
||||||
#include <haproxy/connection.h>
|
#include <haproxy/connection.h>
|
||||||
|
#include <haproxy/counters.h>
|
||||||
#include <haproxy/openssl-compat.h>
|
#include <haproxy/openssl-compat.h>
|
||||||
#include <haproxy/pool-t.h>
|
#include <haproxy/pool-t.h>
|
||||||
#include <haproxy/proxy-t.h>
|
#include <haproxy/proxy-t.h>
|
||||||
#include <haproxy/quic_conn-t.h>
|
#include <haproxy/quic_conn-t.h>
|
||||||
#include <haproxy/ssl_sock-t.h>
|
#include <haproxy/ssl_sock-t.h>
|
||||||
#include <haproxy/stats.h>
|
#include <haproxy/stats-t.h>
|
||||||
#include <haproxy/thread.h>
|
#include <haproxy/thread.h>
|
||||||
|
|
||||||
extern struct list tlskeys_reference;
|
extern struct list tlskeys_reference;
|
||||||
|
|
@ -73,7 +74,7 @@ int ssl_sock_get_alpn(const struct connection *conn, void *xprt_ctx,
|
||||||
const char **str, int *len);
|
const char **str, int *len);
|
||||||
int ssl_bio_and_sess_init(struct connection *conn, SSL_CTX *ssl_ctx,
|
int ssl_bio_and_sess_init(struct connection *conn, SSL_CTX *ssl_ctx,
|
||||||
SSL **ssl, BIO **bio, BIO_METHOD *bio_meth, void *ctx);
|
SSL **ssl, BIO **bio, BIO_METHOD *bio_meth, void *ctx);
|
||||||
int ssl_sock_srv_try_reuse_sess(struct ssl_sock_ctx *ctx, struct server *srv);
|
void ssl_sock_srv_try_reuse_sess(struct ssl_sock_ctx *ctx, struct server *srv);
|
||||||
const char *ssl_sock_get_sni(struct connection *conn);
|
const char *ssl_sock_get_sni(struct connection *conn);
|
||||||
const char *ssl_sock_get_cert_sig(struct connection *conn);
|
const char *ssl_sock_get_cert_sig(struct connection *conn);
|
||||||
const char *ssl_sock_get_cipher_name(struct connection *conn);
|
const char *ssl_sock_get_cipher_name(struct connection *conn);
|
||||||
|
|
|
||||||
|
|
@ -47,7 +47,7 @@ struct shm_stats_file_hdr {
|
||||||
*/
|
*/
|
||||||
struct {
|
struct {
|
||||||
pid_t pid;
|
pid_t pid;
|
||||||
int heartbeat; // last activity of this process + heartbeat timeout, in ticks
|
uint heartbeat; // last activity of this process + heartbeat timeout, in ticks
|
||||||
} slots[64];
|
} slots[64];
|
||||||
int objects; /* actual number of objects stored in the shm */
|
int objects; /* actual number of objects stored in the shm */
|
||||||
int objects_slots; /* total available objects slots unless map is resized */
|
int objects_slots; /* total available objects slots unless map is resized */
|
||||||
|
|
|
||||||
|
|
@ -25,6 +25,7 @@
|
||||||
#include <import/ebtree-t.h>
|
#include <import/ebtree-t.h>
|
||||||
#include <haproxy/api-t.h>
|
#include <haproxy/api-t.h>
|
||||||
#include <haproxy/buf-t.h>
|
#include <haproxy/buf-t.h>
|
||||||
|
#include <haproxy/counters-t.h>
|
||||||
|
|
||||||
/* Flags for applet.ctx.stats.flags */
|
/* Flags for applet.ctx.stats.flags */
|
||||||
#define STAT_F_FMT_HTML 0x00000001 /* dump the stats in HTML format */
|
#define STAT_F_FMT_HTML 0x00000001 /* dump the stats in HTML format */
|
||||||
|
|
@ -515,23 +516,13 @@ struct field {
|
||||||
} u;
|
} u;
|
||||||
};
|
};
|
||||||
|
|
||||||
enum counters_type {
|
|
||||||
COUNTERS_FE = 0,
|
|
||||||
COUNTERS_BE,
|
|
||||||
COUNTERS_SV,
|
|
||||||
COUNTERS_LI,
|
|
||||||
COUNTERS_RSLV,
|
|
||||||
|
|
||||||
COUNTERS_OFF_END
|
|
||||||
};
|
|
||||||
|
|
||||||
/* Entity used to generate statistics on an HAProxy component */
|
/* Entity used to generate statistics on an HAProxy component */
|
||||||
struct stats_module {
|
struct stats_module {
|
||||||
struct list list;
|
struct list list;
|
||||||
const char *name;
|
const char *name;
|
||||||
|
|
||||||
/* functor used to generate the stats module using counters provided through data parameter */
|
/* function used to generate the stats module using counters provided through data parameter */
|
||||||
int (*fill_stats)(void *data, struct field *, unsigned int *);
|
int (*fill_stats)(struct stats_module *, struct extra_counters *, struct field *, unsigned int *);
|
||||||
|
|
||||||
struct stat_col *stats; /* statistics provided by the module */
|
struct stat_col *stats; /* statistics provided by the module */
|
||||||
void *counters; /* initial values of allocated counters */
|
void *counters; /* initial values of allocated counters */
|
||||||
|
|
@ -543,12 +534,6 @@ struct stats_module {
|
||||||
char clearable; /* reset on a clear counters */
|
char clearable; /* reset on a clear counters */
|
||||||
};
|
};
|
||||||
|
|
||||||
struct extra_counters {
|
|
||||||
char *data; /* heap containing counters allocated in a linear fashion */
|
|
||||||
size_t size; /* size of allocated data */
|
|
||||||
enum counters_type type; /* type of object containing the counters */
|
|
||||||
};
|
|
||||||
|
|
||||||
/* stats_domain is used in a flag as a 1 byte field */
|
/* stats_domain is used in a flag as a 1 byte field */
|
||||||
enum stats_domain {
|
enum stats_domain {
|
||||||
STATS_DOMAIN_PROXY = 0,
|
STATS_DOMAIN_PROXY = 0,
|
||||||
|
|
@ -593,58 +578,9 @@ struct show_stat_ctx {
|
||||||
int iid, type, sid; /* proxy id, type and service id if bounding of stats is enabled */
|
int iid, type, sid; /* proxy id, type and service id if bounding of stats is enabled */
|
||||||
int st_code; /* the status code returned by an action */
|
int st_code; /* the status code returned by an action */
|
||||||
struct buffer chunk; /* temporary buffer which holds a single-line output */
|
struct buffer chunk; /* temporary buffer which holds a single-line output */
|
||||||
|
struct watcher px_watch; /* watcher to automatically update obj1 on backend deletion */
|
||||||
struct watcher srv_watch; /* watcher to automatically update obj2 on server deletion */
|
struct watcher srv_watch; /* watcher to automatically update obj2 on server deletion */
|
||||||
enum stat_state state; /* phase of output production */
|
enum stat_state state; /* phase of output production */
|
||||||
};
|
};
|
||||||
|
|
||||||
extern THREAD_LOCAL void *trash_counters;
|
|
||||||
|
|
||||||
#define EXTRA_COUNTERS(name) \
|
|
||||||
struct extra_counters *name
|
|
||||||
|
|
||||||
#define EXTRA_COUNTERS_GET(counters, mod) \
|
|
||||||
(likely(counters) ? \
|
|
||||||
((void *)((counters)->data + (mod)->counters_off[(counters)->type])) : \
|
|
||||||
(trash_counters))
|
|
||||||
|
|
||||||
#define EXTRA_COUNTERS_REGISTER(counters, ctype, alloc_failed_label) \
|
|
||||||
do { \
|
|
||||||
typeof(*counters) _ctr; \
|
|
||||||
_ctr = calloc(1, sizeof(*_ctr)); \
|
|
||||||
if (!_ctr) \
|
|
||||||
goto alloc_failed_label; \
|
|
||||||
_ctr->type = (ctype); \
|
|
||||||
*(counters) = _ctr; \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
#define EXTRA_COUNTERS_ADD(mod, counters, new_counters, csize) \
|
|
||||||
do { \
|
|
||||||
typeof(counters) _ctr = (counters); \
|
|
||||||
(mod)->counters_off[_ctr->type] = _ctr->size; \
|
|
||||||
_ctr->size += (csize); \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
#define EXTRA_COUNTERS_ALLOC(counters, alloc_failed_label) \
|
|
||||||
do { \
|
|
||||||
typeof(counters) _ctr = (counters); \
|
|
||||||
_ctr->data = malloc((_ctr)->size); \
|
|
||||||
if (!_ctr->data) \
|
|
||||||
goto alloc_failed_label; \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
#define EXTRA_COUNTERS_INIT(counters, mod, init_counters, init_counters_size) \
|
|
||||||
do { \
|
|
||||||
typeof(counters) _ctr = (counters); \
|
|
||||||
memcpy(_ctr->data + mod->counters_off[_ctr->type], \
|
|
||||||
(init_counters), (init_counters_size)); \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
#define EXTRA_COUNTERS_FREE(counters) \
|
|
||||||
do { \
|
|
||||||
if (counters) { \
|
|
||||||
free((counters)->data); \
|
|
||||||
free(counters); \
|
|
||||||
} \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_STATS_T_H */
|
#endif /* _HAPROXY_STATS_T_H */
|
||||||
|
|
|
||||||
|
|
@ -24,6 +24,7 @@
|
||||||
#define _HAPROXY_STATS_H
|
#define _HAPROXY_STATS_H
|
||||||
|
|
||||||
#include <haproxy/api.h>
|
#include <haproxy/api.h>
|
||||||
|
#include <haproxy/counters.h>
|
||||||
#include <haproxy/listener-t.h>
|
#include <haproxy/listener-t.h>
|
||||||
#include <haproxy/stats-t.h>
|
#include <haproxy/stats-t.h>
|
||||||
#include <haproxy/tools-t.h>
|
#include <haproxy/tools-t.h>
|
||||||
|
|
@ -167,7 +168,8 @@ static inline enum stats_domain_px_cap stats_px_get_cap(uint32_t domain)
|
||||||
}
|
}
|
||||||
|
|
||||||
int stats_allocate_proxy_counters_internal(struct extra_counters **counters,
|
int stats_allocate_proxy_counters_internal(struct extra_counters **counters,
|
||||||
int type, int px_cap);
|
int type, int px_cap,
|
||||||
|
char **storage, size_t step);
|
||||||
int stats_allocate_proxy_counters(struct proxy *px);
|
int stats_allocate_proxy_counters(struct proxy *px);
|
||||||
|
|
||||||
void stats_register_module(struct stats_module *m);
|
void stats_register_module(struct stats_module *m);
|
||||||
|
|
|
||||||
|
|
@ -224,7 +224,7 @@ static forceinline char *sc_show_flags(char *buf, size_t len, const char *delim,
|
||||||
_(SC_FL_NEED_BUFF, _(SC_FL_NEED_ROOM,
|
_(SC_FL_NEED_BUFF, _(SC_FL_NEED_ROOM,
|
||||||
_(SC_FL_RCV_ONCE, _(SC_FL_SND_ASAP, _(SC_FL_SND_NEVERWAIT, _(SC_FL_SND_EXP_MORE,
|
_(SC_FL_RCV_ONCE, _(SC_FL_SND_ASAP, _(SC_FL_SND_NEVERWAIT, _(SC_FL_SND_EXP_MORE,
|
||||||
_(SC_FL_ABRT_WANTED, _(SC_FL_SHUT_WANTED, _(SC_FL_ABRT_DONE, _(SC_FL_SHUT_DONE,
|
_(SC_FL_ABRT_WANTED, _(SC_FL_SHUT_WANTED, _(SC_FL_ABRT_DONE, _(SC_FL_SHUT_DONE,
|
||||||
_(SC_FL_EOS, _(SC_FL_HAVE_BUFF))))))))))))))))))));
|
_(SC_FL_EOS, _(SC_FL_HAVE_BUFF, _(SC_FL_NO_FASTFWD)))))))))))))))))))));
|
||||||
/* epilogue */
|
/* epilogue */
|
||||||
_(~0U);
|
_(~0U);
|
||||||
return buf;
|
return buf;
|
||||||
|
|
|
||||||
|
|
@ -24,6 +24,7 @@
|
||||||
|
|
||||||
#include <haproxy/api.h>
|
#include <haproxy/api.h>
|
||||||
#include <haproxy/connection.h>
|
#include <haproxy/connection.h>
|
||||||
|
#include <haproxy/hstream-t.h>
|
||||||
#include <haproxy/htx-t.h>
|
#include <haproxy/htx-t.h>
|
||||||
#include <haproxy/obj_type.h>
|
#include <haproxy/obj_type.h>
|
||||||
#include <haproxy/stconn-t.h>
|
#include <haproxy/stconn-t.h>
|
||||||
|
|
@ -45,10 +46,12 @@ void se_shutdown(struct sedesc *sedesc, enum se_shut_mode mode);
|
||||||
struct stconn *sc_new_from_endp(struct sedesc *sedesc, struct session *sess, struct buffer *input);
|
struct stconn *sc_new_from_endp(struct sedesc *sedesc, struct session *sess, struct buffer *input);
|
||||||
struct stconn *sc_new_from_strm(struct stream *strm, unsigned int flags);
|
struct stconn *sc_new_from_strm(struct stream *strm, unsigned int flags);
|
||||||
struct stconn *sc_new_from_check(struct check *check, unsigned int flags);
|
struct stconn *sc_new_from_check(struct check *check, unsigned int flags);
|
||||||
|
struct stconn *sc_new_from_haterm(struct sedesc *sd, struct session *sess, struct buffer *input);
|
||||||
void sc_free(struct stconn *sc);
|
void sc_free(struct stconn *sc);
|
||||||
|
|
||||||
int sc_attach_mux(struct stconn *sc, void *target, void *ctx);
|
int sc_attach_mux(struct stconn *sc, void *target, void *ctx);
|
||||||
int sc_attach_strm(struct stconn *sc, struct stream *strm);
|
int sc_attach_strm(struct stconn *sc, struct stream *strm);
|
||||||
|
int sc_attach_hstream(struct stconn *sc, struct hstream *hs);
|
||||||
|
|
||||||
void sc_destroy(struct stconn *sc);
|
void sc_destroy(struct stconn *sc);
|
||||||
int sc_reset_endp(struct stconn *sc);
|
int sc_reset_endp(struct stconn *sc);
|
||||||
|
|
@ -331,6 +334,21 @@ static inline struct check *sc_check(const struct stconn *sc)
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Returns the haterm stream from a sc if the application is a
|
||||||
|
* haterm stream. Otherwise NULL is returned. __sc_hstream() returns the haterm
|
||||||
|
* stream without any control while sc_hstream() check the application type.
|
||||||
|
*/
|
||||||
|
static inline struct hstream *__sc_hstream(const struct stconn *sc)
|
||||||
|
{
|
||||||
|
return __objt_hstream(sc->app);
|
||||||
|
}
|
||||||
|
static inline struct hstream *sc_hstream(const struct stconn *sc)
|
||||||
|
{
|
||||||
|
if (obj_type(sc->app) == OBJ_TYPE_HATERM)
|
||||||
|
return __objt_hstream(sc->app);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
/* Returns the name of the application layer's name for the stconn,
|
/* Returns the name of the application layer's name for the stconn,
|
||||||
* or "NONE" when none is attached.
|
* or "NONE" when none is attached.
|
||||||
*/
|
*/
|
||||||
|
|
@ -397,67 +415,6 @@ static inline void se_need_remote_conn(struct sedesc *se)
|
||||||
se_fl_set(se, SE_FL_APPLET_NEED_CONN);
|
se_fl_set(se, SE_FL_APPLET_NEED_CONN);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* The application layer tells the stream connector that it just got the input
|
|
||||||
* buffer it was waiting for. A read activity is reported. The SC_FL_HAVE_BUFF
|
|
||||||
* flag is set and held until sc_used_buff() is called to indicate it was
|
|
||||||
* used.
|
|
||||||
*/
|
|
||||||
static inline void sc_have_buff(struct stconn *sc)
|
|
||||||
{
|
|
||||||
if (sc->flags & SC_FL_NEED_BUFF) {
|
|
||||||
sc->flags &= ~SC_FL_NEED_BUFF;
|
|
||||||
sc->flags |= SC_FL_HAVE_BUFF;
|
|
||||||
sc_ep_report_read_activity(sc);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* The stream connector failed to get an input buffer and is waiting for it.
|
|
||||||
* It indicates a willingness to deliver data to the buffer that will have to
|
|
||||||
* be retried. As such, callers will often automatically clear SE_FL_HAVE_NO_DATA
|
|
||||||
* to be called again as soon as SC_FL_NEED_BUFF is cleared.
|
|
||||||
*/
|
|
||||||
static inline void sc_need_buff(struct stconn *sc)
|
|
||||||
{
|
|
||||||
sc->flags |= SC_FL_NEED_BUFF;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* The stream connector indicates that it has successfully allocated the buffer
|
|
||||||
* it was previously waiting for so it drops the SC_FL_HAVE_BUFF bit.
|
|
||||||
*/
|
|
||||||
static inline void sc_used_buff(struct stconn *sc)
|
|
||||||
{
|
|
||||||
sc->flags &= ~SC_FL_HAVE_BUFF;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Tell a stream connector some room was made in the input buffer and any
|
|
||||||
* failed attempt to inject data into it may be tried again. This is usually
|
|
||||||
* called after a successful transfer of buffer contents to the other side.
|
|
||||||
* A read activity is reported.
|
|
||||||
*/
|
|
||||||
static inline void sc_have_room(struct stconn *sc)
|
|
||||||
{
|
|
||||||
if (sc->flags & SC_FL_NEED_ROOM) {
|
|
||||||
sc->flags &= ~SC_FL_NEED_ROOM;
|
|
||||||
sc->room_needed = 0;
|
|
||||||
sc_ep_report_read_activity(sc);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* The stream connector announces it failed to put data into the input buffer
|
|
||||||
* by lack of room. Since it indicates a willingness to deliver data to the
|
|
||||||
* buffer that will have to be retried. Usually the caller will also clear
|
|
||||||
* SE_FL_HAVE_NO_DATA to be called again as soon as SC_FL_NEED_ROOM is cleared.
|
|
||||||
*
|
|
||||||
* The caller is responsible to specified the amount of free space required to
|
|
||||||
* progress. It must take care to not exceed the buffer size.
|
|
||||||
*/
|
|
||||||
static inline void sc_need_room(struct stconn *sc, ssize_t room_needed)
|
|
||||||
{
|
|
||||||
sc->flags |= SC_FL_NEED_ROOM;
|
|
||||||
BUG_ON_HOT(room_needed > (ssize_t)global.tune.bufsize);
|
|
||||||
sc->room_needed = room_needed;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* The stream endpoint indicates that it's ready to consume data from the
|
/* The stream endpoint indicates that it's ready to consume data from the
|
||||||
* stream's output buffer. Report a send activity if the SE is unblocked.
|
* stream's output buffer. Report a send activity if the SE is unblocked.
|
||||||
*/
|
*/
|
||||||
|
|
@ -568,7 +525,7 @@ static inline size_t se_done_ff(struct sedesc *se)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
se->sc->bytes_out += ret;
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -180,6 +180,7 @@ enum {
|
||||||
STRM_EVT_SHUT_SRV_DOWN = 0x00000004, /* Must be shut because the selected server became available */
|
STRM_EVT_SHUT_SRV_DOWN = 0x00000004, /* Must be shut because the selected server became available */
|
||||||
STRM_EVT_SHUT_SRV_UP = 0x00000008, /* Must be shut because a preferred server became available */
|
STRM_EVT_SHUT_SRV_UP = 0x00000008, /* Must be shut because a preferred server became available */
|
||||||
STRM_EVT_KILLED = 0x00000010, /* Must be shut for external reason */
|
STRM_EVT_KILLED = 0x00000010, /* Must be shut for external reason */
|
||||||
|
STRM_EVT_RES = 0x00000020, /* A requested resource is available (a buffer, a conn_slot...) */
|
||||||
};
|
};
|
||||||
|
|
||||||
/* This function is used to report flags in debugging tools. Please reflect
|
/* This function is used to report flags in debugging tools. Please reflect
|
||||||
|
|
@ -320,7 +321,10 @@ struct stream {
|
||||||
struct list *current_rule_list; /* this is used to store the current executed rule list. */
|
struct list *current_rule_list; /* this is used to store the current executed rule list. */
|
||||||
void *current_rule; /* this is used to store the current rule to be resumed. */
|
void *current_rule; /* this is used to store the current rule to be resumed. */
|
||||||
int rules_exp; /* expiration date for current rules execution */
|
int rules_exp; /* expiration date for current rules execution */
|
||||||
int tunnel_timeout;
|
int tunnel_timeout; /* per-stream tunnel timeout, set by set-timeout action */
|
||||||
|
int connect_timeout; /* per-stream connect timeout, set by set-timeout action */
|
||||||
|
int queue_timeout; /* per-stream queue timeout, set by set-timeout action */
|
||||||
|
int tarpit_timeout; /* per-stream tarpit timeout, set by set-timeout action */
|
||||||
|
|
||||||
struct {
|
struct {
|
||||||
void *ptr; /* Pointer on the entity (def: NULL) */
|
void *ptr; /* Pointer on the entity (def: NULL) */
|
||||||
|
|
|
||||||
|
|
@ -59,7 +59,7 @@ extern struct pool_head *pool_head_uniqueid;
|
||||||
|
|
||||||
extern struct data_cb sess_conn_cb;
|
extern struct data_cb sess_conn_cb;
|
||||||
|
|
||||||
struct stream *stream_new(struct session *sess, struct stconn *sc, struct buffer *input);
|
void *stream_new(struct session *sess, struct stconn *sc, struct buffer *input);
|
||||||
void stream_free(struct stream *s);
|
void stream_free(struct stream *s);
|
||||||
int stream_upgrade_from_sc(struct stconn *sc, struct buffer *input);
|
int stream_upgrade_from_sc(struct stconn *sc, struct buffer *input);
|
||||||
int stream_set_http_mode(struct stream *s, const struct mux_proto_list *mux_proto);
|
int stream_set_http_mode(struct stream *s, const struct mux_proto_list *mux_proto);
|
||||||
|
|
@ -412,6 +412,7 @@ static inline void stream_shutdown(struct stream *s, int why)
|
||||||
static inline unsigned int stream_map_task_state(unsigned int state)
|
static inline unsigned int stream_map_task_state(unsigned int state)
|
||||||
{
|
{
|
||||||
return ((state & TASK_WOKEN_TIMER) ? STRM_EVT_TIMER : 0) |
|
return ((state & TASK_WOKEN_TIMER) ? STRM_EVT_TIMER : 0) |
|
||||||
|
((state & TASK_WOKEN_RES) ? STRM_EVT_RES : 0) |
|
||||||
((state & TASK_WOKEN_MSG) ? STRM_EVT_MSG : 0) |
|
((state & TASK_WOKEN_MSG) ? STRM_EVT_MSG : 0) |
|
||||||
((state & TASK_F_UEVT1) ? STRM_EVT_SHUT_SRV_DOWN : 0) |
|
((state & TASK_F_UEVT1) ? STRM_EVT_SHUT_SRV_DOWN : 0) |
|
||||||
((state & TASK_F_UEVT3) ? STRM_EVT_SHUT_SRV_UP : 0) |
|
((state & TASK_F_UEVT3) ? STRM_EVT_SHUT_SRV_UP : 0) |
|
||||||
|
|
|
||||||
|
|
@ -217,6 +217,7 @@ enum lock_label {
|
||||||
QC_CID_LOCK,
|
QC_CID_LOCK,
|
||||||
CACHE_LOCK,
|
CACHE_LOCK,
|
||||||
GUID_LOCK,
|
GUID_LOCK,
|
||||||
|
PROXIES_DEL_LOCK,
|
||||||
OTHER_LOCK,
|
OTHER_LOCK,
|
||||||
/* WT: make sure never to use these ones outside of development,
|
/* WT: make sure never to use these ones outside of development,
|
||||||
* we need them for lock profiling!
|
* we need them for lock profiling!
|
||||||
|
|
|
||||||
|
|
@ -362,15 +362,19 @@ static inline unsigned long thread_isolated()
|
||||||
extern uint64_t now_mono_time(void); \
|
extern uint64_t now_mono_time(void); \
|
||||||
if (_LK_ != _LK_UN) { \
|
if (_LK_ != _LK_UN) { \
|
||||||
th_ctx->lock_level += bal; \
|
th_ctx->lock_level += bal; \
|
||||||
if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) \
|
if (unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L)) == \
|
||||||
|
(TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L))) \
|
||||||
lock_start = now_mono_time(); \
|
lock_start = now_mono_time(); \
|
||||||
} \
|
} \
|
||||||
(void)(expr); \
|
(void)(expr); \
|
||||||
if (_LK_ == _LK_UN) { \
|
if (_LK_ == _LK_UN) { \
|
||||||
th_ctx->lock_level += bal; \
|
th_ctx->lock_level += bal; \
|
||||||
if (th_ctx->lock_level == 0 && unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) \
|
if (th_ctx->lock_level == 0 &&\
|
||||||
|
unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L)) == \
|
||||||
|
(TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L))) \
|
||||||
th_ctx->locked_total += now_mono_time() - th_ctx->lock_start_date; \
|
th_ctx->locked_total += now_mono_time() - th_ctx->lock_start_date; \
|
||||||
} else if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) { \
|
} else if (unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L)) == \
|
||||||
|
(TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L))) { \
|
||||||
uint64_t now = now_mono_time(); \
|
uint64_t now = now_mono_time(); \
|
||||||
if (lock_start) \
|
if (lock_start) \
|
||||||
th_ctx->lock_wait_total += now - lock_start; \
|
th_ctx->lock_wait_total += now - lock_start; \
|
||||||
|
|
@ -384,7 +388,8 @@ static inline unsigned long thread_isolated()
|
||||||
typeof(expr) _expr = (expr); \
|
typeof(expr) _expr = (expr); \
|
||||||
if (_expr == 0) { \
|
if (_expr == 0) { \
|
||||||
th_ctx->lock_level += bal; \
|
th_ctx->lock_level += bal; \
|
||||||
if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) { \
|
if (unlikely((th_ctx->flags & (TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L)) == \
|
||||||
|
(TH_FL_TASK_PROFILING|TH_FL_TASK_PROFILING_L))) { \
|
||||||
if (_LK_ == _LK_UN && th_ctx->lock_level == 0) \
|
if (_LK_ == _LK_UN && th_ctx->lock_level == 0) \
|
||||||
th_ctx->locked_total += now_mono_time() - th_ctx->lock_start_date; \
|
th_ctx->locked_total += now_mono_time() - th_ctx->lock_start_date; \
|
||||||
else if (_LK_ != _LK_UN && th_ctx->lock_level == 1) \
|
else if (_LK_ != _LK_UN && th_ctx->lock_level == 1) \
|
||||||
|
|
|
||||||
|
|
@ -69,6 +69,8 @@ enum {
|
||||||
#define TH_FL_IN_DBG_HANDLER 0x00000100 /* thread currently in the debug signal handler */
|
#define TH_FL_IN_DBG_HANDLER 0x00000100 /* thread currently in the debug signal handler */
|
||||||
#define TH_FL_IN_WDT_HANDLER 0x00000200 /* thread currently in the wdt signal handler */
|
#define TH_FL_IN_WDT_HANDLER 0x00000200 /* thread currently in the wdt signal handler */
|
||||||
#define TH_FL_IN_ANY_HANDLER 0x00000380 /* mask to test if the thread is in any signal handler */
|
#define TH_FL_IN_ANY_HANDLER 0x00000380 /* mask to test if the thread is in any signal handler */
|
||||||
|
#define TH_FL_TASK_PROFILING_L 0x00000400 /* task profiling in locks (also requires TASK_PROFILING) */
|
||||||
|
#define TH_FL_TASK_PROFILING_M 0x00000800 /* task profiling in mem alloc (also requires TASK_PROFILING) */
|
||||||
|
|
||||||
/* we have 4 buffer-wait queues, in highest to lowest emergency order */
|
/* we have 4 buffer-wait queues, in highest to lowest emergency order */
|
||||||
#define DYNBUF_NBQ 4
|
#define DYNBUF_NBQ 4
|
||||||
|
|
|
||||||
|
|
@ -45,6 +45,7 @@
|
||||||
#define TRC_ARG_CHK (1 << 3)
|
#define TRC_ARG_CHK (1 << 3)
|
||||||
#define TRC_ARG_QCON (1 << 4)
|
#define TRC_ARG_QCON (1 << 4)
|
||||||
#define TRC_ARG_APPCTX (1 << 5)
|
#define TRC_ARG_APPCTX (1 << 5)
|
||||||
|
#define TRC_ARG_HSTRM (1 << 6)
|
||||||
|
|
||||||
#define TRC_ARG1_PRIV (TRC_ARG_PRIV << 0)
|
#define TRC_ARG1_PRIV (TRC_ARG_PRIV << 0)
|
||||||
#define TRC_ARG1_CONN (TRC_ARG_CONN << 0)
|
#define TRC_ARG1_CONN (TRC_ARG_CONN << 0)
|
||||||
|
|
@ -53,6 +54,7 @@
|
||||||
#define TRC_ARG1_CHK (TRC_ARG_CHK << 0)
|
#define TRC_ARG1_CHK (TRC_ARG_CHK << 0)
|
||||||
#define TRC_ARG1_QCON (TRC_ARG_QCON << 0)
|
#define TRC_ARG1_QCON (TRC_ARG_QCON << 0)
|
||||||
#define TRC_ARG1_APPCTX (TRC_ARG_APPCTX << 0)
|
#define TRC_ARG1_APPCTX (TRC_ARG_APPCTX << 0)
|
||||||
|
#define TRC_ARG1_HSTRM (TRC_ARG_HSTRM << 0)
|
||||||
|
|
||||||
#define TRC_ARG2_PRIV (TRC_ARG_PRIV << 8)
|
#define TRC_ARG2_PRIV (TRC_ARG_PRIV << 8)
|
||||||
#define TRC_ARG2_CONN (TRC_ARG_CONN << 8)
|
#define TRC_ARG2_CONN (TRC_ARG_CONN << 8)
|
||||||
|
|
@ -61,6 +63,7 @@
|
||||||
#define TRC_ARG2_CHK (TRC_ARG_CHK << 8)
|
#define TRC_ARG2_CHK (TRC_ARG_CHK << 8)
|
||||||
#define TRC_ARG2_QCON (TRC_ARG_QCON << 8)
|
#define TRC_ARG2_QCON (TRC_ARG_QCON << 8)
|
||||||
#define TRC_ARG2_APPCTX (TRC_ARG_APPCTX << 8)
|
#define TRC_ARG2_APPCTX (TRC_ARG_APPCTX << 8)
|
||||||
|
#define TRC_ARG2_HSTRM (TRC_ARG_HSTRM << 8)
|
||||||
|
|
||||||
#define TRC_ARG3_PRIV (TRC_ARG_PRIV << 16)
|
#define TRC_ARG3_PRIV (TRC_ARG_PRIV << 16)
|
||||||
#define TRC_ARG3_CONN (TRC_ARG_CONN << 16)
|
#define TRC_ARG3_CONN (TRC_ARG_CONN << 16)
|
||||||
|
|
@ -69,6 +72,7 @@
|
||||||
#define TRC_ARG3_CHK (TRC_ARG_CHK << 16)
|
#define TRC_ARG3_CHK (TRC_ARG_CHK << 16)
|
||||||
#define TRC_ARG3_QCON (TRC_ARG_QCON << 16)
|
#define TRC_ARG3_QCON (TRC_ARG_QCON << 16)
|
||||||
#define TRC_ARG3_APPCTX (TRC_ARG_APPCTX << 16)
|
#define TRC_ARG3_APPCTX (TRC_ARG_APPCTX << 16)
|
||||||
|
#define TRC_ARG3_HSTRM (TRC_ARG_HSTRM << 16)
|
||||||
|
|
||||||
#define TRC_ARG4_PRIV (TRC_ARG_PRIV << 24)
|
#define TRC_ARG4_PRIV (TRC_ARG_PRIV << 24)
|
||||||
#define TRC_ARG4_CONN (TRC_ARG_CONN << 24)
|
#define TRC_ARG4_CONN (TRC_ARG_CONN << 24)
|
||||||
|
|
@ -77,6 +81,7 @@
|
||||||
#define TRC_ARG4_CHK (TRC_ARG_CHK << 24)
|
#define TRC_ARG4_CHK (TRC_ARG_CHK << 24)
|
||||||
#define TRC_ARG4_QCON (TRC_ARG_QCON << 24)
|
#define TRC_ARG4_QCON (TRC_ARG_QCON << 24)
|
||||||
#define TRC_ARG4_APPCTX (TRC_ARG_APPCTX << 24)
|
#define TRC_ARG4_APPCTX (TRC_ARG_APPCTX << 24)
|
||||||
|
#define TRC_ARG4_HSTRM (TRC_ARG_HSTRM << 24)
|
||||||
|
|
||||||
/* usable to detect the presence of any arg of the desired type */
|
/* usable to detect the presence of any arg of the desired type */
|
||||||
#define TRC_ARGS_CONN (TRC_ARG_CONN * 0x01010101U)
|
#define TRC_ARGS_CONN (TRC_ARG_CONN * 0x01010101U)
|
||||||
|
|
@ -85,6 +90,7 @@
|
||||||
#define TRC_ARGS_CHK (TRC_ARG_CHK * 0x01010101U)
|
#define TRC_ARGS_CHK (TRC_ARG_CHK * 0x01010101U)
|
||||||
#define TRC_ARGS_QCON (TRC_ARG_QCON * 0x01010101U)
|
#define TRC_ARGS_QCON (TRC_ARG_QCON * 0x01010101U)
|
||||||
#define TRC_ARGS_APPCTX (TRC_ARG_APPCTX * 0x01010101U)
|
#define TRC_ARGS_APPCTX (TRC_ARG_APPCTX * 0x01010101U)
|
||||||
|
#define TRC_ARGS_HSTRM (TRC_ARG_HSTRM * 0x01010101U)
|
||||||
|
|
||||||
|
|
||||||
enum trace_state {
|
enum trace_state {
|
||||||
|
|
@ -148,6 +154,7 @@ struct trace_ctx {
|
||||||
const struct check *check;
|
const struct check *check;
|
||||||
const struct quic_conn *qc;
|
const struct quic_conn *qc;
|
||||||
const struct appctx *appctx;
|
const struct appctx *appctx;
|
||||||
|
const struct hstream *hs;
|
||||||
};
|
};
|
||||||
|
|
||||||
/* Regarding the verbosity, if <decoding> is not NULL, it must point to a NULL-
|
/* Regarding the verbosity, if <decoding> is not NULL, it must point to a NULL-
|
||||||
|
|
@ -185,7 +192,7 @@ struct trace_source {
|
||||||
const void *lockon_ptr; // what to lockon when lockon is set
|
const void *lockon_ptr; // what to lockon when lockon is set
|
||||||
const struct trace_source *follow; // other trace source's tracker to follow
|
const struct trace_source *follow; // other trace source's tracker to follow
|
||||||
int cmdline; // true if source was activated via -dt command line args
|
int cmdline; // true if source was activated via -dt command line args
|
||||||
};
|
} THREAD_ALIGNED();
|
||||||
|
|
||||||
#endif /* _HAPROXY_TRACE_T_H */
|
#endif /* _HAPROXY_TRACE_T_H */
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,6 +1,6 @@
|
||||||
varnishtest "Test the support for tcp-md5sig option (linux only)"
|
varnishtest "Test the support for tcp-md5sig option (linux only)"
|
||||||
|
|
||||||
feature cmd "$HAPROXY_PROGRAM -cc 'feature(HAVE_TCP_MD5SIG)'"
|
feature cmd "$HAPROXY_PROGRAM -cc 'feature(HAVE_WORKING_TCP_MD5SIG)'"
|
||||||
feature ignore_unknown_macro
|
feature ignore_unknown_macro
|
||||||
|
|
||||||
haproxy h1 -conf {
|
haproxy h1 -conf {
|
||||||
|
|
|
||||||
|
|
@ -1,8 +1,9 @@
|
||||||
#REGTEST_TYPE=devel
|
#REGTEST_TYPE=devel
|
||||||
|
|
||||||
# This reg-test checks the behaviour of the jwt_decrypt_secret and
|
# This reg-test checks the behaviour of the jwt_decrypt_secret,
|
||||||
# jwt_decrypt_cert converters that decode a JSON Web Encryption (JWE) token,
|
# jwt_decrypt_cert and jwt_decrypt_jwk converters that decode a JSON Web
|
||||||
# checks its signature and decrypt its content (RFC 7516).
|
# Encryption (JWE) token, check its signature and decrypt its content (RFC
|
||||||
|
# 7516).
|
||||||
# The tokens have two tiers of encryption, one that is used to encrypt a secret
|
# The tokens have two tiers of encryption, one that is used to encrypt a secret
|
||||||
# ("alg" field of the JOSE header) and this secret is then used to
|
# ("alg" field of the JOSE header) and this secret is then used to
|
||||||
# encrypt/decrypt the data contained in the token ("enc" field of the JOSE
|
# encrypt/decrypt the data contained in the token ("enc" field of the JOSE
|
||||||
|
|
@ -13,12 +14,12 @@
|
||||||
# have a hardcoded "AWS-LC UNMANAGED" value put in the response header instead
|
# have a hardcoded "AWS-LC UNMANAGED" value put in the response header instead
|
||||||
# of the decrypted contents.
|
# of the decrypted contents.
|
||||||
|
|
||||||
varnishtest "Test the 'jwt_decrypt' functionalities"
|
varnishtest "Test the 'jwt_decrypt_jwk' functionalities"
|
||||||
feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(3.4-dev2)'"
|
feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(3.4-dev2)'"
|
||||||
feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && openssl_version_atleast(1.1.1)'"
|
feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && openssl_version_atleast(1.1.1)'"
|
||||||
feature ignore_unknown_macro
|
feature ignore_unknown_macro
|
||||||
|
|
||||||
server s1 -repeat 10 {
|
server s1 -repeat 20 {
|
||||||
rxreq
|
rxreq
|
||||||
txresp
|
txresp
|
||||||
} -start
|
} -start
|
||||||
|
|
@ -57,6 +58,7 @@ haproxy h1 -conf {
|
||||||
|
|
||||||
use_backend secret_based_alg if { path_beg /secret }
|
use_backend secret_based_alg if { path_beg /secret }
|
||||||
use_backend pem_based_alg if { path_beg /pem }
|
use_backend pem_based_alg if { path_beg /pem }
|
||||||
|
use_backend jwk if { path_beg /jwk }
|
||||||
default_backend dflt
|
default_backend dflt
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -85,6 +87,21 @@ haproxy h1 -conf {
|
||||||
http-after-response set-header X-Decrypted %[var(txn.decrypted)]
|
http-after-response set-header X-Decrypted %[var(txn.decrypted)]
|
||||||
server s1 ${s1_addr}:${s1_port}
|
server s1 ${s1_addr}:${s1_port}
|
||||||
|
|
||||||
|
backend jwk
|
||||||
|
|
||||||
|
http-request set-var(txn.jwe) http_auth_bearer
|
||||||
|
http-request set-var(txn.jwk) req.fhdr(X-JWK)
|
||||||
|
|
||||||
|
http-request set-var(txn.decrypted) var(txn.jwe),jwt_decrypt_jwk(txn.jwk)
|
||||||
|
|
||||||
|
.if ssllib_name_startswith(AWS-LC)
|
||||||
|
acl aws_unmanaged var(txn.jwe),jwt_header_query('$.alg') -m str "A128KW"
|
||||||
|
http-request set-var(txn.decrypted) str("AWS-LC UNMANAGED") if aws_unmanaged
|
||||||
|
.endif
|
||||||
|
|
||||||
|
http-after-response set-header X-Decrypted %[var(txn.decrypted)]
|
||||||
|
server s1 ${s1_addr}:${s1_port}
|
||||||
|
|
||||||
|
|
||||||
backend dflt
|
backend dflt
|
||||||
server s1 ${s1_addr}:${s1_port}
|
server s1 ${s1_addr}:${s1_port}
|
||||||
|
|
@ -102,6 +119,10 @@ client c1_1 -connect ${h1_mainfe_sock} {
|
||||||
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8w" -hdr "X-Secret: ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"
|
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8w" -hdr "X-Secret: ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"
|
||||||
rxresp
|
rxresp
|
||||||
expect resp.http.x-decrypted == "Setec Astronomy"
|
expect resp.http.x-decrypted == "Setec Astronomy"
|
||||||
|
|
||||||
|
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8w" -hdr "X-JWK: {\"kty\":\"oct\", \"k\":\"ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg\"}"
|
||||||
|
rxresp
|
||||||
|
expect resp.http.x-decrypted == "Setec Astronomy"
|
||||||
} -run
|
} -run
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -113,6 +134,10 @@ client c1_2 -connect ${h1_mainfe_sock} {
|
||||||
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8v" -hdr "X-Secret: ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"
|
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8v" -hdr "X-Secret: ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"
|
||||||
rxresp
|
rxresp
|
||||||
expect resp.http.x-decrypted == ""
|
expect resp.http.x-decrypted == ""
|
||||||
|
|
||||||
|
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8v" -hdr "X-JWK: {\"kty\":\"oct\", \"k\":\"ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg\"}"
|
||||||
|
rxresp
|
||||||
|
expect resp.http.x-decrypted == ""
|
||||||
} -run
|
} -run
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -124,6 +149,10 @@ client c1_3 -connect ${h1_mainfe_sock} {
|
||||||
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8w" -hdr "X-Secret: zMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"
|
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8w" -hdr "X-Secret: zMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"
|
||||||
rxresp
|
rxresp
|
||||||
expect resp.http.x-decrypted == ""
|
expect resp.http.x-decrypted == ""
|
||||||
|
|
||||||
|
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8v" -hdr "X-JWK: {\"kty\":\"oct\", \"k\":\"zMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg\"}"
|
||||||
|
rxresp
|
||||||
|
expect resp.http.x-decrypted == ""
|
||||||
} -run
|
} -run
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -134,6 +163,11 @@ client c2_1 -connect ${h1_mainfe_sock} {
|
||||||
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiQTEyOEtXIiwgImVuYyI6ICJBMTI4Q0JDLUhTMjU2In0.AaOyP1zNjsywJOoQ941JJWT4LQIDlpy3UibM_48HrsoCJ5ENpQhfbQ.h2ZBUiy9ofvcDZOwV2iVJA.K0FhK6ri44ZWmtFUtJRpiZSeT8feKX5grFpU8xG5026bGXAdZADO4ZkQ8DRvSEE9DwNIlK6cIEoSavm12gSzQVXajz3MWv5U6VbK5gPFCeFjJfMPmdQ9THIi-hapcueSxYz2rkcGxo3iP3ixE_bww8UB_XlQvnokhFxtf8NushMkjef4RDrW5vQu4j_qPbqG334msDKmFi8Klprs6JktrADeEJ0bPGN80NKEWp7XPcCbfmcwYe-9z_tPw_KJcQhLpQevfPLfVI4WjPgPxYNGw03qKYnLD7oTjr9qCrQmzUVXutlhxfpD3UQr11SJu8q19Ug82bON-GRd2CjpSrErQq42dd0_mWjG9iDqjqpYFBK9DV_qawy2dxFbfIcCsnb6ewifjoJLiFg2OT7-YdTaC7kqaXeE1JpA-OtMXN72FUDrnQ8r9ifj_VpMNvBf_36dbOCT-cGwIOI8Pf6HH2smXULhtBv9q-qO2zyScpmliqZDXUqmvQ8rxi-xYI2hijV80jo14teZgIotWsZE2FrMPJTkegDmh4cG5UzoUsQxzPhXqHvkss6Hv7h-_fmvXvXY1AZ8T8bL1qM4bS8mKpewmGtjmU6S220tL60ieT2QL0vmTFlJkOE8uFreWlPnxNKBix_zj4Smhg1zS_sl7GoXhp5Q_QY3MOMM5-gCAALY0crqLLWtHswElVOiJSyd64T9HFyXm7Rleqq2kLXmTvDhOR6lzMnA0rcGP7lQGYlLZgFiicsMY722XlKI3v1-cJYvj2RZMPe1ijBLFFTqyPeCBkbsDC3XCpWhMByNHSHKN3t-NJmQBIC-89ZeOMU-WBtqrDDi_CMnaz9mwkyt3P7ja_fVskc4KKBBlMVYDZ3DJeJw3Kg9Pie0XlqHkD6W1vyAWjOM2z76Rh_3553dLAH1HxNRwidLjq3SvoaX3TOU5O2_omFGPBek7QdzhNBGLgv6Zlul_XxZq9UGiVo1jrnkd40_vAZQRL6NyMxGBEij_b8F_wDMz5njrL-a0c2Y5mMno-q8gmM4sFKI1BS5HsrUAw.PFFSFlDslALnebAdaqS_MA" -hdr "X-Secret: 3921VrO5TrLvPQ-NFLlghQ"
|
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiQTEyOEtXIiwgImVuYyI6ICJBMTI4Q0JDLUhTMjU2In0.AaOyP1zNjsywJOoQ941JJWT4LQIDlpy3UibM_48HrsoCJ5ENpQhfbQ.h2ZBUiy9ofvcDZOwV2iVJA.K0FhK6ri44ZWmtFUtJRpiZSeT8feKX5grFpU8xG5026bGXAdZADO4ZkQ8DRvSEE9DwNIlK6cIEoSavm12gSzQVXajz3MWv5U6VbK5gPFCeFjJfMPmdQ9THIi-hapcueSxYz2rkcGxo3iP3ixE_bww8UB_XlQvnokhFxtf8NushMkjef4RDrW5vQu4j_qPbqG334msDKmFi8Klprs6JktrADeEJ0bPGN80NKEWp7XPcCbfmcwYe-9z_tPw_KJcQhLpQevfPLfVI4WjPgPxYNGw03qKYnLD7oTjr9qCrQmzUVXutlhxfpD3UQr11SJu8q19Ug82bON-GRd2CjpSrErQq42dd0_mWjG9iDqjqpYFBK9DV_qawy2dxFbfIcCsnb6ewifjoJLiFg2OT7-YdTaC7kqaXeE1JpA-OtMXN72FUDrnQ8r9ifj_VpMNvBf_36dbOCT-cGwIOI8Pf6HH2smXULhtBv9q-qO2zyScpmliqZDXUqmvQ8rxi-xYI2hijV80jo14teZgIotWsZE2FrMPJTkegDmh4cG5UzoUsQxzPhXqHvkss6Hv7h-_fmvXvXY1AZ8T8bL1qM4bS8mKpewmGtjmU6S220tL60ieT2QL0vmTFlJkOE8uFreWlPnxNKBix_zj4Smhg1zS_sl7GoXhp5Q_QY3MOMM5-gCAALY0crqLLWtHswElVOiJSyd64T9HFyXm7Rleqq2kLXmTvDhOR6lzMnA0rcGP7lQGYlLZgFiicsMY722XlKI3v1-cJYvj2RZMPe1ijBLFFTqyPeCBkbsDC3XCpWhMByNHSHKN3t-NJmQBIC-89ZeOMU-WBtqrDDi_CMnaz9mwkyt3P7ja_fVskc4KKBBlMVYDZ3DJeJw3Kg9Pie0XlqHkD6W1vyAWjOM2z76Rh_3553dLAH1HxNRwidLjq3SvoaX3TOU5O2_omFGPBek7QdzhNBGLgv6Zlul_XxZq9UGiVo1jrnkd40_vAZQRL6NyMxGBEij_b8F_wDMz5njrL-a0c2Y5mMno-q8gmM4sFKI1BS5HsrUAw.PFFSFlDslALnebAdaqS_MA" -hdr "X-Secret: 3921VrO5TrLvPQ-NFLlghQ"
|
||||||
rxresp
|
rxresp
|
||||||
expect resp.http.x-decrypted ~ "(Sed ut perspiciatis unde omnis iste natus error sit voluptatem doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo veritatis et quasi architecto beatae vitae dicta sunt explicabo\\. Nemo ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt\\. porro quisquam est, qui dolorem ipsum quia dolor sit amet, adipisci velit, sed quia non numquam eius modi tempora incidunt ut dolore magnam aliquam quaerat voluptatem\\. Ut enim ad minima veniam, nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut ea commodi consequatur\\? Quis autem vel eum iure reprehenderit qui in voluptate velit esse quam nihil molestiae consequatur, vel illum qui eum fugiat quo voluptas nulla pariatur\\?|AWS-LC UNMANAGED)"
|
expect resp.http.x-decrypted ~ "(Sed ut perspiciatis unde omnis iste natus error sit voluptatem doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo veritatis et quasi architecto beatae vitae dicta sunt explicabo\\. Nemo ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt\\. porro quisquam est, qui dolorem ipsum quia dolor sit amet, adipisci velit, sed quia non numquam eius modi tempora incidunt ut dolore magnam aliquam quaerat voluptatem\\. Ut enim ad minima veniam, nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut ea commodi consequatur\\? Quis autem vel eum iure reprehenderit qui in voluptate velit esse quam nihil molestiae consequatur, vel illum qui eum fugiat quo voluptas nulla pariatur\\?|AWS-LC UNMANAGED)"
|
||||||
|
|
||||||
|
|
||||||
|
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiAiQTEyOEtXIiwgImVuYyI6ICJBMTI4Q0JDLUhTMjU2In0.AaOyP1zNjsywJOoQ941JJWT4LQIDlpy3UibM_48HrsoCJ5ENpQhfbQ.h2ZBUiy9ofvcDZOwV2iVJA.K0FhK6ri44ZWmtFUtJRpiZSeT8feKX5grFpU8xG5026bGXAdZADO4ZkQ8DRvSEE9DwNIlK6cIEoSavm12gSzQVXajz3MWv5U6VbK5gPFCeFjJfMPmdQ9THIi-hapcueSxYz2rkcGxo3iP3ixE_bww8UB_XlQvnokhFxtf8NushMkjef4RDrW5vQu4j_qPbqG334msDKmFi8Klprs6JktrADeEJ0bPGN80NKEWp7XPcCbfmcwYe-9z_tPw_KJcQhLpQevfPLfVI4WjPgPxYNGw03qKYnLD7oTjr9qCrQmzUVXutlhxfpD3UQr11SJu8q19Ug82bON-GRd2CjpSrErQq42dd0_mWjG9iDqjqpYFBK9DV_qawy2dxFbfIcCsnb6ewifjoJLiFg2OT7-YdTaC7kqaXeE1JpA-OtMXN72FUDrnQ8r9ifj_VpMNvBf_36dbOCT-cGwIOI8Pf6HH2smXULhtBv9q-qO2zyScpmliqZDXUqmvQ8rxi-xYI2hijV80jo14teZgIotWsZE2FrMPJTkegDmh4cG5UzoUsQxzPhXqHvkss6Hv7h-_fmvXvXY1AZ8T8bL1qM4bS8mKpewmGtjmU6S220tL60ieT2QL0vmTFlJkOE8uFreWlPnxNKBix_zj4Smhg1zS_sl7GoXhp5Q_QY3MOMM5-gCAALY0crqLLWtHswElVOiJSyd64T9HFyXm7Rleqq2kLXmTvDhOR6lzMnA0rcGP7lQGYlLZgFiicsMY722XlKI3v1-cJYvj2RZMPe1ijBLFFTqyPeCBkbsDC3XCpWhMByNHSHKN3t-NJmQBIC-89ZeOMU-WBtqrDDi_CMnaz9mwkyt3P7ja_fVskc4KKBBlMVYDZ3DJeJw3Kg9Pie0XlqHkD6W1vyAWjOM2z76Rh_3553dLAH1HxNRwidLjq3SvoaX3TOU5O2_omFGPBek7QdzhNBGLgv6Zlul_XxZq9UGiVo1jrnkd40_vAZQRL6NyMxGBEij_b8F_wDMz5njrL-a0c2Y5mMno-q8gmM4sFKI1BS5HsrUAw.PFFSFlDslALnebAdaqS_MA" -hdr "X-JWK: {\"kty\":\"oct\", \"k\":\"3921VrO5TrLvPQ-NFLlghQ\"}"
|
||||||
|
rxresp
|
||||||
|
expect resp.http.x-decrypted ~ "(Sed ut perspiciatis unde omnis iste natus error sit voluptatem doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo veritatis et quasi architecto beatae vitae dicta sunt explicabo\\. Nemo ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt\\. porro quisquam est, qui dolorem ipsum quia dolor sit amet, adipisci velit, sed quia non numquam eius modi tempora incidunt ut dolore magnam aliquam quaerat voluptatem\\. Ut enim ad minima veniam, nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut ea commodi consequatur\\? Quis autem vel eum iure reprehenderit qui in voluptate velit esse quam nihil molestiae consequatur, vel illum qui eum fugiat quo voluptas nulla pariatur\\?|AWS-LC UNMANAGED)"
|
||||||
} -run
|
} -run
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -178,6 +212,10 @@ client c5 -connect ${h1_mainfe_sock} {
|
||||||
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiJBMjU2R0NNS1ciLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiaXYiOiJzVE81QjlPRXFuaUhCX3dYIiwidGFnIjoid2M1ZnRpYUFnNGNOR1JkZzNWQ3FXdyJ9.2zqnM9zeNU-eAMp5h2uFJyxbHHKsZs9YAYKzOcIF3d3Q9uq1TMQAvqOIuXw3kU9o.hh5aObIoIMR6Ke0rXm6V1A.R7U-4OlqOR6f2C1b3nI5bFqZBIGNBgza7FfoPEgrQT8.asJCzUAHCuxS7o8Ut4ENfaY5RluLB35F" -hdr "X-Secret: vprpatiNyI-biJY57qr8Gg4--4Rycgb2G5yO1_myYAw"
|
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiJBMjU2R0NNS1ciLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiaXYiOiJzVE81QjlPRXFuaUhCX3dYIiwidGFnIjoid2M1ZnRpYUFnNGNOR1JkZzNWQ3FXdyJ9.2zqnM9zeNU-eAMp5h2uFJyxbHHKsZs9YAYKzOcIF3d3Q9uq1TMQAvqOIuXw3kU9o.hh5aObIoIMR6Ke0rXm6V1A.R7U-4OlqOR6f2C1b3nI5bFqZBIGNBgza7FfoPEgrQT8.asJCzUAHCuxS7o8Ut4ENfaY5RluLB35F" -hdr "X-Secret: vprpatiNyI-biJY57qr8Gg4--4Rycgb2G5yO1_myYAw"
|
||||||
rxresp
|
rxresp
|
||||||
expect resp.http.x-decrypted == "My Encrypted message"
|
expect resp.http.x-decrypted == "My Encrypted message"
|
||||||
|
|
||||||
|
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiJBMjU2R0NNS1ciLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiaXYiOiJzVE81QjlPRXFuaUhCX3dYIiwidGFnIjoid2M1ZnRpYUFnNGNOR1JkZzNWQ3FXdyJ9.2zqnM9zeNU-eAMp5h2uFJyxbHHKsZs9YAYKzOcIF3d3Q9uq1TMQAvqOIuXw3kU9o.hh5aObIoIMR6Ke0rXm6V1A.R7U-4OlqOR6f2C1b3nI5bFqZBIGNBgza7FfoPEgrQT8.asJCzUAHCuxS7o8Ut4ENfaY5RluLB35F" -hdr "X-JWK: {\"k\":\"vprpatiNyI-biJY57qr8Gg4--4Rycgb2G5yO1_myYAw\",\"kty\":\"oct\"}"
|
||||||
|
rxresp
|
||||||
|
expect resp.http.x-decrypted == "My Encrypted message"
|
||||||
} -run
|
} -run
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -187,6 +225,12 @@ client c6 -connect ${h1_mainfe_sock} {
|
||||||
txreq -url "/pem" -hdr "Authorization: Bearer eyJhbGciOiAiUlNBMV81IiwgImVuYyI6ICJBMjU2R0NNIn0.ew8AbprGcd_J73-CZPIsE1YonD9rtcL7VCuOOuVkrpS_9UzA9_kMh1yw20u-b5rKJAhmFMCQPXl44ro6IzOeHu8E2X_NlPEnQfyNVQ4R1HB_E9sSk5BLxOH3aHkVUh0I-e2eDDj-pdI3OrdjZtnZEBeQ7tpMcoBEbn1VGg7Pmw4qtdS-0qnDSs-PttU-cejjgPUNLRU8UdoRVC9uJKacJms110QugDuFuMYTTSU2nbIYh0deCMRAuKGWt0Ii6EMYW2JaJ7JfXag59Ar1uylQPyEVrocnOsDuB9xnp2jd796qCPdKxBK9yKUnwjal4SQpYbutr40QzG1S4MsKaUorLg.0el2ruY0mm2s7LUR.X5RI6dF06Y_dbAr8meb-6SG5enj5noto9nzgQU5HDrYdiUofPptIf6E-FikKUM9QR4pY9SyphqbPYeAN1ZYVxBrR8tUf4Do2kw1biuuRAmuIyytpmxwvY946T3ctu1Zw3Ymwe-jWXX08EngzssvzFOGT66gkdufrTkC45Fkr0RBOmWa5OVVg_VR6LwcivtQMmlArlrwbaDmmLqt_2p7afT0UksEz4loq0sskw-p7GbhB2lpzXoDnijdHrQkftRbVCiDbK4-qGr7IRFb0YOHvyVFr-kmDoJv2Zsg_rPKV1LkYmPJUbVDo9T3RAcLinlKPK4ZPC_2bWj3M9BvfOq1HeuyVWzX2Cb1mHFdxXFGqaLPfsE0VOfn0GqL7oHVbuczYYw2eKdmiw5LEMwuuJEdYDE9IIFEe8oRB4hNZ0XMYB6oqqZejD0Fh6nqlj5QUrTYpTSE-3LkgK2zRJ0oZFXZyHCB426bmViuE0mXF7twkQep09g0U35-jFBZcSYBDvZZL1t5d_YEQ0QtO0mEeEpGb0Pvk_EsSMFib7NxClz4_rdtwWCFuM4uFOS5vrQMiMqi_TadhLxrugRFhJpsibuScCiJ7eNDrUvwSWEwv1U593MUX3guDq_ONOo_49EOJSyRJtQCNC6FW6GLWSz9TCo6g5LCnXt-pqwu0Iymr7ZTQ3MTsdq2G55JM2e6SdG43iET8r235hynmXHKPUYHlSjsC2AEAY_pGDO0akIhf4wDVIM5rytn-rjQf-29ZJp05g6KPe-EaN1C-X7aBGhgAEgnX-iaXXbotpGeKRTNj2jAG1UrkYi6BGHxluiXJ8jH_LjHuxKyzIObqK8p28ePDKRL-jyNTrvGW2uorgb_u7HGmWYIWLTI7obnZ5vw3MbkjcwEd4bX5JXUj2rRsUWMlZSSFVO9Wgf7MBvcLsyF0Yqun3p0bi__edmcqNF_uuYZT-8jkUlMborqIDDCYYqIolgi5R1Bmut-gFYq6xyfEncxOi50xmYon50UulVnAH-up_RELGtCjmAivaJb8.upVY733IMAT8YbMab2PZnw" -hdr "X-PEM: ${testdir}/rsa1_5.pem"
|
txreq -url "/pem" -hdr "Authorization: Bearer eyJhbGciOiAiUlNBMV81IiwgImVuYyI6ICJBMjU2R0NNIn0.ew8AbprGcd_J73-CZPIsE1YonD9rtcL7VCuOOuVkrpS_9UzA9_kMh1yw20u-b5rKJAhmFMCQPXl44ro6IzOeHu8E2X_NlPEnQfyNVQ4R1HB_E9sSk5BLxOH3aHkVUh0I-e2eDDj-pdI3OrdjZtnZEBeQ7tpMcoBEbn1VGg7Pmw4qtdS-0qnDSs-PttU-cejjgPUNLRU8UdoRVC9uJKacJms110QugDuFuMYTTSU2nbIYh0deCMRAuKGWt0Ii6EMYW2JaJ7JfXag59Ar1uylQPyEVrocnOsDuB9xnp2jd796qCPdKxBK9yKUnwjal4SQpYbutr40QzG1S4MsKaUorLg.0el2ruY0mm2s7LUR.X5RI6dF06Y_dbAr8meb-6SG5enj5noto9nzgQU5HDrYdiUofPptIf6E-FikKUM9QR4pY9SyphqbPYeAN1ZYVxBrR8tUf4Do2kw1biuuRAmuIyytpmxwvY946T3ctu1Zw3Ymwe-jWXX08EngzssvzFOGT66gkdufrTkC45Fkr0RBOmWa5OVVg_VR6LwcivtQMmlArlrwbaDmmLqt_2p7afT0UksEz4loq0sskw-p7GbhB2lpzXoDnijdHrQkftRbVCiDbK4-qGr7IRFb0YOHvyVFr-kmDoJv2Zsg_rPKV1LkYmPJUbVDo9T3RAcLinlKPK4ZPC_2bWj3M9BvfOq1HeuyVWzX2Cb1mHFdxXFGqaLPfsE0VOfn0GqL7oHVbuczYYw2eKdmiw5LEMwuuJEdYDE9IIFEe8oRB4hNZ0XMYB6oqqZejD0Fh6nqlj5QUrTYpTSE-3LkgK2zRJ0oZFXZyHCB426bmViuE0mXF7twkQep09g0U35-jFBZcSYBDvZZL1t5d_YEQ0QtO0mEeEpGb0Pvk_EsSMFib7NxClz4_rdtwWCFuM4uFOS5vrQMiMqi_TadhLxrugRFhJpsibuScCiJ7eNDrUvwSWEwv1U593MUX3guDq_ONOo_49EOJSyRJtQCNC6FW6GLWSz9TCo6g5LCnXt-pqwu0Iymr7ZTQ3MTsdq2G55JM2e6SdG43iET8r235hynmXHKPUYHlSjsC2AEAY_pGDO0akIhf4wDVIM5rytn-rjQf-29ZJp05g6KPe-EaN1C-X7aBGhgAEgnX-iaXXbotpGeKRTNj2jAG1UrkYi6BGHxluiXJ8jH_LjHuxKyzIObqK8p28ePDKRL-jyNTrvGW2uorgb_u7HGmWYIWLTI7obnZ5vw3MbkjcwEd4bX5JXUj2rRsUWMlZSSFVO9Wgf7MBvcLsyF0Yqun3p0bi__edmcqNF_uuYZT-8jkUlMborqIDDCYYqIolgi5R1Bmut-gFYq6xyfEncxOi50xmYon50UulVnAH-up_RELGtCjmAivaJb8.upVY733IMAT8YbMab2PZnw" -hdr "X-PEM: ${testdir}/rsa1_5.pem"
|
||||||
rxresp
|
rxresp
|
||||||
expect resp.http.x-decrypted == "Sed ut perspiciatis unde omnis iste natus error sit voluptatem doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. porro quisquam est, qui dolorem ipsum quia dolor sit amet, adipisci velit, sed quia non numquam eius modi tempora incidunt ut dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in voluptate velit esse quam nihil molestiae consequatur, vel illum qui eum fugiat quo voluptas nulla pariatur?"
|
expect resp.http.x-decrypted == "Sed ut perspiciatis unde omnis iste natus error sit voluptatem doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. porro quisquam est, qui dolorem ipsum quia dolor sit amet, adipisci velit, sed quia non numquam eius modi tempora incidunt ut dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in voluptate velit esse quam nihil molestiae consequatur, vel illum qui eum fugiat quo voluptas nulla pariatur?"
|
||||||
|
|
||||||
|
|
||||||
|
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiAiUlNBMV81IiwgImVuYyI6ICJBMjU2R0NNIn0.ew8AbprGcd_J73-CZPIsE1YonD9rtcL7VCuOOuVkrpS_9UzA9_kMh1yw20u-b5rKJAhmFMCQPXl44ro6IzOeHu8E2X_NlPEnQfyNVQ4R1HB_E9sSk5BLxOH3aHkVUh0I-e2eDDj-pdI3OrdjZtnZEBeQ7tpMcoBEbn1VGg7Pmw4qtdS-0qnDSs-PttU-cejjgPUNLRU8UdoRVC9uJKacJms110QugDuFuMYTTSU2nbIYh0deCMRAuKGWt0Ii6EMYW2JaJ7JfXag59Ar1uylQPyEVrocnOsDuB9xnp2jd796qCPdKxBK9yKUnwjal4SQpYbutr40QzG1S4MsKaUorLg.0el2ruY0mm2s7LUR.X5RI6dF06Y_dbAr8meb-6SG5enj5noto9nzgQU5HDrYdiUofPptIf6E-FikKUM9QR4pY9SyphqbPYeAN1ZYVxBrR8tUf4Do2kw1biuuRAmuIyytpmxwvY946T3ctu1Zw3Ymwe-jWXX08EngzssvzFOGT66gkdufrTkC45Fkr0RBOmWa5OVVg_VR6LwcivtQMmlArlrwbaDmmLqt_2p7afT0UksEz4loq0sskw-p7GbhB2lpzXoDnijdHrQkftRbVCiDbK4-qGr7IRFb0YOHvyVFr-kmDoJv2Zsg_rPKV1LkYmPJUbVDo9T3RAcLinlKPK4ZPC_2bWj3M9BvfOq1HeuyVWzX2Cb1mHFdxXFGqaLPfsE0VOfn0GqL7oHVbuczYYw2eKdmiw5LEMwuuJEdYDE9IIFEe8oRB4hNZ0XMYB6oqqZejD0Fh6nqlj5QUrTYpTSE-3LkgK2zRJ0oZFXZyHCB426bmViuE0mXF7twkQep09g0U35-jFBZcSYBDvZZL1t5d_YEQ0QtO0mEeEpGb0Pvk_EsSMFib7NxClz4_rdtwWCFuM4uFOS5vrQMiMqi_TadhLxrugRFhJpsibuScCiJ7eNDrUvwSWEwv1U593MUX3guDq_ONOo_49EOJSyRJtQCNC6FW6GLWSz9TCo6g5LCnXt-pqwu0Iymr7ZTQ3MTsdq2G55JM2e6SdG43iET8r235hynmXHKPUYHlSjsC2AEAY_pGDO0akIhf4wDVIM5rytn-rjQf-29ZJp05g6KPe-EaN1C-X7aBGhgAEgnX-iaXXbotpGeKRTNj2jAG1UrkYi6BGHxluiXJ8jH_LjHuxKyzIObqK8p28ePDKRL-jyNTrvGW2uorgb_u7HGmWYIWLTI7obnZ5vw3MbkjcwEd4bX5JXUj2rRsUWMlZSSFVO9Wgf7MBvcLsyF0Yqun3p0bi__edmcqNF_uuYZT-8jkUlMborqIDDCYYqIolgi5R1Bmut-gFYq6xyfEncxOi50xmYon50UulVnAH-up_RELGtCjmAivaJb8.upVY733IMAT8YbMab2PZnw" \
|
||||||
|
-hdr "X-JWK: { \"kty\": \"RSA\", \"e\": \"AQAB\", \"n\": \"wsqJbopx18NQFYLYOq4ZeMSE89yGiEankUpf25yV8QqroKUGrASj_OeqTWUjwPGKTN1vGFFuHYxiJeAUQH2qQPmg9Oqk6-ATBEKn9COKYniQ5459UxCwmZA2RL6ufhrNyq0JF3GfXkjLDBfhU9zJJEOhknsA0L_c-X4AI3d_NbFdMqxNe1V_UWAlLcbKdwO6iC9fAvwUmDQxgy6R0DC1CMouQpenMRcALaSHar1cm4K-syoNobv3HEuqgZ3s6-hOOSqauqAO0GUozPpaIA7OeruyRl5sTWT0r-iz39bchID2bIKtcqLiFcSYPLBcxmsaQCqRlGhmv6stjTCLV1yT9w\", \"kid\": \"ff3c5c96-392e-46ef-a839-6ff16027af78\", \"d\": \"b9hXfQ8lOtw8mX1dpqPcoElGhbczz_-xq2znCXQpbBPSZBUddZvchRSH5pSSKPEHlgb3CSGIdpLqsBCv0C_XmCM9ViN8uqsYgDO9uCLIDK5plWttbkqA_EufvW03R9UgIKWmOL3W4g4t-C2mBb8aByaGGVNjLnlb6i186uBsPGkvaeLHbQcRQKAvhOUTeNiyiiCbUGJwCm4avMiZrsz1r81Y1Z5izo0ERxdZymxM3FRZ9vjTB-6DtitvTXXnaAm1JTu6TIpj38u2mnNLkGMbflOpgelMNKBZVxSmfobIbFN8CHVc1UqLK2ElsZ9RCQANgkMHlMkOMj-XT0wHa3VBUQ\", \"p\": \"8mgriveKJAp1S7SHqirQAfZafxVuAK_A2QBYPsAUhikfBOvN0HtZjgurPXSJSdgR8KbWV7ZjdJM_eOivIb_XiuAaUdIOXbLRet7t9a_NJtmX9iybhoa9VOJFMBq_rbnbbte2kq0-FnXmv3cukbC2LaEw3aEcDgyURLCgWFqt7M0\", \"q\": \"zbbTv5421GowOfKVEuVoA35CEWgl8mdasnEZac2LWxMwKExikKU5LLacLQlcOt7A6n1ZGUC2wyH8mstO5tV34Eug3fnNrbnxFUEE_ZB_njs_rtZnwz57AoUXOXVnd194seIZF9PjdzZcuwXwXbrZ2RSVW8if_ZH5OVYEM1EsA9M\", \"dp\": \"1BaIYmIKn1X3InGlcSFcNRtSOnaJdFhRpotCqkRssKUx2qBlxs7ln_5dqLtZkx5VM_UE_GE7yzc6BZOwBxtOftdsr8HVh-14ksSR9rAGEsO2zVBiEuW4qZf_aQM-ScWfU--wcczZ0dT-Ou8P87Bk9K9fjcn0PeaLoz3WTPepzNE\", \"dq\": \"kYw2u4_UmWvcXVOeV_VKJ5aQZkJ6_sxTpodRBMPyQmkMHKcW4eKU1mcJju_deqWadw5jGPPpm5yTXm5UkAwfOeookoWpGa7CvVf4kPNI6Aphn3GBjunJHNpPuU6w-wvomGsxd-NqQDGNYKHuFFMcyXO_zWXglQdP_1o1tJ1M-BM\", \"qi\": \"j94Ens784M8zsfwWoJhYq9prcSZOGgNbtFWQZO8HP8pcNM9ls7YA4snTtAS_B4peWWFAFZ0LSKPCxAvJnrq69ocmEKEk7ss1Jo062f9pLTQ6cnhMjev3IqLocIFt5Vbsg_PWYpFSR7re6FRbF9EYOM7F2-HRv1idxKCWoyQfBqk\" }"
|
||||||
|
rxresp
|
||||||
|
expect resp.http.x-decrypted == "Sed ut perspiciatis unde omnis iste natus error sit voluptatem doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. porro quisquam est, qui dolorem ipsum quia dolor sit amet, adipisci velit, sed quia non numquam eius modi tempora incidunt ut dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in voluptate velit esse quam nihil molestiae consequatur, vel illum qui eum fugiat quo voluptas nulla pariatur?"
|
||||||
} -run
|
} -run
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -199,3 +243,22 @@ client c7 -connect ${h1_mainfe_sock} {
|
||||||
} -run
|
} -run
|
||||||
|
|
||||||
|
|
||||||
|
# Test 'jwt_decrypt_jwk' error cases
|
||||||
|
client c8 -connect ${h1_mainfe_sock} {
|
||||||
|
# Invalid 'oct' JWK
|
||||||
|
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiJBMjU2R0NNS1ciLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiaXYiOiJzVE81QjlPRXFuaUhCX3dYIiwidGFnIjoid2M1ZnRpYUFnNGNOR1JkZzNWQ3FXdyJ9.2zqnM9zeNU-eAMp5h2uFJyxbHHKsZs9YAYKzOcIF3d3Q9uq1TMQAvqOIuXw3kU9o.hh5aObIoIMR6Ke0rXm6V1A.R7U-4OlqOR6f2C1b3nI5bFqZBIGNBgza7FfoPEgrQT8.asJCzUAHCuxS7o8Ut4ENfaY5RluLB35F" -hdr "X-JWK: {\"k\":\"invalid\",\"kty\":\"oct\"}"
|
||||||
|
rxresp
|
||||||
|
expect resp.http.x-decrypted == ""
|
||||||
|
|
||||||
|
# Wrong JWK type
|
||||||
|
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiJBMjU2R0NNS1ciLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiaXYiOiJzVE81QjlPRXFuaUhCX3dYIiwidGFnIjoid2M1ZnRpYUFnNGNOR1JkZzNWQ3FXdyJ9.2zqnM9zeNU-eAMp5h2uFJyxbHHKsZs9YAYKzOcIF3d3Q9uq1TMQAvqOIuXw3kU9o.hh5aObIoIMR6Ke0rXm6V1A.R7U-4OlqOR6f2C1b3nI5bFqZBIGNBgza7FfoPEgrQT8.asJCzUAHCuxS7o8Ut4ENfaY5RluLB35F" -hdr "X-JWK: {\"k\":\"invalid\",\"kty\":\"RSA\"}"
|
||||||
|
rxresp
|
||||||
|
expect resp.http.x-decrypted == ""
|
||||||
|
|
||||||
|
# Invalid 'RSA' JWK (truncated 'qi')
|
||||||
|
txreq -url "/jwk" -hdr "Authorization: Bearer eyJhbGciOiAiUlNBMV81IiwgImVuYyI6ICJBMjU2R0NNIn0.ew8AbprGcd_J73-CZPIsE1YonD9rtcL7VCuOOuVkrpS_9UzA9_kMh1yw20u-b5rKJAhmFMCQPXl44ro6IzOeHu8E2X_NlPEnQfyNVQ4R1HB_E9sSk5BLxOH3aHkVUh0I-e2eDDj-pdI3OrdjZtnZEBeQ7tpMcoBEbn1VGg7Pmw4qtdS-0qnDSs-PttU-cejjgPUNLRU8UdoRVC9uJKacJms110QugDuFuMYTTSU2nbIYh0deCMRAuKGWt0Ii6EMYW2JaJ7JfXag59Ar1uylQPyEVrocnOsDuB9xnp2jd796qCPdKxBK9yKUnwjal4SQpYbutr40QzG1S4MsKaUorLg.0el2ruY0mm2s7LUR.X5RI6dF06Y_dbAr8meb-6SG5enj5noto9nzgQU5HDrYdiUofPptIf6E-FikKUM9QR4pY9SyphqbPYeAN1ZYVxBrR8tUf4Do2kw1biuuRAmuIyytpmxwvY946T3ctu1Zw3Ymwe-jWXX08EngzssvzFOGT66gkdufrTkC45Fkr0RBOmWa5OVVg_VR6LwcivtQMmlArlrwbaDmmLqt_2p7afT0UksEz4loq0sskw-p7GbhB2lpzXoDnijdHrQkftRbVCiDbK4-qGr7IRFb0YOHvyVFr-kmDoJv2Zsg_rPKV1LkYmPJUbVDo9T3RAcLinlKPK4ZPC_2bWj3M9BvfOq1HeuyVWzX2Cb1mHFdxXFGqaLPfsE0VOfn0GqL7oHVbuczYYw2eKdmiw5LEMwuuJEdYDE9IIFEe8oRB4hNZ0XMYB6oqqZejD0Fh6nqlj5QUrTYpTSE-3LkgK2zRJ0oZFXZyHCB426bmViuE0mXF7twkQep09g0U35-jFBZcSYBDvZZL1t5d_YEQ0QtO0mEeEpGb0Pvk_EsSMFib7NxClz4_rdtwWCFuM4uFOS5vrQMiMqi_TadhLxrugRFhJpsibuScCiJ7eNDrUvwSWEwv1U593MUX3guDq_ONOo_49EOJSyRJtQCNC6FW6GLWSz9TCo6g5LCnXt-pqwu0Iymr7ZTQ3MTsdq2G55JM2e6SdG43iET8r235hynmXHKPUYHlSjsC2AEAY_pGDO0akIhf4wDVIM5rytn-rjQf-29ZJp05g6KPe-EaN1C-X7aBGhgAEgnX-iaXXbotpGeKRTNj2jAG1UrkYi6BGHxluiXJ8jH_LjHuxKyzIObqK8p28ePDKRL-jyNTrvGW2uorgb_u7HGmWYIWLTI7obnZ5vw3MbkjcwEd4bX5JXUj2rRsUWMlZSSFVO9Wgf7MBvcLsyF0Yqun3p0bi__edmcqNF_uuYZT-8jkUlMborqIDDCYYqIolgi5R1Bmut-gFYq6xyfEncxOi50xmYon50UulVnAH-up_RELGtCjmAivaJb8.upVY733IMAT8YbMab2PZnw" \
|
||||||
|
-hdr "X-JWK: { \"kty\": \"RSA\", \"e\": \"AQAB\", \"n\": \"wsqJbopx18NQFYLYOq4ZeMSE89yGiEankUpf25yV8QqroKUGrASj_OeqTWUjwPGKTN1vGFFuHYxiJeAUQH2qQPmg9Oqk6-ATBEKn9COKYniQ5459UxCwmZA2RL6ufhrNyq0JF3GfXkjLDBfhU9zJJEOhknsA0L_c-X4AI3d_NbFdMqxNe1V_UWAlLcbKdwO6iC9fAvwUmDQxgy6R0DC1CMouQpenMRcALaSHar1cm4K-syoNobv3HEuqgZ3s6-hOOSqauqAO0GUozPpaIA7OeruyRl5sTWT0r-iz39bchID2bIKtcqLiFcSYPLBcxmsaQCqRlGhmv6stjTCLV1yT9w\", \"kid\": \"ff3c5c96-392e-46ef-a839-6ff16027af78\", \"d\": \"b9hXfQ8lOtw8mX1dpqPcoElGhbczz_-xq2znCXQpbBPSZBUddZvchRSH5pSSKPEHlgb3CSGIdpLqsBCv0C_XmCM9ViN8uqsYgDO9uCLIDK5plWttbkqA_EufvW03R9UgIKWmOL3W4g4t-C2mBb8aByaGGVNjLnlb6i186uBsPGkvaeLHbQcRQKAvhOUTeNiyiiCbUGJwCm4avMiZrsz1r81Y1Z5izo0ERxdZymxM3FRZ9vjTB-6DtitvTXXnaAm1JTu6TIpj38u2mnNLkGMbflOpgelMNKBZVxSmfobIbFN8CHVc1UqLK2ElsZ9RCQANgkMHlMkOMj-XT0wHa3VBUQ\", \"p\": \"8mgriveKJAp1S7SHqirQAfZafxVuAK_A2QBYPsAUhikfBOvN0HtZjgurPXSJSdgR8KbWV7ZjdJM_eOivIb_XiuAaUdIOXbLRet7t9a_NJtmX9iybhoa9VOJFMBq_rbnbbte2kq0-FnXmv3cukbC2LaEw3aEcDgyURLCgWFqt7M0\", \"q\": \"zbbTv5421GowOfKVEuVoA35CEWgl8mdasnEZac2LWxMwKExikKU5LLacLQlcOt7A6n1ZGUC2wyH8mstO5tV34Eug3fnNrbnxFUEE_ZB_njs_rtZnwz57AoUXOXVnd194seIZF9PjdzZcuwXwXbrZ2RSVW8if_ZH5OVYEM1EsA9M\", \"dp\": \"1BaIYmIKn1X3InGlcSFcNRtSOnaJdFhRpotCqkRssKUx2qBlxs7ln_5dqLtZkx5VM_UE_GE7yzc6BZOwBxtOftdsr8HVh-14ksSR9rAGEsO2zVBiEuW4qZf_aQM-ScWfU--wcczZ0dT-Ou8P87Bk9K9fjcn0PeaLoz3WTPepzNE\", \"dq\": \"kYw2u4_UmWvcXVOeV_VKJ5aQZkJ6_sxTpodRBMPyQmkMHKcW4eKU1mcJju_deqWadw5jGPPpm5yTXm5UkAwfOeookoWpGa7CvVf4kPNI6Aphn3GBjunJHNpPuU6w-wvomGsxd-NqQDGNYKHuFFMcyXO_zWXglQdP_1o1tJ1M-BM\", \"qi\": \"j94Ens784M8zsfwWoJhYq9prcSZOGgNbtFWQZO8HP8pcNM9ls7YA4snTtAS_B4peWWFAFZ0LSKPCxAvJnrq69ocmEKEk7ss1Jo062f9pLTQ6cnhMjev3IqLocIFt5\" }"
|
||||||
|
rxresp
|
||||||
|
expect resp.http.x-decrypted == ""
|
||||||
|
} -run
|
||||||
|
|
||||||
|
|
|
||||||
84
reg-tests/proxy/cli_add_backend.vtc
Normal file
84
reg-tests/proxy/cli_add_backend.vtc
Normal file
|
|
@ -0,0 +1,84 @@
|
||||||
|
varnishtest "Add backend via cli"
|
||||||
|
|
||||||
|
feature ignore_unknown_macro
|
||||||
|
|
||||||
|
haproxy hsrv -conf {
|
||||||
|
global
|
||||||
|
.if feature(THREAD)
|
||||||
|
thread-groups 1
|
||||||
|
.endif
|
||||||
|
|
||||||
|
defaults
|
||||||
|
mode http
|
||||||
|
timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||||
|
timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||||
|
timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||||
|
|
||||||
|
frontend fe
|
||||||
|
bind "fd@${fe}"
|
||||||
|
http-request return status 200
|
||||||
|
} -start
|
||||||
|
|
||||||
|
haproxy h1 -conf {
|
||||||
|
global
|
||||||
|
.if feature(THREAD)
|
||||||
|
thread-groups 1
|
||||||
|
.endif
|
||||||
|
|
||||||
|
defaults
|
||||||
|
mode http
|
||||||
|
timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||||
|
timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||||
|
timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||||
|
|
||||||
|
frontend fe
|
||||||
|
bind "fd@${feS}"
|
||||||
|
force-be-switch if { req.hdr("x-admin") "1" }
|
||||||
|
use_backend %[req.hdr(x-be)]
|
||||||
|
|
||||||
|
defaults def
|
||||||
|
|
||||||
|
defaults def_http
|
||||||
|
mode http
|
||||||
|
} -start
|
||||||
|
|
||||||
|
client c1 -connect ${h1_feS_sock} {
|
||||||
|
txreq -hdr "x-be: be"
|
||||||
|
rxresp
|
||||||
|
expect resp.status == 503
|
||||||
|
} -run
|
||||||
|
|
||||||
|
haproxy h1 -cli {
|
||||||
|
# non existent backend
|
||||||
|
send "experimental-mode on; add backend be from def"
|
||||||
|
expect ~ "Mode is required"
|
||||||
|
|
||||||
|
send "experimental-mode on; add backend be from def_http"
|
||||||
|
expect ~ "New backend registered."
|
||||||
|
|
||||||
|
send "add server be/srv ${hsrv_fe_addr}:${hsrv_fe_port}"
|
||||||
|
expect ~ "New server registered."
|
||||||
|
send "enable server be/srv"
|
||||||
|
expect ~ ".*"
|
||||||
|
}
|
||||||
|
|
||||||
|
client c1 -connect ${h1_feS_sock} {
|
||||||
|
txreq -hdr "x-be: be"
|
||||||
|
rxresp
|
||||||
|
expect resp.status == 503
|
||||||
|
|
||||||
|
txreq -hdr "x-be: be" -hdr "x-admin: 1"
|
||||||
|
rxresp
|
||||||
|
expect resp.status == 200
|
||||||
|
} -run
|
||||||
|
|
||||||
|
haproxy h1 -cli {
|
||||||
|
send "publish backend be"
|
||||||
|
expect ~ "Backend published."
|
||||||
|
}
|
||||||
|
|
||||||
|
client c1 -connect ${h1_feS_sock} {
|
||||||
|
txreq -hdr "x-be: be"
|
||||||
|
rxresp
|
||||||
|
expect resp.status == 200
|
||||||
|
} -run
|
||||||
88
reg-tests/proxy/cli_del_backend.vtc
Normal file
88
reg-tests/proxy/cli_del_backend.vtc
Normal file
|
|
@ -0,0 +1,88 @@
|
||||||
|
varnishtest "Delete backend via cli"
|
||||||
|
|
||||||
|
feature ignore_unknown_macro
|
||||||
|
|
||||||
|
haproxy h1 -conf {
|
||||||
|
global
|
||||||
|
.if feature(THREAD)
|
||||||
|
thread-groups 1
|
||||||
|
.endif
|
||||||
|
|
||||||
|
defaults
|
||||||
|
mode http
|
||||||
|
timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||||
|
timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||||
|
timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||||
|
|
||||||
|
frontend fe
|
||||||
|
bind "fd@${feS}"
|
||||||
|
use_backend be_ref
|
||||||
|
|
||||||
|
listen li
|
||||||
|
bind "fd@${feli}"
|
||||||
|
|
||||||
|
backend be_ref
|
||||||
|
|
||||||
|
backend be
|
||||||
|
server s1 ${s1_addr}:${s1_port} disabled
|
||||||
|
|
||||||
|
# Defaults with tcp-check rules in it
|
||||||
|
# Currently this is the only case of runtime ref on an unnamed default
|
||||||
|
defaults
|
||||||
|
mode http
|
||||||
|
timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||||
|
timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||||
|
timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
|
||||||
|
option httpchk GET / HTTP/1.1
|
||||||
|
|
||||||
|
backend be_unnamed_def_ref
|
||||||
|
backend be_unnamed_def_ref2
|
||||||
|
} -start
|
||||||
|
|
||||||
|
haproxy h1 -cli {
|
||||||
|
send "experimental-mode on; del backend other"
|
||||||
|
expect ~ "No such backend."
|
||||||
|
|
||||||
|
send "experimental-mode on; del backend li"
|
||||||
|
expect ~ "Cannot delete a listen section."
|
||||||
|
|
||||||
|
send "experimental-mode on; del backend be_ref"
|
||||||
|
expect ~ "This proxy cannot be removed at runtime due to other configuration elements pointing to it."
|
||||||
|
|
||||||
|
send "show stat be 2 -1"
|
||||||
|
expect ~ "be,BACKEND,"
|
||||||
|
|
||||||
|
send "experimental-mode on; del backend be"
|
||||||
|
expect ~ "Backend must be unpublished prior to its deletion."
|
||||||
|
|
||||||
|
send "unpublish backend be;"
|
||||||
|
expect ~ ".*"
|
||||||
|
send "experimental-mode on; del backend be"
|
||||||
|
expect ~ "Only a backend without server can be deleted."
|
||||||
|
|
||||||
|
send "del server be/s1"
|
||||||
|
expect ~ ".*"
|
||||||
|
send "experimental-mode on; del backend be"
|
||||||
|
expect ~ "Backend deleted."
|
||||||
|
|
||||||
|
send "show stat be 2 -1"
|
||||||
|
expect !~ "be,BACKEND,"
|
||||||
|
}
|
||||||
|
|
||||||
|
haproxy h1 -cli {
|
||||||
|
send "show stat be_unnamed_def_ref 2 -1"
|
||||||
|
expect ~ "be_unnamed_def_ref,BACKEND,"
|
||||||
|
|
||||||
|
send "unpublish backend be_unnamed_def_ref;"
|
||||||
|
expect ~ ".*"
|
||||||
|
send "experimental-mode on; del backend be_unnamed_def_ref"
|
||||||
|
expect ~ "Backend deleted."
|
||||||
|
|
||||||
|
send "show stat be_unnamed_def_ref 2 -1"
|
||||||
|
expect !~ "be_unnamed_def_ref,BACKEND,"
|
||||||
|
|
||||||
|
send "unpublish backend be_unnamed_def_ref2;"
|
||||||
|
expect ~ ".*"
|
||||||
|
send "experimental-mode on; del backend be_unnamed_def_ref2"
|
||||||
|
expect ~ "Backend deleted."
|
||||||
|
}
|
||||||
|
|
@ -15,7 +15,7 @@
|
||||||
# - Check that you have socat
|
# - Check that you have socat
|
||||||
|
|
||||||
varnishtest "Test the 'set ssl ca-file' feature of the CLI"
|
varnishtest "Test the 'set ssl ca-file' feature of the CLI"
|
||||||
feature cmd "$HAPROXY_PROGRAM -cc 'feature(QUIC) && !feature(QUIC_OPENSSL_COMPAT) && !feature(OPENSSL_WOLFSSL)' && !feature(OPENSSL_AWSLC)'"
|
feature cmd "$HAPROXY_PROGRAM -cc 'feature(QUIC) && !feature(QUIC_OPENSSL_COMPAT) && !feature(OPENSSL_WOLFSSL) && !feature(OPENSSL_AWSLC)'"
|
||||||
|
|
||||||
feature cmd "command -v socat"
|
feature cmd "command -v socat"
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -51,7 +51,7 @@ haproxy h1 -cli {
|
||||||
|
|
||||||
# invalid load-balancing algo
|
# invalid load-balancing algo
|
||||||
send "add server other/s1 ${s1_addr}:${s1_port}"
|
send "add server other/s1 ${s1_addr}:${s1_port}"
|
||||||
expect ~ "Backend must use a dynamic load balancing to support dynamic servers."
|
expect ~ "backend 'other' uses a non dynamic load balancing method"
|
||||||
|
|
||||||
# invalid mux proto
|
# invalid mux proto
|
||||||
send "add server other2/s1 ${s1_addr}:${s1_port} proto h2"
|
send "add server other2/s1 ${s1_addr}:${s1_port} proto h2"
|
||||||
|
|
|
||||||
|
|
@ -145,7 +145,7 @@ haproxy h1 -cli {
|
||||||
send "show ssl ca-file ${testdir}/certs/set_cafile_interCA1.crt:2"
|
send "show ssl ca-file ${testdir}/certs/set_cafile_interCA1.crt:2"
|
||||||
expect !~ ".*SHA1 FingerPrint: 4FFF535278883264693CEA72C4FAD13F995D0098"
|
expect !~ ".*SHA1 FingerPrint: 4FFF535278883264693CEA72C4FAD13F995D0098"
|
||||||
send "show ssl ca-file ${testdir}/certs/set_cafile_interCA1.crt:2"
|
send "show ssl ca-file ${testdir}/certs/set_cafile_interCA1.crt:2"
|
||||||
expect ~ ".*SHA1 FingerPrint: 3D3D1D10AD74A8135F05A818E10E5FA91433954D"
|
expect ~ ".*SHA1 FingerPrint: 3D3D1D10AD74A8135F05A818E10E5FA91433954D|5F8DAE4B2099A09F9BDDAFD7E9D900F0CE49977C"
|
||||||
}
|
}
|
||||||
|
|
||||||
client c1 -connect ${h1_clearverifiedlst_sock} {
|
client c1 -connect ${h1_clearverifiedlst_sock} {
|
||||||
|
|
|
||||||
|
|
@ -86,9 +86,7 @@ haproxy h1 -cli {
|
||||||
expect ~ "\\*${testdir}/certs/interCA2_crl_empty.pem"
|
expect ~ "\\*${testdir}/certs/interCA2_crl_empty.pem"
|
||||||
|
|
||||||
send "show ssl crl-file \\*${testdir}/certs/interCA2_crl_empty.pem"
|
send "show ssl crl-file \\*${testdir}/certs/interCA2_crl_empty.pem"
|
||||||
expect ~ "Revoked Certificates:"
|
expect ~ "Revoked Certificates:\n.*Serial Number: 1008"
|
||||||
send "show ssl crl-file \\*${testdir}/certs/interCA2_crl_empty.pem:1"
|
|
||||||
expect ~ "Serial Number: 1008"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# This connection should still succeed since the transaction was not committed
|
# This connection should still succeed since the transaction was not committed
|
||||||
|
|
|
||||||
|
|
@ -1,10 +1,12 @@
|
||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
|
|
||||||
|
DESTDIR=${DESTDIR:-${PWD}/../vtest/}
|
||||||
|
TMPDIR=${TMPDIR:-$(mktemp -d)}
|
||||||
set -eux
|
set -eux
|
||||||
|
|
||||||
curl -fsSL https://github.com/vtest/VTest2/archive/main.tar.gz -o VTest.tar.gz
|
curl -fsSL "https://code.vinyl-cache.org/vtest/VTest2/archive/main.tar.gz" -o "${TMPDIR}/VTest.tar.gz"
|
||||||
mkdir ../vtest
|
mkdir -p "${TMPDIR}/vtest"
|
||||||
tar xvf VTest.tar.gz -C ../vtest --strip-components=1
|
tar xvf ${TMPDIR}/VTest.tar.gz -C "${TMPDIR}/vtest" --strip-components=1
|
||||||
# Special flags due to: https://github.com/vtest/VTest/issues/12
|
# Special flags due to: https://github.com/vtest/VTest/issues/12
|
||||||
|
|
||||||
# Note: do not use "make -C ../vtest", otherwise MAKEFLAGS contains "w"
|
# Note: do not use "make -C ../vtest", otherwise MAKEFLAGS contains "w"
|
||||||
|
|
@ -13,7 +15,7 @@ tar xvf VTest.tar.gz -C ../vtest --strip-components=1
|
||||||
# MFLAGS works on BSD but misses variable definitions on GNU Make.
|
# MFLAGS works on BSD but misses variable definitions on GNU Make.
|
||||||
# Better just avoid the -C and do the cd ourselves then.
|
# Better just avoid the -C and do the cd ourselves then.
|
||||||
|
|
||||||
cd ../vtest
|
cd "${TMPDIR}/vtest"
|
||||||
|
|
||||||
set +e
|
set +e
|
||||||
CPUS=${CPUS:-$(nproc 2>/dev/null)}
|
CPUS=${CPUS:-$(nproc 2>/dev/null)}
|
||||||
|
|
@ -28,3 +30,6 @@ if test -f /opt/homebrew/include/pcre2.h; then
|
||||||
else
|
else
|
||||||
make -j${CPUS} FLAGS="-O2 -s -Wall"
|
make -j${CPUS} FLAGS="-O2 -s -Wall"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
mkdir -p "${DESTDIR}"
|
||||||
|
cp "${TMPDIR}/vtest/vtest" "${DESTDIR}"
|
||||||
|
|
|
||||||
147
src/acme.c
147
src/acme.c
|
|
@ -14,11 +14,6 @@
|
||||||
|
|
||||||
#include <haproxy/acme-t.h>
|
#include <haproxy/acme-t.h>
|
||||||
|
|
||||||
#include <haproxy/cli.h>
|
|
||||||
#include <haproxy/cfgparse.h>
|
|
||||||
#include <haproxy/errors.h>
|
|
||||||
#include <haproxy/jws.h>
|
|
||||||
|
|
||||||
#include <haproxy/base64.h>
|
#include <haproxy/base64.h>
|
||||||
#include <haproxy/cfgparse.h>
|
#include <haproxy/cfgparse.h>
|
||||||
#include <haproxy/cli.h>
|
#include <haproxy/cli.h>
|
||||||
|
|
@ -30,6 +25,7 @@
|
||||||
#include <haproxy/pattern.h>
|
#include <haproxy/pattern.h>
|
||||||
#include <haproxy/sink.h>
|
#include <haproxy/sink.h>
|
||||||
#include <haproxy/ssl_ckch.h>
|
#include <haproxy/ssl_ckch.h>
|
||||||
|
#include <haproxy/ssl_gencert.h>
|
||||||
#include <haproxy/ssl_sock.h>
|
#include <haproxy/ssl_sock.h>
|
||||||
#include <haproxy/ssl_utils.h>
|
#include <haproxy/ssl_utils.h>
|
||||||
#include <haproxy/tools.h>
|
#include <haproxy/tools.h>
|
||||||
|
|
@ -157,7 +153,6 @@ enum acme_ret {
|
||||||
ACME_RET_FAIL = 2
|
ACME_RET_FAIL = 2
|
||||||
};
|
};
|
||||||
|
|
||||||
static EVP_PKEY *acme_EVP_PKEY_gen(int keytype, int curves, int bits, char **errmsg);
|
|
||||||
static int acme_start_task(struct ckch_store *store, char **errmsg);
|
static int acme_start_task(struct ckch_store *store, char **errmsg);
|
||||||
static struct task *acme_scheduler(struct task *task, void *context, unsigned int state);
|
static struct task *acme_scheduler(struct task *task, void *context, unsigned int state);
|
||||||
|
|
||||||
|
|
@ -398,7 +393,7 @@ static int cfg_parse_acme_kws(char **args, int section_type, struct proxy *curpx
|
||||||
err_code |= ERR_ALERT | ERR_FATAL;
|
err_code |= ERR_ALERT | ERR_FATAL;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
if (alertif_too_many_args(2, file, linenum, args, &err_code))
|
if (alertif_too_many_args(1, file, linenum, args, &err_code))
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
ha_free(&cur_acme->account.file);
|
ha_free(&cur_acme->account.file);
|
||||||
|
|
@ -415,7 +410,7 @@ static int cfg_parse_acme_kws(char **args, int section_type, struct proxy *curpx
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (alertif_too_many_args(2, file, linenum, args, &err_code))
|
if (alertif_too_many_args(1, file, linenum, args, &err_code))
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
ha_free(&cur_acme->challenge);
|
ha_free(&cur_acme->challenge);
|
||||||
|
|
@ -698,7 +693,7 @@ static int cfg_postsection_acme()
|
||||||
} else {
|
} else {
|
||||||
ha_notice("acme: generate account key '%s' for acme section '%s'.\n", path, cur_acme->name);
|
ha_notice("acme: generate account key '%s' for acme section '%s'.\n", path, cur_acme->name);
|
||||||
|
|
||||||
if ((key = acme_EVP_PKEY_gen(cur_acme->key.type, cur_acme->key.curves, cur_acme->key.bits, &errmsg)) == NULL) {
|
if ((key = ssl_gen_EVP_PKEY(cur_acme->key.type, cur_acme->key.curves, cur_acme->key.bits, &errmsg)) == NULL) {
|
||||||
ha_alert("acme: %s\n", errmsg);
|
ha_alert("acme: %s\n", errmsg);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
@ -883,7 +878,7 @@ static void acme_ctx_destroy(struct acme_ctx *ctx)
|
||||||
istfree(&auth->auth);
|
istfree(&auth->auth);
|
||||||
istfree(&auth->chall);
|
istfree(&auth->chall);
|
||||||
istfree(&auth->token);
|
istfree(&auth->token);
|
||||||
istfree(&auth->token);
|
istfree(&auth->dns);
|
||||||
next = auth->next;
|
next = auth->next;
|
||||||
free(auth);
|
free(auth);
|
||||||
auth = next;
|
auth = next;
|
||||||
|
|
@ -1306,7 +1301,6 @@ int acme_res_chkorder(struct task *task, struct acme_ctx *ctx, char **errmsg)
|
||||||
goto error;
|
goto error;
|
||||||
};
|
};
|
||||||
|
|
||||||
out:
|
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
|
||||||
error:
|
error:
|
||||||
|
|
@ -1359,11 +1353,11 @@ int acme_req_finalize(struct task *task, struct acme_ctx *ctx, char **errmsg)
|
||||||
if (acme_http_req(task, ctx, ctx->finalize, HTTP_METH_POST, hdrs, ist2(req_out->area, req_out->data)))
|
if (acme_http_req(task, ctx, ctx->finalize, HTTP_METH_POST, hdrs, ist2(req_out->area, req_out->data)))
|
||||||
goto error;
|
goto error;
|
||||||
|
|
||||||
|
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
goto out;
|
||||||
error:
|
error:
|
||||||
memprintf(errmsg, "couldn't request the finalize URL");
|
memprintf(errmsg, "couldn't request the finalize URL");
|
||||||
|
out:
|
||||||
free_trash_chunk(req_in);
|
free_trash_chunk(req_in);
|
||||||
free_trash_chunk(req_out);
|
free_trash_chunk(req_out);
|
||||||
free_trash_chunk(csr);
|
free_trash_chunk(csr);
|
||||||
|
|
@ -1415,7 +1409,7 @@ int acme_res_finalize(struct task *task, struct acme_ctx *ctx, char **errmsg)
|
||||||
memprintf(errmsg, "invalid HTTP status code %d when getting Finalize URL", hc->res.status);
|
memprintf(errmsg, "invalid HTTP status code %d when getting Finalize URL", hc->res.status);
|
||||||
goto error;
|
goto error;
|
||||||
}
|
}
|
||||||
out:
|
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
|
||||||
error:
|
error:
|
||||||
|
|
@ -1458,9 +1452,10 @@ int acme_req_challenge(struct task *task, struct acme_ctx *ctx, struct acme_auth
|
||||||
goto error;
|
goto error;
|
||||||
|
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
goto out;
|
||||||
error:
|
error:
|
||||||
memprintf(errmsg, "couldn't generate the Challenge request");
|
memprintf(errmsg, "couldn't generate the Challenge request");
|
||||||
|
out:
|
||||||
free_trash_chunk(req_in);
|
free_trash_chunk(req_in);
|
||||||
free_trash_chunk(req_out);
|
free_trash_chunk(req_out);
|
||||||
|
|
||||||
|
|
@ -1576,6 +1571,8 @@ int acme_post_as_get(struct task *task, struct acme_ctx *ctx, struct ist url, ch
|
||||||
|
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
|
||||||
|
goto end;
|
||||||
|
|
||||||
error_jws:
|
error_jws:
|
||||||
memprintf(errmsg, "couldn't generate the JWS token: %s", errmsg ? *errmsg : "");
|
memprintf(errmsg, "couldn't generate the JWS token: %s", errmsg ? *errmsg : "");
|
||||||
goto end;
|
goto end;
|
||||||
|
|
@ -1764,7 +1761,6 @@ int acme_res_auth(struct task *task, struct acme_ctx *ctx, struct acme_auth *aut
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
out:
|
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
|
||||||
error:
|
error:
|
||||||
|
|
@ -1816,9 +1812,10 @@ int acme_req_neworder(struct task *task, struct acme_ctx *ctx, char **errmsg)
|
||||||
goto error;
|
goto error;
|
||||||
|
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
goto out;
|
||||||
error:
|
error:
|
||||||
memprintf(errmsg, "couldn't generate the newOrder request");
|
memprintf(errmsg, "couldn't generate the newOrder request");
|
||||||
|
out:
|
||||||
free_trash_chunk(req_in);
|
free_trash_chunk(req_in);
|
||||||
free_trash_chunk(req_out);
|
free_trash_chunk(req_out);
|
||||||
|
|
||||||
|
|
@ -1926,7 +1923,6 @@ int acme_res_neworder(struct task *task, struct acme_ctx *ctx, char **errmsg)
|
||||||
goto error;
|
goto error;
|
||||||
}
|
}
|
||||||
|
|
||||||
out:
|
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
|
||||||
error:
|
error:
|
||||||
|
|
@ -1978,9 +1974,10 @@ int acme_req_account(struct task *task, struct acme_ctx *ctx, int newaccount, ch
|
||||||
goto error;
|
goto error;
|
||||||
|
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
goto out;
|
||||||
error:
|
error:
|
||||||
memprintf(errmsg, "couldn't generate the newAccount request");
|
memprintf(errmsg, "couldn't generate the newAccount request");
|
||||||
|
out:
|
||||||
free_trash_chunk(req_in);
|
free_trash_chunk(req_in);
|
||||||
free_trash_chunk(req_out);
|
free_trash_chunk(req_out);
|
||||||
|
|
||||||
|
|
@ -2587,45 +2584,6 @@ error:
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Return a new Generated private key of type <keytype> with <bits> and <curves> */
|
|
||||||
static EVP_PKEY *acme_EVP_PKEY_gen(int keytype, int curves, int bits, char **errmsg)
|
|
||||||
{
|
|
||||||
|
|
||||||
EVP_PKEY_CTX *pkey_ctx = NULL;
|
|
||||||
EVP_PKEY *pkey = NULL;
|
|
||||||
|
|
||||||
if ((pkey_ctx = EVP_PKEY_CTX_new_id(keytype, NULL)) == NULL) {
|
|
||||||
memprintf(errmsg, "%sCan't generate a private key.\n", errmsg && *errmsg ? *errmsg : "");
|
|
||||||
goto err;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (EVP_PKEY_keygen_init(pkey_ctx) <= 0) {
|
|
||||||
memprintf(errmsg, "%sCan't generate a private key.\n", errmsg && *errmsg ? *errmsg : "");
|
|
||||||
goto err;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (keytype == EVP_PKEY_EC) {
|
|
||||||
if (EVP_PKEY_CTX_set_ec_paramgen_curve_nid(pkey_ctx, curves) <= 0) {
|
|
||||||
memprintf(errmsg, "%sCan't set the curves on the new private key.\n", errmsg && *errmsg ? *errmsg : "");
|
|
||||||
goto err;
|
|
||||||
}
|
|
||||||
} else if (keytype == EVP_PKEY_RSA) {
|
|
||||||
if (EVP_PKEY_CTX_set_rsa_keygen_bits(pkey_ctx, bits) <= 0) {
|
|
||||||
memprintf(errmsg, "%sCan't set the bits on the new private key.\n", errmsg && *errmsg ? *errmsg : "");
|
|
||||||
goto err;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (EVP_PKEY_keygen(pkey_ctx, &pkey) <= 0) {
|
|
||||||
memprintf(errmsg, "%sCan't generate a private key.\n", errmsg && *errmsg ? *errmsg : "");
|
|
||||||
goto err;
|
|
||||||
}
|
|
||||||
|
|
||||||
err:
|
|
||||||
EVP_PKEY_CTX_free(pkey_ctx);
|
|
||||||
return pkey;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Generate a temporary expired X509 or reuse the one generated.
|
* Generate a temporary expired X509 or reuse the one generated.
|
||||||
* Use tmp_pkey to generate
|
* Use tmp_pkey to generate
|
||||||
|
|
@ -2634,81 +2592,18 @@ err:
|
||||||
*/
|
*/
|
||||||
X509 *acme_gen_tmp_x509()
|
X509 *acme_gen_tmp_x509()
|
||||||
{
|
{
|
||||||
X509 *newcrt = NULL;
|
|
||||||
X509_NAME *name;
|
|
||||||
const EVP_MD *digest = NULL;
|
|
||||||
CONF *ctmp = NULL;
|
|
||||||
int key_type;
|
|
||||||
EVP_PKEY *pkey = tmp_pkey;
|
|
||||||
|
|
||||||
if (tmp_x509) {
|
if (tmp_x509) {
|
||||||
X509_up_ref(tmp_x509);
|
X509_up_ref(tmp_x509);
|
||||||
return tmp_x509;
|
return tmp_x509;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!tmp_pkey)
|
if (!tmp_pkey)
|
||||||
goto mkcert_error;
|
|
||||||
|
|
||||||
/* Create the certificate */
|
|
||||||
if (!(newcrt = X509_new()))
|
|
||||||
goto mkcert_error;
|
|
||||||
|
|
||||||
/* Set version number for the certificate (X509v3) and the serial
|
|
||||||
* number */
|
|
||||||
if (X509_set_version(newcrt, 2L) != 1)
|
|
||||||
goto mkcert_error;
|
|
||||||
|
|
||||||
/* Generate an expired certificate */
|
|
||||||
if (!X509_gmtime_adj(X509_getm_notBefore(newcrt), (long)-60*60*48) ||
|
|
||||||
!X509_gmtime_adj(X509_getm_notAfter(newcrt),(long)-60*60*24))
|
|
||||||
goto mkcert_error;
|
|
||||||
|
|
||||||
/* set public key in the certificate */
|
|
||||||
if (X509_set_pubkey(newcrt, pkey) != 1)
|
|
||||||
goto mkcert_error;
|
|
||||||
|
|
||||||
if ((name = X509_NAME_new()) == NULL)
|
|
||||||
goto mkcert_error;
|
|
||||||
|
|
||||||
/* Set the subject name using the servername but the CN */
|
|
||||||
if (X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC, (unsigned char *)"expired",
|
|
||||||
-1, -1, 0) != 1) {
|
|
||||||
X509_NAME_free(name);
|
|
||||||
goto mkcert_error;
|
|
||||||
}
|
|
||||||
if (X509_set_subject_name(newcrt, name) != 1) {
|
|
||||||
X509_NAME_free(name);
|
|
||||||
goto mkcert_error;
|
|
||||||
}
|
|
||||||
/* Set issuer name as itself */
|
|
||||||
if (X509_set_issuer_name(newcrt, name) != 1)
|
|
||||||
goto mkcert_error;
|
|
||||||
X509_NAME_free(name);
|
|
||||||
|
|
||||||
/* Autosign the certificate with the private key */
|
|
||||||
key_type = EVP_PKEY_base_id(pkey);
|
|
||||||
|
|
||||||
if (key_type == EVP_PKEY_RSA)
|
|
||||||
digest = EVP_sha256();
|
|
||||||
else if (key_type == EVP_PKEY_EC)
|
|
||||||
digest = EVP_sha256();
|
|
||||||
else
|
|
||||||
goto mkcert_error;
|
|
||||||
|
|
||||||
if (!(X509_sign(newcrt, pkey, digest)))
|
|
||||||
goto mkcert_error;
|
|
||||||
|
|
||||||
tmp_x509 = newcrt;
|
|
||||||
|
|
||||||
return newcrt;
|
|
||||||
|
|
||||||
mkcert_error:
|
|
||||||
if (ctmp) NCONF_free(ctmp);
|
|
||||||
if (newcrt) X509_free(newcrt);
|
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
}
|
tmp_x509 = ssl_gen_x509(tmp_pkey);
|
||||||
|
|
||||||
|
return tmp_x509;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Generate a temporary RSA2048 pkey or reuse the one generated.
|
* Generate a temporary RSA2048 pkey or reuse the one generated.
|
||||||
|
|
@ -2723,7 +2618,7 @@ EVP_PKEY *acme_gen_tmp_pkey()
|
||||||
return tmp_pkey;
|
return tmp_pkey;
|
||||||
}
|
}
|
||||||
|
|
||||||
tmp_pkey = acme_EVP_PKEY_gen(EVP_PKEY_RSA, 0, 2048, NULL);
|
tmp_pkey = ssl_gen_EVP_PKEY(EVP_PKEY_RSA, 0, 2048, NULL);
|
||||||
|
|
||||||
return tmp_pkey;
|
return tmp_pkey;
|
||||||
}
|
}
|
||||||
|
|
@ -2782,7 +2677,7 @@ static int acme_start_task(struct ckch_store *store, char **errmsg)
|
||||||
ctx->retries = ACME_RETRY;
|
ctx->retries = ACME_RETRY;
|
||||||
|
|
||||||
if (!cfg->reuse_key) {
|
if (!cfg->reuse_key) {
|
||||||
if ((pkey = acme_EVP_PKEY_gen(cfg->key.type, cfg->key.curves, cfg->key.bits, errmsg)) == NULL)
|
if ((pkey = ssl_gen_EVP_PKEY(cfg->key.type, cfg->key.curves, cfg->key.bits, errmsg)) == NULL)
|
||||||
goto err;
|
goto err;
|
||||||
|
|
||||||
EVP_PKEY_free(newstore->data->key);
|
EVP_PKEY_free(newstore->data->key);
|
||||||
|
|
|
||||||
21
src/action.c
21
src/action.c
|
|
@ -188,13 +188,30 @@ int cfg_parse_rule_set_timeout(const char **args, int idx, struct act_rule *rule
|
||||||
const char *res;
|
const char *res;
|
||||||
const char *timeout_name = args[idx++];
|
const char *timeout_name = args[idx++];
|
||||||
|
|
||||||
if (strcmp(timeout_name, "server") == 0) {
|
if (strcmp(timeout_name, "connect") == 0) {
|
||||||
|
if (!(px->cap & PR_CAP_BE)) {
|
||||||
|
memprintf(err, "'%s' has no backend capability", px->id);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
rule->arg.timeout.type = ACT_TIMEOUT_CONNECT;
|
||||||
|
}
|
||||||
|
else if (strcmp(timeout_name, "server") == 0) {
|
||||||
if (!(px->cap & PR_CAP_BE)) {
|
if (!(px->cap & PR_CAP_BE)) {
|
||||||
memprintf(err, "'%s' has no backend capability", px->id);
|
memprintf(err, "'%s' has no backend capability", px->id);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
rule->arg.timeout.type = ACT_TIMEOUT_SERVER;
|
rule->arg.timeout.type = ACT_TIMEOUT_SERVER;
|
||||||
}
|
}
|
||||||
|
else if (strcmp(timeout_name, "queue") == 0) {
|
||||||
|
if (!(px->cap & PR_CAP_BE)) {
|
||||||
|
memprintf(err, "'%s' has no backend capability", px->id);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
rule->arg.timeout.type = ACT_TIMEOUT_QUEUE;
|
||||||
|
}
|
||||||
|
else if (strcmp(timeout_name, "tarpit") == 0) {
|
||||||
|
rule->arg.timeout.type = ACT_TIMEOUT_TARPIT;
|
||||||
|
}
|
||||||
else if (strcmp(timeout_name, "tunnel") == 0) {
|
else if (strcmp(timeout_name, "tunnel") == 0) {
|
||||||
if (!(px->cap & PR_CAP_BE)) {
|
if (!(px->cap & PR_CAP_BE)) {
|
||||||
memprintf(err, "'%s' has no backend capability", px->id);
|
memprintf(err, "'%s' has no backend capability", px->id);
|
||||||
|
|
@ -211,7 +228,7 @@ int cfg_parse_rule_set_timeout(const char **args, int idx, struct act_rule *rule
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
memprintf(err,
|
memprintf(err,
|
||||||
"'set-timeout' rule supports 'server'/'tunnel'/'client' (got '%s')",
|
"'set-timeout' rule supports 'client'/'connect'/'queue'/'server'/'tarpit'/'tunnel' (got '%s')",
|
||||||
timeout_name);
|
timeout_name);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -659,8 +659,20 @@ void activity_count_runtime(uint32_t run_time)
|
||||||
if (!(_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)) {
|
if (!(_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)) {
|
||||||
if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_ON ||
|
if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_ON ||
|
||||||
((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AON &&
|
((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AON &&
|
||||||
swrate_avg(run_time, TIME_STATS_SAMPLES) >= up)))
|
swrate_avg(run_time, TIME_STATS_SAMPLES) >= up))) {
|
||||||
|
|
||||||
|
if (profiling & HA_PROF_TASKS_LOCK)
|
||||||
|
_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_TASK_PROFILING_L);
|
||||||
|
else
|
||||||
|
_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_TASK_PROFILING_L);
|
||||||
|
|
||||||
|
if (profiling & HA_PROF_TASKS_MEM)
|
||||||
|
_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_TASK_PROFILING_M);
|
||||||
|
else
|
||||||
|
_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_TASK_PROFILING_M);
|
||||||
|
|
||||||
_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_TASK_PROFILING);
|
_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_TASK_PROFILING);
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_OFF ||
|
if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_OFF ||
|
||||||
((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AOFF &&
|
((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AOFF &&
|
||||||
|
|
@ -692,26 +704,41 @@ static int cfg_parse_prof_memory(char **args, int section_type, struct proxy *cu
|
||||||
}
|
}
|
||||||
#endif // USE_MEMORY_PROFILING
|
#endif // USE_MEMORY_PROFILING
|
||||||
|
|
||||||
/* config parser for global "profiling.tasks", accepts "on" or "off" */
|
/* config parser for global "profiling.tasks", accepts "on", "off", 'auto",
|
||||||
|
* "lock", "no-lock", "memory", "no-memory".
|
||||||
|
*/
|
||||||
static int cfg_parse_prof_tasks(char **args, int section_type, struct proxy *curpx,
|
static int cfg_parse_prof_tasks(char **args, int section_type, struct proxy *curpx,
|
||||||
const struct proxy *defpx, const char *file, int line,
|
const struct proxy *defpx, const char *file, int line,
|
||||||
char **err)
|
char **err)
|
||||||
{
|
{
|
||||||
if (too_many_args(1, args, err, NULL))
|
int arg;
|
||||||
return -1;
|
|
||||||
|
|
||||||
if (strcmp(args[1], "on") == 0) {
|
for (arg = 1; *args[arg]; arg++) {
|
||||||
|
if (strcmp(args[arg], "on") == 0) {
|
||||||
profiling = (profiling & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_ON;
|
profiling = (profiling & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_ON;
|
||||||
HA_ATOMIC_STORE(&prof_task_start_ns, now_ns);
|
HA_ATOMIC_STORE(&prof_task_start_ns, now_ns);
|
||||||
}
|
}
|
||||||
else if (strcmp(args[1], "auto") == 0) {
|
else if (strcmp(args[arg], "auto") == 0) {
|
||||||
profiling = (profiling & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_AOFF;
|
profiling = (profiling & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_AOFF;
|
||||||
HA_ATOMIC_STORE(&prof_task_start_ns, now_ns);
|
HA_ATOMIC_STORE(&prof_task_start_ns, now_ns);
|
||||||
}
|
}
|
||||||
else if (strcmp(args[1], "off") == 0)
|
else if (strcmp(args[arg], "off") == 0)
|
||||||
profiling = (profiling & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_OFF;
|
profiling = (profiling & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_OFF;
|
||||||
else {
|
else if (strcmp(args[arg], "lock") == 0)
|
||||||
memprintf(err, "'%s' expects either 'on', 'auto', or 'off' but got '%s'.", args[0], args[1]);
|
profiling |= HA_PROF_TASKS_LOCK;
|
||||||
|
else if (strcmp(args[arg], "no-lock") == 0)
|
||||||
|
profiling &= ~HA_PROF_TASKS_LOCK;
|
||||||
|
else if (strcmp(args[arg], "memory") == 0)
|
||||||
|
profiling |= HA_PROF_TASKS_MEM;
|
||||||
|
else if (strcmp(args[arg], "no-memory") == 0)
|
||||||
|
profiling &= ~HA_PROF_TASKS_MEM;
|
||||||
|
else
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* either no arg or invalid arg */
|
||||||
|
if (arg == 1 || *args[arg]) {
|
||||||
|
memprintf(err, "'%s' expects a combination of either 'on', 'auto', 'off', 'lock', 'no-lock', 'memory', or 'no-memory', but got '%s'.", args[0], args[arg]);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
|
|
@ -720,6 +747,8 @@ static int cfg_parse_prof_tasks(char **args, int section_type, struct proxy *cur
|
||||||
/* parse a "set profiling" command. It always returns 1. */
|
/* parse a "set profiling" command. It always returns 1. */
|
||||||
static int cli_parse_set_profiling(char **args, char *payload, struct appctx *appctx, void *private)
|
static int cli_parse_set_profiling(char **args, char *payload, struct appctx *appctx, void *private)
|
||||||
{
|
{
|
||||||
|
int arg;
|
||||||
|
|
||||||
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
|
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
|
|
@ -765,7 +794,8 @@ static int cli_parse_set_profiling(char **args, char *payload, struct appctx *ap
|
||||||
if (strcmp(args[2], "tasks") != 0)
|
if (strcmp(args[2], "tasks") != 0)
|
||||||
return cli_err(appctx, "Expects either 'tasks' or 'memory'.\n");
|
return cli_err(appctx, "Expects either 'tasks' or 'memory'.\n");
|
||||||
|
|
||||||
if (strcmp(args[3], "on") == 0) {
|
for (arg = 3; *args[arg]; arg++) {
|
||||||
|
if (strcmp(args[arg], "on") == 0) {
|
||||||
unsigned int old = profiling;
|
unsigned int old = profiling;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
|
|
@ -787,7 +817,7 @@ static int cli_parse_set_profiling(char **args, char *payload, struct appctx *ap
|
||||||
HA_ATOMIC_STORE(&sched_activity[i].caller, NULL);
|
HA_ATOMIC_STORE(&sched_activity[i].caller, NULL);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else if (strcmp(args[3], "auto") == 0) {
|
else if (strcmp(args[arg], "auto") == 0) {
|
||||||
unsigned int old = profiling;
|
unsigned int old = profiling;
|
||||||
unsigned int new;
|
unsigned int new;
|
||||||
|
|
||||||
|
|
@ -801,7 +831,7 @@ static int cli_parse_set_profiling(char **args, char *payload, struct appctx *ap
|
||||||
HA_ATOMIC_STORE(&prof_task_start_ns, now_ns);
|
HA_ATOMIC_STORE(&prof_task_start_ns, now_ns);
|
||||||
HA_ATOMIC_STORE(&prof_task_stop_ns, 0);
|
HA_ATOMIC_STORE(&prof_task_stop_ns, 0);
|
||||||
}
|
}
|
||||||
else if (strcmp(args[3], "off") == 0) {
|
else if (strcmp(args[arg], "off") == 0) {
|
||||||
unsigned int old = profiling;
|
unsigned int old = profiling;
|
||||||
while (!_HA_ATOMIC_CAS(&profiling, &old, (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_OFF))
|
while (!_HA_ATOMIC_CAS(&profiling, &old, (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_OFF))
|
||||||
;
|
;
|
||||||
|
|
@ -809,8 +839,21 @@ static int cli_parse_set_profiling(char **args, char *payload, struct appctx *ap
|
||||||
if (HA_ATOMIC_LOAD(&prof_task_start_ns))
|
if (HA_ATOMIC_LOAD(&prof_task_start_ns))
|
||||||
HA_ATOMIC_STORE(&prof_task_stop_ns, now_ns);
|
HA_ATOMIC_STORE(&prof_task_stop_ns, now_ns);
|
||||||
}
|
}
|
||||||
|
else if (strcmp(args[arg], "lock") == 0)
|
||||||
|
HA_ATOMIC_OR(&profiling, HA_PROF_TASKS_LOCK);
|
||||||
|
else if (strcmp(args[arg], "no-lock") == 0)
|
||||||
|
HA_ATOMIC_AND(&profiling, ~HA_PROF_TASKS_LOCK);
|
||||||
|
else if (strcmp(args[arg], "memory") == 0)
|
||||||
|
HA_ATOMIC_OR(&profiling, HA_PROF_TASKS_MEM);
|
||||||
|
else if (strcmp(args[arg], "no-memory") == 0)
|
||||||
|
HA_ATOMIC_AND(&profiling, ~HA_PROF_TASKS_MEM);
|
||||||
else
|
else
|
||||||
return cli_err(appctx, "Expects 'on', 'auto', or 'off'.\n");
|
break; // unknown arg
|
||||||
|
}
|
||||||
|
|
||||||
|
/* either no arg or invalid one */
|
||||||
|
if (arg == 3 || *args[arg])
|
||||||
|
return cli_err(appctx, "Expects a combination of either 'on', 'auto', 'off', 'lock', 'no-lock', 'memory' or 'no-memory'.\n");
|
||||||
|
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
11
src/applet.c
11
src/applet.c
|
|
@ -505,7 +505,7 @@ size_t appctx_htx_rcv_buf(struct appctx *appctx, struct buffer *buf, size_t coun
|
||||||
|
|
||||||
ret = appctx_htx->data;
|
ret = appctx_htx->data;
|
||||||
buf_htx = htx_from_buf(buf);
|
buf_htx = htx_from_buf(buf);
|
||||||
if (htx_is_empty(buf_htx) && htx_used_space(appctx_htx) <= count) {
|
if (b_size(&appctx->outbuf) == b_size(buf) && htx_is_empty(buf_htx) && htx_used_space(appctx_htx) <= count) {
|
||||||
htx_to_buf(buf_htx, buf);
|
htx_to_buf(buf_htx, buf);
|
||||||
htx_to_buf(appctx_htx, &appctx->outbuf);
|
htx_to_buf(appctx_htx, &appctx->outbuf);
|
||||||
b_xfer(buf, &appctx->outbuf, b_data(&appctx->outbuf));
|
b_xfer(buf, &appctx->outbuf, b_data(&appctx->outbuf));
|
||||||
|
|
@ -848,6 +848,11 @@ struct task *task_run_applet(struct task *t, void *context, unsigned int state)
|
||||||
|
|
||||||
input = applet_output_data(app);
|
input = applet_output_data(app);
|
||||||
output = co_data(oc);
|
output = co_data(oc);
|
||||||
|
|
||||||
|
/* Don't call I/O handler if the applet was shut (release callback was
|
||||||
|
* already called)
|
||||||
|
*/
|
||||||
|
if (!se_fl_test(app->sedesc, SE_FL_SHR) || !se_fl_test(app->sedesc, SE_FL_SHW))
|
||||||
app->applet->fct(app);
|
app->applet->fct(app);
|
||||||
|
|
||||||
TRACE_POINT(APPLET_EV_PROCESS, app);
|
TRACE_POINT(APPLET_EV_PROCESS, app);
|
||||||
|
|
@ -945,6 +950,10 @@ struct task *task_process_applet(struct task *t, void *context, unsigned int sta
|
||||||
applet_need_more_data(app);
|
applet_need_more_data(app);
|
||||||
applet_have_no_more_data(app);
|
applet_have_no_more_data(app);
|
||||||
|
|
||||||
|
/* Don't call I/O handler if the applet was shut (release callback was
|
||||||
|
* already called)
|
||||||
|
*/
|
||||||
|
if (!applet_fl_test(app, APPCTX_FL_SHUTDOWN))
|
||||||
app->applet->fct(app);
|
app->applet->fct(app);
|
||||||
|
|
||||||
TRACE_POINT(APPLET_EV_PROCESS, app);
|
TRACE_POINT(APPLET_EV_PROCESS, app);
|
||||||
|
|
|
||||||
137
src/backend.c
137
src/backend.c
|
|
@ -59,6 +59,7 @@
|
||||||
#include <haproxy/task.h>
|
#include <haproxy/task.h>
|
||||||
#include <haproxy/ticks.h>
|
#include <haproxy/ticks.h>
|
||||||
#include <haproxy/time.h>
|
#include <haproxy/time.h>
|
||||||
|
#include <haproxy/tools.h>
|
||||||
#include <haproxy/trace.h>
|
#include <haproxy/trace.h>
|
||||||
|
|
||||||
#define TRACE_SOURCE &trace_strm
|
#define TRACE_SOURCE &trace_strm
|
||||||
|
|
@ -576,9 +577,20 @@ struct server *get_server_rnd(struct stream *s, const struct server *avoid)
|
||||||
/* compare the new server to the previous best choice and pick
|
/* compare the new server to the previous best choice and pick
|
||||||
* the one with the least currently served requests.
|
* the one with the least currently served requests.
|
||||||
*/
|
*/
|
||||||
if (prev && prev != curr &&
|
if (prev && prev != curr) {
|
||||||
curr->served * prev->cur_eweight > prev->served * curr->cur_eweight)
|
uint64_t wcurr = (uint64_t)curr->served * prev->cur_eweight;
|
||||||
|
uint64_t wprev = (uint64_t)prev->served * curr->cur_eweight;
|
||||||
|
|
||||||
|
if (wcurr > wprev)
|
||||||
curr = prev;
|
curr = prev;
|
||||||
|
else if (wcurr == wprev && curr->counters.shared.tg && prev->counters.shared.tg) {
|
||||||
|
/* same load: pick the lowest weighted request rate */
|
||||||
|
wcurr = read_freq_ctr_period_estimate(&curr->counters.shared.tg[tgid - 1]->sess_per_sec, MS_TO_TICKS(1000));
|
||||||
|
wprev = read_freq_ctr_period_estimate(&prev->counters.shared.tg[tgid - 1]->sess_per_sec, MS_TO_TICKS(1000));
|
||||||
|
if (wprev * curr->cur_eweight < wcurr * prev->cur_eweight)
|
||||||
|
curr = prev;
|
||||||
|
}
|
||||||
|
}
|
||||||
} while (--draws > 0);
|
} while (--draws > 0);
|
||||||
|
|
||||||
/* if the selected server is full, pretend we have none so that we reach
|
/* if the selected server is full, pretend we have none so that we reach
|
||||||
|
|
@ -2047,6 +2059,26 @@ int connect_server(struct stream *s)
|
||||||
srv_conn->sni_hash = ssl_sock_sni_hash(sni);
|
srv_conn->sni_hash = ssl_sock_sni_hash(sni);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if defined(TLSEXT_TYPE_application_layer_protocol_negotiation)
|
||||||
|
/* Delay mux initialization if SSL and ALPN/NPN is set
|
||||||
|
* and server cache is not yet populated. Note that in
|
||||||
|
* TCP mode this check is ignored as only mux-pt is
|
||||||
|
* available.
|
||||||
|
*
|
||||||
|
* This check must be performed before conn_prepare()
|
||||||
|
* to ensure consistency accross the whole stack, in
|
||||||
|
* particular for QUIC between quic-conn and mux layer.
|
||||||
|
*/
|
||||||
|
if (IS_HTX_STRM(s) && srv->use_ssl &&
|
||||||
|
(srv->ssl_ctx.alpn_str || srv->ssl_ctx.npn_str)) {
|
||||||
|
HA_RWLOCK_RDLOCK(SERVER_LOCK, &srv->path_params.param_lock);
|
||||||
|
if (srv->path_params.nego_alpn[0] == 0)
|
||||||
|
may_start_mux_now = 0;
|
||||||
|
HA_RWLOCK_RDUNLOCK(SERVER_LOCK, &srv->path_params.param_lock);
|
||||||
|
}
|
||||||
|
#endif /* TLSEXT_TYPE_application_layer_protocol_negotiation */
|
||||||
|
|
||||||
#endif /* USE_OPENSSL */
|
#endif /* USE_OPENSSL */
|
||||||
|
|
||||||
if (conn_prepare(srv_conn, proto, srv->xprt)) {
|
if (conn_prepare(srv_conn, proto, srv->xprt)) {
|
||||||
|
|
@ -2079,21 +2111,6 @@ int connect_server(struct stream *s)
|
||||||
}
|
}
|
||||||
srv_conn->ctx = s->scb;
|
srv_conn->ctx = s->scb;
|
||||||
|
|
||||||
#if defined(USE_OPENSSL) && defined(TLSEXT_TYPE_application_layer_protocol_negotiation)
|
|
||||||
/* Delay mux initialization if SSL and ALPN/NPN is set. Note
|
|
||||||
* that this is skipped in TCP mode as we only want mux-pt
|
|
||||||
* anyway.
|
|
||||||
*/
|
|
||||||
if (srv) {
|
|
||||||
HA_RWLOCK_RDLOCK(SERVER_LOCK, &srv->path_params.param_lock);
|
|
||||||
if (IS_HTX_STRM(s) && srv->use_ssl &&
|
|
||||||
(srv->ssl_ctx.alpn_str || srv->ssl_ctx.npn_str) &&
|
|
||||||
srv->path_params.nego_alpn[0] == 0)
|
|
||||||
may_start_mux_now = 0;
|
|
||||||
HA_RWLOCK_RDUNLOCK(SERVER_LOCK, &srv->path_params.param_lock);
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* process the case where the server requires the PROXY protocol to be sent */
|
/* process the case where the server requires the PROXY protocol to be sent */
|
||||||
srv_conn->send_proxy_ofs = 0;
|
srv_conn->send_proxy_ofs = 0;
|
||||||
|
|
||||||
|
|
@ -2187,7 +2204,7 @@ int connect_server(struct stream *s)
|
||||||
*/
|
*/
|
||||||
if (may_start_mux_now) {
|
if (may_start_mux_now) {
|
||||||
const struct mux_ops *alt_mux =
|
const struct mux_ops *alt_mux =
|
||||||
likely(!(s->flags & SF_WEBSOCKET)) ? NULL : srv_get_ws_proto(srv);
|
likely(!(s->flags & SF_WEBSOCKET) || !srv) ? NULL : srv_get_ws_proto(srv);
|
||||||
if (conn_install_mux_be(srv_conn, s->scb, s->sess, alt_mux) < 0) {
|
if (conn_install_mux_be(srv_conn, s->scb, s->sess, alt_mux) < 0) {
|
||||||
conn_full_close(srv_conn);
|
conn_full_close(srv_conn);
|
||||||
return SF_ERR_INTERNAL;
|
return SF_ERR_INTERNAL;
|
||||||
|
|
@ -2242,14 +2259,14 @@ int connect_server(struct stream *s)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* set connect timeout */
|
/* set connect timeout */
|
||||||
s->conn_exp = tick_add_ifset(now_ms, s->be->timeout.connect);
|
s->conn_exp = tick_add_ifset(now_ms, s->connect_timeout);
|
||||||
|
|
||||||
if (srv) {
|
if (srv) {
|
||||||
int count;
|
int count;
|
||||||
|
|
||||||
s->flags |= SF_CURR_SESS;
|
s->flags |= SF_CURR_SESS;
|
||||||
count = _HA_ATOMIC_ADD_FETCH(&srv->cur_sess, 1);
|
count = _HA_ATOMIC_ADD_FETCH(&srv->cur_sess, 1);
|
||||||
HA_ATOMIC_UPDATE_MAX(&srv->counters.cur_sess_max, count);
|
COUNTERS_UPDATE_MAX(&srv->counters.cur_sess_max, count);
|
||||||
if (s->be->lbprm.server_take_conn)
|
if (s->be->lbprm.server_take_conn)
|
||||||
s->be->lbprm.server_take_conn(srv);
|
s->be->lbprm.server_take_conn(srv);
|
||||||
}
|
}
|
||||||
|
|
@ -2365,7 +2382,7 @@ int srv_redispatch_connect(struct stream *s)
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
case SRV_STATUS_QUEUED:
|
case SRV_STATUS_QUEUED:
|
||||||
s->conn_exp = tick_add_ifset(now_ms, s->be->timeout.queue);
|
s->conn_exp = tick_add_ifset(now_ms, s->queue_timeout);
|
||||||
s->scb->state = SC_ST_QUE;
|
s->scb->state = SC_ST_QUE;
|
||||||
|
|
||||||
/* handle the unlikely event where we added to the server's
|
/* handle the unlikely event where we added to the server's
|
||||||
|
|
@ -3044,6 +3061,27 @@ int be_downtime(struct proxy *px) {
|
||||||
return ns_to_sec(now_ns) - px->last_change + px->down_time;
|
return ns_to_sec(now_ns) - px->last_change + px->down_time;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Checks if <px> backend supports the addition of servers at runtime. Either a
|
||||||
|
* backend or a defaults proxy are supported. If proxy is incompatible, <msg>
|
||||||
|
* will be allocated to contain a textual explaination.
|
||||||
|
*/
|
||||||
|
int be_supports_dynamic_srv(struct proxy *px, char **msg)
|
||||||
|
{
|
||||||
|
if (px->lbprm.algo && !(px->lbprm.algo & BE_LB_PROP_DYN)) {
|
||||||
|
memprintf(msg, "%s '%s' uses a non dynamic load balancing method",
|
||||||
|
proxy_cap_str(px->cap), px->id);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (px->mode == PR_MODE_SYSLOG) {
|
||||||
|
memprintf(msg, "%s '%s' uses mode log",
|
||||||
|
proxy_cap_str(px->cap), px->id);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This function returns a string containing the balancing
|
* This function returns a string containing the balancing
|
||||||
* mode of the proxy in a format suitable for stats.
|
* mode of the proxy in a format suitable for stats.
|
||||||
|
|
@ -3708,6 +3746,42 @@ smp_fetch_srv_uweight(const struct arg *args, struct sample *smp, const char *kw
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int
|
||||||
|
smp_fetch_be_connect_timeout(const struct arg *args, struct sample *smp, const char *km, void *private)
|
||||||
|
{
|
||||||
|
struct proxy *px = NULL;
|
||||||
|
|
||||||
|
if (smp->strm)
|
||||||
|
px = smp->strm->be;
|
||||||
|
else if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
|
||||||
|
px = __objt_check(smp->sess->origin)->proxy;
|
||||||
|
if (!px)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
smp->flags = SMP_F_VOL_TXN;
|
||||||
|
smp->data.type = SMP_T_SINT;
|
||||||
|
smp->data.u.sint = TICKS_TO_MS(px->timeout.connect);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int
|
||||||
|
smp_fetch_be_queue_timeout(const struct arg *args, struct sample *smp, const char *km, void *private)
|
||||||
|
{
|
||||||
|
struct proxy *px = NULL;
|
||||||
|
|
||||||
|
if (smp->strm)
|
||||||
|
px = smp->strm->be;
|
||||||
|
else if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
|
||||||
|
px = __objt_check(smp->sess->origin)->proxy;
|
||||||
|
if (!px)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
smp->flags = SMP_F_VOL_TXN;
|
||||||
|
smp->data.type = SMP_T_SINT;
|
||||||
|
smp->data.u.sint = TICKS_TO_MS(px->timeout.queue);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
smp_fetch_be_server_timeout(const struct arg *args, struct sample *smp, const char *km, void *private)
|
smp_fetch_be_server_timeout(const struct arg *args, struct sample *smp, const char *km, void *private)
|
||||||
{
|
{
|
||||||
|
|
@ -3726,6 +3800,24 @@ smp_fetch_be_server_timeout(const struct arg *args, struct sample *smp, const ch
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int
|
||||||
|
smp_fetch_be_tarpit_timeout(const struct arg *args, struct sample *smp, const char *km, void *private)
|
||||||
|
{
|
||||||
|
struct proxy *px = NULL;
|
||||||
|
|
||||||
|
if (smp->strm)
|
||||||
|
px = smp->strm->be;
|
||||||
|
else if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
|
||||||
|
px = __objt_check(smp->sess->origin)->proxy;
|
||||||
|
if (!px)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
smp->flags = SMP_F_VOL_TXN;
|
||||||
|
smp->data.type = SMP_T_SINT;
|
||||||
|
smp->data.u.sint = TICKS_TO_MS(px->timeout.tarpit);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
smp_fetch_be_tunnel_timeout(const struct arg *args, struct sample *smp, const char *km, void *private)
|
smp_fetch_be_tunnel_timeout(const struct arg *args, struct sample *smp, const char *km, void *private)
|
||||||
{
|
{
|
||||||
|
|
@ -3826,8 +3918,11 @@ static struct sample_fetch_kw_list smp_kws = {ILH, {
|
||||||
{ "be_conn_free", smp_fetch_be_conn_free, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
|
{ "be_conn_free", smp_fetch_be_conn_free, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
|
||||||
{ "be_id", smp_fetch_be_id, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
|
{ "be_id", smp_fetch_be_id, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
|
||||||
{ "be_name", smp_fetch_be_name, 0, NULL, SMP_T_STR, SMP_USE_BKEND, },
|
{ "be_name", smp_fetch_be_name, 0, NULL, SMP_T_STR, SMP_USE_BKEND, },
|
||||||
|
{ "be_connect_timeout",smp_fetch_be_connect_timeout,0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
|
||||||
|
{ "be_queue_timeout", smp_fetch_be_queue_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
|
||||||
{ "be_server_timeout", smp_fetch_be_server_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
|
{ "be_server_timeout", smp_fetch_be_server_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
|
||||||
{ "be_sess_rate", smp_fetch_be_sess_rate, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
|
{ "be_sess_rate", smp_fetch_be_sess_rate, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
|
||||||
|
{ "be_tarpit_timeout", smp_fetch_be_tarpit_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
|
||||||
{ "be_tunnel_timeout", smp_fetch_be_tunnel_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
|
{ "be_tunnel_timeout", smp_fetch_be_tunnel_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
|
||||||
{ "connslots", smp_fetch_connslots, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
|
{ "connslots", smp_fetch_connslots, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
|
||||||
{ "nbsrv", smp_fetch_nbsrv, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
|
{ "nbsrv", smp_fetch_nbsrv, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
|
||||||
|
|
|
||||||
|
|
@ -348,7 +348,7 @@ size_t b_xfer(struct buffer *dst, struct buffer *src, size_t count)
|
||||||
|
|
||||||
if (ret > count)
|
if (ret > count)
|
||||||
ret = count;
|
ret = count;
|
||||||
else if (!b_data(dst)) {
|
else if (!b_data(dst) && b_size(dst) == b_size(src)) {
|
||||||
/* zero copy is possible by just swapping buffers */
|
/* zero copy is possible by just swapping buffers */
|
||||||
struct buffer tmp = *dst;
|
struct buffer tmp = *dst;
|
||||||
*dst = *src;
|
*dst = *src;
|
||||||
|
|
|
||||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue