mirror of
https://github.com/haproxy/haproxy.git
synced 2026-03-02 13:20:24 -05:00
Compare commits
No commits in common. "master" and "v3.3-dev7" have entirely different histories.
597 changed files with 16314 additions and 40368 deletions
|
|
@ -1,7 +1,7 @@
|
||||||
FreeBSD_task:
|
FreeBSD_task:
|
||||||
freebsd_instance:
|
freebsd_instance:
|
||||||
matrix:
|
matrix:
|
||||||
image_family: freebsd-14-3
|
image_family: freebsd-14-2
|
||||||
only_if: $CIRRUS_BRANCH =~ 'master|next'
|
only_if: $CIRRUS_BRANCH =~ 'master|next'
|
||||||
install_script:
|
install_script:
|
||||||
- pkg update -f && pkg upgrade -y && pkg install -y openssl git gmake lua54 socat pcre2
|
- pkg update -f && pkg upgrade -y && pkg install -y openssl git gmake lua54 socat pcre2
|
||||||
|
|
|
||||||
34
.github/actions/setup-vtest/action.yml
vendored
34
.github/actions/setup-vtest/action.yml
vendored
|
|
@ -1,34 +0,0 @@
|
||||||
name: 'setup VTest'
|
|
||||||
description: 'ssss'
|
|
||||||
|
|
||||||
runs:
|
|
||||||
using: "composite"
|
|
||||||
steps:
|
|
||||||
|
|
||||||
- name: Setup coredumps
|
|
||||||
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
sudo sysctl -w fs.suid_dumpable=1
|
|
||||||
sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
|
|
||||||
|
|
||||||
- name: Setup ulimit for core dumps
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
# This is required for macOS which does not actually allow to increase
|
|
||||||
# the '-n' soft limit to the hard limit, thus failing to run.
|
|
||||||
ulimit -n 65536
|
|
||||||
ulimit -c unlimited
|
|
||||||
|
|
||||||
- name: Install VTest
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
scripts/build-vtest.sh
|
|
||||||
|
|
||||||
- name: Install problem matcher for VTest
|
|
||||||
shell: bash
|
|
||||||
# This allows one to more easily see which tests fail.
|
|
||||||
run: echo "::add-matcher::.github/vtest.json"
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
2
.github/h2spec.config
vendored
2
.github/h2spec.config
vendored
|
|
@ -19,7 +19,7 @@ defaults
|
||||||
|
|
||||||
frontend h2
|
frontend h2
|
||||||
mode http
|
mode http
|
||||||
bind 127.0.0.1:8443 ssl crt reg-tests/ssl/certs/common.pem alpn h2,http/1.1
|
bind 127.0.0.1:8443 ssl crt reg-tests/ssl/common.pem alpn h2,http/1.1
|
||||||
default_backend h2b
|
default_backend h2b
|
||||||
|
|
||||||
backend h2b
|
backend h2b
|
||||||
|
|
|
||||||
13
.github/matrix.py
vendored
13
.github/matrix.py
vendored
|
|
@ -222,7 +222,7 @@ def main(ref_name):
|
||||||
"OPENSSL_VERSION=1.0.2u",
|
"OPENSSL_VERSION=1.0.2u",
|
||||||
"OPENSSL_VERSION=1.1.1s",
|
"OPENSSL_VERSION=1.1.1s",
|
||||||
"OPENSSL_VERSION=3.5.1",
|
"OPENSSL_VERSION=3.5.1",
|
||||||
"QUICTLS_VERSION=OpenSSL_1_1_1w-quic1",
|
"QUICTLS=yes",
|
||||||
"WOLFSSL_VERSION=5.7.0",
|
"WOLFSSL_VERSION=5.7.0",
|
||||||
"AWS_LC_VERSION=1.39.0",
|
"AWS_LC_VERSION=1.39.0",
|
||||||
# "BORINGSSL=yes",
|
# "BORINGSSL=yes",
|
||||||
|
|
@ -261,7 +261,7 @@ def main(ref_name):
|
||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
if ssl == "BORINGSSL=yes" or "QUICTLS" in ssl or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl or openssl_supports_quic:
|
if ssl == "BORINGSSL=yes" or ssl == "QUICTLS=yes" or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl or openssl_supports_quic:
|
||||||
flags.append("USE_QUIC=1")
|
flags.append("USE_QUIC=1")
|
||||||
|
|
||||||
matrix.append(
|
matrix.append(
|
||||||
|
|
@ -275,9 +275,12 @@ def main(ref_name):
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
# macOS on dev branches
|
# macOS
|
||||||
if "haproxy-" not in ref_name:
|
|
||||||
os = "macos-26" # development branch
|
if "haproxy-" in ref_name:
|
||||||
|
os = "macos-13" # stable branch
|
||||||
|
else:
|
||||||
|
os = "macos-15" # development branch
|
||||||
|
|
||||||
TARGET = "osx"
|
TARGET = "osx"
|
||||||
for CC in ["clang"]:
|
for CC in ["clang"]:
|
||||||
|
|
|
||||||
11
.github/workflows/aws-lc-template.yml
vendored
11
.github/workflows/aws-lc-template.yml
vendored
|
|
@ -16,6 +16,9 @@ jobs:
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
|
- name: Install VTest
|
||||||
|
run: |
|
||||||
|
scripts/build-vtest.sh
|
||||||
- name: Determine latest AWS-LC release
|
- name: Determine latest AWS-LC release
|
||||||
id: get_aws_lc_release
|
id: get_aws_lc_release
|
||||||
run: |
|
run: |
|
||||||
|
|
@ -49,10 +52,16 @@ jobs:
|
||||||
ldd $(which haproxy)
|
ldd $(which haproxy)
|
||||||
haproxy -vv
|
haproxy -vv
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||||
- uses: ./.github/actions/setup-vtest
|
- name: Install problem matcher for VTest
|
||||||
|
run: echo "::add-matcher::.github/vtest.json"
|
||||||
- name: Run VTest for HAProxy
|
- name: Run VTest for HAProxy
|
||||||
id: vtest
|
id: vtest
|
||||||
run: |
|
run: |
|
||||||
|
# This is required for macOS which does not actually allow to increase
|
||||||
|
# the '-n' soft limit to the hard limit, thus failing to run.
|
||||||
|
ulimit -n 65536
|
||||||
|
# allow to catch coredumps
|
||||||
|
ulimit -c unlimited
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||||
- name: Run Unit tests
|
- name: Run Unit tests
|
||||||
id: unittests
|
id: unittests
|
||||||
|
|
|
||||||
3
.github/workflows/contrib.yml
vendored
3
.github/workflows/contrib.yml
vendored
|
|
@ -11,6 +11,9 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
|
- name: Compile admin/halog/halog
|
||||||
|
run: |
|
||||||
|
make admin/halog/halog
|
||||||
- name: Compile dev/flags/flags
|
- name: Compile dev/flags/flags
|
||||||
run: |
|
run: |
|
||||||
make dev/flags/flags
|
make dev/flags/flags
|
||||||
|
|
|
||||||
2
.github/workflows/coverity.yml
vendored
2
.github/workflows/coverity.yml
vendored
|
|
@ -27,7 +27,7 @@ jobs:
|
||||||
libsystemd-dev
|
libsystemd-dev
|
||||||
- name: Install QUICTLS
|
- name: Install QUICTLS
|
||||||
run: |
|
run: |
|
||||||
QUICTLS_VERSION=OpenSSL_1_1_1w-quic1 scripts/build-ssl.sh
|
QUICTLS=yes scripts/build-ssl.sh
|
||||||
- name: Download Coverity build tool
|
- name: Download Coverity build tool
|
||||||
run: |
|
run: |
|
||||||
wget -c -N https://scan.coverity.com/download/linux64 --post-data "token=${{ secrets.COVERITY_SCAN_TOKEN }}&project=Haproxy" -O coverity_tool.tar.gz
|
wget -c -N https://scan.coverity.com/download/linux64 --post-data "token=${{ secrets.COVERITY_SCAN_TOKEN }}&project=Haproxy" -O coverity_tool.tar.gz
|
||||||
|
|
|
||||||
2
.github/workflows/cross-zoo.yml
vendored
2
.github/workflows/cross-zoo.yml
vendored
|
|
@ -104,7 +104,7 @@ jobs:
|
||||||
|
|
||||||
- name: install quictls
|
- name: install quictls
|
||||||
run: |
|
run: |
|
||||||
QUICTLS_EXTRA_ARGS="--cross-compile-prefix=${{ matrix.platform.arch }}- ${{ matrix.platform.target }}" QUICTLS_VERSION=OpenSSL_1_1_1w-quic1 scripts/build-ssl.sh
|
QUICTLS_EXTRA_ARGS="--cross-compile-prefix=${{ matrix.platform.arch }}- ${{ matrix.platform.target }}" QUICTLS=yes scripts/build-ssl.sh
|
||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
run: |
|
run: |
|
||||||
|
|
|
||||||
29
.github/workflows/fedora-rawhide.yml
vendored
29
.github/workflows/fedora-rawhide.yml
vendored
|
|
@ -1,4 +1,4 @@
|
||||||
name: Fedora/Rawhide/OpenSSL
|
name: Fedora/Rawhide/QuicTLS
|
||||||
|
|
||||||
on:
|
on:
|
||||||
schedule:
|
schedule:
|
||||||
|
|
@ -13,12 +13,11 @@ jobs:
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
platform: [
|
platform: [
|
||||||
{ name: x64, cc: gcc, ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
|
{ name: x64, cc: gcc, QUICTLS_EXTRA_ARGS: "", ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
|
||||||
{ name: x64, cc: clang, ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
|
{ name: x64, cc: clang, QUICTLS_EXTRA_ARGS: "", ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
|
||||||
{ name: x86, cc: gcc, ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" },
|
{ name: x86, cc: gcc, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" },
|
||||||
{ name: x86, cc: clang, ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }
|
{ name: x86, cc: clang, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }
|
||||||
]
|
]
|
||||||
fail-fast: false
|
|
||||||
name: ${{ matrix.platform.cc }}.${{ matrix.platform.name }}
|
name: ${{ matrix.platform.cc }}.${{ matrix.platform.name }}
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||||
|
|
@ -28,9 +27,12 @@ jobs:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
dnf -y install awk diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang openssl-devel.x86_64
|
dnf -y install awk diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang
|
||||||
dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686 openssl-devel.i686
|
dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686
|
||||||
- uses: ./.github/actions/setup-vtest
|
- name: Install VTest
|
||||||
|
run: scripts/build-vtest.sh
|
||||||
|
- name: Install QuicTLS
|
||||||
|
run: QUICTLS=yes QUICTLS_EXTRA_ARGS="${{ matrix.platform.QUICTLS_EXTRA_ARGS }}" scripts/build-ssl.sh
|
||||||
- name: Build contrib tools
|
- name: Build contrib tools
|
||||||
run: |
|
run: |
|
||||||
make admin/halog/halog
|
make admin/halog/halog
|
||||||
|
|
@ -39,7 +41,7 @@ jobs:
|
||||||
make dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
|
make dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
|
||||||
- name: Compile HAProxy with ${{ matrix.platform.cc }}
|
- name: Compile HAProxy with ${{ matrix.platform.cc }}
|
||||||
run: |
|
run: |
|
||||||
make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" USE_PROMEX=1 USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }}" ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
|
make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }} -Wl,-rpath,${HOME}/opt/lib" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
|
||||||
make install
|
make install
|
||||||
- name: Show HAProxy version
|
- name: Show HAProxy version
|
||||||
id: show-version
|
id: show-version
|
||||||
|
|
@ -49,13 +51,6 @@ jobs:
|
||||||
echo "::endgroup::"
|
echo "::endgroup::"
|
||||||
haproxy -vv
|
haproxy -vv
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||||
#
|
|
||||||
# TODO: review this workaround later
|
|
||||||
- name: relax crypto policies
|
|
||||||
run: |
|
|
||||||
dnf -y install crypto-policies-scripts
|
|
||||||
echo LEGACY > /etc/crypto-policies/config
|
|
||||||
update-crypto-policies
|
|
||||||
- name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
|
- name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
|
||||||
id: vtest
|
id: vtest
|
||||||
run: |
|
run: |
|
||||||
|
|
|
||||||
82
.github/workflows/openssl-ech.yml
vendored
82
.github/workflows/openssl-ech.yml
vendored
|
|
@ -1,82 +0,0 @@
|
||||||
name: openssl ECH
|
|
||||||
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: "0 3 * * *"
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
test:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
- name: Install VTest
|
|
||||||
run: |
|
|
||||||
scripts/build-vtest.sh
|
|
||||||
- name: Install apt dependencies
|
|
||||||
run: |
|
|
||||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
|
||||||
sudo apt-get --no-install-recommends -y install socat gdb
|
|
||||||
sudo apt-get --no-install-recommends -y install libpsl-dev
|
|
||||||
- name: Install OpenSSL+ECH
|
|
||||||
run: env OPENSSL_VERSION="git-feature/ech" GIT_TYPE="branch" scripts/build-ssl.sh
|
|
||||||
- name: Install curl+ECH
|
|
||||||
run: env SSL_LIB=${HOME}/opt/ scripts/build-curl.sh
|
|
||||||
- name: Compile HAProxy
|
|
||||||
run: |
|
|
||||||
make -j$(nproc) CC=gcc TARGET=linux-glibc \
|
|
||||||
USE_QUIC=1 USE_OPENSSL=1 USE_ECH=1 \
|
|
||||||
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
|
||||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
|
|
||||||
ARCH_FLAGS="-ggdb3 -fsanitize=address"
|
|
||||||
sudo make install
|
|
||||||
- name: Show HAProxy version
|
|
||||||
id: show-version
|
|
||||||
run: |
|
|
||||||
ldd $(which haproxy)
|
|
||||||
haproxy -vv
|
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
|
||||||
- name: Install problem matcher for VTest
|
|
||||||
run: echo "::add-matcher::.github/vtest.json"
|
|
||||||
- name: Run VTest for HAProxy
|
|
||||||
id: vtest
|
|
||||||
run: |
|
|
||||||
# This is required for macOS which does not actually allow to increase
|
|
||||||
# the '-n' soft limit to the hard limit, thus failing to run.
|
|
||||||
ulimit -n 65536
|
|
||||||
# allow to catch coredumps
|
|
||||||
ulimit -c unlimited
|
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
|
||||||
- name: Show VTest results
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
|
||||||
printf "::group::"
|
|
||||||
cat $folder/INFO
|
|
||||||
cat $folder/LOG
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
exit 1
|
|
||||||
- name: Run Unit tests
|
|
||||||
id: unittests
|
|
||||||
run: |
|
|
||||||
make unit-tests
|
|
||||||
- name: Show coredumps
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
failed=false
|
|
||||||
shopt -s nullglob
|
|
||||||
for file in /tmp/core.*; do
|
|
||||||
failed=true
|
|
||||||
printf "::group::"
|
|
||||||
gdb -ex 'thread apply all bt full' ./haproxy $file
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
if [ "$failed" = true ]; then
|
|
||||||
exit 1;
|
|
||||||
fi
|
|
||||||
77
.github/workflows/openssl-master.yml
vendored
77
.github/workflows/openssl-master.yml
vendored
|
|
@ -1,77 +0,0 @@
|
||||||
name: openssl master
|
|
||||||
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: "0 3 * * *"
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
test:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v5
|
|
||||||
- name: Install apt dependencies
|
|
||||||
run: |
|
|
||||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
|
||||||
sudo apt-get --no-install-recommends -y install socat gdb
|
|
||||||
sudo apt-get --no-install-recommends -y install libpsl-dev
|
|
||||||
- uses: ./.github/actions/setup-vtest
|
|
||||||
- name: Install OpenSSL master
|
|
||||||
run: env OPENSSL_VERSION="git-master" GIT_TYPE="branch" scripts/build-ssl.sh
|
|
||||||
- name: Compile HAProxy
|
|
||||||
run: |
|
|
||||||
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
|
||||||
USE_QUIC=1 USE_OPENSSL=1 \
|
|
||||||
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
|
||||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
|
||||||
sudo make install
|
|
||||||
- name: Show HAProxy version
|
|
||||||
id: show-version
|
|
||||||
run: |
|
|
||||||
ldd $(which haproxy)
|
|
||||||
haproxy -vv
|
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
|
||||||
- name: Install problem matcher for VTest
|
|
||||||
run: echo "::add-matcher::.github/vtest.json"
|
|
||||||
- name: Run VTest for HAProxy
|
|
||||||
id: vtest
|
|
||||||
run: |
|
|
||||||
# This is required for macOS which does not actually allow to increase
|
|
||||||
# the '-n' soft limit to the hard limit, thus failing to run.
|
|
||||||
ulimit -n 65536
|
|
||||||
# allow to catch coredumps
|
|
||||||
ulimit -c unlimited
|
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
|
||||||
- name: Show VTest results
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
|
||||||
printf "::group::"
|
|
||||||
cat $folder/INFO
|
|
||||||
cat $folder/LOG
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
exit 1
|
|
||||||
- name: Run Unit tests
|
|
||||||
id: unittests
|
|
||||||
run: |
|
|
||||||
make unit-tests
|
|
||||||
- name: Show coredumps
|
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
|
||||||
run: |
|
|
||||||
failed=false
|
|
||||||
shopt -s nullglob
|
|
||||||
for file in /tmp/core.*; do
|
|
||||||
failed=true
|
|
||||||
printf "::group::"
|
|
||||||
gdb -ex 'thread apply all bt full' ./haproxy $file
|
|
||||||
echo "::endgroup::"
|
|
||||||
done
|
|
||||||
if [ "$failed" = true ]; then
|
|
||||||
exit 1;
|
|
||||||
fi
|
|
||||||
34
.github/workflows/openssl-nodeprecated.yml
vendored
Normal file
34
.github/workflows/openssl-nodeprecated.yml
vendored
Normal file
|
|
@ -0,0 +1,34 @@
|
||||||
|
#
|
||||||
|
# special purpose CI: test against OpenSSL built in "no-deprecated" mode
|
||||||
|
# let us run those builds weekly
|
||||||
|
#
|
||||||
|
# for example, OpenWRT uses such OpenSSL builds (those builds are smaller)
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# some details might be found at NL: https://www.mail-archive.com/haproxy@formilux.org/msg35759.html
|
||||||
|
# GH: https://github.com/haproxy/haproxy/issues/367
|
||||||
|
|
||||||
|
name: openssl no-deprecated
|
||||||
|
|
||||||
|
on:
|
||||||
|
schedule:
|
||||||
|
- cron: "0 0 * * 4"
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
test:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v5
|
||||||
|
- name: Install VTest
|
||||||
|
run: |
|
||||||
|
scripts/build-vtest.sh
|
||||||
|
- name: Compile HAProxy
|
||||||
|
run: |
|
||||||
|
make DEFINE="-DOPENSSL_API_COMPAT=0x10100000L -DOPENSSL_NO_DEPRECATED" -j3 CC=gcc ERR=1 TARGET=linux-glibc USE_OPENSSL=1
|
||||||
|
- name: Run VTest
|
||||||
|
run: |
|
||||||
|
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||||
73
.github/workflows/quic-interop-aws-lc.yml
vendored
73
.github/workflows/quic-interop-aws-lc.yml
vendored
|
|
@ -11,7 +11,7 @@ on:
|
||||||
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
combined-build-and-run:
|
build:
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||||
permissions:
|
permissions:
|
||||||
|
|
@ -21,47 +21,84 @@ jobs:
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
|
|
||||||
- name: Update Docker to the latest
|
- name: Log in to the Container registry
|
||||||
uses: docker/setup-docker-action@v4
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Build Docker image
|
- name: Build and push Docker image
|
||||||
id: push
|
id: push
|
||||||
uses: docker/build-push-action@v6
|
uses: docker/build-push-action@v5
|
||||||
with:
|
with:
|
||||||
context: https://github.com/haproxytech/haproxy-qns.git
|
context: https://github.com/haproxytech/haproxy-qns.git
|
||||||
platforms: linux/amd64
|
push: true
|
||||||
build-args: |
|
build-args: |
|
||||||
SSLLIB=AWS-LC
|
SSLLIB: AWS-LC
|
||||||
tags: local:aws-lc
|
tags: ghcr.io/${{ github.repository }}:aws-lc
|
||||||
|
|
||||||
|
- name: Cleanup registry
|
||||||
|
uses: actions/delete-package-versions@v5
|
||||||
|
with:
|
||||||
|
owner: ${{ github.repository_owner }}
|
||||||
|
package-name: 'haproxy'
|
||||||
|
package-type: container
|
||||||
|
min-versions-to-keep: 1
|
||||||
|
delete-only-untagged-versions: 'true'
|
||||||
|
|
||||||
|
run:
|
||||||
|
needs: build
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
suite: [
|
||||||
|
{ client: chrome, tests: "http3" },
|
||||||
|
{ client: picoquic, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" },
|
||||||
|
{ client: quic-go, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" },
|
||||||
|
{ client: ngtcp2, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" }
|
||||||
|
]
|
||||||
|
fail-fast: false
|
||||||
|
|
||||||
|
name: ${{ matrix.suite.client }}
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v5
|
||||||
|
|
||||||
|
- name: Log in to the Container registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Install tshark
|
- name: Install tshark
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update
|
sudo apt-get update
|
||||||
sudo apt-get -y install tshark
|
sudo apt-get -y install tshark
|
||||||
|
|
||||||
|
- name: Pull image
|
||||||
|
run: |
|
||||||
|
docker pull ghcr.io/${{ github.repository }}:aws-lc
|
||||||
|
|
||||||
- name: Run
|
- name: Run
|
||||||
run: |
|
run: |
|
||||||
git clone https://github.com/quic-interop/quic-interop-runner
|
git clone https://github.com/quic-interop/quic-interop-runner
|
||||||
cd quic-interop-runner
|
cd quic-interop-runner
|
||||||
pip install -r requirements.txt --break-system-packages
|
pip install -r requirements.txt --break-system-packages
|
||||||
python run.py -j result.json -l logs-chrome -r haproxy=local:aws-lc -t "http3" -c chrome -s haproxy
|
python run.py -j result.json -l logs -r haproxy=ghcr.io/${{ github.repository }}:aws-lc -t ${{ matrix.suite.tests }} -c ${{ matrix.suite.client }} -s haproxy
|
||||||
python run.py -j result.json -l logs-picoquic -r haproxy=local:aws-lc -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" -c picoquic -s haproxy
|
|
||||||
python run.py -j result.json -l logs-quic-go -r haproxy=local:aws-lc -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" -c quic-go -s haproxy
|
|
||||||
python run.py -j result.json -l logs-ngtcp2 -r haproxy=local:aws-lc -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,resumption,zerortt,http3,blackhole,keyupdate,ecn,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,ipv6,v2" -c ngtcp2 -s haproxy
|
|
||||||
|
|
||||||
- name: Delete succeeded logs
|
- name: Delete succeeded logs
|
||||||
if: failure()
|
if: failure()
|
||||||
run: |
|
run: |
|
||||||
for client in chrome picoquic quic-go ngtcp2; do
|
cd quic-interop-runner/logs/haproxy_${{ matrix.suite.client }}
|
||||||
pushd quic-interop-runner/logs-${client}/haproxy_${client}
|
|
||||||
cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
|
cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
|
||||||
popd
|
|
||||||
done
|
|
||||||
|
|
||||||
- name: Logs upload
|
- name: Logs upload
|
||||||
if: failure()
|
if: failure()
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: logs
|
name: logs-${{ matrix.suite.client }}
|
||||||
path: quic-interop-runner/logs*/
|
path: quic-interop-runner/logs/
|
||||||
retention-days: 6
|
retention-days: 6
|
||||||
|
|
|
||||||
69
.github/workflows/quic-interop-libressl.yml
vendored
69
.github/workflows/quic-interop-libressl.yml
vendored
|
|
@ -11,7 +11,7 @@ on:
|
||||||
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
combined-build-and-run:
|
build:
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||||
permissions:
|
permissions:
|
||||||
|
|
@ -21,45 +21,82 @@ jobs:
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
|
|
||||||
- name: Update Docker to the latest
|
- name: Log in to the Container registry
|
||||||
uses: docker/setup-docker-action@v4
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Build Docker image
|
- name: Build and push Docker image
|
||||||
id: push
|
id: push
|
||||||
uses: docker/build-push-action@v6
|
uses: docker/build-push-action@v5
|
||||||
with:
|
with:
|
||||||
context: https://github.com/haproxytech/haproxy-qns.git
|
context: https://github.com/haproxytech/haproxy-qns.git
|
||||||
platforms: linux/amd64
|
push: true
|
||||||
build-args: |
|
build-args: |
|
||||||
SSLLIB=LibreSSL
|
SSLLIB: LibreSSL
|
||||||
tags: local:libressl
|
tags: ghcr.io/${{ github.repository }}:libressl
|
||||||
|
|
||||||
|
- name: Cleanup registry
|
||||||
|
uses: actions/delete-package-versions@v5
|
||||||
|
with:
|
||||||
|
owner: ${{ github.repository_owner }}
|
||||||
|
package-name: 'haproxy'
|
||||||
|
package-type: container
|
||||||
|
min-versions-to-keep: 1
|
||||||
|
delete-only-untagged-versions: 'true'
|
||||||
|
|
||||||
|
run:
|
||||||
|
needs: build
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
suite: [
|
||||||
|
{ client: picoquic, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,v2" },
|
||||||
|
{ client: quic-go, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,transferloss,transfercorruption,v2" }
|
||||||
|
]
|
||||||
|
fail-fast: false
|
||||||
|
|
||||||
|
name: ${{ matrix.suite.client }}
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v5
|
||||||
|
|
||||||
|
- name: Log in to the Container registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Install tshark
|
- name: Install tshark
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update
|
sudo apt-get update
|
||||||
sudo apt-get -y install tshark
|
sudo apt-get -y install tshark
|
||||||
|
|
||||||
|
- name: Pull image
|
||||||
|
run: |
|
||||||
|
docker pull ghcr.io/${{ github.repository }}:libressl
|
||||||
|
|
||||||
- name: Run
|
- name: Run
|
||||||
run: |
|
run: |
|
||||||
git clone https://github.com/quic-interop/quic-interop-runner
|
git clone https://github.com/quic-interop/quic-interop-runner
|
||||||
cd quic-interop-runner
|
cd quic-interop-runner
|
||||||
pip install -r requirements.txt --break-system-packages
|
pip install -r requirements.txt --break-system-packages
|
||||||
python run.py -j result.json -l logs-picoquic -r haproxy=local:libressl -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,v2" -c picoquic -s haproxy
|
python run.py -j result.json -l logs -r haproxy=ghcr.io/${{ github.repository }}:libressl -t ${{ matrix.suite.tests }} -c ${{ matrix.suite.client }} -s haproxy
|
||||||
python run.py -j result.json -l logs-quic-go -r haproxy=local:libressl -t "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,transferloss,transfercorruption,v2" -c quic-go -s haproxy
|
|
||||||
|
|
||||||
- name: Delete succeeded logs
|
- name: Delete succeeded logs
|
||||||
if: failure()
|
if: failure()
|
||||||
run: |
|
run: |
|
||||||
for client in picoquic quic-go; do
|
cd quic-interop-runner/logs/haproxy_${{ matrix.suite.client }}
|
||||||
pushd quic-interop-runner/logs-${client}/haproxy_${client}
|
|
||||||
cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
|
cat ../../result.json | jq -r '.results[][] | select(.result=="succeeded") | .name' | xargs rm -rf
|
||||||
popd
|
|
||||||
done
|
|
||||||
|
|
||||||
- name: Logs upload
|
- name: Logs upload
|
||||||
if: failure()
|
if: failure()
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: logs
|
name: logs-${{ matrix.suite.client }}
|
||||||
path: quic-interop-runner/logs*/
|
path: quic-interop-runner/logs/
|
||||||
retention-days: 6
|
retention-days: 6
|
||||||
|
|
|
||||||
13
.github/workflows/quictls.yml
vendored
13
.github/workflows/quictls.yml
vendored
|
|
@ -18,12 +18,15 @@ jobs:
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
|
- name: Install VTest
|
||||||
|
run: |
|
||||||
|
scripts/build-vtest.sh
|
||||||
- name: Install apt dependencies
|
- name: Install apt dependencies
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
||||||
sudo apt-get --no-install-recommends -y install socat gdb
|
sudo apt-get --no-install-recommends -y install socat gdb
|
||||||
- name: Install QuicTLS
|
- name: Install QuicTLS
|
||||||
run: env QUICTLS_VERSION=main QUICTLS_URL=https://github.com/quictls/quictls scripts/build-ssl.sh
|
run: env QUICTLS=yes QUICTLS_URL=https://github.com/quictls/quictls scripts/build-ssl.sh
|
||||||
- name: Compile HAProxy
|
- name: Compile HAProxy
|
||||||
run: |
|
run: |
|
||||||
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
||||||
|
|
@ -39,10 +42,16 @@ jobs:
|
||||||
ldd $(which haproxy)
|
ldd $(which haproxy)
|
||||||
haproxy -vv
|
haproxy -vv
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||||
- uses: ./.github/actions/setup-vtest
|
- name: Install problem matcher for VTest
|
||||||
|
run: echo "::add-matcher::.github/vtest.json"
|
||||||
- name: Run VTest for HAProxy
|
- name: Run VTest for HAProxy
|
||||||
id: vtest
|
id: vtest
|
||||||
run: |
|
run: |
|
||||||
|
# This is required for macOS which does not actually allow to increase
|
||||||
|
# the '-n' soft limit to the hard limit, thus failing to run.
|
||||||
|
ulimit -n 65536
|
||||||
|
# allow to catch coredumps
|
||||||
|
ulimit -c unlimited
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||||
- name: Show VTest results
|
- name: Show VTest results
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||||
|
|
|
||||||
30
.github/workflows/vtest.yml
vendored
30
.github/workflows/vtest.yml
vendored
|
|
@ -48,6 +48,12 @@ jobs:
|
||||||
with:
|
with:
|
||||||
fetch-depth: 100
|
fetch-depth: 100
|
||||||
|
|
||||||
|
- name: Setup coredumps
|
||||||
|
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
|
||||||
|
run: |
|
||||||
|
sudo sysctl -w fs.suid_dumpable=1
|
||||||
|
sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
|
||||||
|
|
||||||
#
|
#
|
||||||
# Github Action cache key cannot contain comma, so we calculate it based on job name
|
# Github Action cache key cannot contain comma, so we calculate it based on job name
|
||||||
#
|
#
|
||||||
|
|
@ -57,7 +63,7 @@ jobs:
|
||||||
echo "key=$(echo ${{ matrix.name }} | sha256sum | awk '{print $1}')" >> $GITHUB_OUTPUT
|
echo "key=$(echo ${{ matrix.name }} | sha256sum | awk '{print $1}')" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
- name: Cache SSL libs
|
- name: Cache SSL libs
|
||||||
if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 'BORINGSSL=yes' && !contains(matrix.ssl, 'QUICTLS') }}
|
if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 'BORINGSSL=yes' && matrix.ssl != 'QUICTLS=yes' }}
|
||||||
id: cache_ssl
|
id: cache_ssl
|
||||||
uses: actions/cache@v4
|
uses: actions/cache@v4
|
||||||
with:
|
with:
|
||||||
|
|
@ -87,7 +93,9 @@ jobs:
|
||||||
run: |
|
run: |
|
||||||
brew install socat
|
brew install socat
|
||||||
brew install lua
|
brew install lua
|
||||||
- uses: ./.github/actions/setup-vtest
|
- name: Install VTest
|
||||||
|
run: |
|
||||||
|
scripts/build-vtest.sh
|
||||||
- name: Install SSL ${{ matrix.ssl }}
|
- name: Install SSL ${{ matrix.ssl }}
|
||||||
if: ${{ matrix.ssl && matrix.ssl != 'stock' && steps.cache_ssl.outputs.cache-hit != 'true' }}
|
if: ${{ matrix.ssl && matrix.ssl != 'stock' && steps.cache_ssl.outputs.cache-hit != 'true' }}
|
||||||
run: env ${{ matrix.ssl }} scripts/build-ssl.sh
|
run: env ${{ matrix.ssl }} scripts/build-ssl.sh
|
||||||
|
|
@ -113,16 +121,7 @@ jobs:
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
||||||
${{ join(matrix.FLAGS, ' ') }} \
|
${{ join(matrix.FLAGS, ' ') }} \
|
||||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
||||||
sudo make install-bin
|
sudo make install
|
||||||
- name: Compile admin/halog/halog
|
|
||||||
run: |
|
|
||||||
make -j$(nproc) admin/halog/halog \
|
|
||||||
ERR=1 \
|
|
||||||
TARGET=${{ matrix.TARGET }} \
|
|
||||||
CC=${{ matrix.CC }} \
|
|
||||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
|
||||||
${{ join(matrix.FLAGS, ' ') }} \
|
|
||||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
|
||||||
- name: Show HAProxy version
|
- name: Show HAProxy version
|
||||||
id: show-version
|
id: show-version
|
||||||
run: |
|
run: |
|
||||||
|
|
@ -137,9 +136,16 @@ jobs:
|
||||||
echo "::endgroup::"
|
echo "::endgroup::"
|
||||||
haproxy -vv
|
haproxy -vv
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||||
|
- name: Install problem matcher for VTest
|
||||||
|
# This allows one to more easily see which tests fail.
|
||||||
|
run: echo "::add-matcher::.github/vtest.json"
|
||||||
- name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
|
- name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
|
||||||
id: vtest
|
id: vtest
|
||||||
run: |
|
run: |
|
||||||
|
# This is required for macOS which does not actually allow to increase
|
||||||
|
# the '-n' soft limit to the hard limit, thus failing to run.
|
||||||
|
ulimit -n 65536
|
||||||
|
ulimit -c unlimited
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||||
- name: Show VTest results
|
- name: Show VTest results
|
||||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||||
|
|
|
||||||
1
.github/workflows/windows.yml
vendored
1
.github/workflows/windows.yml
vendored
|
|
@ -18,7 +18,6 @@ jobs:
|
||||||
msys2:
|
msys2:
|
||||||
name: ${{ matrix.name }}
|
name: ${{ matrix.name }}
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
shell: msys2 {0}
|
shell: msys2 {0}
|
||||||
|
|
|
||||||
11
.github/workflows/wolfssl.yml
vendored
11
.github/workflows/wolfssl.yml
vendored
|
|
@ -14,6 +14,9 @@ jobs:
|
||||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v5
|
- uses: actions/checkout@v5
|
||||||
|
- name: Install VTest
|
||||||
|
run: |
|
||||||
|
scripts/build-vtest.sh
|
||||||
- name: Install apt dependencies
|
- name: Install apt dependencies
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
||||||
|
|
@ -35,10 +38,16 @@ jobs:
|
||||||
ldd $(which haproxy)
|
ldd $(which haproxy)
|
||||||
haproxy -vv
|
haproxy -vv
|
||||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||||
- uses: ./.github/actions/setup-vtest
|
- name: Install problem matcher for VTest
|
||||||
|
run: echo "::add-matcher::.github/vtest.json"
|
||||||
- name: Run VTest for HAProxy
|
- name: Run VTest for HAProxy
|
||||||
id: vtest
|
id: vtest
|
||||||
run: |
|
run: |
|
||||||
|
# This is required for macOS which does not actually allow to increase
|
||||||
|
# the '-n' soft limit to the hard limit, thus failing to run.
|
||||||
|
ulimit -n 65536
|
||||||
|
# allow to catch coredumps
|
||||||
|
ulimit -c unlimited
|
||||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||||
- name: Run Unit tests
|
- name: Run Unit tests
|
||||||
id: unittests
|
id: unittests
|
||||||
|
|
|
||||||
12
BRANCHES
12
BRANCHES
|
|
@ -171,17 +171,7 @@ feedback for developers:
|
||||||
as the previous releases that had 6 months to stabilize. In terms of
|
as the previous releases that had 6 months to stabilize. In terms of
|
||||||
stability it really means that the point zero version already accumulated
|
stability it really means that the point zero version already accumulated
|
||||||
6 months of fixes and that it is much safer to use even just after it is
|
6 months of fixes and that it is much safer to use even just after it is
|
||||||
released. There is one exception though, features marked as "experimental"
|
released.
|
||||||
are not guaranteed to be maintained beyond the release of the next LTS
|
|
||||||
branch. The rationale here is that the experimental status is made to
|
|
||||||
expose an early preview of a feature, that is often incomplete, not always
|
|
||||||
in its definitive form regarding configuration, and for which developers
|
|
||||||
are seeking feedback from the users. It is even possible that changes will
|
|
||||||
be brought within the stable branch and it may happen that the feature
|
|
||||||
breaks. It is not imaginable to always be able to backport bug fixes too
|
|
||||||
far in this context since the code and configuration may change quite a
|
|
||||||
bit. Users who want to try experimental features are expected to upgrade
|
|
||||||
quickly to benefit from the improvements made to that feature.
|
|
||||||
|
|
||||||
- for developers, given that the odd versions are solely used by highly
|
- for developers, given that the odd versions are solely used by highly
|
||||||
skilled users, it's easier to get advanced traces and captures, and there
|
skilled users, it's easier to get advanced traces and captures, and there
|
||||||
|
|
|
||||||
22
INSTALL
22
INSTALL
|
|
@ -111,7 +111,7 @@ HAProxy requires a working GCC or Clang toolchain and GNU make :
|
||||||
may want to retry with "gmake" which is the name commonly used for GNU make
|
may want to retry with "gmake" which is the name commonly used for GNU make
|
||||||
on BSD systems.
|
on BSD systems.
|
||||||
|
|
||||||
- GCC >= 4.7 (up to 15 tested). Older versions are no longer supported due to
|
- GCC >= 4.7 (up to 14 tested). Older versions are no longer supported due to
|
||||||
the latest mt_list update which only uses c11-like atomics. Newer versions
|
the latest mt_list update which only uses c11-like atomics. Newer versions
|
||||||
may sometimes break due to compiler regressions or behaviour changes. The
|
may sometimes break due to compiler regressions or behaviour changes. The
|
||||||
version shipped with your operating system is very likely to work with no
|
version shipped with your operating system is very likely to work with no
|
||||||
|
|
@ -237,7 +237,7 @@ to forcefully enable it using "USE_LIBCRYPT=1".
|
||||||
-----------------
|
-----------------
|
||||||
For SSL/TLS, it is necessary to use a cryptography library. HAProxy currently
|
For SSL/TLS, it is necessary to use a cryptography library. HAProxy currently
|
||||||
supports the OpenSSL library, and is known to build and work with branches
|
supports the OpenSSL library, and is known to build and work with branches
|
||||||
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.6. It is recommended to use
|
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.5. It is recommended to use
|
||||||
at least OpenSSL 1.1.1 to have support for all SSL keywords and configuration
|
at least OpenSSL 1.1.1 to have support for all SSL keywords and configuration
|
||||||
in HAProxy. OpenSSL follows a long-term support cycle similar to HAProxy's,
|
in HAProxy. OpenSSL follows a long-term support cycle similar to HAProxy's,
|
||||||
and each of the branches above receives its own fixes, without forcing you to
|
and each of the branches above receives its own fixes, without forcing you to
|
||||||
|
|
@ -259,15 +259,11 @@ reported to work as well. While there are some efforts from the community to
|
||||||
ensure they work well, OpenSSL remains the primary target and this means that
|
ensure they work well, OpenSSL remains the primary target and this means that
|
||||||
in case of conflicting choices, OpenSSL support will be favored over other
|
in case of conflicting choices, OpenSSL support will be favored over other
|
||||||
options. Note that QUIC is not fully supported when haproxy is built with
|
options. Note that QUIC is not fully supported when haproxy is built with
|
||||||
OpenSSL < 3.5.2 version. In this case, QUICTLS or AWS-LC are the preferred
|
OpenSSL < 3.5 version. In this case, QUICTLS is the preferred alternative.
|
||||||
alternatives. As of writing this, the QuicTLS project follows OpenSSL very
|
As of writing this, the QuicTLS project follows OpenSSL very closely and provides
|
||||||
closely and provides update simultaneously, but being a volunteer-driven
|
update simultaneously, but being a volunteer-driven project, its long-term future
|
||||||
project, its long-term future does not look certain enough to convince
|
does not look certain enough to convince operating systems to package it, so it
|
||||||
operating systems to package it, so it needs to be build locally. Recent
|
needs to be build locally. See the section about QUIC in this document.
|
||||||
versions of AWS-LC (>= 1.22 and the FIPS branches) are pretty complete and
|
|
||||||
generally more performant than other OpenSSL derivatives, but may behave
|
|
||||||
slightly differently, particularly when dealing with outdated setups. See
|
|
||||||
the section about QUIC in this document.
|
|
||||||
|
|
||||||
A fifth option is wolfSSL (https://github.com/wolfSSL/wolfssl). It is the only
|
A fifth option is wolfSSL (https://github.com/wolfSSL/wolfssl). It is the only
|
||||||
supported alternative stack not based on OpenSSL, yet which implements almost
|
supported alternative stack not based on OpenSSL, yet which implements almost
|
||||||
|
|
@ -563,9 +559,9 @@ It goes into more details with the main options.
|
||||||
To build haproxy, you have to choose your target OS amongst the following ones
|
To build haproxy, you have to choose your target OS amongst the following ones
|
||||||
and assign it to the TARGET variable :
|
and assign it to the TARGET variable :
|
||||||
|
|
||||||
- linux-glibc for Linux kernel 4.17 and above
|
- linux-glibc for Linux kernel 2.6.28 and above
|
||||||
- linux-glibc-legacy for Linux kernel 2.6.28 and above without new features
|
- linux-glibc-legacy for Linux kernel 2.6.28 and above without new features
|
||||||
- linux-musl for Linux kernel 4.17 and above with musl libc
|
- linux-musl for Linux kernel 2.6.28 and above with musl libc
|
||||||
- solaris for Solaris 10 and above
|
- solaris for Solaris 10 and above
|
||||||
- freebsd for FreeBSD 10 and above
|
- freebsd for FreeBSD 10 and above
|
||||||
- dragonfly for DragonFlyBSD 4.3 and above
|
- dragonfly for DragonFlyBSD 4.3 and above
|
||||||
|
|
|
||||||
59
Makefile
59
Makefile
|
|
@ -35,7 +35,6 @@
|
||||||
# USE_OPENSSL : enable use of OpenSSL. Recommended, but see below.
|
# USE_OPENSSL : enable use of OpenSSL. Recommended, but see below.
|
||||||
# USE_OPENSSL_AWSLC : enable use of AWS-LC
|
# USE_OPENSSL_AWSLC : enable use of AWS-LC
|
||||||
# USE_OPENSSL_WOLFSSL : enable use of wolfSSL with the OpenSSL API
|
# USE_OPENSSL_WOLFSSL : enable use of wolfSSL with the OpenSSL API
|
||||||
# USE_ECH : enable use of ECH with the OpenSSL API
|
|
||||||
# USE_QUIC : enable use of QUIC with the quictls API (quictls, libressl, boringssl)
|
# USE_QUIC : enable use of QUIC with the quictls API (quictls, libressl, boringssl)
|
||||||
# USE_QUIC_OPENSSL_COMPAT : enable use of QUIC with the standard openssl API (limited features)
|
# USE_QUIC_OPENSSL_COMPAT : enable use of QUIC with the standard openssl API (limited features)
|
||||||
# USE_ENGINE : enable use of OpenSSL Engine.
|
# USE_ENGINE : enable use of OpenSSL Engine.
|
||||||
|
|
@ -214,8 +213,7 @@ UNIT_TEST_SCRIPT=./scripts/run-unittests.sh
|
||||||
# undefined behavior to silently produce invalid code. For this reason we have
|
# undefined behavior to silently produce invalid code. For this reason we have
|
||||||
# to use -fwrapv or -fno-strict-overflow to guarantee the intended behavior.
|
# to use -fwrapv or -fno-strict-overflow to guarantee the intended behavior.
|
||||||
# It is preferable not to change this option in order to avoid breakage.
|
# It is preferable not to change this option in order to avoid breakage.
|
||||||
STD_CFLAGS := $(call cc-opt-alt,-fwrapv,-fno-strict-overflow) \
|
STD_CFLAGS := $(call cc-opt-alt,-fwrapv,-fno-strict-overflow)
|
||||||
$(call cc-opt,-fvect-cost-model=very-cheap)
|
|
||||||
|
|
||||||
#### Compiler-specific flags to enable certain classes of warnings.
|
#### Compiler-specific flags to enable certain classes of warnings.
|
||||||
# Some are hard-coded, others are enabled only if supported.
|
# Some are hard-coded, others are enabled only if supported.
|
||||||
|
|
@ -342,7 +340,6 @@ use_opts = USE_EPOLL USE_KQUEUE USE_NETFILTER USE_POLL \
|
||||||
USE_TPROXY USE_LINUX_TPROXY USE_LINUX_CAP \
|
USE_TPROXY USE_LINUX_TPROXY USE_LINUX_CAP \
|
||||||
USE_LINUX_SPLICE USE_LIBCRYPT USE_CRYPT_H USE_ENGINE \
|
USE_LINUX_SPLICE USE_LIBCRYPT USE_CRYPT_H USE_ENGINE \
|
||||||
USE_GETADDRINFO USE_OPENSSL USE_OPENSSL_WOLFSSL USE_OPENSSL_AWSLC \
|
USE_GETADDRINFO USE_OPENSSL USE_OPENSSL_WOLFSSL USE_OPENSSL_AWSLC \
|
||||||
USE_ECH \
|
|
||||||
USE_SSL USE_LUA USE_ACCEPT4 USE_CLOSEFROM USE_ZLIB USE_SLZ \
|
USE_SSL USE_LUA USE_ACCEPT4 USE_CLOSEFROM USE_ZLIB USE_SLZ \
|
||||||
USE_CPU_AFFINITY USE_TFO USE_NS USE_DL USE_RT USE_LIBATOMIC \
|
USE_CPU_AFFINITY USE_TFO USE_NS USE_DL USE_RT USE_LIBATOMIC \
|
||||||
USE_MATH USE_DEVICEATLAS USE_51DEGREES \
|
USE_MATH USE_DEVICEATLAS USE_51DEGREES \
|
||||||
|
|
@ -382,13 +379,13 @@ ifeq ($(TARGET),haiku)
|
||||||
set_target_defaults = $(call default_opts,USE_POLL USE_TPROXY USE_OBSOLETE_LINKER)
|
set_target_defaults = $(call default_opts,USE_POLL USE_TPROXY USE_OBSOLETE_LINKER)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
# For linux >= 4.17 and glibc
|
# For linux >= 2.6.28 and glibc
|
||||||
ifeq ($(TARGET),linux-glibc)
|
ifeq ($(TARGET),linux-glibc)
|
||||||
set_target_defaults = $(call default_opts, \
|
set_target_defaults = $(call default_opts, \
|
||||||
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
|
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
|
||||||
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
|
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
|
||||||
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
|
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
|
||||||
USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_KTLS)
|
USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN)
|
||||||
INSTALL = install -v
|
INSTALL = install -v
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
|
@ -401,13 +398,13 @@ ifeq ($(TARGET),linux-glibc-legacy)
|
||||||
INSTALL = install -v
|
INSTALL = install -v
|
||||||
endif
|
endif
|
||||||
|
|
||||||
# For linux >= 4.17 and musl
|
# For linux >= 2.6.28 and musl
|
||||||
ifeq ($(TARGET),linux-musl)
|
ifeq ($(TARGET),linux-musl)
|
||||||
set_target_defaults = $(call default_opts, \
|
set_target_defaults = $(call default_opts, \
|
||||||
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
|
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
|
||||||
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
|
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
|
||||||
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
|
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
|
||||||
USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_KTLS)
|
USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN)
|
||||||
INSTALL = install -v
|
INSTALL = install -v
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
|
@ -601,10 +598,6 @@ ifneq ($(USE_BACKTRACE:0=),)
|
||||||
BACKTRACE_CFLAGS = -fno-omit-frame-pointer
|
BACKTRACE_CFLAGS = -fno-omit-frame-pointer
|
||||||
endif
|
endif
|
||||||
|
|
||||||
ifneq ($(USE_MEMORY_PROFILING:0=),)
|
|
||||||
MEMORY_PROFILING_CFLAGS = -fno-optimize-sibling-calls
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifneq ($(USE_CPU_AFFINITY:0=),)
|
ifneq ($(USE_CPU_AFFINITY:0=),)
|
||||||
OPTIONS_OBJS += src/cpuset.o
|
OPTIONS_OBJS += src/cpuset.o
|
||||||
OPTIONS_OBJS += src/cpu_topo.o
|
OPTIONS_OBJS += src/cpu_topo.o
|
||||||
|
|
@ -643,7 +636,7 @@ ifneq ($(USE_OPENSSL:0=),)
|
||||||
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o \
|
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o \
|
||||||
src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o \
|
src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o \
|
||||||
src/ssl_utils.o src/jwt.o src/ssl_clienthello.o src/jws.o src/acme.o \
|
src/ssl_utils.o src/jwt.o src/ssl_clienthello.o src/jws.o src/acme.o \
|
||||||
src/ssl_trace.o src/jwe.o
|
src/ssl_trace.o
|
||||||
endif
|
endif
|
||||||
|
|
||||||
ifneq ($(USE_ENGINE:0=),)
|
ifneq ($(USE_ENGINE:0=),)
|
||||||
|
|
@ -956,7 +949,6 @@ endif # obsolete targets
|
||||||
endif # TARGET
|
endif # TARGET
|
||||||
|
|
||||||
OBJS =
|
OBJS =
|
||||||
HATERM_OBJS =
|
|
||||||
|
|
||||||
ifneq ($(EXTRA_OBJS),)
|
ifneq ($(EXTRA_OBJS),)
|
||||||
OBJS += $(EXTRA_OBJS)
|
OBJS += $(EXTRA_OBJS)
|
||||||
|
|
@ -970,15 +962,15 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
|
||||||
src/cache.o src/stconn.o src/http_htx.o src/debug.o \
|
src/cache.o src/stconn.o src/http_htx.o src/debug.o \
|
||||||
src/check.o src/stats-html.o src/haproxy.o src/listener.o \
|
src/check.o src/stats-html.o src/haproxy.o src/listener.o \
|
||||||
src/applet.o src/pattern.o src/cfgparse-listen.o \
|
src/applet.o src/pattern.o src/cfgparse-listen.o \
|
||||||
src/flt_spoe.o src/cebis_tree.o src/http_ext.o \
|
src/flt_spoe.o src/cebuis_tree.o src/http_ext.o \
|
||||||
src/http_act.o src/http_fetch.o src/cebs_tree.o \
|
src/http_act.o src/http_fetch.o src/cebus_tree.o \
|
||||||
src/cebib_tree.o src/http_client.o src/dns.o \
|
src/cebuib_tree.o src/http_client.o src/dns.o \
|
||||||
src/cebb_tree.o src/vars.o src/event_hdl.o src/tcp_rules.o \
|
src/cebub_tree.o src/vars.o src/event_hdl.o src/tcp_rules.o \
|
||||||
src/trace.o src/stats-proxy.o src/pool.o src/stats.o \
|
src/trace.o src/stats-proxy.o src/pool.o src/stats.o \
|
||||||
src/cfgparse-global.o src/filters.o src/mux_pt.o \
|
src/cfgparse-global.o src/filters.o src/mux_pt.o \
|
||||||
src/flt_http_comp.o src/sock.o src/h1.o src/sink.o \
|
src/flt_http_comp.o src/sock.o src/h1.o src/sink.o \
|
||||||
src/ceba_tree.o src/session.o src/payload.o src/htx.o \
|
src/cebua_tree.o src/session.o src/payload.o src/htx.o \
|
||||||
src/cebl_tree.o src/ceb32_tree.o src/ceb64_tree.o \
|
src/cebul_tree.o src/cebu32_tree.o src/cebu64_tree.o \
|
||||||
src/server_state.o src/proto_rhttp.o src/flt_trace.o src/fd.o \
|
src/server_state.o src/proto_rhttp.o src/flt_trace.o src/fd.o \
|
||||||
src/task.o src/map.o src/fcgi-app.o src/h2.o src/mworker.o \
|
src/task.o src/map.o src/fcgi-app.o src/h2.o src/mworker.o \
|
||||||
src/tcp_sample.o src/mjson.o src/h1_htx.o src/tcp_act.o \
|
src/tcp_sample.o src/mjson.o src/h1_htx.o src/tcp_act.o \
|
||||||
|
|
@ -993,7 +985,7 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
|
||||||
src/cfgcond.o src/proto_udp.o src/lb_fwlc.o src/ebmbtree.o \
|
src/cfgcond.o src/proto_udp.o src/lb_fwlc.o src/ebmbtree.o \
|
||||||
src/proto_uxdg.o src/cfgdiag.o src/sock_unix.o src/sha1.o \
|
src/proto_uxdg.o src/cfgdiag.o src/sock_unix.o src/sha1.o \
|
||||||
src/lb_fas.o src/clock.o src/sock_inet.o src/ev_select.o \
|
src/lb_fas.o src/clock.o src/sock_inet.o src/ev_select.o \
|
||||||
src/lb_map.o src/shctx.o src/hpack-dec.o src/net_helper.o \
|
src/lb_map.o src/shctx.o src/hpack-dec.o \
|
||||||
src/arg.o src/signal.o src/fix.o src/dynbuf.o src/guid.o \
|
src/arg.o src/signal.o src/fix.o src/dynbuf.o src/guid.o \
|
||||||
src/cfgparse-tcp.o src/lb_ss.o src/chunk.o src/counters.o \
|
src/cfgparse-tcp.o src/lb_ss.o src/chunk.o src/counters.o \
|
||||||
src/cfgparse-unix.o src/regex.o src/fcgi.o src/uri_auth.o \
|
src/cfgparse-unix.o src/regex.o src/fcgi.o src/uri_auth.o \
|
||||||
|
|
@ -1003,15 +995,12 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
|
||||||
src/ebsttree.o src/freq_ctr.o src/systemd.o src/init.o \
|
src/ebsttree.o src/freq_ctr.o src/systemd.o src/init.o \
|
||||||
src/http_acl.o src/dict.o src/dgram.o src/pipe.o \
|
src/http_acl.o src/dict.o src/dgram.o src/pipe.o \
|
||||||
src/hpack-huff.o src/hpack-enc.o src/ebtree.o src/hash.o \
|
src/hpack-huff.o src/hpack-enc.o src/ebtree.o src/hash.o \
|
||||||
src/httpclient_cli.o src/version.o src/ncbmbuf.o src/ech.o \
|
src/httpclient_cli.o src/version.o
|
||||||
src/cfgparse-peers.o src/haterm.o
|
|
||||||
|
|
||||||
ifneq ($(TRACE),)
|
ifneq ($(TRACE),)
|
||||||
OBJS += src/calltrace.o
|
OBJS += src/calltrace.o
|
||||||
endif
|
endif
|
||||||
|
|
||||||
HATERM_OBJS += $(OBJS) src/haterm_init.o
|
|
||||||
|
|
||||||
# Used only for forced dependency checking. May be cleared during development.
|
# Used only for forced dependency checking. May be cleared during development.
|
||||||
INCLUDES = $(wildcard include/*/*.h)
|
INCLUDES = $(wildcard include/*/*.h)
|
||||||
DEP = $(INCLUDES) .build_opts
|
DEP = $(INCLUDES) .build_opts
|
||||||
|
|
@ -1043,7 +1032,7 @@ IGNORE_OPTS=help install install-man install-doc install-bin \
|
||||||
uninstall clean tags cscope tar git-tar version update-version \
|
uninstall clean tags cscope tar git-tar version update-version \
|
||||||
opts reg-tests reg-tests-help unit-tests admin/halog/halog dev/flags/flags \
|
opts reg-tests reg-tests-help unit-tests admin/halog/halog dev/flags/flags \
|
||||||
dev/haring/haring dev/ncpu/ncpu dev/poll/poll dev/tcploop/tcploop \
|
dev/haring/haring dev/ncpu/ncpu dev/poll/poll dev/tcploop/tcploop \
|
||||||
dev/term_events/term_events dev/gdb/pm-from-core
|
dev/term_events/term_events
|
||||||
|
|
||||||
ifneq ($(TARGET),)
|
ifneq ($(TARGET),)
|
||||||
ifeq ($(filter $(firstword $(MAKECMDGOALS)),$(IGNORE_OPTS)),)
|
ifeq ($(filter $(firstword $(MAKECMDGOALS)),$(IGNORE_OPTS)),)
|
||||||
|
|
@ -1059,9 +1048,6 @@ endif # non-empty target
|
||||||
haproxy: $(OPTIONS_OBJS) $(OBJS)
|
haproxy: $(OPTIONS_OBJS) $(OBJS)
|
||||||
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
||||||
|
|
||||||
haterm: $(OPTIONS_OBJS) $(HATERM_OBJS)
|
|
||||||
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
|
||||||
|
|
||||||
objsize: haproxy
|
objsize: haproxy
|
||||||
$(Q)objdump -t $^|grep ' g '|grep -F '.text'|awk '{print $$5 FS $$6}'|sort
|
$(Q)objdump -t $^|grep ' g '|grep -F '.text'|awk '{print $$5 FS $$6}'|sort
|
||||||
|
|
||||||
|
|
@ -1077,9 +1063,6 @@ admin/dyncookie/dyncookie: admin/dyncookie/dyncookie.o
|
||||||
dev/flags/flags: dev/flags/flags.o
|
dev/flags/flags: dev/flags/flags.o
|
||||||
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
||||||
|
|
||||||
dev/gdb/pm-from-core: dev/gdb/pm-from-core.o
|
|
||||||
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
|
||||||
|
|
||||||
dev/haring/haring: dev/haring/haring.o
|
dev/haring/haring: dev/haring/haring.o
|
||||||
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
|
||||||
|
|
||||||
|
|
@ -1133,11 +1116,6 @@ install-doc:
|
||||||
$(INSTALL) -m 644 doc/$$x.txt "$(DESTDIR)$(DOCDIR)" ; \
|
$(INSTALL) -m 644 doc/$$x.txt "$(DESTDIR)$(DOCDIR)" ; \
|
||||||
done
|
done
|
||||||
|
|
||||||
install-admin:
|
|
||||||
$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
|
|
||||||
$(Q)$(INSTALL) admin/cli/haproxy-dump-certs "$(DESTDIR)$(SBINDIR)"
|
|
||||||
$(Q)$(INSTALL) admin/cli/haproxy-reload "$(DESTDIR)$(SBINDIR)"
|
|
||||||
|
|
||||||
install-bin:
|
install-bin:
|
||||||
$(Q)for i in haproxy $(EXTRA); do \
|
$(Q)for i in haproxy $(EXTRA); do \
|
||||||
if ! [ -e "$$i" ]; then \
|
if ! [ -e "$$i" ]; then \
|
||||||
|
|
@ -1148,7 +1126,7 @@ install-bin:
|
||||||
$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
|
$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
|
||||||
$(Q)$(INSTALL) haproxy $(EXTRA) "$(DESTDIR)$(SBINDIR)"
|
$(Q)$(INSTALL) haproxy $(EXTRA) "$(DESTDIR)$(SBINDIR)"
|
||||||
|
|
||||||
install: install-bin install-admin install-man install-doc
|
install: install-bin install-man install-doc
|
||||||
|
|
||||||
uninstall:
|
uninstall:
|
||||||
$(Q)rm -f "$(DESTDIR)$(MANDIR)"/man1/haproxy.1
|
$(Q)rm -f "$(DESTDIR)$(MANDIR)"/man1/haproxy.1
|
||||||
|
|
@ -1178,7 +1156,7 @@ distclean: clean
|
||||||
$(Q)rm -f admin/dyncookie/dyncookie
|
$(Q)rm -f admin/dyncookie/dyncookie
|
||||||
$(Q)rm -f dev/haring/haring dev/ncpu/ncpu{,.so} dev/poll/poll dev/tcploop/tcploop
|
$(Q)rm -f dev/haring/haring dev/ncpu/ncpu{,.so} dev/poll/poll dev/tcploop/tcploop
|
||||||
$(Q)rm -f dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
|
$(Q)rm -f dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
|
||||||
$(Q)rm -f dev/qpack/decode dev/gdb/pm-from-core
|
$(Q)rm -f dev/qpack/decode
|
||||||
|
|
||||||
tags:
|
tags:
|
||||||
$(Q)find src include \( -name '*.c' -o -name '*.h' \) -print0 | \
|
$(Q)find src include \( -name '*.c' -o -name '*.h' \) -print0 | \
|
||||||
|
|
@ -1305,8 +1283,6 @@ unit-tests:
|
||||||
# options for all commits within RANGE. RANGE may be either a git range
|
# options for all commits within RANGE. RANGE may be either a git range
|
||||||
# such as ref1..ref2 or a single commit, in which case all commits from
|
# such as ref1..ref2 or a single commit, in which case all commits from
|
||||||
# the master branch to this one will be tested.
|
# the master branch to this one will be tested.
|
||||||
# Will execute TEST_CMD for each commit if defined, and will stop in case of
|
|
||||||
# failure.
|
|
||||||
|
|
||||||
range:
|
range:
|
||||||
$(Q)[ -d .git/. ] || { echo "## Fatal: \"make $@\" may only be used inside a Git repository."; exit 1; }
|
$(Q)[ -d .git/. ] || { echo "## Fatal: \"make $@\" may only be used inside a Git repository."; exit 1; }
|
||||||
|
|
@ -1332,7 +1308,6 @@ range:
|
||||||
echo "[ $$index/$$count ] $$commit #############################"; \
|
echo "[ $$index/$$count ] $$commit #############################"; \
|
||||||
git checkout -q $$commit || die 1; \
|
git checkout -q $$commit || die 1; \
|
||||||
$(MAKE) all || die 1; \
|
$(MAKE) all || die 1; \
|
||||||
[ -z "$(TEST_CMD)" ] || $(TEST_CMD) || die 1; \
|
|
||||||
index=$$((index + 1)); \
|
index=$$((index + 1)); \
|
||||||
done; \
|
done; \
|
||||||
echo;echo "Done! $${count} commit(s) built successfully for RANGE $${RANGE}" ; \
|
echo;echo "Done! $${count} commit(s) built successfully for RANGE $${RANGE}" ; \
|
||||||
|
|
|
||||||
|
|
@ -2,6 +2,7 @@
|
||||||
|
|
||||||
[](https://github.com/haproxy/haproxy/actions/workflows/musl.yml)
|
[](https://github.com/haproxy/haproxy/actions/workflows/musl.yml)
|
||||||
[](https://github.com/haproxy/haproxy/actions/workflows/aws-lc.yml)
|
[](https://github.com/haproxy/haproxy/actions/workflows/aws-lc.yml)
|
||||||
|
[](https://github.com/haproxy/haproxy/actions/workflows/openssl-nodeprecated.yml)
|
||||||
[](https://github.com/haproxy/haproxy/actions/workflows/illumos.yml)
|
[](https://github.com/haproxy/haproxy/actions/workflows/illumos.yml)
|
||||||
[](https://github.com/haproxy/haproxy/actions/workflows/netbsd.yml)
|
[](https://github.com/haproxy/haproxy/actions/workflows/netbsd.yml)
|
||||||
[](https://cirrus-ci.com/github/haproxy/haproxy/)
|
[](https://cirrus-ci.com/github/haproxy/haproxy/)
|
||||||
|
|
|
||||||
2
VERDATE
2
VERDATE
|
|
@ -1,2 +1,2 @@
|
||||||
$Format:%ci$
|
$Format:%ci$
|
||||||
2026/02/19
|
2025/08/20
|
||||||
|
|
|
||||||
2
VERSION
2
VERSION
|
|
@ -1 +1 @@
|
||||||
3.4-dev5
|
3.3-dev7
|
||||||
|
|
|
||||||
|
|
@ -31,7 +31,6 @@ static struct {
|
||||||
da_atlas_t atlas;
|
da_atlas_t atlas;
|
||||||
da_evidence_id_t useragentid;
|
da_evidence_id_t useragentid;
|
||||||
da_severity_t loglevel;
|
da_severity_t loglevel;
|
||||||
size_t maxhdrlen;
|
|
||||||
char separator;
|
char separator;
|
||||||
unsigned char daset:1;
|
unsigned char daset:1;
|
||||||
} global_deviceatlas = {
|
} global_deviceatlas = {
|
||||||
|
|
@ -43,7 +42,6 @@ static struct {
|
||||||
.atlasmap = NULL,
|
.atlasmap = NULL,
|
||||||
.atlasfd = -1,
|
.atlasfd = -1,
|
||||||
.useragentid = 0,
|
.useragentid = 0,
|
||||||
.maxhdrlen = 0,
|
|
||||||
.daset = 0,
|
.daset = 0,
|
||||||
.separator = '|',
|
.separator = '|',
|
||||||
};
|
};
|
||||||
|
|
@ -59,10 +57,6 @@ static int da_json_file(char **args, int section_type, struct proxy *curpx,
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
global_deviceatlas.jsonpath = strdup(args[1]);
|
global_deviceatlas.jsonpath = strdup(args[1]);
|
||||||
if (unlikely(global_deviceatlas.jsonpath == NULL)) {
|
|
||||||
memprintf(err, "deviceatlas json file : out of memory.\n");
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -79,7 +73,6 @@ static int da_log_level(char **args, int section_type, struct proxy *curpx,
|
||||||
loglevel = atol(args[1]);
|
loglevel = atol(args[1]);
|
||||||
if (loglevel < 0 || loglevel > 3) {
|
if (loglevel < 0 || loglevel > 3) {
|
||||||
memprintf(err, "deviceatlas log level : expects a log level between 0 and 3, %s given.\n", args[1]);
|
memprintf(err, "deviceatlas log level : expects a log level between 0 and 3, %s given.\n", args[1]);
|
||||||
return -1;
|
|
||||||
} else {
|
} else {
|
||||||
global_deviceatlas.loglevel = (da_severity_t)loglevel;
|
global_deviceatlas.loglevel = (da_severity_t)loglevel;
|
||||||
}
|
}
|
||||||
|
|
@ -108,10 +101,6 @@ static int da_properties_cookie(char **args, int section_type, struct proxy *cur
|
||||||
return -1;
|
return -1;
|
||||||
} else {
|
} else {
|
||||||
global_deviceatlas.cookiename = strdup(args[1]);
|
global_deviceatlas.cookiename = strdup(args[1]);
|
||||||
if (unlikely(global_deviceatlas.cookiename == NULL)) {
|
|
||||||
memprintf(err, "deviceatlas cookie name : out of memory.\n");
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
global_deviceatlas.cookienamelen = strlen(global_deviceatlas.cookiename);
|
global_deviceatlas.cookienamelen = strlen(global_deviceatlas.cookiename);
|
||||||
return 0;
|
return 0;
|
||||||
|
|
@ -130,7 +119,6 @@ static int da_cache_size(char **args, int section_type, struct proxy *curpx,
|
||||||
cachesize = atol(args[1]);
|
cachesize = atol(args[1]);
|
||||||
if (cachesize < 0 || cachesize > DA_CACHE_MAX) {
|
if (cachesize < 0 || cachesize > DA_CACHE_MAX) {
|
||||||
memprintf(err, "deviceatlas cache size : expects a cache size between 0 and %d, %s given.\n", DA_CACHE_MAX, args[1]);
|
memprintf(err, "deviceatlas cache size : expects a cache size between 0 and %d, %s given.\n", DA_CACHE_MAX, args[1]);
|
||||||
return -1;
|
|
||||||
} else {
|
} else {
|
||||||
#ifdef APINOCACHE
|
#ifdef APINOCACHE
|
||||||
fprintf(stdout, "deviceatlas cache size : no-op, its support is disabled.\n");
|
fprintf(stdout, "deviceatlas cache size : no-op, its support is disabled.\n");
|
||||||
|
|
@ -177,7 +165,7 @@ static int init_deviceatlas(void)
|
||||||
da_status_t status;
|
da_status_t status;
|
||||||
|
|
||||||
jsonp = fopen(global_deviceatlas.jsonpath, "r");
|
jsonp = fopen(global_deviceatlas.jsonpath, "r");
|
||||||
if (unlikely(jsonp == 0)) {
|
if (jsonp == 0) {
|
||||||
ha_alert("deviceatlas : '%s' json file has invalid path or is not readable.\n",
|
ha_alert("deviceatlas : '%s' json file has invalid path or is not readable.\n",
|
||||||
global_deviceatlas.jsonpath);
|
global_deviceatlas.jsonpath);
|
||||||
err_code |= ERR_ALERT | ERR_FATAL;
|
err_code |= ERR_ALERT | ERR_FATAL;
|
||||||
|
|
@ -189,11 +177,9 @@ static int init_deviceatlas(void)
|
||||||
status = da_atlas_compile(jsonp, da_haproxy_read, da_haproxy_seek,
|
status = da_atlas_compile(jsonp, da_haproxy_read, da_haproxy_seek,
|
||||||
&global_deviceatlas.atlasimgptr, &atlasimglen);
|
&global_deviceatlas.atlasimgptr, &atlasimglen);
|
||||||
fclose(jsonp);
|
fclose(jsonp);
|
||||||
if (unlikely(status != DA_OK)) {
|
if (status != DA_OK) {
|
||||||
ha_alert("deviceatlas : '%s' json file is invalid.\n",
|
ha_alert("deviceatlas : '%s' json file is invalid.\n",
|
||||||
global_deviceatlas.jsonpath);
|
global_deviceatlas.jsonpath);
|
||||||
free(global_deviceatlas.atlasimgptr);
|
|
||||||
da_fini();
|
|
||||||
err_code |= ERR_ALERT | ERR_FATAL;
|
err_code |= ERR_ALERT | ERR_FATAL;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
@ -201,10 +187,8 @@ static int init_deviceatlas(void)
|
||||||
status = da_atlas_open(&global_deviceatlas.atlas, extraprops,
|
status = da_atlas_open(&global_deviceatlas.atlas, extraprops,
|
||||||
global_deviceatlas.atlasimgptr, atlasimglen);
|
global_deviceatlas.atlasimgptr, atlasimglen);
|
||||||
|
|
||||||
if (unlikely(status != DA_OK)) {
|
if (status != DA_OK) {
|
||||||
ha_alert("deviceatlas : data could not be compiled.\n");
|
ha_alert("deviceatlas : data could not be compiled.\n");
|
||||||
free(global_deviceatlas.atlasimgptr);
|
|
||||||
da_fini();
|
|
||||||
err_code |= ERR_ALERT | ERR_FATAL;
|
err_code |= ERR_ALERT | ERR_FATAL;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
@ -213,28 +197,11 @@ static int init_deviceatlas(void)
|
||||||
|
|
||||||
if (global_deviceatlas.cookiename == 0) {
|
if (global_deviceatlas.cookiename == 0) {
|
||||||
global_deviceatlas.cookiename = strdup(DA_COOKIENAME_DEFAULT);
|
global_deviceatlas.cookiename = strdup(DA_COOKIENAME_DEFAULT);
|
||||||
if (unlikely(global_deviceatlas.cookiename == NULL)) {
|
|
||||||
ha_alert("deviceatlas : out of memory.\n");
|
|
||||||
da_atlas_close(&global_deviceatlas.atlas);
|
|
||||||
free(global_deviceatlas.atlasimgptr);
|
|
||||||
da_fini();
|
|
||||||
err_code |= ERR_ALERT | ERR_FATAL;
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
global_deviceatlas.cookienamelen = strlen(global_deviceatlas.cookiename);
|
global_deviceatlas.cookienamelen = strlen(global_deviceatlas.cookiename);
|
||||||
}
|
}
|
||||||
|
|
||||||
global_deviceatlas.useragentid = da_atlas_header_evidence_id(&global_deviceatlas.atlas,
|
global_deviceatlas.useragentid = da_atlas_header_evidence_id(&global_deviceatlas.atlas,
|
||||||
"user-agent");
|
"user-agent");
|
||||||
{
|
|
||||||
size_t hi;
|
|
||||||
global_deviceatlas.maxhdrlen = 16;
|
|
||||||
for (hi = 0; hi < global_deviceatlas.atlas.header_evidence_count; hi++) {
|
|
||||||
size_t nl = strlen(global_deviceatlas.atlas.header_priorities[hi].name);
|
|
||||||
if (nl > global_deviceatlas.maxhdrlen)
|
|
||||||
global_deviceatlas.maxhdrlen = nl;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ((global_deviceatlas.atlasfd = shm_open(ATLASMAPNM, O_RDWR, 0660)) != -1) {
|
if ((global_deviceatlas.atlasfd = shm_open(ATLASMAPNM, O_RDWR, 0660)) != -1) {
|
||||||
global_deviceatlas.atlasmap = mmap(NULL, ATLASTOKSZ, PROT_READ | PROT_WRITE, MAP_SHARED, global_deviceatlas.atlasfd, 0);
|
global_deviceatlas.atlasmap = mmap(NULL, ATLASTOKSZ, PROT_READ | PROT_WRITE, MAP_SHARED, global_deviceatlas.atlasfd, 0);
|
||||||
if (global_deviceatlas.atlasmap == MAP_FAILED) {
|
if (global_deviceatlas.atlasmap == MAP_FAILED) {
|
||||||
|
|
@ -264,13 +231,15 @@ static void deinit_deviceatlas(void)
|
||||||
free(global_deviceatlas.cookiename);
|
free(global_deviceatlas.cookiename);
|
||||||
da_atlas_close(&global_deviceatlas.atlas);
|
da_atlas_close(&global_deviceatlas.atlas);
|
||||||
free(global_deviceatlas.atlasimgptr);
|
free(global_deviceatlas.atlasimgptr);
|
||||||
da_fini();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (global_deviceatlas.atlasfd != -1) {
|
if (global_deviceatlas.atlasfd != -1) {
|
||||||
munmap(global_deviceatlas.atlasmap, ATLASTOKSZ);
|
munmap(global_deviceatlas.atlasmap, ATLASTOKSZ);
|
||||||
close(global_deviceatlas.atlasfd);
|
close(global_deviceatlas.atlasfd);
|
||||||
|
shm_unlink(ATLASMAPNM);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
da_fini();
|
||||||
}
|
}
|
||||||
|
|
||||||
static void da_haproxy_checkinst(void)
|
static void da_haproxy_checkinst(void)
|
||||||
|
|
@ -289,10 +258,6 @@ static void da_haproxy_checkinst(void)
|
||||||
da_property_decl_t extraprops[1] = {{NULL, 0}};
|
da_property_decl_t extraprops[1] = {{NULL, 0}};
|
||||||
#ifdef USE_THREAD
|
#ifdef USE_THREAD
|
||||||
HA_SPIN_LOCK(OTHER_LOCK, &dadwsch_lock);
|
HA_SPIN_LOCK(OTHER_LOCK, &dadwsch_lock);
|
||||||
if (base[0] == 0) {
|
|
||||||
HA_SPIN_UNLOCK(OTHER_LOCK, &dadwsch_lock);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
#endif
|
#endif
|
||||||
strlcpy2(atlasp, base + sizeof(char), sizeof(atlasp));
|
strlcpy2(atlasp, base + sizeof(char), sizeof(atlasp));
|
||||||
jsonp = fopen(atlasp, "r");
|
jsonp = fopen(atlasp, "r");
|
||||||
|
|
@ -310,20 +275,10 @@ static void da_haproxy_checkinst(void)
|
||||||
fclose(jsonp);
|
fclose(jsonp);
|
||||||
if (status == DA_OK) {
|
if (status == DA_OK) {
|
||||||
if (da_atlas_open(&inst, extraprops, cnew, atlassz) == DA_OK) {
|
if (da_atlas_open(&inst, extraprops, cnew, atlassz) == DA_OK) {
|
||||||
inst.config.cache_size = global_deviceatlas.cachesize;
|
|
||||||
da_atlas_close(&global_deviceatlas.atlas);
|
da_atlas_close(&global_deviceatlas.atlas);
|
||||||
free(global_deviceatlas.atlasimgptr);
|
free(global_deviceatlas.atlasimgptr);
|
||||||
global_deviceatlas.atlasimgptr = cnew;
|
global_deviceatlas.atlasimgptr = cnew;
|
||||||
global_deviceatlas.atlas = inst;
|
global_deviceatlas.atlas = inst;
|
||||||
{
|
|
||||||
size_t hi;
|
|
||||||
global_deviceatlas.maxhdrlen = 16;
|
|
||||||
for (hi = 0; hi < inst.header_evidence_count; hi++) {
|
|
||||||
size_t nl = strlen(inst.header_priorities[hi].name);
|
|
||||||
if (nl > global_deviceatlas.maxhdrlen)
|
|
||||||
global_deviceatlas.maxhdrlen = nl;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
base[0] = 0;
|
base[0] = 0;
|
||||||
ha_notice("deviceatlas : new instance, data file date `%s`.\n",
|
ha_notice("deviceatlas : new instance, data file date `%s`.\n",
|
||||||
da_getdatacreationiso8601(&global_deviceatlas.atlas));
|
da_getdatacreationiso8601(&global_deviceatlas.atlas));
|
||||||
|
|
@ -331,8 +286,6 @@ static void da_haproxy_checkinst(void)
|
||||||
ha_alert("deviceatlas : instance update failed.\n");
|
ha_alert("deviceatlas : instance update failed.\n");
|
||||||
free(cnew);
|
free(cnew);
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
free(cnew);
|
|
||||||
}
|
}
|
||||||
#ifdef USE_THREAD
|
#ifdef USE_THREAD
|
||||||
HA_SPIN_UNLOCK(OTHER_LOCK, &dadwsch_lock);
|
HA_SPIN_UNLOCK(OTHER_LOCK, &dadwsch_lock);
|
||||||
|
|
@ -344,7 +297,7 @@ static void da_haproxy_checkinst(void)
|
||||||
static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_t *devinfo)
|
static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_t *devinfo)
|
||||||
{
|
{
|
||||||
struct buffer *tmp;
|
struct buffer *tmp;
|
||||||
da_propid_t prop;
|
da_propid_t prop, *pprop;
|
||||||
da_status_t status;
|
da_status_t status;
|
||||||
da_type_t proptype;
|
da_type_t proptype;
|
||||||
const char *propname;
|
const char *propname;
|
||||||
|
|
@ -364,15 +317,13 @@ static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_
|
||||||
chunk_appendf(tmp, "%c", global_deviceatlas.separator);
|
chunk_appendf(tmp, "%c", global_deviceatlas.separator);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if (unlikely(da_atlas_getproptype(&global_deviceatlas.atlas, prop, &proptype) != DA_OK)) {
|
pprop = ∝
|
||||||
chunk_appendf(tmp, "%c", global_deviceatlas.separator);
|
da_atlas_getproptype(&global_deviceatlas.atlas, *pprop, &proptype);
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
switch (proptype) {
|
switch (proptype) {
|
||||||
case DA_TYPE_BOOLEAN: {
|
case DA_TYPE_BOOLEAN: {
|
||||||
bool val;
|
bool val;
|
||||||
status = da_getpropboolean(devinfo, prop, &val);
|
status = da_getpropboolean(devinfo, *pprop, &val);
|
||||||
if (status == DA_OK) {
|
if (status == DA_OK) {
|
||||||
chunk_appendf(tmp, "%d", val);
|
chunk_appendf(tmp, "%d", val);
|
||||||
}
|
}
|
||||||
|
|
@ -381,7 +332,7 @@ static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_
|
||||||
case DA_TYPE_INTEGER:
|
case DA_TYPE_INTEGER:
|
||||||
case DA_TYPE_NUMBER: {
|
case DA_TYPE_NUMBER: {
|
||||||
long val;
|
long val;
|
||||||
status = da_getpropinteger(devinfo, prop, &val);
|
status = da_getpropinteger(devinfo, *pprop, &val);
|
||||||
if (status == DA_OK) {
|
if (status == DA_OK) {
|
||||||
chunk_appendf(tmp, "%ld", val);
|
chunk_appendf(tmp, "%ld", val);
|
||||||
}
|
}
|
||||||
|
|
@ -389,7 +340,7 @@ static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_
|
||||||
}
|
}
|
||||||
case DA_TYPE_STRING: {
|
case DA_TYPE_STRING: {
|
||||||
const char *val;
|
const char *val;
|
||||||
status = da_getpropstring(devinfo, prop, &val);
|
status = da_getpropstring(devinfo, *pprop, &val);
|
||||||
if (status == DA_OK) {
|
if (status == DA_OK) {
|
||||||
chunk_appendf(tmp, "%s", val);
|
chunk_appendf(tmp, "%s", val);
|
||||||
}
|
}
|
||||||
|
|
@ -420,26 +371,29 @@ static int da_haproxy_conv(const struct arg *args, struct sample *smp, void *pri
|
||||||
{
|
{
|
||||||
da_deviceinfo_t devinfo;
|
da_deviceinfo_t devinfo;
|
||||||
da_status_t status;
|
da_status_t status;
|
||||||
char useragentbuf[1024];
|
const char *useragent;
|
||||||
|
char useragentbuf[1024] = { 0 };
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
if (unlikely(global_deviceatlas.daset == 0) || smp->data.u.str.data == 0) {
|
if (global_deviceatlas.daset == 0 || smp->data.u.str.data == 0) {
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
da_haproxy_checkinst();
|
da_haproxy_checkinst();
|
||||||
|
|
||||||
i = smp->data.u.str.data > sizeof(useragentbuf) - 1 ? sizeof(useragentbuf) - 1 : smp->data.u.str.data;
|
i = smp->data.u.str.data > sizeof(useragentbuf) ? sizeof(useragentbuf) : smp->data.u.str.data;
|
||||||
memcpy(useragentbuf, smp->data.u.str.area, i);
|
memcpy(useragentbuf, smp->data.u.str.area, i - 1);
|
||||||
useragentbuf[i] = 0;
|
useragentbuf[i - 1] = 0;
|
||||||
|
|
||||||
|
useragent = (const char *)useragentbuf;
|
||||||
|
|
||||||
status = da_search(&global_deviceatlas.atlas, &devinfo,
|
status = da_search(&global_deviceatlas.atlas, &devinfo,
|
||||||
global_deviceatlas.useragentid, useragentbuf, 0);
|
global_deviceatlas.useragentid, useragent, 0);
|
||||||
|
|
||||||
return status != DA_OK ? 0 : da_haproxy(args, smp, &devinfo);
|
return status != DA_OK ? 0 : da_haproxy(args, smp, &devinfo);
|
||||||
}
|
}
|
||||||
|
|
||||||
#define DA_MAX_HEADERS 32
|
#define DA_MAX_HEADERS 24
|
||||||
|
|
||||||
static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const char *kw, void *private)
|
static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const char *kw, void *private)
|
||||||
{
|
{
|
||||||
|
|
@ -449,10 +403,10 @@ static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const ch
|
||||||
struct channel *chn;
|
struct channel *chn;
|
||||||
struct htx *htx;
|
struct htx *htx;
|
||||||
struct htx_blk *blk;
|
struct htx_blk *blk;
|
||||||
char vbuf[DA_MAX_HEADERS][1024];
|
char vbuf[DA_MAX_HEADERS][1024] = {{ 0 }};
|
||||||
int i, nbh = 0;
|
int i, nbh = 0;
|
||||||
|
|
||||||
if (unlikely(global_deviceatlas.daset == 0)) {
|
if (global_deviceatlas.daset == 0) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -460,17 +414,18 @@ static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const ch
|
||||||
|
|
||||||
chn = (smp->strm ? &smp->strm->req : NULL);
|
chn = (smp->strm ? &smp->strm->req : NULL);
|
||||||
htx = smp_prefetch_htx(smp, chn, NULL, 1);
|
htx = smp_prefetch_htx(smp, chn, NULL, 1);
|
||||||
if (unlikely(!htx))
|
if (!htx)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
i = 0;
|
||||||
for (blk = htx_get_first_blk(htx); nbh < DA_MAX_HEADERS && blk; blk = htx_get_next_blk(htx, blk)) {
|
for (blk = htx_get_first_blk(htx); nbh < DA_MAX_HEADERS && blk; blk = htx_get_next_blk(htx, blk)) {
|
||||||
size_t vlen;
|
size_t vlen;
|
||||||
char *pval;
|
char *pval;
|
||||||
da_evidence_id_t evid;
|
da_evidence_id_t evid;
|
||||||
enum htx_blk_type type;
|
enum htx_blk_type type;
|
||||||
struct ist n, v;
|
struct ist n, v;
|
||||||
char hbuf[64];
|
char hbuf[24] = { 0 };
|
||||||
char tval[1024];
|
char tval[1024] = { 0 };
|
||||||
|
|
||||||
type = htx_get_blk_type(blk);
|
type = htx_get_blk_type(blk);
|
||||||
|
|
||||||
|
|
@ -483,18 +438,20 @@ static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const ch
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (n.len > global_deviceatlas.maxhdrlen || n.len >= sizeof(hbuf)) {
|
/* The HTTP headers used by the DeviceAtlas API are not longer */
|
||||||
|
if (n.len >= sizeof(hbuf)) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
memcpy(hbuf, n.ptr, n.len);
|
memcpy(hbuf, n.ptr, n.len);
|
||||||
hbuf[n.len] = 0;
|
hbuf[n.len] = 0;
|
||||||
|
pval = v.ptr;
|
||||||
|
vlen = v.len;
|
||||||
evid = -1;
|
evid = -1;
|
||||||
i = v.len > sizeof(tval) - 1 ? sizeof(tval) - 1 : v.len;
|
i = v.len > sizeof(tval) - 1 ? sizeof(tval) - 1 : v.len;
|
||||||
memcpy(tval, v.ptr, i);
|
memcpy(tval, v.ptr, i);
|
||||||
tval[i] = 0;
|
tval[i] = 0;
|
||||||
pval = tval;
|
pval = tval;
|
||||||
vlen = i;
|
|
||||||
|
|
||||||
if (strcasecmp(hbuf, "Accept-Language") == 0) {
|
if (strcasecmp(hbuf, "Accept-Language") == 0) {
|
||||||
evid = da_atlas_accept_language_evidence_id(&global_deviceatlas.atlas);
|
evid = da_atlas_accept_language_evidence_id(&global_deviceatlas.atlas);
|
||||||
|
|
@ -512,7 +469,7 @@ static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const ch
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
vlen = pl;
|
vlen -= global_deviceatlas.cookienamelen - 1;
|
||||||
pval = p;
|
pval = p;
|
||||||
evid = da_atlas_clientprop_evidence_id(&global_deviceatlas.atlas);
|
evid = da_atlas_clientprop_evidence_id(&global_deviceatlas.atlas);
|
||||||
} else {
|
} else {
|
||||||
|
|
|
||||||
|
|
@ -141,11 +141,6 @@ enum {
|
||||||
DA_INITIAL_MEMORY_ESTIMATE = 1024 * 1024 * 14
|
DA_INITIAL_MEMORY_ESTIMATE = 1024 * 1024 * 14
|
||||||
};
|
};
|
||||||
|
|
||||||
struct header_evidence_entry {
|
|
||||||
const char *name;
|
|
||||||
da_evidence_id_t id;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct da_config {
|
struct da_config {
|
||||||
unsigned int cache_size;
|
unsigned int cache_size;
|
||||||
unsigned int __reserved[15]; /* enough reserved keywords for future use */
|
unsigned int __reserved[15]; /* enough reserved keywords for future use */
|
||||||
|
|
|
||||||
|
|
@ -36,7 +36,6 @@
|
||||||
#include <haproxy/stats.h>
|
#include <haproxy/stats.h>
|
||||||
#include <haproxy/stconn.h>
|
#include <haproxy/stconn.h>
|
||||||
#include <haproxy/stream.h>
|
#include <haproxy/stream.h>
|
||||||
#include <haproxy/stress.h>
|
|
||||||
#include <haproxy/task.h>
|
#include <haproxy/task.h>
|
||||||
#include <haproxy/tools.h>
|
#include <haproxy/tools.h>
|
||||||
#include <haproxy/version.h>
|
#include <haproxy/version.h>
|
||||||
|
|
@ -83,8 +82,6 @@ struct promex_ctx {
|
||||||
unsigned field_num; /* current field number (ST_I_PX_* etc) */
|
unsigned field_num; /* current field number (ST_I_PX_* etc) */
|
||||||
unsigned mod_field_num; /* first field number of the current module (ST_I_PX_* etc) */
|
unsigned mod_field_num; /* first field number of the current module (ST_I_PX_* etc) */
|
||||||
int obj_state; /* current state among PROMEX_{FRONT|BACK|SRV|LI}_STATE_* */
|
int obj_state; /* current state among PROMEX_{FRONT|BACK|SRV|LI}_STATE_* */
|
||||||
struct watcher px_watch; /* watcher to automatically update next pointer */
|
|
||||||
struct watcher srv_watch; /* watcher to automatically update next pointer */
|
|
||||||
struct list modules; /* list of promex modules to export */
|
struct list modules; /* list of promex modules to export */
|
||||||
struct eb_root filters; /* list of filters to apply on metrics name */
|
struct eb_root filters; /* list of filters to apply on metrics name */
|
||||||
};
|
};
|
||||||
|
|
@ -349,10 +346,6 @@ static int promex_dump_ts(struct appctx *appctx, struct ist prefix,
|
||||||
istcat(&n, prefix, PROMEX_MAX_NAME_LEN);
|
istcat(&n, prefix, PROMEX_MAX_NAME_LEN);
|
||||||
istcat(&n, name, PROMEX_MAX_NAME_LEN);
|
istcat(&n, name, PROMEX_MAX_NAME_LEN);
|
||||||
|
|
||||||
/* In stress mode, force yielding on each metric. */
|
|
||||||
if (STRESS_RUN1(istlen(*out), 0))
|
|
||||||
goto full;
|
|
||||||
|
|
||||||
if ((ctx->flags & PROMEX_FL_METRIC_HDR) &&
|
if ((ctx->flags & PROMEX_FL_METRIC_HDR) &&
|
||||||
!promex_dump_ts_header(n, desc, type, out, max))
|
!promex_dump_ts_header(n, desc, type, out, max))
|
||||||
goto full;
|
goto full;
|
||||||
|
|
@ -632,6 +625,8 @@ static int promex_dump_front_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
}
|
}
|
||||||
|
|
||||||
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
|
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
|
||||||
|
void *counters;
|
||||||
|
|
||||||
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_FE))
|
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_FE))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
|
@ -668,7 +663,8 @@ static int promex_dump_front_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_FE))
|
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_FE))
|
||||||
goto next_px2;
|
goto next_px2;
|
||||||
|
|
||||||
if (!mod->fill_stats(mod, px->extra_counters_fe, stats + ctx->field_num, &ctx->mod_field_num))
|
counters = EXTRA_COUNTERS_GET(px->extra_counters_fe, mod);
|
||||||
|
if (!mod->fill_stats(counters, stats + ctx->field_num, &ctx->mod_field_num))
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
val = stats[ctx->field_num + ctx->mod_field_num];
|
val = stats[ctx->field_num + ctx->mod_field_num];
|
||||||
|
|
@ -820,6 +816,8 @@ static int promex_dump_listener_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
}
|
}
|
||||||
|
|
||||||
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
|
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
|
||||||
|
void *counters;
|
||||||
|
|
||||||
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_LI))
|
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_LI))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
|
@ -865,7 +863,8 @@ static int promex_dump_listener_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
labels[lb_idx+1].name = ist("mod");
|
labels[lb_idx+1].name = ist("mod");
|
||||||
labels[lb_idx+1].value = ist2(mod->name, strlen(mod->name));
|
labels[lb_idx+1].value = ist2(mod->name, strlen(mod->name));
|
||||||
|
|
||||||
if (!mod->fill_stats(mod, li->extra_counters, stats + ctx->field_num, &ctx->mod_field_num))
|
counters = EXTRA_COUNTERS_GET(li->extra_counters, mod);
|
||||||
|
if (!mod->fill_stats(counters, stats + ctx->field_num, &ctx->mod_field_num))
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
val = stats[ctx->field_num + ctx->mod_field_num];
|
val = stats[ctx->field_num + ctx->mod_field_num];
|
||||||
|
|
@ -941,6 +940,9 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
if (promex_filter_metric(appctx, prefix, name))
|
if (promex_filter_metric(appctx, prefix, name))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
if (!px)
|
||||||
|
px = proxies_list;
|
||||||
|
|
||||||
while (px) {
|
while (px) {
|
||||||
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
||||||
unsigned int srv_state_count[PROMEX_SRV_STATE_COUNT] = { 0 };
|
unsigned int srv_state_count[PROMEX_SRV_STATE_COUNT] = { 0 };
|
||||||
|
|
@ -1095,16 +1097,9 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
&val, labels, &out, max))
|
&val, labels, &out, max))
|
||||||
goto full;
|
goto full;
|
||||||
next_px:
|
next_px:
|
||||||
px = watcher_next(&ctx->px_watch, px->next);
|
px = px->next;
|
||||||
}
|
}
|
||||||
watcher_detach(&ctx->px_watch);
|
|
||||||
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
||||||
|
|
||||||
/* Prepare a new iteration for the next stat column.
|
|
||||||
* Update ctx.p[0] via watcher.
|
|
||||||
*/
|
|
||||||
watcher_attach(&ctx->px_watch, proxies_list);
|
|
||||||
px = proxies_list;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Skip extra counters */
|
/* Skip extra counters */
|
||||||
|
|
@ -1117,6 +1112,8 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
}
|
}
|
||||||
|
|
||||||
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
|
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
|
||||||
|
void *counters;
|
||||||
|
|
||||||
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_BE))
|
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_BE))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
|
@ -1127,6 +1124,9 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
if (promex_filter_metric(appctx, prefix, name))
|
if (promex_filter_metric(appctx, prefix, name))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
if (!px)
|
||||||
|
px = proxies_list;
|
||||||
|
|
||||||
while (px) {
|
while (px) {
|
||||||
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
||||||
struct promex_metric metric;
|
struct promex_metric metric;
|
||||||
|
|
@ -1150,7 +1150,8 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
|
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
|
||||||
goto next_px2;
|
goto next_px2;
|
||||||
|
|
||||||
if (!mod->fill_stats(mod, px->extra_counters_be, stats + ctx->field_num, &ctx->mod_field_num))
|
counters = EXTRA_COUNTERS_GET(px->extra_counters_be, mod);
|
||||||
|
if (!mod->fill_stats(counters, stats + ctx->field_num, &ctx->mod_field_num))
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
val = stats[ctx->field_num + ctx->mod_field_num];
|
val = stats[ctx->field_num + ctx->mod_field_num];
|
||||||
|
|
@ -1161,39 +1162,25 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
goto full;
|
goto full;
|
||||||
|
|
||||||
next_px2:
|
next_px2:
|
||||||
px = watcher_next(&ctx->px_watch, px->next);
|
px = px->next;
|
||||||
}
|
}
|
||||||
watcher_detach(&ctx->px_watch);
|
|
||||||
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
||||||
|
|
||||||
/* Prepare a new iteration for the next stat column.
|
|
||||||
* Update ctx.p[0] via watcher.
|
|
||||||
*/
|
|
||||||
watcher_attach(&ctx->px_watch, proxies_list);
|
|
||||||
px = proxies_list;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx->field_num += mod->stats_count;
|
ctx->field_num += mod->stats_count;
|
||||||
ctx->mod_field_num = 0;
|
ctx->mod_field_num = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
end:
|
px = NULL;
|
||||||
if (ret) {
|
|
||||||
watcher_detach(&ctx->px_watch);
|
|
||||||
mod = NULL;
|
mod = NULL;
|
||||||
}
|
|
||||||
|
|
||||||
|
end:
|
||||||
if (out.len) {
|
if (out.len) {
|
||||||
if (!htx_add_data_atonce(htx, out)) {
|
if (!htx_add_data_atonce(htx, out))
|
||||||
watcher_detach(&ctx->px_watch);
|
|
||||||
return -1; /* Unexpected and unrecoverable error */
|
return -1; /* Unexpected and unrecoverable error */
|
||||||
}
|
}
|
||||||
}
|
/* Save pointers (0=current proxy, 1=current stats module) of the current context */
|
||||||
|
ctx->p[0] = px;
|
||||||
/* Save pointers of the current context for dump resumption :
|
|
||||||
* 0=current proxy, 1=current stats module
|
|
||||||
* Note that p[0] is already automatically updated via px_watch.
|
|
||||||
*/
|
|
||||||
ctx->p[1] = mod;
|
ctx->p[1] = mod;
|
||||||
return ret;
|
return ret;
|
||||||
full:
|
full:
|
||||||
|
|
@ -1235,6 +1222,9 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
if (promex_filter_metric(appctx, prefix, name))
|
if (promex_filter_metric(appctx, prefix, name))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
if (!px)
|
||||||
|
px = proxies_list;
|
||||||
|
|
||||||
while (px) {
|
while (px) {
|
||||||
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
||||||
enum promex_mt_type type;
|
enum promex_mt_type type;
|
||||||
|
|
@ -1254,12 +1244,15 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
|
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
|
||||||
goto next_px;
|
goto next_px;
|
||||||
|
|
||||||
|
if (!sv)
|
||||||
|
sv = px->srv;
|
||||||
|
|
||||||
while (sv) {
|
while (sv) {
|
||||||
labels[lb_idx].name = ist("server");
|
labels[lb_idx].name = ist("server");
|
||||||
labels[lb_idx].value = ist2(sv->id, strlen(sv->id));
|
labels[lb_idx].value = ist2(sv->id, strlen(sv->id));
|
||||||
|
|
||||||
if (!stats_fill_sv_line(px, sv, 0, stats, ST_I_PX_MAX, &(ctx->field_num)))
|
if (!stats_fill_sv_line(px, sv, 0, stats, ST_I_PX_MAX, &(ctx->field_num)))
|
||||||
goto error;
|
return -1;
|
||||||
|
|
||||||
if ((ctx->flags & PROMEX_FL_NO_MAINT_SRV) && (sv->cur_admin & SRV_ADMF_MAINT))
|
if ((ctx->flags & PROMEX_FL_NO_MAINT_SRV) && (sv->cur_admin & SRV_ADMF_MAINT))
|
||||||
goto next_sv;
|
goto next_sv;
|
||||||
|
|
@ -1404,30 +1397,13 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
&val, labels, &out, max))
|
&val, labels, &out, max))
|
||||||
goto full;
|
goto full;
|
||||||
next_sv:
|
next_sv:
|
||||||
sv = watcher_next(&ctx->srv_watch, sv->next);
|
sv = sv->next;
|
||||||
}
|
}
|
||||||
|
|
||||||
next_px:
|
next_px:
|
||||||
watcher_detach(&ctx->srv_watch);
|
px = px->next;
|
||||||
px = watcher_next(&ctx->px_watch, px->next);
|
|
||||||
if (px) {
|
|
||||||
/* Update ctx.p[1] via watcher. */
|
|
||||||
watcher_attach(&ctx->srv_watch, px->srv);
|
|
||||||
sv = ctx->p[1];
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
watcher_detach(&ctx->px_watch);
|
|
||||||
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
||||||
|
|
||||||
/* Prepare a new iteration for the next stat column.
|
|
||||||
* Update ctx.p[0]/p[1] via px_watch/srv_watch.
|
|
||||||
*/
|
|
||||||
watcher_attach(&ctx->px_watch, proxies_list);
|
|
||||||
px = proxies_list;
|
|
||||||
if (likely(px)) {
|
|
||||||
watcher_attach(&ctx->srv_watch, px->srv);
|
|
||||||
sv = ctx->p[1];
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Skip extra counters */
|
/* Skip extra counters */
|
||||||
|
|
@ -1440,6 +1416,8 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
}
|
}
|
||||||
|
|
||||||
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
|
list_for_each_entry_from(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
|
||||||
|
void *counters;
|
||||||
|
|
||||||
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_SRV))
|
if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_SRV))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
|
@ -1450,6 +1428,9 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
if (promex_filter_metric(appctx, prefix, name))
|
if (promex_filter_metric(appctx, prefix, name))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
if (!px)
|
||||||
|
px = proxies_list;
|
||||||
|
|
||||||
while (px) {
|
while (px) {
|
||||||
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
||||||
struct promex_metric metric;
|
struct promex_metric metric;
|
||||||
|
|
@ -1470,6 +1451,9 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
|
if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
|
||||||
goto next_px2;
|
goto next_px2;
|
||||||
|
|
||||||
|
if (!sv)
|
||||||
|
sv = px->srv;
|
||||||
|
|
||||||
while (sv) {
|
while (sv) {
|
||||||
labels[lb_idx].name = ist("server");
|
labels[lb_idx].name = ist("server");
|
||||||
labels[lb_idx].value = ist2(sv->id, strlen(sv->id));
|
labels[lb_idx].value = ist2(sv->id, strlen(sv->id));
|
||||||
|
|
@ -1481,8 +1465,9 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
goto next_sv2;
|
goto next_sv2;
|
||||||
|
|
||||||
|
|
||||||
if (!mod->fill_stats(mod, sv->extra_counters, stats + ctx->field_num, &ctx->mod_field_num))
|
counters = EXTRA_COUNTERS_GET(sv->extra_counters, mod);
|
||||||
goto error;
|
if (!mod->fill_stats(counters, stats + ctx->field_num, &ctx->mod_field_num))
|
||||||
|
return -1;
|
||||||
|
|
||||||
val = stats[ctx->field_num + ctx->mod_field_num];
|
val = stats[ctx->field_num + ctx->mod_field_num];
|
||||||
metric.type = ((val.type == FN_GAUGE) ? PROMEX_MT_GAUGE : PROMEX_MT_COUNTER);
|
metric.type = ((val.type == FN_GAUGE) ? PROMEX_MT_GAUGE : PROMEX_MT_COUNTER);
|
||||||
|
|
@ -1492,62 +1477,42 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
goto full;
|
goto full;
|
||||||
|
|
||||||
next_sv2:
|
next_sv2:
|
||||||
sv = watcher_next(&ctx->srv_watch, sv->next);
|
sv = sv->next;
|
||||||
}
|
}
|
||||||
|
|
||||||
next_px2:
|
next_px2:
|
||||||
watcher_detach(&ctx->srv_watch);
|
px = px->next;
|
||||||
px = watcher_next(&ctx->px_watch, px->next);
|
|
||||||
if (px) {
|
|
||||||
/* Update ctx.p[1] via watcher. */
|
|
||||||
watcher_attach(&ctx->srv_watch, px->srv);
|
|
||||||
sv = ctx->p[1];
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
watcher_detach(&ctx->px_watch);
|
|
||||||
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
||||||
|
|
||||||
/* Prepare a new iteration for the next stat column.
|
|
||||||
* Update ctx.p[0]/p[1] via px_watch/srv_watch.
|
|
||||||
*/
|
|
||||||
watcher_attach(&ctx->px_watch, proxies_list);
|
|
||||||
px = proxies_list;
|
|
||||||
if (likely(px)) {
|
|
||||||
watcher_attach(&ctx->srv_watch, px->srv);
|
|
||||||
sv = ctx->p[1];
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx->field_num += mod->stats_count;
|
ctx->field_num += mod->stats_count;
|
||||||
ctx->mod_field_num = 0;
|
ctx->mod_field_num = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
end:
|
px = NULL;
|
||||||
if (ret) {
|
sv = NULL;
|
||||||
watcher_detach(&ctx->px_watch);
|
|
||||||
watcher_detach(&ctx->srv_watch);
|
|
||||||
mod = NULL;
|
mod = NULL;
|
||||||
}
|
|
||||||
|
|
||||||
|
end:
|
||||||
if (out.len) {
|
if (out.len) {
|
||||||
if (!htx_add_data_atonce(htx, out))
|
if (!htx_add_data_atonce(htx, out))
|
||||||
return -1; /* Unexpected and unrecoverable error */
|
return -1; /* Unexpected and unrecoverable error */
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Save pointers of the current context for dump resumption :
|
/* Decrement server refcount if it was saved through ctx.p[1]. */
|
||||||
* 0=current proxy, 1=current server, 2=current stats module
|
srv_drop(ctx->p[1]);
|
||||||
* Note that p[0]/p[1] are already automatically updated via px_watch/srv_watch.
|
if (sv)
|
||||||
*/
|
srv_take(sv);
|
||||||
|
|
||||||
|
/* Save pointers (0=current proxy, 1=current server, 2=current stats module) of the current context */
|
||||||
|
ctx->p[0] = px;
|
||||||
|
ctx->p[1] = sv;
|
||||||
ctx->p[2] = mod;
|
ctx->p[2] = mod;
|
||||||
return ret;
|
return ret;
|
||||||
full:
|
full:
|
||||||
ret = 0;
|
ret = 0;
|
||||||
goto end;
|
goto end;
|
||||||
|
|
||||||
error:
|
|
||||||
watcher_detach(&ctx->px_watch);
|
|
||||||
watcher_detach(&ctx->srv_watch);
|
|
||||||
return -1;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Dump metrics of module <mod>. It returns 1 on success, 0 if <out> is full and
|
/* Dump metrics of module <mod>. It returns 1 on success, 0 if <out> is full and
|
||||||
|
|
@ -1768,11 +1733,6 @@ static int promex_dump_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
ctx->field_num = ST_I_PX_PXNAME;
|
ctx->field_num = ST_I_PX_PXNAME;
|
||||||
ctx->mod_field_num = 0;
|
ctx->mod_field_num = 0;
|
||||||
appctx->st1 = PROMEX_DUMPER_BACK;
|
appctx->st1 = PROMEX_DUMPER_BACK;
|
||||||
|
|
||||||
if (ctx->flags & PROMEX_FL_SCOPE_BACK) {
|
|
||||||
/* Update ctx.p[0] via watcher. */
|
|
||||||
watcher_attach(&ctx->px_watch, proxies_list);
|
|
||||||
}
|
|
||||||
__fallthrough;
|
__fallthrough;
|
||||||
|
|
||||||
case PROMEX_DUMPER_BACK:
|
case PROMEX_DUMPER_BACK:
|
||||||
|
|
@ -1790,15 +1750,6 @@ static int promex_dump_metrics(struct appctx *appctx, struct htx *htx)
|
||||||
ctx->field_num = ST_I_PX_PXNAME;
|
ctx->field_num = ST_I_PX_PXNAME;
|
||||||
ctx->mod_field_num = 0;
|
ctx->mod_field_num = 0;
|
||||||
appctx->st1 = PROMEX_DUMPER_SRV;
|
appctx->st1 = PROMEX_DUMPER_SRV;
|
||||||
|
|
||||||
if (ctx->flags & PROMEX_FL_SCOPE_SERVER) {
|
|
||||||
/* Update ctx.p[0] via watcher. */
|
|
||||||
watcher_attach(&ctx->px_watch, proxies_list);
|
|
||||||
if (likely(proxies_list)) {
|
|
||||||
/* Update ctx.p[1] via watcher. */
|
|
||||||
watcher_attach(&ctx->srv_watch, proxies_list->srv);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
__fallthrough;
|
__fallthrough;
|
||||||
|
|
||||||
case PROMEX_DUMPER_SRV:
|
case PROMEX_DUMPER_SRV:
|
||||||
|
|
@ -2076,8 +2027,6 @@ static int promex_appctx_init(struct appctx *appctx)
|
||||||
LIST_INIT(&ctx->modules);
|
LIST_INIT(&ctx->modules);
|
||||||
ctx->filters = EB_ROOT;
|
ctx->filters = EB_ROOT;
|
||||||
appctx->st0 = PROMEX_ST_INIT;
|
appctx->st0 = PROMEX_ST_INIT;
|
||||||
watcher_init(&ctx->px_watch, &ctx->p[0], offsetof(struct proxy, watcher_list));
|
|
||||||
watcher_init(&ctx->srv_watch, &ctx->p[1], offsetof(struct server, watcher_list));
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -2091,14 +2040,11 @@ static void promex_appctx_release(struct appctx *appctx)
|
||||||
struct promex_metric_filter *flt;
|
struct promex_metric_filter *flt;
|
||||||
struct eb32_node *node, *next;
|
struct eb32_node *node, *next;
|
||||||
|
|
||||||
if (appctx->st1 == PROMEX_DUMPER_BACK ||
|
if (appctx->st1 == PROMEX_DUMPER_SRV) {
|
||||||
appctx->st1 == PROMEX_DUMPER_SRV) {
|
struct server *srv = objt_server(ctx->p[1]);
|
||||||
watcher_detach(&ctx->px_watch);
|
srv_drop(srv);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (appctx->st1 == PROMEX_DUMPER_SRV)
|
|
||||||
watcher_detach(&ctx->srv_watch);
|
|
||||||
|
|
||||||
list_for_each_entry_safe(ref, back, &ctx->modules, list) {
|
list_for_each_entry_safe(ref, back, &ctx->modules, list) {
|
||||||
LIST_DELETE(&ref->list);
|
LIST_DELETE(&ref->list);
|
||||||
pool_free(pool_head_promex_mod_ref, ref);
|
pool_free(pool_head_promex_mod_ref, ref);
|
||||||
|
|
@ -2198,7 +2144,7 @@ static void promex_appctx_handle_io(struct appctx *appctx)
|
||||||
|
|
||||||
struct applet promex_applet = {
|
struct applet promex_applet = {
|
||||||
.obj_type = OBJ_TYPE_APPLET,
|
.obj_type = OBJ_TYPE_APPLET,
|
||||||
.flags = APPLET_FL_NEW_API|APPLET_FL_HTX,
|
.flags = APPLET_FL_NEW_API,
|
||||||
.name = "<PROMEX>", /* used for logging */
|
.name = "<PROMEX>", /* used for logging */
|
||||||
.init = promex_appctx_init,
|
.init = promex_appctx_init,
|
||||||
.release = promex_appctx_release,
|
.release = promex_appctx_release,
|
||||||
|
|
|
||||||
|
|
@ -1,235 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
#
|
|
||||||
# Dump certificates from the HAProxy stats or master socket to the filesystem
|
|
||||||
# Experimental script
|
|
||||||
#
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
export BASEPATH=${BASEPATH:-/etc/haproxy}/
|
|
||||||
export SOCKET=${SOCKET:-/var/run/haproxy-master.sock}
|
|
||||||
export DRY_RUN=0
|
|
||||||
export DEBUG=
|
|
||||||
export VERBOSE=
|
|
||||||
export M="@1 "
|
|
||||||
export TMP
|
|
||||||
|
|
||||||
vecho() {
|
|
||||||
|
|
||||||
[ -n "$VERBOSE" ] && echo "$@"
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
read_certificate() {
|
|
||||||
name=$1
|
|
||||||
crt_filename=
|
|
||||||
key_filename=
|
|
||||||
|
|
||||||
OFS=$IFS
|
|
||||||
IFS=":"
|
|
||||||
|
|
||||||
while read -r key value; do
|
|
||||||
case "$key" in
|
|
||||||
"Crt filename")
|
|
||||||
crt_filename="${value# }"
|
|
||||||
key_filename="${value# }"
|
|
||||||
;;
|
|
||||||
"Key filename")
|
|
||||||
key_filename="${value# }"
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done < <(echo "${M}show ssl cert ${name}" | socat "${SOCKET}" -)
|
|
||||||
IFS=$OFS
|
|
||||||
|
|
||||||
if [ -z "$crt_filename" ] || [ -z "$key_filename" ]; then
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# handle fields without a crt-base/key-base
|
|
||||||
[ "${crt_filename:0:1}" != "/" ] && crt_filename="${BASEPATH}${crt_filename}"
|
|
||||||
[ "${key_filename:0:1}" != "/" ] && key_filename="${BASEPATH}${key_filename}"
|
|
||||||
|
|
||||||
vecho "name:$name"
|
|
||||||
vecho "crt:$crt_filename"
|
|
||||||
vecho "key:$key_filename"
|
|
||||||
|
|
||||||
export NAME="$name"
|
|
||||||
export CRT_FILENAME="$crt_filename"
|
|
||||||
export KEY_FILENAME="$key_filename"
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
cmp_certkey() {
|
|
||||||
prev=$1
|
|
||||||
new=$2
|
|
||||||
|
|
||||||
if [ ! -f "$prev" ]; then
|
|
||||||
return 1;
|
|
||||||
fi
|
|
||||||
|
|
||||||
if ! cmp -s <(openssl x509 -in "$prev" -noout -fingerprint -sha256) <(openssl x509 -in "$new" -noout -fingerprint -sha256); then
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
dump_certificate() {
|
|
||||||
name=$1
|
|
||||||
prev_crt=$2
|
|
||||||
prev_key=$3
|
|
||||||
r="tmp.${RANDOM}"
|
|
||||||
d="old.$(date +%s)"
|
|
||||||
new_crt="$TMP/$(basename "$prev_crt").${r}"
|
|
||||||
new_key="$TMP/$(basename "$prev_key").${r}"
|
|
||||||
|
|
||||||
if ! touch "${new_crt}" || ! touch "${new_key}"; then
|
|
||||||
echo "[ALERT] ($$) : can't dump \"$name\", can't create tmp files" >&2
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl pkey >> "${new_key}"
|
|
||||||
# use crl2pkcs7 as a way to dump multiple x509, storeutl could be used in modern versions of openssl
|
|
||||||
echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl crl2pkcs7 -nocrl -certfile /dev/stdin | openssl pkcs7 -print_certs >> "${new_crt}"
|
|
||||||
|
|
||||||
if ! cmp -s <(openssl x509 -in "${new_crt}" -pubkey -noout) <(openssl pkey -in "${new_key}" -pubout); then
|
|
||||||
echo "[ALERT] ($$) : Private key \"${new_key}\" and public key \"${new_crt}\" don't match" >&2
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if cmp_certkey "${prev_crt}" "${new_crt}"; then
|
|
||||||
echo "[NOTICE] ($$) : ${crt_filename} is already up to date" >&2
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# dry run will just return before trying to move the files
|
|
||||||
if [ "${DRY_RUN}" != "0" ]; then
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# move the current certificates to ".old.timestamp"
|
|
||||||
if [ -f "${prev_crt}" ] && [ -f "${prev_key}" ]; then
|
|
||||||
mv "${prev_crt}" "${prev_crt}.${d}"
|
|
||||||
[ "${prev_crt}" != "${prev_key}" ] && mv "${prev_key}" "${prev_key}.${d}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# move the new certificates to old place
|
|
||||||
mv "${new_crt}" "${prev_crt}"
|
|
||||||
[ "${prev_crt}" != "${prev_key}" ] && mv "${new_key}" "${prev_key}"
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
dump_all_certificates() {
|
|
||||||
echo "${M}show ssl cert" | socat "${SOCKET}" - | grep -v '^#' | grep -v '^$' | while read -r line; do
|
|
||||||
export NAME
|
|
||||||
export CRT_FILENAME
|
|
||||||
export KEY_FILENAME
|
|
||||||
|
|
||||||
if read_certificate "$line"; then
|
|
||||||
dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
|
|
||||||
else
|
|
||||||
echo "[WARNING] ($$) : can't dump \"$name\", crt/key filename details not found in \"show ssl cert\"" >&2
|
|
||||||
fi
|
|
||||||
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
usage() {
|
|
||||||
echo "Usage:"
|
|
||||||
echo " $0 [options]* [cert]*"
|
|
||||||
echo ""
|
|
||||||
echo " Dump certificates from the HAProxy stats or master socket to the filesystem"
|
|
||||||
echo " Require socat and openssl"
|
|
||||||
echo " EXPERIMENTAL script, backup your files!"
|
|
||||||
echo " The script will move your previous files to FILE.old.unixtimestamp (ex: foo.com.pem.old.1759044998)"
|
|
||||||
|
|
||||||
echo ""
|
|
||||||
echo "Options:"
|
|
||||||
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${SOCKET})"
|
|
||||||
echo " -s, --socket <path> Use the stats socket at <path>"
|
|
||||||
echo " -p, --path <path> Specifiy a base path for relative files (default: ${BASEPATH})"
|
|
||||||
echo " -n, --dry-run Read certificates on the socket but don't dump them"
|
|
||||||
echo " -d, --debug Debug mode, set -x"
|
|
||||||
echo " -v, --verbose Verbose mode"
|
|
||||||
echo " -h, --help This help"
|
|
||||||
echo " -- End of options"
|
|
||||||
echo ""
|
|
||||||
echo "Examples:"
|
|
||||||
echo " $0 -v -p ${BASEPATH} -S ${SOCKET}"
|
|
||||||
echo " $0 -v -p ${BASEPATH} -S ${SOCKET} bar.com.rsa.pem"
|
|
||||||
echo " $0 -v -p ${BASEPATH} -S ${SOCKET} -- foo.com.ecdsa.pem bar.com.rsa.pem"
|
|
||||||
}
|
|
||||||
|
|
||||||
main() {
|
|
||||||
while [ -n "$1" ]; do
|
|
||||||
case "$1" in
|
|
||||||
-S|--master-socket)
|
|
||||||
SOCKET="$2"
|
|
||||||
M="@1 "
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-s|--socket)
|
|
||||||
SOCKET="$2"
|
|
||||||
M=
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-p|--path)
|
|
||||||
BASEPATH="$2/"
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-n|--dry-run)
|
|
||||||
DRY_RUN=1
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-d|--debug)
|
|
||||||
DEBUG=1
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-v|--verbose)
|
|
||||||
VERBOSE=1
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-h|--help)
|
|
||||||
usage "$@"
|
|
||||||
exit 0
|
|
||||||
;;
|
|
||||||
--)
|
|
||||||
shift
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
-*)
|
|
||||||
echo "[ALERT] ($$) : Unknown option '$1'" >&2
|
|
||||||
usage "$@"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ -n "$DEBUG" ]; then
|
|
||||||
set -x
|
|
||||||
fi
|
|
||||||
|
|
||||||
TMP=${TMP:-$(mktemp -d)}
|
|
||||||
|
|
||||||
if [ -z "$1" ]; then
|
|
||||||
dump_all_certificates
|
|
||||||
else
|
|
||||||
# compute the certificates names at the end of the command
|
|
||||||
while [ -n "$1" ]; do
|
|
||||||
if ! read_certificate "$1"; then
|
|
||||||
echo "[ALERT] ($$) : can't dump \"$1\", crt/key filename details not found in \"show ssl cert\"" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
[ "${DRY_RUN}" = "0" ] && dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
|
|
||||||
shift
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
trap 'rm -rf -- "$TMP"' EXIT
|
|
||||||
main "$@"
|
|
||||||
|
|
@ -1,113 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
export VERBOSE=1
|
|
||||||
export TIMEOUT=90
|
|
||||||
export MASTER_SOCKET=${MASTER_SOCKET:-/var/run/haproxy-master.sock}
|
|
||||||
export RET=
|
|
||||||
|
|
||||||
alert() {
|
|
||||||
if [ "$VERBOSE" -ge "1" ]; then
|
|
||||||
echo "[ALERT] $*" >&2
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
reload() {
|
|
||||||
while read -r line; do
|
|
||||||
|
|
||||||
if [ "$line" = "Success=0" ]; then
|
|
||||||
RET=1
|
|
||||||
elif [ "$line" = "Success=1" ]; then
|
|
||||||
RET=0
|
|
||||||
elif [ "$line" = "Another reload is still in progress." ]; then
|
|
||||||
alert "$line"
|
|
||||||
elif [ "$line" = "--" ]; then
|
|
||||||
continue;
|
|
||||||
else
|
|
||||||
if [ "$RET" = 1 ] && [ "$VERBOSE" = "2" ]; then
|
|
||||||
echo "$line" >&2
|
|
||||||
elif [ "$VERBOSE" = "3" ]; then
|
|
||||||
echo "$line" >&2
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
done < <(echo "reload" | socat -t"${TIMEOUT}" "${MASTER_SOCKET}" -)
|
|
||||||
|
|
||||||
if [ -z "$RET" ]; then
|
|
||||||
alert "Couldn't finish the reload before the timeout (${TIMEOUT})."
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
return "$RET"
|
|
||||||
}
|
|
||||||
|
|
||||||
usage() {
|
|
||||||
echo "Usage:"
|
|
||||||
echo " $0 [options]*"
|
|
||||||
echo ""
|
|
||||||
echo " Trigger a reload from the master socket"
|
|
||||||
echo " Require socat"
|
|
||||||
echo " EXPERIMENTAL script!"
|
|
||||||
echo ""
|
|
||||||
echo "Options:"
|
|
||||||
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${MASTER_SOCKET})"
|
|
||||||
echo " -d, --debug Debug mode, set -x"
|
|
||||||
echo " -t, --timeout Timeout (socat -t) (default: ${TIMEOUT})"
|
|
||||||
echo " -s, --silent Silent mode (no output)"
|
|
||||||
echo " -v, --verbose Verbose output (output from haproxy on failure)"
|
|
||||||
echo " -vv Even more verbose output (output from haproxy on success and failure)"
|
|
||||||
echo " -h, --help This help"
|
|
||||||
echo ""
|
|
||||||
echo "Examples:"
|
|
||||||
echo " $0 -S ${MASTER_SOCKET} -d ${TIMEOUT}"
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
main() {
|
|
||||||
while [ -n "$1" ]; do
|
|
||||||
case "$1" in
|
|
||||||
-S|--master-socket)
|
|
||||||
MASTER_SOCKET="$2"
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-t|--timeout)
|
|
||||||
TIMEOUT="$2"
|
|
||||||
shift 2
|
|
||||||
;;
|
|
||||||
-s|--silent)
|
|
||||||
VERBOSE=0
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-v|--verbose)
|
|
||||||
VERBOSE=2
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-vv|--verbose)
|
|
||||||
VERBOSE=3
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-d|--debug)
|
|
||||||
DEBUG=1
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
-h|--help)
|
|
||||||
usage "$@"
|
|
||||||
exit 0
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "[ALERT] ($$) : Unknown option '$1'" >&2
|
|
||||||
usage "$@"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ -n "$DEBUG" ]; then
|
|
||||||
set -x
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
main "$@"
|
|
||||||
reload
|
|
||||||
|
|
@ -1571,10 +1571,6 @@ void filter_count_srv_status(const char *accept_field, const char *time_field, s
|
||||||
if (!srv_node) {
|
if (!srv_node) {
|
||||||
/* server not yet in the tree, let's create it */
|
/* server not yet in the tree, let's create it */
|
||||||
srv = (void *)calloc(1, sizeof(struct srv_st) + e - b + 1);
|
srv = (void *)calloc(1, sizeof(struct srv_st) + e - b + 1);
|
||||||
if (unlikely(!srv)) {
|
|
||||||
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
srv_node = &srv->node;
|
srv_node = &srv->node;
|
||||||
memcpy(&srv_node->key, b, e - b);
|
memcpy(&srv_node->key, b, e - b);
|
||||||
srv_node->key[e - b] = '\0';
|
srv_node->key[e - b] = '\0';
|
||||||
|
|
@ -1684,10 +1680,6 @@ void filter_count_url(const char *accept_field, const char *time_field, struct t
|
||||||
*/
|
*/
|
||||||
if (unlikely(!ustat))
|
if (unlikely(!ustat))
|
||||||
ustat = calloc(1, sizeof(*ustat));
|
ustat = calloc(1, sizeof(*ustat));
|
||||||
if (unlikely(!ustat)) {
|
|
||||||
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
ustat->nb_err = err;
|
ustat->nb_err = err;
|
||||||
ustat->nb_req = 1;
|
ustat->nb_req = 1;
|
||||||
|
|
|
||||||
|
|
@ -6,9 +6,9 @@ Wants=network-online.target
|
||||||
[Service]
|
[Service]
|
||||||
EnvironmentFile=-/etc/default/haproxy
|
EnvironmentFile=-/etc/default/haproxy
|
||||||
EnvironmentFile=-/etc/sysconfig/haproxy
|
EnvironmentFile=-/etc/sysconfig/haproxy
|
||||||
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "CFGDIR=/etc/haproxy/conf.d" "EXTRAOPTS=-S /run/haproxy-master.sock"
|
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "EXTRAOPTS=-S /run/haproxy-master.sock"
|
||||||
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -p $PIDFILE $EXTRAOPTS
|
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -p $PIDFILE $EXTRAOPTS
|
||||||
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -c $EXTRAOPTS
|
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -c $EXTRAOPTS
|
||||||
ExecReload=/bin/kill -USR2 $MAINPID
|
ExecReload=/bin/kill -USR2 $MAINPID
|
||||||
KillMode=mixed
|
KillMode=mixed
|
||||||
Restart=always
|
Restart=always
|
||||||
|
|
|
||||||
|
|
@ -1,141 +0,0 @@
|
||||||
/*
|
|
||||||
* Find the post-mortem offset from a core dump
|
|
||||||
*
|
|
||||||
* Copyright (C) 2026 Willy Tarreau <w@1wt.eu>
|
|
||||||
*
|
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining
|
|
||||||
* a copy of this software and associated documentation files (the
|
|
||||||
* "Software"), to deal in the Software without restriction, including
|
|
||||||
* without limitation the rights to use, copy, modify, merge, publish,
|
|
||||||
* distribute, sublicense, and/or sell copies of the Software, and to
|
|
||||||
* permit persons to whom the Software is furnished to do so, subject to
|
|
||||||
* the following conditions:
|
|
||||||
*
|
|
||||||
* The above copyright notice and this permission notice shall be
|
|
||||||
* included in all copies or substantial portions of the Software.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
|
||||||
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
||||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
|
||||||
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
|
||||||
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
||||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
||||||
* OTHER DEALINGS IN THE SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Note: builds with no option under glibc, and can be built as a minimal
|
|
||||||
* uploadable static executable using nolibc as well:
|
|
||||||
gcc -o pm-from-core -nostdinc -nostdlib -s -Os -static -fno-ident \
|
|
||||||
-fno-exceptions -fno-asynchronous-unwind-tables -fno-unwind-tables \
|
|
||||||
-Wl,--gc-sections,--orphan-handling=discard,-znoseparate-code \
|
|
||||||
-I /path/to/nolibc-sysroot/include pm-from-core.c
|
|
||||||
*/
|
|
||||||
#define _GNU_SOURCE
|
|
||||||
#include <sys/mman.h>
|
|
||||||
#include <sys/stat.h>
|
|
||||||
#include <elf.h>
|
|
||||||
#include <fcntl.h>
|
|
||||||
#include <stdint.h>
|
|
||||||
#include <stdio.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
#include <string.h>
|
|
||||||
|
|
||||||
#if defined(__GLIBC__)
|
|
||||||
# define my_memmem memmem
|
|
||||||
#else
|
|
||||||
void *my_memmem(const void *haystack, size_t haystacklen,
|
|
||||||
const void *needle, size_t needlelen)
|
|
||||||
{
|
|
||||||
while (haystacklen >= needlelen) {
|
|
||||||
if (!memcmp(haystack, needle, needlelen))
|
|
||||||
return (void*)haystack;
|
|
||||||
haystack++;
|
|
||||||
haystacklen--;
|
|
||||||
}
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define MAGIC "POST-MORTEM STARTS HERE+7654321\0"
|
|
||||||
|
|
||||||
int main(int argc, char **argv)
|
|
||||||
{
|
|
||||||
Elf64_Ehdr *ehdr;
|
|
||||||
Elf64_Phdr *phdr;
|
|
||||||
struct stat st;
|
|
||||||
uint8_t *mem;
|
|
||||||
int i, fd;
|
|
||||||
|
|
||||||
if (argc < 2) {
|
|
||||||
printf("Usage: %s <core_file>\n", argv[0]);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
fd = open(argv[1], O_RDONLY);
|
|
||||||
|
|
||||||
/* Let's just map the core dump as an ELF header */
|
|
||||||
fstat(fd, &st);
|
|
||||||
mem = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
|
|
||||||
if (mem == MAP_FAILED) {
|
|
||||||
perror("mmap()");
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* get the program headers */
|
|
||||||
ehdr = (Elf64_Ehdr *)mem;
|
|
||||||
|
|
||||||
/* check that it's really a core. Should be "\x7fELF" */
|
|
||||||
if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) {
|
|
||||||
fprintf(stderr, "ELF magic not found.\n");
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ehdr->e_ident[EI_CLASS] != ELFCLASS64) {
|
|
||||||
fprintf(stderr, "Only 64-bit ELF supported.\n");
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ehdr->e_type != ET_CORE) {
|
|
||||||
fprintf(stderr, "ELF type %d, not a core dump.\n", ehdr->e_type);
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* OK we can safely go with program headers */
|
|
||||||
phdr = (Elf64_Phdr *)(mem + ehdr->e_phoff);
|
|
||||||
|
|
||||||
for (i = 0; i < ehdr->e_phnum; i++) {
|
|
||||||
uint64_t size = phdr[i].p_filesz;
|
|
||||||
uint64_t offset = phdr[i].p_offset;
|
|
||||||
uint64_t vaddr = phdr[i].p_vaddr;
|
|
||||||
uint64_t found_ofs;
|
|
||||||
uint8_t *found;
|
|
||||||
|
|
||||||
if (phdr[i].p_type != PT_LOAD)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
//printf("Scanning segment %d...\n", ehdr->e_phnum);
|
|
||||||
//printf("\r%-5d: off=%lx va=%lx sz=%lx ", i, (long)offset, (long)vaddr, (long)size);
|
|
||||||
if (!size)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (size >= 1048576) // don't scan large segments
|
|
||||||
continue;
|
|
||||||
|
|
||||||
found = my_memmem(mem + offset, size, MAGIC, sizeof(MAGIC) - 1);
|
|
||||||
if (!found)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
found_ofs = found - (mem + offset);
|
|
||||||
|
|
||||||
printf("Found post-mortem magic in segment %d:\n", i);
|
|
||||||
printf(" Core File Offset: 0x%lx (0x%lx + 0x%lx)\n", offset + found_ofs, offset, found_ofs);
|
|
||||||
printf(" Runtime VAddr: 0x%lx (0x%lx + 0x%lx)\n", vaddr + found_ofs, vaddr, found_ofs);
|
|
||||||
printf(" Segment Size: 0x%lx\n", size);
|
|
||||||
printf("\nIn gdb, copy-paste this line:\n\n pm_init 0x%lx\n\n", vaddr + found_ofs);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
//printf("\r%75s\n", "\r");
|
|
||||||
printf("post-mortem magic not found\n");
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
@ -14,8 +14,8 @@ define pools_dump
|
||||||
set $idx=$idx + 1
|
set $idx=$idx + 1
|
||||||
end
|
end
|
||||||
|
|
||||||
set $mem = (unsigned long)$total * $e->size
|
set $mem = $total * $e->size
|
||||||
printf "list=%#lx pool_head=%p name=%s size=%u alloc=%u used=%u mem=%lu\n", $p, $e, $e->name, $e->size, $total, $used, $mem
|
printf "list=%#lx pool_head=%p name=%s size=%u alloc=%u used=%u mem=%u\n", $p, $e, $e->name, $e->size, $total, $used, $mem
|
||||||
set $p = *(void **)$p
|
set $p = *(void **)$p
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
|
||||||
|
|
@ -59,9 +59,9 @@ struct ring_v2 {
|
||||||
struct ring_v2a {
|
struct ring_v2a {
|
||||||
size_t size; // storage size
|
size_t size; // storage size
|
||||||
size_t rsvd; // header length (used for file-backed maps)
|
size_t rsvd; // header length (used for file-backed maps)
|
||||||
size_t tail ALIGNED(64); // storage tail
|
size_t tail __attribute__((aligned(64))); // storage tail
|
||||||
size_t head ALIGNED(64); // storage head
|
size_t head __attribute__((aligned(64))); // storage head
|
||||||
char area[0] ALIGNED(64); // storage area begins immediately here
|
char area[0] __attribute__((aligned(64))); // storage area begins immediately here
|
||||||
};
|
};
|
||||||
|
|
||||||
/* display the message and exit with the code */
|
/* display the message and exit with the code */
|
||||||
|
|
|
||||||
|
|
@ -1,70 +0,0 @@
|
||||||
BEGININPUT
|
|
||||||
BEGINCONTEXT
|
|
||||||
|
|
||||||
HAProxy's development cycle consists in one development branch, and multiple
|
|
||||||
maintenance branches.
|
|
||||||
|
|
||||||
All the development is made into the development branch exclusively. This
|
|
||||||
includes mostly new features, doc updates, cleanups and or course, fixes.
|
|
||||||
|
|
||||||
The maintenance branches, also called stable branches, never see any
|
|
||||||
development, and only receive ultra-safe fixes for bugs that affect them,
|
|
||||||
that are picked from the development branch.
|
|
||||||
|
|
||||||
Branches are numbered in 0.1 increments. Every 6 months, upon a new major
|
|
||||||
release, the development branch enters maintenance and a new development branch
|
|
||||||
is created with a new, higher version. The current development branch is
|
|
||||||
3.4-dev, and maintenance branches are 3.3 and below.
|
|
||||||
|
|
||||||
Fixes created in the development branch for issues that were introduced in an
|
|
||||||
earlier branch are applied in descending order to each and every version till
|
|
||||||
that branch that introduced the issue: 3.3 first, then 3.2, then 3.1, then 3.0
|
|
||||||
and so on. This operation is called "backporting". A fix for an issue is never
|
|
||||||
backported beyond the branch that introduced the issue. An important point is
|
|
||||||
that the project maintainers really aim at zero regression in maintenance
|
|
||||||
branches, so they're never willing to take any risk backporting patches that
|
|
||||||
are not deemed strictly necessary.
|
|
||||||
|
|
||||||
Fixes consist of patches managed using the Git version control tool and are
|
|
||||||
identified by a Git commit ID and a commit message. For this reason we
|
|
||||||
indistinctly talk about backporting fixes, commits, or patches; all mean the
|
|
||||||
same thing. When mentioning commit IDs, developers always use a short form
|
|
||||||
made of the first 8 characters only, and expect the AI assistant to do the
|
|
||||||
same.
|
|
||||||
|
|
||||||
It seldom happens that some fixes depend on changes that were brought by other
|
|
||||||
patches that were not in some branches and that will need to be backported as
|
|
||||||
well for the fix to work. In this case, such information is explicitly provided
|
|
||||||
in the commit message by the patch's author in natural language.
|
|
||||||
|
|
||||||
Developers are serious and always indicate if a patch needs to be backported.
|
|
||||||
Sometimes they omit the exact target branch, or they will say that the patch is
|
|
||||||
"needed" in some older branch, but it means the same. If a commit message
|
|
||||||
doesn't mention any backport instructions, it means that the commit does not
|
|
||||||
have to be backported. And patches that are not strictly bug fixes nor doc
|
|
||||||
improvements are normally not backported. For example, fixes for design
|
|
||||||
limitations, architectural improvements and performance optimizations are
|
|
||||||
considered too risky for a backport. Finally, all bug fixes are tagged as
|
|
||||||
"BUG" at the beginning of their subject line. Patches that are not tagged as
|
|
||||||
such are not bugs, and must never be backported unless their commit message
|
|
||||||
explicitly requests so.
|
|
||||||
|
|
||||||
ENDCONTEXT
|
|
||||||
|
|
||||||
A developer is reviewing the development branch, trying to spot which commits
|
|
||||||
need to be backported to maintenance branches. This person is already expert
|
|
||||||
on HAProxy and everything related to Git, patch management, and the risks
|
|
||||||
associated with backports, so he doesn't want to be told how to proceed nor to
|
|
||||||
review the contents of the patch.
|
|
||||||
|
|
||||||
The goal for this developer is to get some help from the AI assistant to save
|
|
||||||
some precious time on this tedious review work. In order to do a better job, he
|
|
||||||
needs an accurate summary of the information and instructions found in each
|
|
||||||
commit message. Specifically he needs to figure if the patch fixes a problem
|
|
||||||
affecting an older branch or not, if it needs to be backported, if so to which
|
|
||||||
branches, and if other patches need to be backported along with it.
|
|
||||||
|
|
||||||
The indented text block below after an "id" line and starting with a Subject line
|
|
||||||
is a commit message from the HAProxy development branch that describes a patch
|
|
||||||
applied to that branch, starting with its subject line, please read it carefully.
|
|
||||||
|
|
||||||
|
|
@ -1,29 +0,0 @@
|
||||||
|
|
||||||
ENDINPUT
|
|
||||||
BEGININSTRUCTION
|
|
||||||
|
|
||||||
You are an AI assistant that follows instruction extremely well. Help as much
|
|
||||||
as you can, responding to a single question using a single response.
|
|
||||||
|
|
||||||
The developer wants to know if he needs to backport the patch above to fix
|
|
||||||
maintenance branches, for which branches, and what possible dependencies might
|
|
||||||
be mentioned in the commit message. Carefully study the commit message and its
|
|
||||||
backporting instructions if any (otherwise it should probably not be backported),
|
|
||||||
then provide a very concise and short summary that will help the developer decide
|
|
||||||
to backport it, or simply to skip it.
|
|
||||||
|
|
||||||
Start by explaining in one or two sentences what you recommend for this one and why.
|
|
||||||
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
|
|
||||||
where X is a single word among:
|
|
||||||
- "yes", if you recommend to backport the patch right now either because
|
|
||||||
it explicitly states this or because it's a fix for a bug that affects
|
|
||||||
a maintenance branch (3.3 or lower);
|
|
||||||
- "wait", if this patch explicitly mentions that it must be backported, but
|
|
||||||
only after waiting some time.
|
|
||||||
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
|
|
||||||
lack of explicit backport instructions, or it's just an improvement);
|
|
||||||
- "uncertain" otherwise for cases not covered above
|
|
||||||
|
|
||||||
ENDINSTRUCTION
|
|
||||||
|
|
||||||
Explanation:
|
|
||||||
|
|
@ -22,8 +22,7 @@ STABLE=$(cd "$HAPROXY_DIR" && git describe --tags "v${BRANCH}-dev0^" |cut -f1,2
|
||||||
PATCHES_DIR="$PATCHES_PFX"-"$BRANCH"
|
PATCHES_DIR="$PATCHES_PFX"-"$BRANCH"
|
||||||
|
|
||||||
(cd "$HAPROXY_DIR"
|
(cd "$HAPROXY_DIR"
|
||||||
# avoid git pull, it chokes on forced push
|
git pull
|
||||||
git remote update origin; git reset origin/master;git checkout -f
|
|
||||||
last_file=$(ls -1 "$PATCHES_DIR"/*.patch 2>/dev/null | tail -n1)
|
last_file=$(ls -1 "$PATCHES_DIR"/*.patch 2>/dev/null | tail -n1)
|
||||||
if [ -n "$last_file" ]; then
|
if [ -n "$last_file" ]; then
|
||||||
restart=$(head -n1 "$last_file" | cut -f2 -d' ')
|
restart=$(head -n1 "$last_file" | cut -f2 -d' ')
|
||||||
|
|
|
||||||
|
|
@ -30,8 +30,8 @@ static const char *tevt_fd_types[16] = {
|
||||||
};
|
};
|
||||||
|
|
||||||
static const char *tevt_hs_types[16] = {
|
static const char *tevt_hs_types[16] = {
|
||||||
[ 0] = "-", [ 1] = "-", [ 2] = "-", [ 3] = "-",
|
[ 0] = "-", [ 1] = "-", [ 2] = "-", [ 3] = "rcv_err",
|
||||||
[ 4] = "snd_err", [ 5] = "truncated_shutr", [ 6] = "truncated_rcv_err", [ 7] = "-",
|
[ 4] = "snd_err", [ 5] = "-", [ 6] = "-", [ 7] = "-",
|
||||||
[ 8] = "-", [ 9] = "-", [10] = "-", [11] = "-",
|
[ 8] = "-", [ 9] = "-", [10] = "-", [11] = "-",
|
||||||
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
|
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
|
||||||
};
|
};
|
||||||
|
|
|
||||||
|
|
@ -3,7 +3,7 @@
|
||||||
|
|
||||||
A number of contributors are often embarrassed with coding style issues, they
|
A number of contributors are often embarrassed with coding style issues, they
|
||||||
don't always know if they're doing it right, especially since the coding style
|
don't always know if they're doing it right, especially since the coding style
|
||||||
has evolved along the years. What is explained here is not necessarily what is
|
has elvoved along the years. What is explained here is not necessarily what is
|
||||||
applied in the code, but new code should as much as possible conform to this
|
applied in the code, but new code should as much as possible conform to this
|
||||||
style. Coding style fixes happen when code is replaced. It is useless to send
|
style. Coding style fixes happen when code is replaced. It is useless to send
|
||||||
patches to fix coding style only, they will be rejected, unless they belong to
|
patches to fix coding style only, they will be rejected, unless they belong to
|
||||||
|
|
|
||||||
File diff suppressed because it is too large
Load diff
140
doc/haterm.txt
140
doc/haterm.txt
|
|
@ -1,140 +0,0 @@
|
||||||
------
|
|
||||||
HATerm
|
|
||||||
------
|
|
||||||
HAProxy's dummy HTTP
|
|
||||||
server for benchmarks
|
|
||||||
|
|
||||||
1. Background
|
|
||||||
-------------
|
|
||||||
|
|
||||||
HATerm is a dummy HTTP server that leverages the flexible and scalable
|
|
||||||
architecture of HAProxy to ease benchmarking of HTTP agents in all versions of
|
|
||||||
HTTP currently supported by HAProxy (HTTP/1, HTTP/2, HTTP/3), and both in clear
|
|
||||||
and TLS / QUIC. It follows the same principle as its ancestor HTTPTerm [1],
|
|
||||||
consisting in producing HTTP responses entirely configured by the request
|
|
||||||
parameters (size, response time, status etc). It also preserves the spirit
|
|
||||||
HTTPTerm which does not require any configuration beyond an optional listening
|
|
||||||
address and a port number, though it also supports advanced configurations with
|
|
||||||
the full spectrum of HAProxy features for specific testing. The goal remains
|
|
||||||
to make it almost as fast as the original HTTPTerm so that it can become a
|
|
||||||
de-facto replacement, with a compatible command line and request parameters
|
|
||||||
that will not change users' habits.
|
|
||||||
|
|
||||||
[1] https://github.com/wtarreau/httpterm
|
|
||||||
|
|
||||||
|
|
||||||
2. Compilation
|
|
||||||
--------------
|
|
||||||
|
|
||||||
HATerm may be compiled in the same way as HAProxy but with "haterm" as Makefile
|
|
||||||
target to provide on the "make" command line as follows:
|
|
||||||
|
|
||||||
$ make -j $(nproc) TARGET=linux-glibc haterm
|
|
||||||
|
|
||||||
HATerm supports HTTPS/SSL/TCP:
|
|
||||||
|
|
||||||
$ make TARGET=linux-glibc USE_OPENSSL=1
|
|
||||||
|
|
||||||
It also supports QUIC:
|
|
||||||
|
|
||||||
$ make -j $(nproc) TARGET=linux-glibc USE_OPENSSL=1 USE_QUIC=1 haterm
|
|
||||||
|
|
||||||
Technically speaking, it uses the regular HAProxy source and object code with a
|
|
||||||
different command line parser. As such, all build options supported by HAProxy
|
|
||||||
also apply to HATerm. See INSTALL for more details about how to compile them.
|
|
||||||
|
|
||||||
|
|
||||||
3. Execution
|
|
||||||
------------
|
|
||||||
|
|
||||||
HATerm is a very easy to use HTTP server with supports for all the HTTP
|
|
||||||
versions. It displays its usage when run without argument or wrong arguments:
|
|
||||||
|
|
||||||
$ ./haterm
|
|
||||||
Usage : haterm -L [<ip>]:<clear port>[:<TCP&QUIC SSL port>] [-L...]* [opts]
|
|
||||||
where <opts> may be any combination of:
|
|
||||||
-G <line> : multiple option; append <line> to the "global" section
|
|
||||||
-F <line> : multiple option; append <line> to the "frontend" section
|
|
||||||
-T <line> : multiple option; append <line> to the "traces" section
|
|
||||||
-C : dump the configuration and exit
|
|
||||||
-D : goes daemon
|
|
||||||
-b <keysize> : RSA key size in bits (ex: "2048", "4096"...)
|
|
||||||
-c <curves> : ECSDA curves (ex: "P-256", "P-384"...)
|
|
||||||
-v : shows version
|
|
||||||
-d : enable the traces for all http protocols
|
|
||||||
--quic-bind-opts <opts> : append options to QUIC "bind" lines
|
|
||||||
--tcp-bind-opts <opts> : append options to TCP "bind" lines
|
|
||||||
|
|
||||||
|
|
||||||
Arguments -G, -F, -T permit to append one or multiple lines at the end of their
|
|
||||||
respective sections. A tab character ('\t') is prepended at the beginning of
|
|
||||||
the argument, and a line feed ('\n') is appended at the end. It is also
|
|
||||||
possible to insert multiple lines at once using escape sequences '\n' and '\t'
|
|
||||||
inside the string argument.
|
|
||||||
|
|
||||||
As HAProxy, HATerm may listen on several TCP/UDP addresses which can be
|
|
||||||
provided by multiple "-L" options. To be functional, it needs at least one
|
|
||||||
correct "-L" option to be set.
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
|
|
||||||
$ ./haterm -L 127.0.0.1:8888 # listen on 127.0.0.1:8888 TCP address
|
|
||||||
|
|
||||||
$ ./haterm -L 127.0.0.1:8888:8889 # listen on 127.0.0.1:8888 TCP address,
|
|
||||||
# 127.0.01:8889 SSL/TCP address,
|
|
||||||
# and 127.0.01:8889 QUIC/UDP address
|
|
||||||
|
|
||||||
$ ./haterm -L 127.0.0.1:8888:8889 -L [::1]:8888:8889
|
|
||||||
|
|
||||||
With USE_QUIC_OPENSSL_COMPAT support, the user must configure a global
|
|
||||||
section as for HAProxy. HATerm sets internally its configuration in.
|
|
||||||
memory as this is done by HAProxy from configuration files:
|
|
||||||
|
|
||||||
$ ./haterm -L 127.0.0.1:8888:8889
|
|
||||||
[NOTICE] (1371578) : haproxy version is 3.4-dev4-ba5eab-28
|
|
||||||
[NOTICE] (1371578) : path to executable is ./haterm
|
|
||||||
[ALERT] (1371578) : Binding [haterm cfgfile:12] for frontend
|
|
||||||
___haterm_frontend___: this SSL library does not
|
|
||||||
support the QUIC protocol. A limited compatibility
|
|
||||||
layer may be enabled using the "limited-quic" global
|
|
||||||
option if desired.
|
|
||||||
|
|
||||||
Such an alert may be fixed with "-G' option:
|
|
||||||
|
|
||||||
$ ./haterm -L 127.0.0.1:8888:8889 -G "limited-quic"
|
|
||||||
|
|
||||||
|
|
||||||
When the SSL support is not compiled in, the second port is ignored. This is
|
|
||||||
also the case for the QUIC support.
|
|
||||||
|
|
||||||
HATerm adjusts its responses depending on the requests it receives. An empty
|
|
||||||
query string provides the information about how the URIs are understood by
|
|
||||||
HATerm:
|
|
||||||
|
|
||||||
$ curl http://127.0.0.1:8888/?
|
|
||||||
HAProxy's dummy HTTP server for benchmarks - version 3.4-dev4.
|
|
||||||
All integer argument values are in the form [digits]*[kmgr] (r=random(0..1))
|
|
||||||
The following arguments are supported to override the default objects :
|
|
||||||
- /?s=<size> return <size> bytes.
|
|
||||||
E.g. /?s=20k
|
|
||||||
- /?r=<retcode> present <retcode> as the HTTP return code.
|
|
||||||
E.g. /?r=404
|
|
||||||
- /?c=<cache> set the return as not cacheable if <1.
|
|
||||||
E.g. /?c=0
|
|
||||||
- /?A=<req-after> drain the request body after sending the response.
|
|
||||||
E.g. /?A=1
|
|
||||||
- /?C=<close> force the response to use close if >0.
|
|
||||||
E.g. /?C=1
|
|
||||||
- /?K=<keep-alive> force the response to use keep-alive if >0.
|
|
||||||
E.g. /?K=1
|
|
||||||
- /?t=<time> wait <time> milliseconds before responding.
|
|
||||||
E.g. /?t=500
|
|
||||||
- /?k=<enable> Enable transfer encoding chunked with only one chunk
|
|
||||||
if >0.
|
|
||||||
- /?R=<enable> Enable sending random data if >0.
|
|
||||||
|
|
||||||
Note that those arguments may be cumulated on one line separated by a set of
|
|
||||||
delimitors among [&?,;/] :
|
|
||||||
- GET /?s=20k&c=1&t=700&K=30r HTTP/1.0
|
|
||||||
- GET /?r=500?s=0?c=0?t=1000 HTTP/1.0
|
|
||||||
|
|
||||||
|
|
@ -5,7 +5,7 @@
|
||||||
|
|
||||||
The buffer list API allows one to share a certain amount of buffers between
|
The buffer list API allows one to share a certain amount of buffers between
|
||||||
multiple entities, which will each see their own as lists of buffers, while
|
multiple entities, which will each see their own as lists of buffers, while
|
||||||
keeping a shared free list. The immediate use case is for muxes, which may
|
keeping a sharedd free list. The immediate use case is for muxes, which may
|
||||||
want to allocate up to a certain number of buffers per connection, shared
|
want to allocate up to a certain number of buffers per connection, shared
|
||||||
among all streams. In this case, each stream will first request a new list
|
among all streams. In this case, each stream will first request a new list
|
||||||
for its own use, then may request extra entries from the free list. At any
|
for its own use, then may request extra entries from the free list. At any
|
||||||
|
|
|
||||||
|
|
@ -245,30 +245,6 @@ mt_list_pop(l)
|
||||||
#=========#
|
#=========#
|
||||||
|
|
||||||
|
|
||||||
mt_list_pop_locked(l)
|
|
||||||
Removes the list's first element, returns it locked. If the list was empty,
|
|
||||||
NULL is returned. A macro MT_LIST_POP_LOCKED() is provided for a
|
|
||||||
more convenient use; instead of returning the list element, it will return
|
|
||||||
the structure holding the element, taking care of preserving the NULL.
|
|
||||||
|
|
||||||
before:
|
|
||||||
+---+ +---+ +---+ +---+ +---+ +---+ +---+
|
|
||||||
#=>| L |<===>| A |<===>| B |<===>| C |<===>| D |<===>| E |<===>| F |<=#
|
|
||||||
# +---+ +---+ +---+ +---+ +---+ +---+ +---+ #
|
|
||||||
#=====================================================================#
|
|
||||||
|
|
||||||
after:
|
|
||||||
+---+ +---+ +---+ +---+ +---+ +---+
|
|
||||||
#=>| L |<===>| B |<===>| C |<===>| D |<===>| E |<===>| F |<=#
|
|
||||||
# +---+ +---+ +---+ +---+ +---+ +---+ #
|
|
||||||
#===========================================================#
|
|
||||||
|
|
||||||
+---+
|
|
||||||
# x| A |x #
|
|
||||||
# +---+ #
|
|
||||||
#=========#
|
|
||||||
|
|
||||||
|
|
||||||
_mt_list_lock_next(elt)
|
_mt_list_lock_next(elt)
|
||||||
Locks the link that starts at the next pointer of the designated element.
|
Locks the link that starts at the next pointer of the designated element.
|
||||||
The link is replaced by two locked pointers, and a pointer to the next
|
The link is replaced by two locked pointers, and a pointer to the next
|
||||||
|
|
|
||||||
|
|
@ -11,7 +11,7 @@ default init, this was controversial but fedora and archlinux already uses it.
|
||||||
At this time HAProxy still had a multi-process model, and the way haproxy is
|
At this time HAProxy still had a multi-process model, and the way haproxy is
|
||||||
working was incompatible with the daemon mode.
|
working was incompatible with the daemon mode.
|
||||||
|
|
||||||
Systemd is compatible with traditional forking services, but somehow HAProxy
|
Systemd is compatible with traditionnal forking services, but somehow HAProxy
|
||||||
is different. To work correctly, systemd needs a main PID, this is the PID of
|
is different. To work correctly, systemd needs a main PID, this is the PID of
|
||||||
the process that systemd will supervises.
|
the process that systemd will supervises.
|
||||||
|
|
||||||
|
|
@ -45,7 +45,7 @@ However the wrapper suffered from several problems:
|
||||||
|
|
||||||
### mworker V1
|
### mworker V1
|
||||||
|
|
||||||
HAProxy 1.8 got rid of the wrapper which was replaced by the master worker
|
HAProxy 1.8 got ride of the wrapper which was replaced by the master worker
|
||||||
mode. This first version was basically a reintegration of the wrapper features
|
mode. This first version was basically a reintegration of the wrapper features
|
||||||
within HAProxy. HAProxy is launched with the -W flag, read the configuration and
|
within HAProxy. HAProxy is launched with the -W flag, read the configuration and
|
||||||
then fork. In mworker mode, the master is usually launched as a root process,
|
then fork. In mworker mode, the master is usually launched as a root process,
|
||||||
|
|
@ -86,7 +86,7 @@ retrieved automatically.
|
||||||
The master is supervising the workers, when a current worker (not a previous one
|
The master is supervising the workers, when a current worker (not a previous one
|
||||||
from before the reload) is exiting without being asked for a reload, the master
|
from before the reload) is exiting without being asked for a reload, the master
|
||||||
will emit an "exit-on-failure" error and will kill every workers with a SIGTERM
|
will emit an "exit-on-failure" error and will kill every workers with a SIGTERM
|
||||||
and exits with the same error code than the failed worker, this behavior can be
|
and exits with the same error code than the failed master, this behavior can be
|
||||||
changed by using the "no exit-on-failure" option in the global section.
|
changed by using the "no exit-on-failure" option in the global section.
|
||||||
|
|
||||||
While the master is supervising the workers using the wait() function, the
|
While the master is supervising the workers using the wait() function, the
|
||||||
|
|
@ -186,8 +186,8 @@ number that can be found in HAPROXY_PROCESSES. With this change the stats socket
|
||||||
in the configuration is less useful and everything can be done from the master
|
in the configuration is less useful and everything can be done from the master
|
||||||
CLI.
|
CLI.
|
||||||
|
|
||||||
With 2.7, the reload mechanism of the master CLI evolved, with previous versions,
|
With 2.7, the reload mecanism of the master CLI evolved, with previous versions,
|
||||||
this mechanism was asynchronous, so once the `reload` command was received, the
|
this mecanism was asynchronous, so once the `reload` command was received, the
|
||||||
master would reload, the active master CLI connection was closed, and there was
|
master would reload, the active master CLI connection was closed, and there was
|
||||||
no way to return a status as a response to the `reload` command. To achieve a
|
no way to return a status as a response to the `reload` command. To achieve a
|
||||||
synchronous reload, a dedicated sockpair is used, one side uses a master CLI
|
synchronous reload, a dedicated sockpair is used, one side uses a master CLI
|
||||||
|
|
@ -208,38 +208,3 @@ starts with -st to achieve a hard stop on the previous worker.
|
||||||
Version 3.0 got rid of the libsystemd dependencies for sd_notify() after the
|
Version 3.0 got rid of the libsystemd dependencies for sd_notify() after the
|
||||||
events of xz/openssh, the function is now implemented directly in haproxy in
|
events of xz/openssh, the function is now implemented directly in haproxy in
|
||||||
src/systemd.c.
|
src/systemd.c.
|
||||||
|
|
||||||
### mworker V3
|
|
||||||
|
|
||||||
This version was implemented with HAProxy 3.1, the goal was to stop parsing and
|
|
||||||
applying the configuration in the master process.
|
|
||||||
|
|
||||||
One of the caveats of the previous implementation was that the parser could take
|
|
||||||
a lot of time, and the master process would be stuck in the parser instead of
|
|
||||||
handling its polling loop, signals etc. Some parts of the configuration parsing
|
|
||||||
could also be less reliable with third-party code (EXTRA_OBJS), it could, for
|
|
||||||
example, allow opening FDs and not closing them before the reload which
|
|
||||||
would crash the master after a few reloads.
|
|
||||||
|
|
||||||
The startup of the master-worker was reorganized this way:
|
|
||||||
|
|
||||||
- the "discovery" mode, which is a lighter configuration parsing step, only
|
|
||||||
applies the configuration which need to be effective for the master process.
|
|
||||||
For example, "master-worker", "mworker-max-reloads" and less than 20 other
|
|
||||||
keywords that are identified by KWF_DISCOVERY in the code. It is really fast
|
|
||||||
as it don't need all the configuration to be applied in the master process.
|
|
||||||
|
|
||||||
- the master will then fork a worker, with a PROC_O_INIT flag. This worker has
|
|
||||||
a temporary sockpair connected to the master CLI. Once the worker is forked,
|
|
||||||
the master initializes its configuration and starts its polling loop.
|
|
||||||
|
|
||||||
- The newly forked worker will try to parse the configuration, which could
|
|
||||||
result in a failure (exit 1), or any bad error code. In case of success, the
|
|
||||||
worker will send a "READY" message to the master CLI then close this FD. At
|
|
||||||
this step everything was initialized and the worker can enter its polling
|
|
||||||
loop.
|
|
||||||
|
|
||||||
- The master then waits for the worker, it could:
|
|
||||||
* receive the READY message over the mCLI, resulting in a successful loading
|
|
||||||
of haproxy
|
|
||||||
* receive a SIGCHLD, meaning the worker exited and couldn't load
|
|
||||||
|
|
|
||||||
|
|
@ -1,53 +0,0 @@
|
||||||
2025/09/16 - SHM stats file storage description and hints
|
|
||||||
|
|
||||||
Shm stats file (used to share thread-groupable statistics over multiple
|
|
||||||
process through the "shm-stats-file" directive) is made of:
|
|
||||||
|
|
||||||
- a main header which describes the file version, the processes making
|
|
||||||
use of it, the common clock source and hints about the number of
|
|
||||||
objects that are currently stored or provisionned in the file.
|
|
||||||
- an indefinite number of "objects" blocks coming right after the
|
|
||||||
main header, all blocks have the same size which is the size of the
|
|
||||||
maximum underlying object that may be stored. The main header tells
|
|
||||||
how many objects are stored in the file.
|
|
||||||
|
|
||||||
File header looks like this (32/64 bits systems):
|
|
||||||
|
|
||||||
0 8 16 32 48 64
|
|
||||||
+-------+---------+----------------+-------------------+-------------------+
|
|
||||||
| VERSION | 2 bytes | global_now_ms (global mono date in ms)|
|
|
||||||
|MAJOR | MINOR | hole | |
|
|
||||||
+----------------------------------+---------------------------------------+
|
|
||||||
| global_now_ns (global mono date in ns) |
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
| now_offset (offset applied to global monotonic date |
|
|
||||||
| on startup) |
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
| Process slot : | 1byte x 64
|
|
||||||
| pid | heartbeat (ticks) |
|
|
||||||
+----------------------------------+---------------------------------------+
|
|
||||||
| objects | objects slots |
|
|
||||||
| (used objects) | (available for use) |
|
|
||||||
+----------------------------------+---------------------------------------+
|
|
||||||
| padding (for future use) | 128 bytes
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
|
|
||||||
Object block looks like this:
|
|
||||||
|
|
||||||
0 8 16 32 48 64
|
|
||||||
+-------+---------+----------------+-------------------+-------------------+
|
|
||||||
| GUID | 128 bytes
|
|
||||||
+ (zero terminated) +
|
|
||||||
| |
|
|
||||||
+-------+---------+--------------------------------------------------------+
|
|
||||||
| tgid | type | padding |
|
|
||||||
+-------+---------+--------------------------------------------------------+
|
|
||||||
| users (bitmask of process slots making use of the obj) |
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
| object data |
|
|
||||||
| (version dependent) |
|
|
||||||
| struct be_counters_shared_tg or |
|
|
||||||
| struct fe_counters_shared_tg |
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
| padding (to anticipate evolutions) | 64 bytes
|
|
||||||
+--------------------------------------------------------------------------+
|
|
||||||
|
|
@ -1,7 +1,7 @@
|
||||||
-----------------------
|
-----------------------
|
||||||
HAProxy Starter Guide
|
HAProxy Starter Guide
|
||||||
-----------------------
|
-----------------------
|
||||||
version 3.4
|
version 3.3
|
||||||
|
|
||||||
|
|
||||||
This document is an introduction to HAProxy for all those who don't know it, as
|
This document is an introduction to HAProxy for all those who don't know it, as
|
||||||
|
|
@ -1693,7 +1693,7 @@ A small team of trusted developers will receive it and will be able to propose
|
||||||
a fix. We usually don't use embargoes and once a fix is available it gets
|
a fix. We usually don't use embargoes and once a fix is available it gets
|
||||||
merged. In some rare circumstances it can happen that a release is coordinated
|
merged. In some rare circumstances it can happen that a release is coordinated
|
||||||
with software vendors. Please note that this process usually messes up with
|
with software vendors. Please note that this process usually messes up with
|
||||||
everyone's work, and that rushed up releases can sometimes introduce new bugs,
|
eveyone's work, and that rushed up releases can sometimes introduce new bugs,
|
||||||
so it's best avoided unless strictly necessary; as such, there is often little
|
so it's best avoided unless strictly necessary; as such, there is often little
|
||||||
consideration for reports that needlessly cause such extra burden, and the best
|
consideration for reports that needlessly cause such extra burden, and the best
|
||||||
way to see your work credited usually is to provide a working fix, which will
|
way to see your work credited usually is to provide a working fix, which will
|
||||||
|
|
|
||||||
|
|
@ -1,7 +1,7 @@
|
||||||
------------------------
|
------------------------
|
||||||
HAProxy Management Guide
|
HAProxy Management Guide
|
||||||
------------------------
|
------------------------
|
||||||
version 3.4
|
version 3.3
|
||||||
|
|
||||||
|
|
||||||
This document describes how to start, stop, manage, and troubleshoot HAProxy,
|
This document describes how to start, stop, manage, and troubleshoot HAProxy,
|
||||||
|
|
@ -900,7 +900,9 @@ If a memory allocation fails due to the memory limit being reached or because
|
||||||
the system doesn't have any enough memory, then haproxy will first start to
|
the system doesn't have any enough memory, then haproxy will first start to
|
||||||
free all available objects from all pools before attempting to allocate memory
|
free all available objects from all pools before attempting to allocate memory
|
||||||
again. This mechanism of releasing unused memory can be triggered by sending
|
again. This mechanism of releasing unused memory can be triggered by sending
|
||||||
the signal SIGQUIT to the haproxy process.
|
the signal SIGQUIT to the haproxy process. When doing so, the pools state prior
|
||||||
|
to the flush will also be reported to stderr when the process runs in
|
||||||
|
foreground.
|
||||||
|
|
||||||
During a reload operation, the process switched to the graceful stop state also
|
During a reload operation, the process switched to the graceful stop state also
|
||||||
automatically performs some flushes after releasing any connection so that all
|
automatically performs some flushes after releasing any connection so that all
|
||||||
|
|
@ -1335,26 +1337,6 @@ Here is the list of static fields using the proxy statistics domain:
|
||||||
97. used_conn_cur [...S]: current number of connections in use
|
97. used_conn_cur [...S]: current number of connections in use
|
||||||
98. need_conn_est [...S]: estimated needed number of connections
|
98. need_conn_est [...S]: estimated needed number of connections
|
||||||
99. uweight [..BS]: total user weight (backend), server user weight (server)
|
99. uweight [..BS]: total user weight (backend), server user weight (server)
|
||||||
100. agg_server_status [..B.]: backend aggregated gauge of server's status
|
|
||||||
101. agg_server_status_check [..B.]: (deprecated)
|
|
||||||
102. agg_check_status [..B.]: backend aggregated gauge of server's state check
|
|
||||||
status
|
|
||||||
103. srid [...S]: server id revision
|
|
||||||
104. sess_other [.F..]: total number of sessions other than HTTP since process
|
|
||||||
started
|
|
||||||
105. h1_sess [.F..]: total number of HTTP/1 sessions since process started
|
|
||||||
106. h2_sess [.F..]: total number of HTTP/2 sessions since process started
|
|
||||||
107. h3_sess [.F..]: total number of HTTP/3 sessions since process started
|
|
||||||
108. req_other [.F..]: total number of sessions other than HTTP processed by
|
|
||||||
this object since the worker process started
|
|
||||||
109. h1req [.F..]: total number of HTTP/1 sessions processed by this object
|
|
||||||
since the worker process started
|
|
||||||
110. h2req [.F..]: total number of hTTP/2 sessions processed by this object
|
|
||||||
since the worker process started
|
|
||||||
111. h3req [.F..]: total number of HTTP/3 sessions processed by this object
|
|
||||||
since the worker process started
|
|
||||||
112. proto [L...]: protocol
|
|
||||||
113. priv_idle_cur [...S]: current number of private idle connections
|
|
||||||
|
|
||||||
For all other statistics domains, the presence or the order of the fields are
|
For all other statistics domains, the presence or the order of the fields are
|
||||||
not guaranteed. In this case, the header line should always be used to parse
|
not guaranteed. In this case, the header line should always be used to parse
|
||||||
|
|
@ -1725,30 +1707,6 @@ add acl [@<ver>] <acl> <pattern>
|
||||||
This command cannot be used if the reference <acl> is a name also used with
|
This command cannot be used if the reference <acl> is a name also used with
|
||||||
a map. In this case, the "add map" command must be used instead.
|
a map. In this case, the "add map" command must be used instead.
|
||||||
|
|
||||||
add backend <name> from <defproxy> [mode <mode>] [guid <guid>] [ EXPERIMENTAL ]
|
|
||||||
Instantiate a new backend proxy with the name <name>.
|
|
||||||
|
|
||||||
Only TCP or HTTP proxies can be created. All of the settings are inherited
|
|
||||||
from <defproxy> default proxy instance. By default, it is mandatory to
|
|
||||||
specify the backend mode via the argument of the same name, unless <defproxy>
|
|
||||||
already defines it explicitely. It is also possible to use an optional GUID
|
|
||||||
argument if wanted.
|
|
||||||
|
|
||||||
Servers can be added via the command "add server". The backend is initialized
|
|
||||||
in the unpublished state. Once considered ready for traffic, use "publish
|
|
||||||
backend" to expose the newly created instance.
|
|
||||||
|
|
||||||
All named default proxies can be used, given that they validate the same
|
|
||||||
inheritance rules applied during configuration parsing. There is some
|
|
||||||
exceptions though, for example when the mode is neither TCP nor HTTP. Another
|
|
||||||
exception is that it is not yet possible to use a default proxies which
|
|
||||||
reference custom HTTP errors, for example via the errorfiles or http-rules
|
|
||||||
keywords.
|
|
||||||
|
|
||||||
This command is restricted and can only be issued on sockets configured for
|
|
||||||
level "admin". Moreover, this feature is still considered in development so it
|
|
||||||
also requires experimental mode (see "experimental-mode on").
|
|
||||||
|
|
||||||
add map [@<ver>] <map> <key> <value>
|
add map [@<ver>] <map> <key> <value>
|
||||||
add map [@<ver>] <map> <payload>
|
add map [@<ver>] <map> <payload>
|
||||||
Add an entry into the map <map> to associate the value <value> to the key
|
Add an entry into the map <map> to associate the value <value> to the key
|
||||||
|
|
@ -1858,35 +1816,6 @@ add ssl crt-list <crtlist> <payload>
|
||||||
$ echo -e 'add ssl crt-list certlist1 <<\nfoobar.pem [allow-0rtt] foo.bar.com
|
$ echo -e 'add ssl crt-list certlist1 <<\nfoobar.pem [allow-0rtt] foo.bar.com
|
||||||
!test1.com\n' | socat /tmp/sock1 -
|
!test1.com\n' | socat /tmp/sock1 -
|
||||||
|
|
||||||
add ssl ech <bind> <payload>
|
|
||||||
Add an ECH key to a <bind> line. The payload must be in the PEM for ECH format.
|
|
||||||
(https://datatracker.ietf.org/doc/html/draft-farrell-tls-pemesni)
|
|
||||||
|
|
||||||
The bind line format is <frontend>/@<filename>:<linenum> (Example:
|
|
||||||
frontend1/@haproxy.conf:19) or <frontend>/<name> if the bind line was named
|
|
||||||
with the "name" keyword.
|
|
||||||
|
|
||||||
Necessitates an OpenSSL version that supports ECH, and HAProxy must be
|
|
||||||
compiled with USE_ECH=1. This command is only supported on a CLI connection
|
|
||||||
running in experimental mode (see "experimental-mode on").
|
|
||||||
|
|
||||||
See also "show ssl ech" and "ech" in the Section 5.1 of the configuration
|
|
||||||
manual.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
$ openssl ech -public_name foobar.com -out foobar3.com.ech
|
|
||||||
$ echo -e "experimental-mode on; add ssl ech frontend1/@haproxy.conf:19 <<%EOF%\n$(cat foobar3.com.ech)\n%EOF%\n" | \
|
|
||||||
socat /tmp/haproxy.sock -
|
|
||||||
added a new ECH config to frontend1
|
|
||||||
|
|
||||||
add ssl jwt <filename>
|
|
||||||
Add an already loaded certificate to the list of certificates that can be
|
|
||||||
used for JWT validation (see "jwt_verify_cert" converter). This command does
|
|
||||||
not work on ongoing transactions.
|
|
||||||
See also "del ssl jwt" and "show ssl jwt" commands.
|
|
||||||
See "jwt" certificate option for more information.
|
|
||||||
|
|
||||||
clear counters
|
clear counters
|
||||||
Clear the max values of the statistics counters in each proxy (frontend &
|
Clear the max values of the statistics counters in each proxy (frontend &
|
||||||
backend) and in each server. The accumulated counters are not affected. The
|
backend) and in each server. The accumulated counters are not affected. The
|
||||||
|
|
@ -2124,30 +2053,6 @@ del acl <acl> [<key>|#<ref>]
|
||||||
listing the content of the acl. Note that if the reference <acl> is a name and
|
listing the content of the acl. Note that if the reference <acl> is a name and
|
||||||
is shared with a map, the entry will be also deleted in the map.
|
is shared with a map, the entry will be also deleted in the map.
|
||||||
|
|
||||||
del backend <name>
|
|
||||||
Removes the backend proxy with the name <name>.
|
|
||||||
|
|
||||||
This operation is only possible for TCP or HTTP proxies. To succeed, the
|
|
||||||
backend instance must have been first unpublished. Also, all of its servers
|
|
||||||
must first be removed (via "del server" CLI). Finally, no stream must still
|
|
||||||
be attached to the backend instance.
|
|
||||||
|
|
||||||
There are additional restrictions which prevent backend removal. First, a
|
|
||||||
backend cannot be removed if it is explicitly referenced by config elements,
|
|
||||||
for example via a use_backend rule or in sample expressions. Some proxies
|
|
||||||
options are also incompatible with runtime deletion. Currently, this is the
|
|
||||||
case when deprecated dispatch or option transparent are used. Also, a backend
|
|
||||||
cannot be removed if there is a stick-table declared in it. Finally, it is
|
|
||||||
impossible for now to remove a backend if QUIC servers were present in it.
|
|
||||||
|
|
||||||
It can be useful to use "wait be-removable" prior to this command to check
|
|
||||||
for the aforementioned prerequisites. This also provides a method to wait for
|
|
||||||
the final closure of the streams attached to the target backend.
|
|
||||||
|
|
||||||
This command is restricted and can only be issued on sockets configured for
|
|
||||||
level "admin". Moreover, this feature is still considered in development so it
|
|
||||||
also requires experimental mode (see "experimental-mode on").
|
|
||||||
|
|
||||||
del map <map> [<key>|#<ref>]
|
del map <map> [<key>|#<ref>]
|
||||||
Delete all the map entries from the map <map> corresponding to the key <key>.
|
Delete all the map entries from the map <map> corresponding to the key <key>.
|
||||||
<map> is the #<id> or the <name> returned by "show map". If the <ref> is used,
|
<map> is the #<id> or the <name> returned by "show map". If the <ref> is used,
|
||||||
|
|
@ -2162,11 +2067,10 @@ del ssl ca-file <cafile>
|
||||||
the "ca-file" or "ca-verify-file" directives in the configuration.
|
the "ca-file" or "ca-verify-file" directives in the configuration.
|
||||||
|
|
||||||
del ssl cert <certfile>
|
del ssl cert <certfile>
|
||||||
Delete a certificate store from HAProxy. The certificate must be unused
|
Delete a certificate store from HAProxy. The certificate must be unused and
|
||||||
(included for JWT validation) and removed from any crt-list or directory.
|
removed from any crt-list or directory. "show ssl cert" displays the status
|
||||||
"show ssl cert" displays the status of the certificate. The deletion doesn't
|
of the certificate. The deletion doesn't work with a certificate referenced
|
||||||
work with a certificate referenced directly with the "crt" directive in the
|
directly with the "crt" directive in the configuration.
|
||||||
configuration.
|
|
||||||
|
|
||||||
del ssl crl-file <crlfile>
|
del ssl crl-file <crlfile>
|
||||||
Delete a CRL file tree entry from HAProxy. The CRL file must be unused and
|
Delete a CRL file tree entry from HAProxy. The CRL file must be unused and
|
||||||
|
|
@ -2180,46 +2084,12 @@ del ssl crt-list <filename> <certfile[:line]>
|
||||||
you will need to provide which line you want to delete. To display the line
|
you will need to provide which line you want to delete. To display the line
|
||||||
numbers, use "show ssl crt-list -n <crtlist>".
|
numbers, use "show ssl crt-list -n <crtlist>".
|
||||||
|
|
||||||
del ssl ech <bind>
|
|
||||||
Delete the ECH keys of a bind line.
|
|
||||||
|
|
||||||
The bind line format is <frontend>/@<filename>:<linenum> (Example:
|
|
||||||
frontend1/@haproxy.conf:19) or <frontend>/<name> if the bind line was named
|
|
||||||
with the "name" keyword.
|
|
||||||
|
|
||||||
Necessitates an OpenSSL version that supports ECH, and HAProxy must be
|
|
||||||
compiled with USE_ECH=1. This command is only supported on a CLI connection
|
|
||||||
running in experimental mode (see "experimental-mode on").
|
|
||||||
|
|
||||||
See also "show ssl ech", "add ssl ech" and "ech" in the Section 5.1 of the
|
|
||||||
configuration manual.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
$ echo "experimental-mode on; del ssl ech frontend1/@haproxy.conf:19" | socat /tmp/haproxy.sock -
|
|
||||||
deleted all ECH configs from frontend1/@haproxy.conf:19
|
|
||||||
|
|
||||||
del ssl jwt <filename>
|
|
||||||
Remove an already loaded certificate to the list of certificates that can be
|
|
||||||
used for JWT validation (see "jwt_verify_cert" converter). This command does
|
|
||||||
not work on ongoing transactions.
|
|
||||||
See also "add ssl jwt" and "show ssl jwt" commands.
|
|
||||||
See "jwt" certificate option for more information.
|
|
||||||
|
|
||||||
del server <backend>/<server>
|
del server <backend>/<server>
|
||||||
Delete a removable server attached to the backend <backend>. A removable
|
Remove a server attached to the backend <backend>. All servers are eligible,
|
||||||
server is the server which satisfies all of these conditions :
|
except servers which are referenced by other configuration elements. The
|
||||||
- not referenced by other configuration elements
|
server must be put in maintenance mode prior to its deletion. The operation
|
||||||
- must already be in maintenance (see "disable server")
|
is cancelled if the server still has active or idle connection or its
|
||||||
- must not have any active or idle connections
|
connection queue is not empty.
|
||||||
|
|
||||||
If any of these conditions is not met, the command will fail.
|
|
||||||
|
|
||||||
Active connections are those with at least one ongoing request. It is
|
|
||||||
possible to speed up their termination using "shutdown sessions server". It
|
|
||||||
is highly recommended to use "wait srv-removable" before "del server" to
|
|
||||||
ensure that all active or idle connections are closed and that the command
|
|
||||||
succeeds.
|
|
||||||
|
|
||||||
disable agent <backend>/<server>
|
disable agent <backend>/<server>
|
||||||
Mark the auxiliary agent check as temporarily stopped.
|
Mark the auxiliary agent check as temporarily stopped.
|
||||||
|
|
@ -2522,11 +2392,6 @@ prompt [help | n | i | p | timed]*
|
||||||
advanced scripts, and the non-interactive mode (default) to basic scripts.
|
advanced scripts, and the non-interactive mode (default) to basic scripts.
|
||||||
Note that the non-interactive mode is not available for the master socket.
|
Note that the non-interactive mode is not available for the master socket.
|
||||||
|
|
||||||
publish backend <backend>
|
|
||||||
Activates content switching to a backend instance. This is the reverse
|
|
||||||
operation of "unpublish backend" command. This command is restricted and can
|
|
||||||
only be issued on sockets configured for levels "operator" or "admin".
|
|
||||||
|
|
||||||
quit
|
quit
|
||||||
Close the connection when in interactive mode.
|
Close the connection when in interactive mode.
|
||||||
|
|
||||||
|
|
@ -2582,8 +2447,7 @@ set maxconn global <maxconn>
|
||||||
delayed until the threshold is reached. A value of zero restores the initial
|
delayed until the threshold is reached. A value of zero restores the initial
|
||||||
setting.
|
setting.
|
||||||
|
|
||||||
set profiling memory { on | off }
|
set profiling { tasks | memory } { auto | on | off }
|
||||||
set profiling tasks { auto | on | off | lock | no-lock | memory | no-memory }
|
|
||||||
Enables or disables CPU or memory profiling for the indicated subsystem. This
|
Enables or disables CPU or memory profiling for the indicated subsystem. This
|
||||||
is equivalent to setting or clearing the "profiling" settings in the "global"
|
is equivalent to setting or clearing the "profiling" settings in the "global"
|
||||||
section of the configuration file. Please also see "show profiling". Note
|
section of the configuration file. Please also see "show profiling". Note
|
||||||
|
|
@ -2593,13 +2457,6 @@ set profiling tasks { auto | on | off | lock | no-lock | memory | no-memory }
|
||||||
on the linux-glibc target), and requires USE_MEMORY_PROFILING to be set at
|
on the linux-glibc target), and requires USE_MEMORY_PROFILING to be set at
|
||||||
compile time.
|
compile time.
|
||||||
|
|
||||||
. For tasks profiling, it is possible to enable or disable the collection of
|
|
||||||
per-task lock and memory timings at runtime, but the change is only taken
|
|
||||||
into account next time the profiler switches from off/auto to on (either
|
|
||||||
automatically or manually). Thus when using "no-lock" to disable per-task
|
|
||||||
lock profiling and save CPU cycles, it is recommended to flip the task
|
|
||||||
profiling off then on to commit the change.
|
|
||||||
|
|
||||||
set rate-limit connections global <value>
|
set rate-limit connections global <value>
|
||||||
Change the process-wide connection rate limit, which is set by the global
|
Change the process-wide connection rate limit, which is set by the global
|
||||||
'maxconnrate' setting. A value of zero disables the limitation. This limit
|
'maxconnrate' setting. A value of zero disables the limitation. This limit
|
||||||
|
|
@ -2760,28 +2617,6 @@ set ssl crl-file <crlfile> <payload>
|
||||||
socat /var/run/haproxy.stat -
|
socat /var/run/haproxy.stat -
|
||||||
echo "commit ssl crl-file crlfile.pem" | socat /var/run/haproxy.stat -
|
echo "commit ssl crl-file crlfile.pem" | socat /var/run/haproxy.stat -
|
||||||
|
|
||||||
set ssl ech <bind> <payload>
|
|
||||||
Replace the ECH keys of a bind line with this one. The payload must be in the
|
|
||||||
PEM for ECH format.
|
|
||||||
(https://datatracker.ietf.org/doc/html/draft-farrell-tls-pemesni)
|
|
||||||
|
|
||||||
The bind line format is <frontend>/@<filename>:<linenum> (Example:
|
|
||||||
frontend1/@haproxy.conf:19) or <frontend>/<name> if the bind line was named
|
|
||||||
with the "name" keyword.
|
|
||||||
|
|
||||||
Necessitates an OpenSSL version that supports ECH, and HAProxy must be
|
|
||||||
compiled with USE_ECH=1. This command is only supported on a CLI connection
|
|
||||||
running in experimental mode (see "experimental-mode on").
|
|
||||||
|
|
||||||
See also "show ssl ech", "add ssl ech" and "ech" in the Section 5.1 of the
|
|
||||||
configuration manual.
|
|
||||||
|
|
||||||
$ openssl ech -public_name foobar.com -out foobar3.com.ech
|
|
||||||
$ echo -e "experimental-mode on;
|
|
||||||
set ssl ech frontend1/@haproxy.conf:19 <<%EOF%\n$(cat foobar3.com.ech)\n%EOF%\n" | \
|
|
||||||
socat /tmp/haproxy.sock -
|
|
||||||
set new ECH configs for frontend1/@haproxy.conf:19
|
|
||||||
|
|
||||||
set ssl ocsp-response <response | payload>
|
set ssl ocsp-response <response | payload>
|
||||||
This command is used to update an OCSP Response for a certificate (see "crt"
|
This command is used to update an OCSP Response for a certificate (see "crt"
|
||||||
on "bind" lines). Same controls are performed as during the initial loading of
|
on "bind" lines). Same controls are performed as during the initial loading of
|
||||||
|
|
@ -2903,13 +2738,6 @@ operator
|
||||||
increased. It also drops expert and experimental mode. See also "show cli
|
increased. It also drops expert and experimental mode. See also "show cli
|
||||||
level".
|
level".
|
||||||
|
|
||||||
unpublish backend <backend>
|
|
||||||
Marks the backend as unqualified for future traffic selection. In effect,
|
|
||||||
use_backend / default_backend rules which reference it are ignored and the
|
|
||||||
next content switching rules are evaluated. Contrary to disabled backends,
|
|
||||||
servers health checks remain active. This command is restricted and can only
|
|
||||||
be issued on sockets configured for levels "operator" or "admin".
|
|
||||||
|
|
||||||
user
|
user
|
||||||
Decrease the CLI level of the current CLI session to user. It can't be
|
Decrease the CLI level of the current CLI session to user. It can't be
|
||||||
increased. It also drops expert and experimental mode. See also "show cli
|
increased. It also drops expert and experimental mode. See also "show cli
|
||||||
|
|
@ -3410,10 +3238,9 @@ show quic [<format>] [<filter>]
|
||||||
in the format will instead show a more detailed help message.
|
in the format will instead show a more detailed help message.
|
||||||
|
|
||||||
The final argument is used to restrict or extend the connection list. By
|
The final argument is used to restrict or extend the connection list. By
|
||||||
default, active frontend connections only are displayed. Use the extra
|
default, connections on closing or draining state are not displayed. Use the
|
||||||
argument "clo" to list instead closing frontend connections, "be" for backend
|
extra argument "all" to include them in the output. It's also possible to
|
||||||
connections or "all" for every categories. It's also possible to restrict to
|
restrict to a single connection by specifying its hexadecimal address.
|
||||||
a single connection by specifying its hexadecimal address.
|
|
||||||
|
|
||||||
show servers conn [<backend>]
|
show servers conn [<backend>]
|
||||||
Dump the current and idle connections state of the servers belonging to the
|
Dump the current and idle connections state of the servers belonging to the
|
||||||
|
|
@ -3434,12 +3261,9 @@ show servers conn [<backend>]
|
||||||
port Server's port (or zero if none)
|
port Server's port (or zero if none)
|
||||||
- Unused field, serves as a visual delimiter
|
- Unused field, serves as a visual delimiter
|
||||||
purge_delay Interval between connection purges, in milliseconds
|
purge_delay Interval between connection purges, in milliseconds
|
||||||
served Number of connections currently in use
|
|
||||||
used_cur Number of connections currently in use
|
used_cur Number of connections currently in use
|
||||||
note that this excludes conns attached to a session
|
|
||||||
used_max Highest value of used_cur since the process started
|
used_max Highest value of used_cur since the process started
|
||||||
need_est Floating estimate of total needed connections
|
need_est Floating estimate of total needed connections
|
||||||
idle_sess Number of idle connections flagged as private
|
|
||||||
unsafe_nb Number of idle connections considered as "unsafe"
|
unsafe_nb Number of idle connections considered as "unsafe"
|
||||||
safe_nb Number of idle connections considered as "safe"
|
safe_nb Number of idle connections considered as "safe"
|
||||||
idle_lim Configured maximum number of idle connections
|
idle_lim Configured maximum number of idle connections
|
||||||
|
|
@ -3683,7 +3507,7 @@ show stat [domain <resolvers|proxy>] [{<iid>|<proxy>} <type> <sid>] \
|
||||||
format" described in the section above. In short, the second column (after the
|
format" described in the section above. In short, the second column (after the
|
||||||
first ':') indicates the origin, nature, scope and persistence state of the
|
first ':') indicates the origin, nature, scope and persistence state of the
|
||||||
variable. The third column indicates the field type, among "s32", "s64",
|
variable. The third column indicates the field type, among "s32", "s64",
|
||||||
"u32", "u64", "flt" and "str". Then the fourth column is the value itself,
|
"u32", "u64", "flt" and "str". Then the fourth column is the value itself,
|
||||||
which the consumer knows how to parse thanks to column 3 and how to process
|
which the consumer knows how to parse thanks to column 3 and how to process
|
||||||
thanks to column 2.
|
thanks to column 2.
|
||||||
|
|
||||||
|
|
@ -3925,66 +3749,6 @@ show ssl crt-list [-n] [<filename>]
|
||||||
ecdsa.pem:3 [verify none allow-0rtt ssl-min-ver TLSv1.0 ssl-max-ver TLSv1.3] localhost !www.test1.com
|
ecdsa.pem:3 [verify none allow-0rtt ssl-min-ver TLSv1.0 ssl-max-ver TLSv1.3] localhost !www.test1.com
|
||||||
ecdsa.pem:4 [verify none allow-0rtt ssl-min-ver TLSv1.0 ssl-max-ver TLSv1.3]
|
ecdsa.pem:4 [verify none allow-0rtt ssl-min-ver TLSv1.0 ssl-max-ver TLSv1.3]
|
||||||
|
|
||||||
show ssl ech [<name>]
|
|
||||||
Display the list of ECH keys loaded in the HAProxy process.
|
|
||||||
|
|
||||||
When <name> is specified, displays the keys for a specific bind line. The
|
|
||||||
bind line format is <frontend>/@<filename>:<linenum> (Example:
|
|
||||||
frontend1/@haproxy.conf:19) or <frontend>/<name> if the bind line was named
|
|
||||||
with the "name" keyword.
|
|
||||||
|
|
||||||
The 'age' entry represents the time, in seconds, since the key was loaded in
|
|
||||||
the bind line. This value is reset when HAProxy is started, reloaded, or
|
|
||||||
restarted.
|
|
||||||
|
|
||||||
Necessitates an OpenSSL version that supports ECH, and HAProxy must be
|
|
||||||
compiled with USE_ECH=1.
|
|
||||||
This command is only supported on a CLI connection running in experimental
|
|
||||||
mode (see "experimental-mode on").
|
|
||||||
|
|
||||||
See also "ech" in the Section 5.1 of the configuration manual.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
$ echo "experimental-mode on; show ssl ech" | socat /tmp/haproxy.sock -
|
|
||||||
***
|
|
||||||
frontend: frontend1
|
|
||||||
|
|
||||||
bind: frontend1/@haproxy.conf:19
|
|
||||||
|
|
||||||
ECH entry: 0 public_name: example.com age: 557 (has private key)
|
|
||||||
[fe0d,94,example.com,[0020,0001,0001],c39285b774bf61c071864181c5292a012b30adaf767e39369a566af05573ef2b,00,00]
|
|
||||||
|
|
||||||
ECH entry: 1 public_name: example.com age: 557 (has private key)
|
|
||||||
[fe0d,ee,example.com,[0020,0001,0001],6572191131b5cabba819f8cacf2d2e06fa0b87b30d9b793644daba7b8866d511,00,00]
|
|
||||||
|
|
||||||
bind: frontend1/@haproxy.conf:20
|
|
||||||
|
|
||||||
ECH entry: 0 public_name: example.com age: 557 (has private key)
|
|
||||||
[fe0d,94,example.com,[0020,0001,0001],c39285b774bf61c071864181c5292a012b30adaf767e39369a566af05573ef2b,00,00]
|
|
||||||
|
|
||||||
ECH entry: 1 public_name: example.com age: 557 (has private key)
|
|
||||||
[fe0d,ee,example.com,[0020,0001,0001],6572191131b5cabba819f8cacf2d2e06fa0b87b30d9b793644daba7b8866d511,00,00]
|
|
||||||
|
|
||||||
$ echo "experimental-mode on; show ssl ech frontend1/@haproxy.conf:19" | socat /tmp/haproxy.sock -
|
|
||||||
***
|
|
||||||
ECH for frontend1/@haproxy.conf:19
|
|
||||||
ECH entry: 0 public_name: example.com age: 786 (has private key)
|
|
||||||
[fe0d,94,example.com,[0020,0001,0001],c39285b774bf61c071864181c5292a012b30adaf767e39369a566af05573ef2b,00,00]
|
|
||||||
|
|
||||||
ECH entry: 1 public_name: example.com age: 786 (has private key)
|
|
||||||
[fe0d,ee,example.com,[0020,0001,0001],6572191131b5cabba819f8cacf2d2e06fa0b87b30d9b793644daba7b8866d511,00,00]
|
|
||||||
|
|
||||||
show ssl jwt
|
|
||||||
Display the list of certificates that can be used for JWT validation.
|
|
||||||
See also "add ssl jwt" and "del ssl jwt" commands.
|
|
||||||
See "jwt" certificate option for more information.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
echo "show ssl jwt" | socat /tmp/sock1 -
|
|
||||||
#filename
|
|
||||||
jwt.pem
|
|
||||||
|
|
||||||
show ssl ocsp-response [[text|base64] <id|path>]
|
show ssl ocsp-response [[text|base64] <id|path>]
|
||||||
Display the IDs of the OCSP tree entries corresponding to all the OCSP
|
Display the IDs of the OCSP tree entries corresponding to all the OCSP
|
||||||
responses used in HAProxy, as well as the corresponding frontend
|
responses used in HAProxy, as well as the corresponding frontend
|
||||||
|
|
@ -4333,10 +4097,6 @@ shutdown sessions server <backend>/<server>
|
||||||
maintenance mode, for instance. Such terminated streams are reported with a
|
maintenance mode, for instance. Such terminated streams are reported with a
|
||||||
'K' flag in the logs.
|
'K' flag in the logs.
|
||||||
|
|
||||||
Backend connections are left in idle state, unless the server is already in
|
|
||||||
maintenance mode, in which case they will be immediately scheduled for
|
|
||||||
deletion.
|
|
||||||
|
|
||||||
trace
|
trace
|
||||||
The "trace" command alone lists the trace sources, their current status, and
|
The "trace" command alone lists the trace sources, their current status, and
|
||||||
their brief descriptions. It is only meant as a menu to enter next levels,
|
their brief descriptions. It is only meant as a menu to enter next levels,
|
||||||
|
|
@ -4550,21 +4310,13 @@ wait { -h | <delay> } [<condition> [<args>...]]
|
||||||
specified condition to be satisfied, to unrecoverably fail, or to remain
|
specified condition to be satisfied, to unrecoverably fail, or to remain
|
||||||
unsatisfied for the whole <delay> duration. The supported conditions are:
|
unsatisfied for the whole <delay> duration. The supported conditions are:
|
||||||
|
|
||||||
- be-removable <proxy> : this will wait for the specified proxy backend to be
|
|
||||||
removable by the "del backend" command. Some conditions will never be
|
|
||||||
accepted (e.g. backend not yet unpublished or with servers in it) and will
|
|
||||||
cause the report of a specific error message indicating what condition is
|
|
||||||
not met. If everything is OK before the delay, a success is returned and
|
|
||||||
the operation is terminated.
|
|
||||||
|
|
||||||
- srv-removable <proxy>/<server> : this will wait for the specified server to
|
- srv-removable <proxy>/<server> : this will wait for the specified server to
|
||||||
be removable by the "del server" command, i.e. be in maintenance and no
|
be removable, i.e. be in maintenance and no longer have any connection on
|
||||||
longer have any connection on it (neither active or idle). Some conditions
|
it. Some conditions will never be accepted (e.g. not in maintenance) and
|
||||||
will never be accepted (e.g. not in maintenance) and will cause the report
|
will cause the report of a specific error message indicating what condition
|
||||||
of a specific error message indicating what condition is not met. The
|
is not met. The server might even have been removed in parallel and no
|
||||||
server might even have been removed in parallel and no longer exist. If
|
longer exist. If everything is OK before the delay, a success is returned
|
||||||
everything is OK before the delay, a success is returned and the operation
|
and the operation is terminated.
|
||||||
is terminated.
|
|
||||||
|
|
||||||
The default unit for the delay is milliseconds, though other units are
|
The default unit for the delay is milliseconds, though other units are
|
||||||
accepted if suffixed with the usual timer units (us, ms, s, m, h, d). When
|
accepted if suffixed with the usual timer units (us, ms, s, m, h, d). When
|
||||||
|
|
@ -4615,11 +4367,6 @@ Example:
|
||||||
case the full command ends at the end of line or semi-colon like any regular
|
case the full command ends at the end of line or semi-colon like any regular
|
||||||
command.
|
command.
|
||||||
|
|
||||||
Bugs: the sockpair@ protocol used to implement communication between the
|
|
||||||
master and the worker is known to not be reliable on macOS because of an
|
|
||||||
issue in the macOS sendmsg(2) implementation. A command might end up without
|
|
||||||
response because of that.
|
|
||||||
|
|
||||||
Examples:
|
Examples:
|
||||||
|
|
||||||
$ socat /var/run/haproxy-master.sock readline
|
$ socat /var/run/haproxy-master.sock readline
|
||||||
|
|
@ -4686,11 +4433,6 @@ Example:
|
||||||
command). In this case, the prompt mode of the master socket (interactive,
|
command). In this case, the prompt mode of the master socket (interactive,
|
||||||
prompt, timed) is propagated into the worker process.
|
prompt, timed) is propagated into the worker process.
|
||||||
|
|
||||||
Bugs: the sockpair@ protocol used to implement communication between the
|
|
||||||
master and the worker is known to not be reliable on macOS because of an
|
|
||||||
issue in the macOS sendmsg(2) implementation. A command might end up without
|
|
||||||
response because of that.
|
|
||||||
|
|
||||||
Examples:
|
Examples:
|
||||||
# gracefully close connections and delete a server once idle (wait max 10s)
|
# gracefully close connections and delete a server once idle (wait max 10s)
|
||||||
$ socat -t 11 /var/run/haproxy-master.sock - <<< \
|
$ socat -t 11 /var/run/haproxy-master.sock - <<< \
|
||||||
|
|
|
||||||
|
|
@ -28,9 +28,7 @@ Revision history
|
||||||
string encoding. With contributions from Andriy Palamarchuk
|
string encoding. With contributions from Andriy Palamarchuk
|
||||||
(Amazon.com).
|
(Amazon.com).
|
||||||
2020/03/05 - added the unique ID TLV type (Tim Düsterhus)
|
2020/03/05 - added the unique ID TLV type (Tim Düsterhus)
|
||||||
2025/09/09 - added SSL-related TLVs for key exchange group and signature
|
|
||||||
scheme (Steven Collison)
|
|
||||||
2026/01/15 - added SSL client certificate TLV (Simon Ser)
|
|
||||||
|
|
||||||
1. Background
|
1. Background
|
||||||
|
|
||||||
|
|
@ -548,9 +546,6 @@ The following types have already been registered for the <type> field :
|
||||||
#define PP2_SUBTYPE_SSL_CIPHER 0x23
|
#define PP2_SUBTYPE_SSL_CIPHER 0x23
|
||||||
#define PP2_SUBTYPE_SSL_SIG_ALG 0x24
|
#define PP2_SUBTYPE_SSL_SIG_ALG 0x24
|
||||||
#define PP2_SUBTYPE_SSL_KEY_ALG 0x25
|
#define PP2_SUBTYPE_SSL_KEY_ALG 0x25
|
||||||
#define PP2_SUBTYPE_SSL_GROUP 0x26
|
|
||||||
#define PP2_SUBTYPE_SSL_SIG_SCHEME 0x27
|
|
||||||
#define PP2_SUBTYPE_SSL_CLIENT_CERT 0x28
|
|
||||||
#define PP2_TYPE_NETNS 0x30
|
#define PP2_TYPE_NETNS 0x30
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -627,10 +622,7 @@ For the type PP2_TYPE_SSL, the value is itself a defined like this :
|
||||||
uint8_t client;
|
uint8_t client;
|
||||||
uint32_t verify;
|
uint32_t verify;
|
||||||
struct pp2_tlv sub_tlv[0];
|
struct pp2_tlv sub_tlv[0];
|
||||||
} __attribute__((packed));
|
};
|
||||||
|
|
||||||
Note the "packed" attribute which indicates that each field starts immediately
|
|
||||||
after the previous one (i.e. without type-specific alignment nor padding).
|
|
||||||
|
|
||||||
The <verify> field will be zero if the client presented a certificate
|
The <verify> field will be zero if the client presented a certificate
|
||||||
and it was successfully verified, and non-zero otherwise.
|
and it was successfully verified, and non-zero otherwise.
|
||||||
|
|
@ -662,25 +654,13 @@ of the used cipher, for example "ECDHE-RSA-AES128-GCM-SHA256".
|
||||||
The second level TLV PP2_SUBTYPE_SSL_SIG_ALG provides the US-ASCII string name
|
The second level TLV PP2_SUBTYPE_SSL_SIG_ALG provides the US-ASCII string name
|
||||||
of the algorithm used to sign the certificate presented by the frontend when
|
of the algorithm used to sign the certificate presented by the frontend when
|
||||||
the incoming connection was made over an SSL/TLS transport layer, for example
|
the incoming connection was made over an SSL/TLS transport layer, for example
|
||||||
"RSA-SHA256".
|
"SHA256".
|
||||||
|
|
||||||
The second level TLV PP2_SUBTYPE_SSL_KEY_ALG provides the US-ASCII string name
|
The second level TLV PP2_SUBTYPE_SSL_KEY_ALG provides the US-ASCII string name
|
||||||
of the algorithm used to generate the key of the certificate presented by the
|
of the algorithm used to generate the key of the certificate presented by the
|
||||||
frontend when the incoming connection was made over an SSL/TLS transport layer,
|
frontend when the incoming connection was made over an SSL/TLS transport layer,
|
||||||
for example "RSA2048".
|
for example "RSA2048".
|
||||||
|
|
||||||
The second level TLV PP2_SUBTYPE_SSL_GROUP provides the US-ASCII string name of
|
|
||||||
the key exchange algorithm used for the frontend TLS connection, for example
|
|
||||||
"secp256r1".
|
|
||||||
|
|
||||||
The second level TLV PP2_SUBTYPE_SSL_SIG_SCHEME provides the US-ASCII string
|
|
||||||
name of the algorithm the frontend used to sign the ServerKeyExchange or
|
|
||||||
CertificateVerify message, for example "rsa_pss_rsae_sha256".
|
|
||||||
|
|
||||||
The optional second level TLV PP2_SUBTYPE_SSL_CLIENT_CERT provides the raw
|
|
||||||
X.509 client certificate encoded in ASN.1 DER. The frontend may choose to omit
|
|
||||||
this TLV depending on configuration.
|
|
||||||
|
|
||||||
In all cases, the string representation (in UTF8) of the Common Name field
|
In all cases, the string representation (in UTF8) of the Common Name field
|
||||||
(OID: 2.5.4.3) of the client certificate's Distinguished Name, is appended
|
(OID: 2.5.4.3) of the client certificate's Distinguished Name, is appended
|
||||||
using the TLV format and the type PP2_SUBTYPE_SSL_CN. E.g. "example.com".
|
using the TLV format and the type PP2_SUBTYPE_SSL_CN. E.g. "example.com".
|
||||||
|
|
|
||||||
|
|
@ -24,7 +24,7 @@ vtest installation
|
||||||
------------------------
|
------------------------
|
||||||
|
|
||||||
To use vtest you will have to download and compile the recent vtest
|
To use vtest you will have to download and compile the recent vtest
|
||||||
sources found at https://github.com/vtest/VTest2.
|
sources found at https://github.com/vtest/VTest.
|
||||||
|
|
||||||
To compile vtest:
|
To compile vtest:
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -12,7 +12,6 @@ struct acme_cfg {
|
||||||
char *filename; /* config filename */
|
char *filename; /* config filename */
|
||||||
int linenum; /* config linenum */
|
int linenum; /* config linenum */
|
||||||
char *name; /* section name */
|
char *name; /* section name */
|
||||||
int reuse_key; /* do we need to renew the private key */
|
|
||||||
char *directory; /* directory URL */
|
char *directory; /* directory URL */
|
||||||
char *map; /* storage for tokens + thumbprint */
|
char *map; /* storage for tokens + thumbprint */
|
||||||
struct {
|
struct {
|
||||||
|
|
@ -28,8 +27,6 @@ struct acme_cfg {
|
||||||
int curves; /* NID of curves */
|
int curves; /* NID of curves */
|
||||||
} key;
|
} key;
|
||||||
char *challenge; /* HTTP-01, DNS-01, etc */
|
char *challenge; /* HTTP-01, DNS-01, etc */
|
||||||
char *vars; /* variables put in the dpapi sink */
|
|
||||||
char *provider; /* DNS provider put in the dpapi sink */
|
|
||||||
struct acme_cfg *next;
|
struct acme_cfg *next;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
@ -85,8 +82,7 @@ struct acme_ctx {
|
||||||
struct ist finalize;
|
struct ist finalize;
|
||||||
struct ist certificate;
|
struct ist certificate;
|
||||||
struct task *task;
|
struct task *task;
|
||||||
struct ebmb_node node;
|
struct mt_list el;
|
||||||
char name[VAR_ARRAY];
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#define ACME_EV_SCHED (1ULL << 0) /* scheduling wakeup */
|
#define ACME_EV_SCHED (1ULL << 0) /* scheduling wakeup */
|
||||||
|
|
|
||||||
|
|
@ -4,9 +4,6 @@
|
||||||
|
|
||||||
#include <haproxy/ssl_ckch-t.h>
|
#include <haproxy/ssl_ckch-t.h>
|
||||||
|
|
||||||
int ckch_conf_acme_init(void *value, char *buf, struct ckch_store *s, int cli, const char *filename, int linenum, char **err);
|
int ckch_conf_acme_init(void *value, char *buf, struct ckch_data *d, int cli, const char *filename, int linenum, char **err);
|
||||||
EVP_PKEY *acme_gen_tmp_pkey();
|
|
||||||
X509 *acme_gen_tmp_x509();
|
|
||||||
|
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
||||||
|
|
@ -102,10 +102,7 @@ enum act_name {
|
||||||
|
|
||||||
/* Timeout name valid for a set-timeout rule */
|
/* Timeout name valid for a set-timeout rule */
|
||||||
enum act_timeout_name {
|
enum act_timeout_name {
|
||||||
ACT_TIMEOUT_CONNECT,
|
|
||||||
ACT_TIMEOUT_SERVER,
|
ACT_TIMEOUT_SERVER,
|
||||||
ACT_TIMEOUT_QUEUE,
|
|
||||||
ACT_TIMEOUT_TARPIT,
|
|
||||||
ACT_TIMEOUT_TUNNEL,
|
ACT_TIMEOUT_TUNNEL,
|
||||||
ACT_TIMEOUT_CLIENT,
|
ACT_TIMEOUT_CLIENT,
|
||||||
};
|
};
|
||||||
|
|
|
||||||
|
|
@ -33,8 +33,6 @@
|
||||||
#define HA_PROF_TASKS_MASK 0x00000003 /* per-task CPU profiling mask */
|
#define HA_PROF_TASKS_MASK 0x00000003 /* per-task CPU profiling mask */
|
||||||
|
|
||||||
#define HA_PROF_MEMORY 0x00000004 /* memory profiling */
|
#define HA_PROF_MEMORY 0x00000004 /* memory profiling */
|
||||||
#define HA_PROF_TASKS_MEM 0x00000008 /* per-task CPU profiling with memory */
|
|
||||||
#define HA_PROF_TASKS_LOCK 0x00000010 /* per-task CPU profiling with locks */
|
|
||||||
|
|
||||||
|
|
||||||
#ifdef USE_MEMORY_PROFILING
|
#ifdef USE_MEMORY_PROFILING
|
||||||
|
|
@ -78,12 +76,12 @@ struct memprof_stats {
|
||||||
const void *caller;
|
const void *caller;
|
||||||
enum memprof_method method;
|
enum memprof_method method;
|
||||||
/* 4-7 bytes hole here */
|
/* 4-7 bytes hole here */
|
||||||
unsigned long long locked_calls;
|
|
||||||
unsigned long long alloc_calls;
|
unsigned long long alloc_calls;
|
||||||
unsigned long long free_calls;
|
unsigned long long free_calls;
|
||||||
unsigned long long alloc_tot;
|
unsigned long long alloc_tot;
|
||||||
unsigned long long free_tot;
|
unsigned long long free_tot;
|
||||||
void *info; // for pools, ptr to the pool
|
void *info; // for pools, ptr to the pool
|
||||||
|
void *pad; // pad to 64
|
||||||
};
|
};
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
|
@ -127,8 +125,8 @@ struct activity {
|
||||||
unsigned int ctr2; // general purposee debug counter
|
unsigned int ctr2; // general purposee debug counter
|
||||||
#endif
|
#endif
|
||||||
char __pad[0]; // unused except to check remaining room
|
char __pad[0]; // unused except to check remaining room
|
||||||
char __end[0] THREAD_ALIGNED();
|
char __end[0] __attribute__((aligned(64))); // align size to 64.
|
||||||
} THREAD_ALIGNED();
|
};
|
||||||
|
|
||||||
/* 256 entries for callers * callees should be highly sufficient (~45 seen usually) */
|
/* 256 entries for callers * callees should be highly sufficient (~45 seen usually) */
|
||||||
#define SCHED_ACT_HASH_BITS 8
|
#define SCHED_ACT_HASH_BITS 8
|
||||||
|
|
@ -145,10 +143,7 @@ struct sched_activity {
|
||||||
uint64_t calls;
|
uint64_t calls;
|
||||||
uint64_t cpu_time;
|
uint64_t cpu_time;
|
||||||
uint64_t lat_time;
|
uint64_t lat_time;
|
||||||
uint64_t lkw_time; /* lock waiting time */
|
};
|
||||||
uint64_t lkd_time; /* locked time */
|
|
||||||
uint64_t mem_time; /* memory ops wait time */
|
|
||||||
} THREAD_ALIGNED();
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_ACTIVITY_T_H */
|
#endif /* _HAPROXY_ACTIVITY_T_H */
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -47,7 +47,7 @@
|
||||||
#define APPCTX_FL_ERROR 0x00000080
|
#define APPCTX_FL_ERROR 0x00000080
|
||||||
#define APPCTX_FL_SHUTDOWN 0x00000100 /* applet was shut down (->release() called if any). No more data exchange with SCs */
|
#define APPCTX_FL_SHUTDOWN 0x00000100 /* applet was shut down (->release() called if any). No more data exchange with SCs */
|
||||||
#define APPCTX_FL_WANT_DIE 0x00000200 /* applet was running and requested to die */
|
#define APPCTX_FL_WANT_DIE 0x00000200 /* applet was running and requested to die */
|
||||||
/* unused: 0x00000400 */
|
#define APPCTX_FL_INOUT_BUFS 0x00000400 /* applet uses its own buffers */
|
||||||
#define APPCTX_FL_FASTFWD 0x00000800 /* zero-copy forwarding is in-use, don't fill the outbuf */
|
#define APPCTX_FL_FASTFWD 0x00000800 /* zero-copy forwarding is in-use, don't fill the outbuf */
|
||||||
#define APPCTX_FL_IN_MAYALLOC 0x00001000 /* applet may try again to allocate its inbuf */
|
#define APPCTX_FL_IN_MAYALLOC 0x00001000 /* applet may try again to allocate its inbuf */
|
||||||
#define APPCTX_FL_OUT_MAYALLOC 0x00002000 /* applet may try again to allocate its outbuf */
|
#define APPCTX_FL_OUT_MAYALLOC 0x00002000 /* applet may try again to allocate its outbuf */
|
||||||
|
|
@ -73,8 +73,8 @@ static forceinline char *appctx_show_flags(char *buf, size_t len, const char *de
|
||||||
_(APPCTX_FL_OUTBLK_ALLOC, _(APPCTX_FL_OUTBLK_FULL,
|
_(APPCTX_FL_OUTBLK_ALLOC, _(APPCTX_FL_OUTBLK_FULL,
|
||||||
_(APPCTX_FL_EOI, _(APPCTX_FL_EOS,
|
_(APPCTX_FL_EOI, _(APPCTX_FL_EOS,
|
||||||
_(APPCTX_FL_ERR_PENDING, _(APPCTX_FL_ERROR,
|
_(APPCTX_FL_ERR_PENDING, _(APPCTX_FL_ERROR,
|
||||||
_(APPCTX_FL_SHUTDOWN, _(APPCTX_FL_WANT_DIE,
|
_(APPCTX_FL_SHUTDOWN, _(APPCTX_FL_WANT_DIE, _(APPCTX_FL_INOUT_BUFS,
|
||||||
_(APPCTX_FL_FASTFWD, _(APPCTX_FL_IN_MAYALLOC, _(APPCTX_FL_OUT_MAYALLOC)))))))))))));
|
_(APPCTX_FL_FASTFWD, _(APPCTX_FL_IN_MAYALLOC, _(APPCTX_FL_OUT_MAYALLOC))))))))))))));
|
||||||
/* epilogue */
|
/* epilogue */
|
||||||
_(~0U);
|
_(~0U);
|
||||||
return buf;
|
return buf;
|
||||||
|
|
@ -83,7 +83,6 @@ static forceinline char *appctx_show_flags(char *buf, size_t len, const char *de
|
||||||
|
|
||||||
#define APPLET_FL_NEW_API 0x00000001 /* Set if the applet is based on the new API (using applet's buffers) */
|
#define APPLET_FL_NEW_API 0x00000001 /* Set if the applet is based on the new API (using applet's buffers) */
|
||||||
#define APPLET_FL_WARNED 0x00000002 /* Set when warning was already emitted about a legacy applet */
|
#define APPLET_FL_WARNED 0x00000002 /* Set when warning was already emitted about a legacy applet */
|
||||||
#define APPLET_FL_HTX 0x00000004 /* Set if the applet is using HTX buffers */
|
|
||||||
|
|
||||||
/* Applet descriptor */
|
/* Applet descriptor */
|
||||||
struct applet {
|
struct applet {
|
||||||
|
|
|
||||||
|
|
@ -62,12 +62,6 @@ ssize_t applet_append_line(void *ctx, struct ist v1, struct ist v2, size_t ofs,
|
||||||
static forceinline void applet_fl_set(struct appctx *appctx, uint on);
|
static forceinline void applet_fl_set(struct appctx *appctx, uint on);
|
||||||
static forceinline void applet_fl_clr(struct appctx *appctx, uint off);
|
static forceinline void applet_fl_clr(struct appctx *appctx, uint off);
|
||||||
|
|
||||||
|
|
||||||
static forceinline uint appctx_app_test(const struct appctx *appctx, uint test)
|
|
||||||
{
|
|
||||||
return (appctx->applet->flags & test);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline struct appctx *appctx_new_here(struct applet *applet, struct sedesc *sedesc)
|
static inline struct appctx *appctx_new_here(struct applet *applet, struct sedesc *sedesc)
|
||||||
{
|
{
|
||||||
return appctx_new_on(applet, sedesc, tid);
|
return appctx_new_on(applet, sedesc, tid);
|
||||||
|
|
@ -294,7 +288,7 @@ static inline void applet_expect_data(struct appctx *appctx)
|
||||||
*/
|
*/
|
||||||
static inline struct buffer *applet_get_inbuf(struct appctx *appctx)
|
static inline struct buffer *applet_get_inbuf(struct appctx *appctx)
|
||||||
{
|
{
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||||
if (applet_fl_test(appctx, APPCTX_FL_INBLK_ALLOC) || !appctx_get_buf(appctx, &appctx->inbuf))
|
if (applet_fl_test(appctx, APPCTX_FL_INBLK_ALLOC) || !appctx_get_buf(appctx, &appctx->inbuf))
|
||||||
return NULL;
|
return NULL;
|
||||||
return &appctx->inbuf;
|
return &appctx->inbuf;
|
||||||
|
|
@ -309,7 +303,7 @@ static inline struct buffer *applet_get_inbuf(struct appctx *appctx)
|
||||||
*/
|
*/
|
||||||
static inline struct buffer *applet_get_outbuf(struct appctx *appctx)
|
static inline struct buffer *applet_get_outbuf(struct appctx *appctx)
|
||||||
{
|
{
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||||
if (applet_fl_test(appctx, APPCTX_FL_OUTBLK_ALLOC|APPCTX_FL_OUTBLK_FULL) ||
|
if (applet_fl_test(appctx, APPCTX_FL_OUTBLK_ALLOC|APPCTX_FL_OUTBLK_FULL) ||
|
||||||
!appctx_get_buf(appctx, &appctx->outbuf))
|
!appctx_get_buf(appctx, &appctx->outbuf))
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
@ -319,46 +313,22 @@ static inline struct buffer *applet_get_outbuf(struct appctx *appctx)
|
||||||
return sc_ib(appctx_sc(appctx));
|
return sc_ib(appctx_sc(appctx));
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Returns the amount of HTX data in the input buffer (see applet_get_inbuf) */
|
|
||||||
static inline size_t applet_htx_input_data(const struct appctx *appctx)
|
|
||||||
{
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
|
|
||||||
return htx_used_space(htxbuf(&appctx->inbuf));
|
|
||||||
else
|
|
||||||
return co_data(sc_oc(appctx_sc(appctx)));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Returns the amount of data in the input buffer (see applet_get_inbuf) */
|
/* Returns the amount of data in the input buffer (see applet_get_inbuf) */
|
||||||
static inline size_t applet_input_data(const struct appctx *appctx)
|
static inline size_t applet_input_data(const struct appctx *appctx)
|
||||||
{
|
{
|
||||||
if (appctx_app_test(appctx, APPLET_FL_HTX))
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
|
||||||
return applet_htx_input_data(appctx);
|
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
|
|
||||||
return b_data(&appctx->inbuf);
|
return b_data(&appctx->inbuf);
|
||||||
else
|
else
|
||||||
return co_data(sc_oc(appctx_sc(appctx)));
|
return co_data(sc_oc(appctx_sc(appctx)));
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Returns the amount of HTX data in the output buffer (see applet_get_outbuf) */
|
/* Returns the amount of HTX data in the input buffer (see applet_get_inbuf) */
|
||||||
static inline size_t applet_htx_output_data(const struct appctx *appctx)
|
static inline size_t applet_htx_input_data(const struct appctx *appctx)
|
||||||
{
|
{
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
|
||||||
return htx_used_space(htxbuf(&appctx->outbuf));
|
return htx_used_space(htxbuf(&appctx->inbuf));
|
||||||
else
|
else
|
||||||
return ci_data(sc_ic(appctx_sc(appctx)));
|
return co_data(sc_oc(appctx_sc(appctx)));
|
||||||
}
|
|
||||||
|
|
||||||
/* Returns the amount of data in the output buffer (see applet_get_outbuf) */
|
|
||||||
static inline size_t applet_output_data(const struct appctx *appctx)
|
|
||||||
{
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_HTX))
|
|
||||||
return applet_htx_output_data(appctx);
|
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
|
|
||||||
return b_data(&appctx->outbuf);
|
|
||||||
else
|
|
||||||
return ci_data(sc_ic(appctx_sc(appctx)));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Skips <len> bytes from the input buffer (see applet_get_inbuf).
|
/* Skips <len> bytes from the input buffer (see applet_get_inbuf).
|
||||||
|
|
@ -366,13 +336,11 @@ static inline size_t applet_output_data(const struct appctx *appctx)
|
||||||
* This is useful when data have been read directly from the buffer. It is
|
* This is useful when data have been read directly from the buffer. It is
|
||||||
* illegal to call this function with <len> causing a wrapping at the end of the
|
* illegal to call this function with <len> causing a wrapping at the end of the
|
||||||
* buffer. It's the caller's responsibility to ensure that <len> is never larger
|
* buffer. It's the caller's responsibility to ensure that <len> is never larger
|
||||||
* than available output data.
|
* than available ouput data.
|
||||||
*
|
|
||||||
* This function is not HTX aware.
|
|
||||||
*/
|
*/
|
||||||
static inline void applet_skip_input(struct appctx *appctx, size_t len)
|
static inline void applet_skip_input(struct appctx *appctx, size_t len)
|
||||||
{
|
{
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||||
b_del(&appctx->inbuf, len);
|
b_del(&appctx->inbuf, len);
|
||||||
applet_fl_clr(appctx, APPCTX_FL_INBLK_FULL);
|
applet_fl_clr(appctx, APPCTX_FL_INBLK_FULL);
|
||||||
}
|
}
|
||||||
|
|
@ -384,7 +352,7 @@ static inline void applet_skip_input(struct appctx *appctx, size_t len)
|
||||||
*/
|
*/
|
||||||
static inline void applet_reset_input(struct appctx *appctx)
|
static inline void applet_reset_input(struct appctx *appctx)
|
||||||
{
|
{
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||||
b_reset(&appctx->inbuf);
|
b_reset(&appctx->inbuf);
|
||||||
applet_fl_clr(appctx, APPCTX_FL_INBLK_FULL);
|
applet_fl_clr(appctx, APPCTX_FL_INBLK_FULL);
|
||||||
}
|
}
|
||||||
|
|
@ -392,25 +360,22 @@ static inline void applet_reset_input(struct appctx *appctx)
|
||||||
co_skip(sc_oc(appctx_sc(appctx)), co_data(sc_oc(appctx_sc(appctx))));
|
co_skip(sc_oc(appctx_sc(appctx)), co_data(sc_oc(appctx_sc(appctx))));
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Returns the amount of space available at the HTX output buffer (see applet_get_outbuf).
|
/* Returns the amout of space available at the output buffer (see applet_get_outbuf).
|
||||||
*/
|
*/
|
||||||
static inline size_t applet_htx_output_room(const struct appctx *appctx)
|
static inline size_t applet_output_room(const struct appctx *appctx)
|
||||||
{
|
{
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
|
||||||
return htx_free_data_space(htxbuf(&appctx->outbuf));
|
return b_room(&appctx->outbuf);
|
||||||
else
|
else
|
||||||
return channel_recv_max(sc_ic(appctx_sc(appctx)));
|
return channel_recv_max(sc_ic(appctx_sc(appctx)));
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Returns the amount of space available at the output buffer (see applet_get_outbuf).
|
/* Returns the amout of space available at the HTX output buffer (see applet_get_outbuf).
|
||||||
*/
|
*/
|
||||||
static inline size_t applet_output_room(const struct appctx *appctx)
|
static inline size_t applet_htx_output_room(const struct appctx *appctx)
|
||||||
{
|
{
|
||||||
if (appctx_app_test(appctx, APPLET_FL_HTX))
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
|
||||||
return applet_htx_output_room(appctx);
|
return htx_free_data_space(htxbuf(&appctx->outbuf));
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
|
|
||||||
return b_room(&appctx->outbuf);
|
|
||||||
else
|
else
|
||||||
return channel_recv_max(sc_ic(appctx_sc(appctx)));
|
return channel_recv_max(sc_ic(appctx_sc(appctx)));
|
||||||
}
|
}
|
||||||
|
|
@ -425,7 +390,7 @@ static inline size_t applet_output_room(const struct appctx *appctx)
|
||||||
*/
|
*/
|
||||||
static inline void applet_need_room(struct appctx *appctx, size_t room_needed)
|
static inline void applet_need_room(struct appctx *appctx, size_t room_needed)
|
||||||
{
|
{
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
|
||||||
applet_have_more_data(appctx);
|
applet_have_more_data(appctx);
|
||||||
else
|
else
|
||||||
sc_need_room(appctx_sc(appctx), room_needed);
|
sc_need_room(appctx_sc(appctx), room_needed);
|
||||||
|
|
@ -437,7 +402,7 @@ static inline int _applet_putchk(struct appctx *appctx, struct buffer *chunk,
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||||
if (unlikely(stress) ?
|
if (unlikely(stress) ?
|
||||||
b_data(&appctx->outbuf) :
|
b_data(&appctx->outbuf) :
|
||||||
b_data(chunk) > b_room(&appctx->outbuf)) {
|
b_data(chunk) > b_room(&appctx->outbuf)) {
|
||||||
|
|
@ -492,7 +457,7 @@ static inline int applet_putblk(struct appctx *appctx, const char *blk, int len)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||||
if (len > b_room(&appctx->outbuf)) {
|
if (len > b_room(&appctx->outbuf)) {
|
||||||
applet_fl_set(appctx, APPCTX_FL_OUTBLK_FULL);
|
applet_fl_set(appctx, APPCTX_FL_OUTBLK_FULL);
|
||||||
ret = -1;
|
ret = -1;
|
||||||
|
|
@ -528,7 +493,7 @@ static inline int applet_putstr(struct appctx *appctx, const char *str)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||||
int len = strlen(str);
|
int len = strlen(str);
|
||||||
|
|
||||||
if (len > b_room(&appctx->outbuf)) {
|
if (len > b_room(&appctx->outbuf)) {
|
||||||
|
|
@ -564,7 +529,7 @@ static inline int applet_putchr(struct appctx *appctx, char chr)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||||
if (b_full(&appctx->outbuf)) {
|
if (b_full(&appctx->outbuf)) {
|
||||||
applet_fl_set(appctx, APPCTX_FL_OUTBLK_FULL);
|
applet_fl_set(appctx, APPCTX_FL_OUTBLK_FULL);
|
||||||
ret = -1;
|
ret = -1;
|
||||||
|
|
@ -593,7 +558,7 @@ static inline int applet_putchr(struct appctx *appctx, char chr)
|
||||||
|
|
||||||
static inline int applet_may_get(const struct appctx *appctx, size_t len)
|
static inline int applet_may_get(const struct appctx *appctx, size_t len)
|
||||||
{
|
{
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||||
if (len > b_data(&appctx->inbuf)) {
|
if (len > b_data(&appctx->inbuf)) {
|
||||||
if (se_fl_test(appctx->sedesc, SE_FL_SHW))
|
if (se_fl_test(appctx->sedesc, SE_FL_SHW))
|
||||||
return -1;
|
return -1;
|
||||||
|
|
@ -628,7 +593,7 @@ static inline int applet_getchar(const struct appctx *appctx, char *c)
|
||||||
ret = applet_may_get(appctx, 1);
|
ret = applet_may_get(appctx, 1);
|
||||||
if (ret <= 0)
|
if (ret <= 0)
|
||||||
return ret;
|
return ret;
|
||||||
*c = ((appctx_app_test(appctx, APPLET_FL_NEW_API))
|
*c = ((appctx->flags & APPCTX_FL_INOUT_BUFS)
|
||||||
? *(b_head(&appctx->inbuf))
|
? *(b_head(&appctx->inbuf))
|
||||||
: *(co_head(sc_oc(appctx_sc(appctx)))));
|
: *(co_head(sc_oc(appctx_sc(appctx)))));
|
||||||
|
|
||||||
|
|
@ -657,7 +622,7 @@ static inline int applet_getblk(const struct appctx *appctx, char *blk, int len,
|
||||||
if (ret <= 0)
|
if (ret <= 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
buf = ((appctx_app_test(appctx, APPLET_FL_NEW_API))
|
buf = ((appctx->flags & APPCTX_FL_INOUT_BUFS)
|
||||||
? &appctx->inbuf
|
? &appctx->inbuf
|
||||||
: sc_ob(appctx_sc(appctx)));
|
: sc_ob(appctx_sc(appctx)));
|
||||||
return b_getblk(buf, blk, len, offset);
|
return b_getblk(buf, blk, len, offset);
|
||||||
|
|
@ -689,7 +654,7 @@ static inline int applet_getword(const struct appctx *appctx, char *str, int len
|
||||||
if (ret <= 0)
|
if (ret <= 0)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||||
buf = &appctx->inbuf;
|
buf = &appctx->inbuf;
|
||||||
input = b_data(buf);
|
input = b_data(buf);
|
||||||
}
|
}
|
||||||
|
|
@ -716,7 +681,7 @@ static inline int applet_getword(const struct appctx *appctx, char *str, int len
|
||||||
p = b_next(buf, p);
|
p = b_next(buf, p);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||||
if (ret < len && (ret < input || b_room(buf)) &&
|
if (ret < len && (ret < input || b_room(buf)) &&
|
||||||
!se_fl_test(appctx->sedesc, SE_FL_SHW))
|
!se_fl_test(appctx->sedesc, SE_FL_SHW))
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
|
@ -776,7 +741,7 @@ static inline int applet_getblk_nc(const struct appctx *appctx, const char **blk
|
||||||
if (ret <= 0)
|
if (ret <= 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||||
buf = &appctx->inbuf;
|
buf = &appctx->inbuf;
|
||||||
max = b_data(buf);
|
max = b_data(buf);
|
||||||
}
|
}
|
||||||
|
|
@ -832,7 +797,7 @@ static inline int applet_getword_nc(const struct appctx *appctx, const char **bl
|
||||||
* the resulting string is made of the concatenation of the pending
|
* the resulting string is made of the concatenation of the pending
|
||||||
* blocks (1 or 2).
|
* blocks (1 or 2).
|
||||||
*/
|
*/
|
||||||
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
|
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||||
if (b_full(&appctx->inbuf) || se_fl_test(appctx->sedesc, SE_FL_SHW))
|
if (b_full(&appctx->inbuf) || se_fl_test(appctx->sedesc, SE_FL_SHW))
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -192,7 +192,6 @@ struct lbprm {
|
||||||
void (*server_requeue)(struct server *); /* function used to place the server where it must be */
|
void (*server_requeue)(struct server *); /* function used to place the server where it must be */
|
||||||
void (*proxy_deinit)(struct proxy *); /* to be called when we're destroying the proxy */
|
void (*proxy_deinit)(struct proxy *); /* to be called when we're destroying the proxy */
|
||||||
void (*server_deinit)(struct server *); /* to be called when we're destroying the server */
|
void (*server_deinit)(struct server *); /* to be called when we're destroying the server */
|
||||||
int (*server_init)(struct server *); /* initialize a freshly added server (runtime); <0=fail. */
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* _HAPROXY_BACKEND_T_H */
|
#endif /* _HAPROXY_BACKEND_T_H */
|
||||||
|
|
|
||||||
|
|
@ -46,8 +46,6 @@ int alloc_bind_address(struct sockaddr_storage **ss,
|
||||||
struct server *srv, struct proxy *be,
|
struct server *srv, struct proxy *be,
|
||||||
struct stream *s);
|
struct stream *s);
|
||||||
|
|
||||||
int be_reuse_mode(const struct proxy *be, const struct server *srv);
|
|
||||||
|
|
||||||
int64_t be_calculate_conn_hash(struct server *srv, struct stream *strm,
|
int64_t be_calculate_conn_hash(struct server *srv, struct stream *strm,
|
||||||
struct session *sess,
|
struct session *sess,
|
||||||
struct sockaddr_storage *src,
|
struct sockaddr_storage *src,
|
||||||
|
|
@ -69,7 +67,6 @@ int backend_parse_balance(const char **args, char **err, struct proxy *curproxy)
|
||||||
int tcp_persist_rdp_cookie(struct stream *s, struct channel *req, int an_bit);
|
int tcp_persist_rdp_cookie(struct stream *s, struct channel *req, int an_bit);
|
||||||
|
|
||||||
int be_downtime(struct proxy *px);
|
int be_downtime(struct proxy *px);
|
||||||
int be_supports_dynamic_srv(struct proxy *px, char **msg);
|
|
||||||
void recount_servers(struct proxy *px);
|
void recount_servers(struct proxy *px);
|
||||||
void update_backend_weight(struct proxy *px);
|
void update_backend_weight(struct proxy *px);
|
||||||
|
|
||||||
|
|
@ -86,20 +83,9 @@ static inline int be_usable_srv(struct proxy *be)
|
||||||
return be->srv_bck;
|
return be->srv_bck;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Returns true if <be> backend can be used as target to a switching rules. */
|
|
||||||
static inline int be_is_eligible(const struct proxy *be)
|
|
||||||
{
|
|
||||||
/* A disabled or unpublished backend cannot be selected for traffic.
|
|
||||||
* Note that STOPPED state is ignored as there is a risk of breaking
|
|
||||||
* requests during soft-stop.
|
|
||||||
*/
|
|
||||||
return !(be->flags & (PR_FL_DISABLED|PR_FL_BE_UNPUBLISHED));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* set the time of last session on the backend */
|
/* set the time of last session on the backend */
|
||||||
static inline void be_set_sess_last(struct proxy *be)
|
static inline void be_set_sess_last(struct proxy *be)
|
||||||
{
|
{
|
||||||
if (be->be_counters.shared.tg)
|
|
||||||
HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
|
HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -180,12 +166,6 @@ void set_backend_down(struct proxy *be);
|
||||||
|
|
||||||
unsigned int gen_hash(const struct proxy* px, const char* key, unsigned long len);
|
unsigned int gen_hash(const struct proxy* px, const char* key, unsigned long len);
|
||||||
|
|
||||||
/* Returns true if connection reuse is supported by <be> backend. */
|
|
||||||
static inline int be_supports_conn_reuse(const struct proxy *be)
|
|
||||||
{
|
|
||||||
return be->mode == PR_MODE_HTTP || be->mode == PR_MODE_SPOP;
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_BACKEND_H */
|
#endif /* _HAPROXY_BACKEND_H */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
||||||
|
|
@ -40,23 +40,6 @@
|
||||||
#define DPRINTF(x...)
|
#define DPRINTF(x...)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* Let's make DEBUG_STRESS equal to zero if not set or not valid, or to
|
|
||||||
* 1 if set. This way it is always set and should be easy to use in "if ()"
|
|
||||||
* statements without requiring ifdefs, while remaining compatible with
|
|
||||||
* "#if DEBUG_STRESS > 0". We also force DEBUG_STRICT and DEBUG_STRICT_ACTION
|
|
||||||
* when stressed.
|
|
||||||
*/
|
|
||||||
#if !defined(DEBUG_STRESS)
|
|
||||||
# define DEBUG_STRESS 0
|
|
||||||
#elif DEBUG_STRESS != 0
|
|
||||||
# undef DEBUG_STRESS
|
|
||||||
# define DEBUG_STRESS 1 // make sure comparison >0 always works
|
|
||||||
# undef DEBUG_STRICT
|
|
||||||
# define DEBUG_STRICT 2 // enable BUG_ON
|
|
||||||
# undef DEBUG_STRICT_ACTION
|
|
||||||
# define DEBUG_STRICT_ACTION 3 // enable crash on match
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define DUMP_TRACE() do { extern void ha_backtrace_to_stderr(void); ha_backtrace_to_stderr(); } while (0)
|
#define DUMP_TRACE() do { extern void ha_backtrace_to_stderr(void); ha_backtrace_to_stderr(); } while (0)
|
||||||
|
|
||||||
/* First, let's try to handle some arch-specific crashing methods. We prefer
|
/* First, let's try to handle some arch-specific crashing methods. We prefer
|
||||||
|
|
@ -424,20 +407,6 @@ extern __attribute__((__weak__)) struct debug_count __stop_dbg_cnt HA_SECTION_S
|
||||||
# define COUNT_IF_HOT(cond, ...) DISGUISE(cond)
|
# define COUNT_IF_HOT(cond, ...) DISGUISE(cond)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* turn BUG_ON_STRESS() into a real statement when DEBUG_STRESS is set,
|
|
||||||
* otherwise simply ignore it, at the risk of failing to notice if the
|
|
||||||
* condition would build at all. We don't really care if BUG_ON_STRESS
|
|
||||||
* doesn't always build, because it's meant to be used only in certain
|
|
||||||
* scenarios, possibly requiring certain combinations of options. We
|
|
||||||
* just want to be certain that the condition is not implemented at all
|
|
||||||
* when not used, so as to encourage developers to put a lot of them at
|
|
||||||
* zero cost.
|
|
||||||
*/
|
|
||||||
#if DEBUG_STRESS > 0
|
|
||||||
# define BUG_ON_STRESS(cond, ...) BUG_ON(cond, __VA_ARGS__)
|
|
||||||
#else
|
|
||||||
# define BUG_ON_STRESS(cond, ...) do { } while (0)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* When not optimizing, clang won't remove that code, so only compile it in when optimizing */
|
/* When not optimizing, clang won't remove that code, so only compile it in when optimizing */
|
||||||
#if defined(__GNUC__) && defined(__OPTIMIZE__)
|
#if defined(__GNUC__) && defined(__OPTIMIZE__)
|
||||||
|
|
@ -537,7 +506,7 @@ struct mem_stats {
|
||||||
size_t size;
|
size_t size;
|
||||||
struct ha_caller caller;
|
struct ha_caller caller;
|
||||||
const void *extra; // extra info specific to this call (e.g. pool ptr)
|
const void *extra; // extra info specific to this call (e.g. pool ptr)
|
||||||
} ALIGNED(sizeof(void*));
|
} __attribute__((aligned(sizeof(void*))));
|
||||||
|
|
||||||
#undef calloc
|
#undef calloc
|
||||||
#define calloc(x,y) ({ \
|
#define calloc(x,y) ({ \
|
||||||
|
|
|
||||||
|
|
@ -54,8 +54,6 @@ enum cond_predicate {
|
||||||
CFG_PRED_OSSL_VERSION_ATLEAST, // "openssl_version_atleast"
|
CFG_PRED_OSSL_VERSION_ATLEAST, // "openssl_version_atleast"
|
||||||
CFG_PRED_OSSL_VERSION_BEFORE, // "openssl_version_before"
|
CFG_PRED_OSSL_VERSION_BEFORE, // "openssl_version_before"
|
||||||
CFG_PRED_SSLLIB_NAME_STARTSWITH, // "ssllib_name_startswith"
|
CFG_PRED_SSLLIB_NAME_STARTSWITH, // "ssllib_name_startswith"
|
||||||
CFG_PRED_AWSLC_API_ATLEAST, // "awslc_api_atleast"
|
|
||||||
CFG_PRED_AWSLC_API_BEFORE, // "awslc_api_before"
|
|
||||||
CFG_PRED_ENABLED, // "enabled"
|
CFG_PRED_ENABLED, // "enabled"
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -111,7 +111,6 @@ extern char *cursection;
|
||||||
extern int non_global_section_parsed;
|
extern int non_global_section_parsed;
|
||||||
|
|
||||||
extern struct proxy *curproxy;
|
extern struct proxy *curproxy;
|
||||||
extern struct proxy *last_defproxy;
|
|
||||||
extern char initial_cwd[PATH_MAX];
|
extern char initial_cwd[PATH_MAX];
|
||||||
|
|
||||||
int cfg_parse_global(const char *file, int linenum, char **args, int inv);
|
int cfg_parse_global(const char *file, int linenum, char **args, int inv);
|
||||||
|
|
@ -141,7 +140,7 @@ int warnif_misplaced_tcp_req_sess(struct proxy *proxy, const char *file, int lin
|
||||||
int warnif_misplaced_tcp_req_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
|
int warnif_misplaced_tcp_req_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
|
||||||
int warnif_misplaced_tcp_res_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
|
int warnif_misplaced_tcp_res_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
|
||||||
int warnif_misplaced_quic_init(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
|
int warnif_misplaced_quic_init(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
|
||||||
int warnif_cond_conflicts(const struct acl_cond *cond, unsigned int where, char **err);
|
int warnif_cond_conflicts(const struct acl_cond *cond, unsigned int where, const char *file, int line);
|
||||||
int warnif_tcp_http_cond(const struct proxy *px, const struct acl_cond *cond);
|
int warnif_tcp_http_cond(const struct proxy *px, const struct acl_cond *cond);
|
||||||
int too_many_args_idx(int maxarg, int index, char **args, char **msg, int *err_code);
|
int too_many_args_idx(int maxarg, int index, char **args, char **msg, int *err_code);
|
||||||
int too_many_args(int maxarg, char **args, char **msg, int *err_code);
|
int too_many_args(int maxarg, char **args, char **msg, int *err_code);
|
||||||
|
|
|
||||||
|
|
@ -24,7 +24,6 @@
|
||||||
|
|
||||||
#include <haproxy/api-t.h>
|
#include <haproxy/api-t.h>
|
||||||
#include <haproxy/buf-t.h>
|
#include <haproxy/buf-t.h>
|
||||||
#include <haproxy/filters-t.h>
|
|
||||||
#include <haproxy/show_flags-t.h>
|
#include <haproxy/show_flags-t.h>
|
||||||
|
|
||||||
/* The CF_* macros designate Channel Flags, which may be ORed in the bit field
|
/* The CF_* macros designate Channel Flags, which may be ORed in the bit field
|
||||||
|
|
@ -205,8 +204,8 @@ struct channel {
|
||||||
unsigned short last_read; /* 16 lower bits of last read date (max pause=65s) */
|
unsigned short last_read; /* 16 lower bits of last read date (max pause=65s) */
|
||||||
unsigned char xfer_large; /* number of consecutive large xfers */
|
unsigned char xfer_large; /* number of consecutive large xfers */
|
||||||
unsigned char xfer_small; /* number of consecutive small xfers */
|
unsigned char xfer_small; /* number of consecutive small xfers */
|
||||||
|
unsigned long long total; /* total data read */
|
||||||
int analyse_exp; /* expiration date for current analysers (if set) */
|
int analyse_exp; /* expiration date for current analysers (if set) */
|
||||||
struct chn_flt flt; /* current state of filters active on this channel */
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -323,6 +323,7 @@ static inline void channel_init(struct channel *chn)
|
||||||
chn->to_forward = 0;
|
chn->to_forward = 0;
|
||||||
chn->last_read = now_ms;
|
chn->last_read = now_ms;
|
||||||
chn->xfer_small = chn->xfer_large = 0;
|
chn->xfer_small = chn->xfer_large = 0;
|
||||||
|
chn->total = 0;
|
||||||
chn->analysers = 0;
|
chn->analysers = 0;
|
||||||
chn->flags = 0;
|
chn->flags = 0;
|
||||||
chn->output = 0;
|
chn->output = 0;
|
||||||
|
|
@ -376,6 +377,7 @@ static inline void channel_add_input(struct channel *chn, unsigned int len)
|
||||||
c_adv(chn, fwd);
|
c_adv(chn, fwd);
|
||||||
}
|
}
|
||||||
/* notify that some data was read */
|
/* notify that some data was read */
|
||||||
|
chn->total += len;
|
||||||
chn->flags |= CF_READ_EVENT;
|
chn->flags |= CF_READ_EVENT;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -787,12 +789,8 @@ static inline int channel_recv_max(const struct channel *chn)
|
||||||
*/
|
*/
|
||||||
static inline size_t channel_data_limit(const struct channel *chn)
|
static inline size_t channel_data_limit(const struct channel *chn)
|
||||||
{
|
{
|
||||||
|
size_t max = (global.tune.bufsize - global.tune.maxrewrite);
|
||||||
|
|
||||||
size_t max;
|
|
||||||
|
|
||||||
if (!c_size(chn))
|
|
||||||
return 0;
|
|
||||||
max = (c_size(chn) - global.tune.maxrewrite);
|
|
||||||
if (IS_HTX_STRM(chn_strm(chn)))
|
if (IS_HTX_STRM(chn_strm(chn)))
|
||||||
max -= HTX_BUF_OVERHEAD;
|
max -= HTX_BUF_OVERHEAD;
|
||||||
return max;
|
return max;
|
||||||
|
|
|
||||||
|
|
@ -32,7 +32,6 @@
|
||||||
|
|
||||||
|
|
||||||
extern struct pool_head *pool_head_trash;
|
extern struct pool_head *pool_head_trash;
|
||||||
extern struct pool_head *pool_head_large_trash;
|
|
||||||
|
|
||||||
/* function prototypes */
|
/* function prototypes */
|
||||||
|
|
||||||
|
|
@ -47,9 +46,6 @@ int chunk_asciiencode(struct buffer *dst, struct buffer *src, char qc);
|
||||||
int chunk_strcmp(const struct buffer *chk, const char *str);
|
int chunk_strcmp(const struct buffer *chk, const char *str);
|
||||||
int chunk_strcasecmp(const struct buffer *chk, const char *str);
|
int chunk_strcasecmp(const struct buffer *chk, const char *str);
|
||||||
struct buffer *get_trash_chunk(void);
|
struct buffer *get_trash_chunk(void);
|
||||||
struct buffer *get_large_trash_chunk(void);
|
|
||||||
struct buffer *get_trash_chunk_sz(size_t size);
|
|
||||||
struct buffer *get_larger_trash_chunk(struct buffer *chunk);
|
|
||||||
int init_trash_buffers(int first);
|
int init_trash_buffers(int first);
|
||||||
|
|
||||||
static inline void chunk_reset(struct buffer *chk)
|
static inline void chunk_reset(struct buffer *chk)
|
||||||
|
|
@ -110,53 +106,12 @@ static forceinline struct buffer *alloc_trash_chunk(void)
|
||||||
return chunk;
|
return chunk;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* Allocate a large trash chunk from the reentrant pool. The buffer starts at
|
|
||||||
* the end of the chunk. This chunk must be freed using free_trash_chunk(). This
|
|
||||||
* call may fail and the caller is responsible for checking that the returned
|
|
||||||
* pointer is not NULL.
|
|
||||||
*/
|
|
||||||
static forceinline struct buffer *alloc_large_trash_chunk(void)
|
|
||||||
{
|
|
||||||
struct buffer *chunk;
|
|
||||||
|
|
||||||
if (!pool_head_large_trash)
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
chunk = pool_alloc(pool_head_large_trash);
|
|
||||||
if (chunk) {
|
|
||||||
char *buf = (char *)chunk + sizeof(struct buffer);
|
|
||||||
*buf = 0;
|
|
||||||
chunk_init(chunk, buf,
|
|
||||||
pool_head_large_trash->size - sizeof(struct buffer));
|
|
||||||
}
|
|
||||||
return chunk;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Allocate a trash chunk accordingly to the requested size. This chunk must be
|
|
||||||
* freed using free_trash_chunk(). This call may fail and the caller is
|
|
||||||
* responsible for checking that the returned pointer is not NULL.
|
|
||||||
*/
|
|
||||||
static forceinline struct buffer *alloc_trash_chunk_sz(size_t size)
|
|
||||||
{
|
|
||||||
if (likely(size <= pool_head_trash->size))
|
|
||||||
return alloc_trash_chunk();
|
|
||||||
else if (pool_head_large_trash && size <= pool_head_large_trash->size)
|
|
||||||
return alloc_large_trash_chunk();
|
|
||||||
else
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* free a trash chunk allocated by alloc_trash_chunk(). NOP on NULL.
|
* free a trash chunk allocated by alloc_trash_chunk(). NOP on NULL.
|
||||||
*/
|
*/
|
||||||
static forceinline void free_trash_chunk(struct buffer *chunk)
|
static forceinline void free_trash_chunk(struct buffer *chunk)
|
||||||
{
|
{
|
||||||
if (likely(chunk && chunk->size == pool_head_trash->size - sizeof(struct buffer)))
|
|
||||||
pool_free(pool_head_trash, chunk);
|
pool_free(pool_head_trash, chunk);
|
||||||
else
|
|
||||||
pool_free(pool_head_large_trash, chunk);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* copies chunk <src> into <chk>. Returns 0 in case of failure. */
|
/* copies chunk <src> into <chk>. Returns 0 in case of failure. */
|
||||||
|
|
|
||||||
|
|
@ -47,7 +47,6 @@
|
||||||
#define APPCTX_CLI_ST1_INTER (1 << 3) /* interactive mode (i.e. don't close after 1st cmd) */
|
#define APPCTX_CLI_ST1_INTER (1 << 3) /* interactive mode (i.e. don't close after 1st cmd) */
|
||||||
#define APPCTX_CLI_ST1_PROMPT (1 << 4) /* display prompt */
|
#define APPCTX_CLI_ST1_PROMPT (1 << 4) /* display prompt */
|
||||||
#define APPCTX_CLI_ST1_TIMED (1 << 5) /* display timer in prompt */
|
#define APPCTX_CLI_ST1_TIMED (1 << 5) /* display timer in prompt */
|
||||||
#define APPCTX_CLI_ST1_YIELD (1 << 6) /* forced yield between commands */
|
|
||||||
|
|
||||||
#define CLI_PREFIX_KW_NB 5
|
#define CLI_PREFIX_KW_NB 5
|
||||||
#define CLI_MAX_MATCHES 5
|
#define CLI_MAX_MATCHES 5
|
||||||
|
|
@ -100,7 +99,6 @@ enum cli_wait_err {
|
||||||
enum cli_wait_cond {
|
enum cli_wait_cond {
|
||||||
CLI_WAIT_COND_NONE, // no condition to wait on
|
CLI_WAIT_COND_NONE, // no condition to wait on
|
||||||
CLI_WAIT_COND_SRV_UNUSED,// wait for server to become unused
|
CLI_WAIT_COND_SRV_UNUSED,// wait for server to become unused
|
||||||
CLI_WAIT_COND_BE_UNUSED, // wait for backend to become unused
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct cli_wait_ctx {
|
struct cli_wait_ctx {
|
||||||
|
|
|
||||||
|
|
@ -31,23 +31,6 @@
|
||||||
#include <stdlib.h>
|
#include <stdlib.h>
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* DEFVAL() returns either the second argument as-is, or <def> if absent. This
|
|
||||||
* is for use in macros arguments.
|
|
||||||
*/
|
|
||||||
#define DEFVAL(_def,...) _FIRST_ARG(NULL, ##__VA_ARGS__, (_def))
|
|
||||||
|
|
||||||
/* DEFNULL() returns either the argument as-is, or NULL if absent. This is for
|
|
||||||
* use in macros arguments.
|
|
||||||
*/
|
|
||||||
#define DEFNULL(...) DEFVAL(NULL, ##__VA_ARGS__)
|
|
||||||
|
|
||||||
/* DEFZERO() returns either the argument as-is, or 0 if absent. This is for
|
|
||||||
* use in macros arguments.
|
|
||||||
*/
|
|
||||||
#define DEFZERO(...) DEFVAL(0, ##__VA_ARGS__)
|
|
||||||
|
|
||||||
#define _FIRST_ARG(a, b, ...) b
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Gcc before 3.0 needs [0] to declare a variable-size array
|
* Gcc before 3.0 needs [0] to declare a variable-size array
|
||||||
*/
|
*/
|
||||||
|
|
@ -432,13 +415,6 @@
|
||||||
* for multi_threading, see THREAD_PAD() below. *
|
* for multi_threading, see THREAD_PAD() below. *
|
||||||
\*****************************************************************************/
|
\*****************************************************************************/
|
||||||
|
|
||||||
/* Cache line size for alignment purposes. This value is incorrect for some
|
|
||||||
* Apple CPUs which have 128 bytes cache lines.
|
|
||||||
*/
|
|
||||||
#ifndef CACHELINE_SIZE
|
|
||||||
#define CACHELINE_SIZE 64
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* sets alignment for current field or variable */
|
/* sets alignment for current field or variable */
|
||||||
#ifndef ALIGNED
|
#ifndef ALIGNED
|
||||||
#define ALIGNED(x) __attribute__((aligned(x)))
|
#define ALIGNED(x) __attribute__((aligned(x)))
|
||||||
|
|
@ -462,12 +438,12 @@
|
||||||
#endif
|
#endif
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* Sets alignment for current field or variable only when threads are enabled.
|
/* sets alignment for current field or variable only when threads are enabled.
|
||||||
* When no parameters are provided, we align to the cache line size.
|
* Typically used to respect cache line alignment to avoid false sharing.
|
||||||
*/
|
*/
|
||||||
#ifndef THREAD_ALIGNED
|
#ifndef THREAD_ALIGNED
|
||||||
#ifdef USE_THREAD
|
#ifdef USE_THREAD
|
||||||
#define THREAD_ALIGNED(...) ALIGNED(DEFVAL(CACHELINE_SIZE, ##__VA_ARGS__))
|
#define THREAD_ALIGNED(x) __attribute__((aligned(x)))
|
||||||
#else
|
#else
|
||||||
#define THREAD_ALIGNED(x)
|
#define THREAD_ALIGNED(x)
|
||||||
#endif
|
#endif
|
||||||
|
|
@ -500,44 +476,32 @@
|
||||||
#endif
|
#endif
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* Add an optional alignment for next fields in a structure, only when threads
|
/* add an optional alignment for next fields in a structure, only when threads
|
||||||
* are enabled. When no parameters are provided, we align to the cache line size.
|
* are enabled. Typically used to respect cache line alignment to avoid false
|
||||||
|
* sharing.
|
||||||
*/
|
*/
|
||||||
#ifndef THREAD_ALIGN
|
#ifndef THREAD_ALIGN
|
||||||
#ifdef USE_THREAD
|
#ifdef USE_THREAD
|
||||||
#define THREAD_ALIGN(...) union { } ALIGNED(DEFVAL(CACHELINE_SIZE, ##__VA_ARGS__))
|
#define THREAD_ALIGN(x) union { } ALIGNED(x)
|
||||||
#else
|
#else
|
||||||
#define THREAD_ALIGN(x)
|
#define THREAD_ALIGN(x)
|
||||||
#endif
|
#endif
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* add padding of the specified size */
|
|
||||||
#define _PAD(x,l) char __pad_##l[x]
|
|
||||||
|
|
||||||
/* add optional padding of the specified size between fields in a structure,
|
/* add optional padding of the specified size between fields in a structure,
|
||||||
* only when threads are enabled. This is used to avoid false sharing of cache
|
* only when threads are enabled. This is used to avoid false sharing of cache
|
||||||
* lines for dynamically allocated structures which cannot guarantee alignment.
|
* lines for dynamically allocated structures which cannot guarantee alignment.
|
||||||
*/
|
*/
|
||||||
#ifndef THREAD_PAD
|
#ifndef THREAD_PAD
|
||||||
# ifdef USE_THREAD
|
# ifdef USE_THREAD
|
||||||
# define _THREAD_PAD(x,l) _PAD(x, l)
|
# define __THREAD_PAD(x,l) char __pad_##l[x]
|
||||||
|
# define _THREAD_PAD(x,l) __THREAD_PAD(x, l)
|
||||||
# define THREAD_PAD(x) _THREAD_PAD(x, __LINE__)
|
# define THREAD_PAD(x) _THREAD_PAD(x, __LINE__)
|
||||||
# else
|
# else
|
||||||
# define THREAD_PAD(x)
|
# define THREAD_PAD(x)
|
||||||
# endif
|
# endif
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* add mandatory padding of the specified size between fields in a structure,
|
|
||||||
* This is used to avoid false sharing of cache lines for dynamically allocated
|
|
||||||
* structures which cannot guarantee alignment, or to ensure that the size of
|
|
||||||
* the struct remains consistent on architectures with different alignment
|
|
||||||
* constraints
|
|
||||||
*/
|
|
||||||
#ifndef ALWAYS_PAD
|
|
||||||
# define _ALWAYS_PAD(x,l) _PAD(x, l)
|
|
||||||
# define ALWAYS_PAD(x) _ALWAYS_PAD(x, __LINE__)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* The THREAD_LOCAL type attribute defines thread-local storage and is defined
|
/* The THREAD_LOCAL type attribute defines thread-local storage and is defined
|
||||||
* to __thread when threads are enabled or empty when disabled.
|
* to __thread when threads are enabled or empty when disabled.
|
||||||
*/
|
*/
|
||||||
|
|
|
||||||
|
|
@ -28,7 +28,7 @@
|
||||||
#include <netinet/ip.h>
|
#include <netinet/ip.h>
|
||||||
#include <netinet/ip6.h>
|
#include <netinet/ip6.h>
|
||||||
|
|
||||||
#include <import/cebtree.h>
|
#include <import/ebtree-t.h>
|
||||||
#include <import/ist.h>
|
#include <import/ist.h>
|
||||||
|
|
||||||
#include <haproxy/api-t.h>
|
#include <haproxy/api-t.h>
|
||||||
|
|
@ -145,7 +145,7 @@ enum {
|
||||||
CO_FL_WAIT_ROOM = 0x00000800, /* data sink is full */
|
CO_FL_WAIT_ROOM = 0x00000800, /* data sink is full */
|
||||||
|
|
||||||
CO_FL_WANT_SPLICING = 0x00001000, /* we wish to use splicing on the connection when possible */
|
CO_FL_WANT_SPLICING = 0x00001000, /* we wish to use splicing on the connection when possible */
|
||||||
CO_FL_SSL_NO_CACHED_INFO = 0x00002000, /* Don't use any cached information when creating a new SSL connection */
|
/* unused: 0x00002000 */
|
||||||
|
|
||||||
CO_FL_EARLY_SSL_HS = 0x00004000, /* We have early data pending, don't start SSL handshake yet */
|
CO_FL_EARLY_SSL_HS = 0x00004000, /* We have early data pending, don't start SSL handshake yet */
|
||||||
CO_FL_EARLY_DATA = 0x00008000, /* At least some of the data are early data */
|
CO_FL_EARLY_DATA = 0x00008000, /* At least some of the data are early data */
|
||||||
|
|
@ -212,13 +212,13 @@ static forceinline char *conn_show_flags(char *buf, size_t len, const char *deli
|
||||||
/* flags */
|
/* flags */
|
||||||
_(CO_FL_SAFE_LIST, _(CO_FL_IDLE_LIST, _(CO_FL_CTRL_READY,
|
_(CO_FL_SAFE_LIST, _(CO_FL_IDLE_LIST, _(CO_FL_CTRL_READY,
|
||||||
_(CO_FL_REVERSED, _(CO_FL_ACT_REVERSING, _(CO_FL_OPT_MARK, _(CO_FL_OPT_TOS,
|
_(CO_FL_REVERSED, _(CO_FL_ACT_REVERSING, _(CO_FL_OPT_MARK, _(CO_FL_OPT_TOS,
|
||||||
_(CO_FL_XPRT_READY, _(CO_FL_WANT_DRAIN, _(CO_FL_WAIT_ROOM, _(CO_FL_SSL_NO_CACHED_INFO, _(CO_FL_EARLY_SSL_HS,
|
_(CO_FL_XPRT_READY, _(CO_FL_WANT_DRAIN, _(CO_FL_WAIT_ROOM, _(CO_FL_EARLY_SSL_HS,
|
||||||
_(CO_FL_EARLY_DATA, _(CO_FL_SOCKS4_SEND, _(CO_FL_SOCKS4_RECV, _(CO_FL_SOCK_RD_SH,
|
_(CO_FL_EARLY_DATA, _(CO_FL_SOCKS4_SEND, _(CO_FL_SOCKS4_RECV, _(CO_FL_SOCK_RD_SH,
|
||||||
_(CO_FL_SOCK_WR_SH, _(CO_FL_ERROR, _(CO_FL_FDLESS, _(CO_FL_WAIT_L4_CONN,
|
_(CO_FL_SOCK_WR_SH, _(CO_FL_ERROR, _(CO_FL_FDLESS, _(CO_FL_WAIT_L4_CONN,
|
||||||
_(CO_FL_WAIT_L6_CONN, _(CO_FL_SEND_PROXY, _(CO_FL_ACCEPT_PROXY, _(CO_FL_ACCEPT_CIP,
|
_(CO_FL_WAIT_L6_CONN, _(CO_FL_SEND_PROXY, _(CO_FL_ACCEPT_PROXY, _(CO_FL_ACCEPT_CIP,
|
||||||
_(CO_FL_SSL_WAIT_HS, _(CO_FL_PRIVATE, _(CO_FL_RCVD_PROXY, _(CO_FL_SESS_IDLE,
|
_(CO_FL_SSL_WAIT_HS, _(CO_FL_PRIVATE, _(CO_FL_RCVD_PROXY, _(CO_FL_SESS_IDLE,
|
||||||
_(CO_FL_XPRT_TRACKED
|
_(CO_FL_XPRT_TRACKED
|
||||||
)))))))))))))))))))))))))))));
|
))))))))))))))))))))))))))));
|
||||||
/* epilogue */
|
/* epilogue */
|
||||||
_(~0U);
|
_(~0U);
|
||||||
return buf;
|
return buf;
|
||||||
|
|
@ -329,7 +329,6 @@ enum {
|
||||||
CO_RFL_KEEP_RECV = 0x0008, /* Instruct the mux to still wait for read events */
|
CO_RFL_KEEP_RECV = 0x0008, /* Instruct the mux to still wait for read events */
|
||||||
CO_RFL_BUF_NOT_STUCK = 0x0010, /* Buffer is not stuck. Optims are possible during data copy */
|
CO_RFL_BUF_NOT_STUCK = 0x0010, /* Buffer is not stuck. Optims are possible during data copy */
|
||||||
CO_RFL_MAY_SPLICE = 0x0020, /* The producer can use the kernel splicing */
|
CO_RFL_MAY_SPLICE = 0x0020, /* The producer can use the kernel splicing */
|
||||||
CO_RFL_TRY_HARDER = 0x0040, /* Try to read till READ0 even on short reads */
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/* flags that can be passed to xprt->snd_buf() and mux->snd_buf() */
|
/* flags that can be passed to xprt->snd_buf() and mux->snd_buf() */
|
||||||
|
|
@ -476,7 +475,7 @@ struct xprt_ops {
|
||||||
void (*dump_info)(struct buffer *, const struct connection *);
|
void (*dump_info)(struct buffer *, const struct connection *);
|
||||||
/*
|
/*
|
||||||
* Returns the value for various capabilities.
|
* Returns the value for various capabilities.
|
||||||
* Returns 0 if the capability is known, with the actual value in arg,
|
* Returns 0 if the capability is known, iwth the actual value in arg,
|
||||||
* or -1 otherwise
|
* or -1 otherwise
|
||||||
*/
|
*/
|
||||||
int (*get_capability)(struct connection *connection, void *xprt_ctx, enum xprt_capabilities, void *arg);
|
int (*get_capability)(struct connection *connection, void *xprt_ctx, enum xprt_capabilities, void *arg);
|
||||||
|
|
@ -567,7 +566,7 @@ enum conn_hash_params_t {
|
||||||
#define CONN_HASH_PARAMS_TYPE_COUNT 7
|
#define CONN_HASH_PARAMS_TYPE_COUNT 7
|
||||||
|
|
||||||
#define CONN_HASH_PAYLOAD_LEN \
|
#define CONN_HASH_PAYLOAD_LEN \
|
||||||
(((sizeof(((struct conn_hash_node *)0)->key)) * 8) - CONN_HASH_PARAMS_TYPE_COUNT)
|
(((sizeof(((struct conn_hash_node *)0)->node.key)) * 8) - CONN_HASH_PARAMS_TYPE_COUNT)
|
||||||
|
|
||||||
#define CONN_HASH_GET_PAYLOAD(hash) \
|
#define CONN_HASH_GET_PAYLOAD(hash) \
|
||||||
(((hash) << CONN_HASH_PARAMS_TYPE_COUNT) >> CONN_HASH_PARAMS_TYPE_COUNT)
|
(((hash) << CONN_HASH_PARAMS_TYPE_COUNT) >> CONN_HASH_PARAMS_TYPE_COUNT)
|
||||||
|
|
@ -599,14 +598,6 @@ struct conn_tlv_list {
|
||||||
} __attribute__((packed));
|
} __attribute__((packed));
|
||||||
|
|
||||||
|
|
||||||
/* node for backend connection in the idle trees for http-reuse
|
|
||||||
* A connection is identified by a hash generated from its specific parameters
|
|
||||||
*/
|
|
||||||
struct conn_hash_node {
|
|
||||||
struct ceb_node node; /* indexes the hashing key for safe/idle/avail */
|
|
||||||
uint64_t key; /* the hashing key, also used by session-owned */
|
|
||||||
};
|
|
||||||
|
|
||||||
/* This structure describes a connection with its methods and data.
|
/* This structure describes a connection with its methods and data.
|
||||||
* A connection may be performed to proxy or server via a local or remote
|
* A connection may be performed to proxy or server via a local or remote
|
||||||
* socket, and can also be made to an internal applet. It can support
|
* socket, and can also be made to an internal applet. It can support
|
||||||
|
|
@ -631,14 +622,12 @@ struct connection {
|
||||||
/* second cache line */
|
/* second cache line */
|
||||||
struct wait_event *subs; /* Task to wake when awaited events are ready */
|
struct wait_event *subs; /* Task to wake when awaited events are ready */
|
||||||
union {
|
union {
|
||||||
/* Backend connections only */
|
|
||||||
struct {
|
|
||||||
struct mt_list toremove_list; /* list element when idle connection is ready to be purged */
|
|
||||||
struct list idle_list; /* list element for idle connection in server idle list */
|
struct list idle_list; /* list element for idle connection in server idle list */
|
||||||
struct list sess_el; /* used by private connections, list elem into session */
|
struct mt_list toremove_list; /* list element when idle connection is ready to be purged */
|
||||||
};
|
};
|
||||||
/* Frontend connections only */
|
union {
|
||||||
struct list stopping_list; /* attach point in mux stopping list */
|
struct list sess_el; /* used by private backend conns, list elem into session */
|
||||||
|
struct list stopping_list; /* used by frontend conns, attach point in mux stopping list */
|
||||||
};
|
};
|
||||||
union conn_handle handle; /* connection handle at the socket layer */
|
union conn_handle handle; /* connection handle at the socket layer */
|
||||||
const struct netns_entry *proxy_netns;
|
const struct netns_entry *proxy_netns;
|
||||||
|
|
@ -652,7 +641,7 @@ struct connection {
|
||||||
/* used to identify a backend connection for http-reuse,
|
/* used to identify a backend connection for http-reuse,
|
||||||
* thus only present if conn.target is of type OBJ_TYPE_SERVER
|
* thus only present if conn.target is of type OBJ_TYPE_SERVER
|
||||||
*/
|
*/
|
||||||
struct conn_hash_node hash_node;
|
struct conn_hash_node *hash_node;
|
||||||
|
|
||||||
/* Members used if connection must be reversed. */
|
/* Members used if connection must be reversed. */
|
||||||
struct {
|
struct {
|
||||||
|
|
@ -660,18 +649,24 @@ struct connection {
|
||||||
struct buffer name; /* Only used for passive reverse. Used as SNI when connection added to server idle pool. */
|
struct buffer name; /* Only used for passive reverse. Used as SNI when connection added to server idle pool. */
|
||||||
} reverse;
|
} reverse;
|
||||||
|
|
||||||
uint64_t sni_hash; /* Hash of the SNI. Used to cache the TLS session and try to reuse it. set to 0 is there is no SNI */
|
|
||||||
uint32_t term_evts_log; /* Termination events log: first 4 events reported from fd, handshake or xprt */
|
uint32_t term_evts_log; /* Termination events log: first 4 events reported from fd, handshake or xprt */
|
||||||
uint32_t mark; /* set network mark, if CO_FL_OPT_MARK is set */
|
uint32_t mark; /* set network mark, if CO_FL_OPT_MARK is set */
|
||||||
uint8_t tos; /* set ip tos, if CO_FL_OPT_TOS is set */
|
uint8_t tos; /* set ip tos, if CO_FL_OPT_TOS is set */
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/* node for backend connection in the idle trees for http-reuse
|
||||||
|
* A connection is identified by a hash generated from its specific parameters
|
||||||
|
*/
|
||||||
|
struct conn_hash_node {
|
||||||
|
struct eb64_node node; /* contains the hashing key */
|
||||||
|
struct connection *conn; /* connection owner of the node */
|
||||||
|
};
|
||||||
|
|
||||||
struct mux_proto_list {
|
struct mux_proto_list {
|
||||||
const struct ist token; /* token name and length. Empty is catch-all */
|
const struct ist token; /* token name and length. Empty is catch-all */
|
||||||
enum proto_proxy_mode mode;
|
enum proto_proxy_mode mode;
|
||||||
enum proto_proxy_side side;
|
enum proto_proxy_side side;
|
||||||
const struct mux_ops *mux;
|
const struct mux_ops *mux;
|
||||||
const char *alpn; /* Default alpn to set by default when the mux protocol is forced (optional, in binary form) */
|
|
||||||
struct list list;
|
struct list list;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
@ -795,7 +790,7 @@ struct idle_conns {
|
||||||
struct mt_list toremove_conns;
|
struct mt_list toremove_conns;
|
||||||
struct task *cleanup_task;
|
struct task *cleanup_task;
|
||||||
__decl_thread(HA_SPINLOCK_T idle_conns_lock);
|
__decl_thread(HA_SPINLOCK_T idle_conns_lock);
|
||||||
} THREAD_ALIGNED();
|
} THREAD_ALIGNED(64);
|
||||||
|
|
||||||
|
|
||||||
/* Termination events logs:
|
/* Termination events logs:
|
||||||
|
|
|
||||||
|
|
@ -39,6 +39,7 @@
|
||||||
#include <haproxy/task-t.h>
|
#include <haproxy/task-t.h>
|
||||||
|
|
||||||
extern struct pool_head *pool_head_connection;
|
extern struct pool_head *pool_head_connection;
|
||||||
|
extern struct pool_head *pool_head_conn_hash_node;
|
||||||
extern struct pool_head *pool_head_sockaddr;
|
extern struct pool_head *pool_head_sockaddr;
|
||||||
extern struct pool_head *pool_head_pp_tlv_128;
|
extern struct pool_head *pool_head_pp_tlv_128;
|
||||||
extern struct pool_head *pool_head_pp_tlv_256;
|
extern struct pool_head *pool_head_pp_tlv_256;
|
||||||
|
|
@ -83,13 +84,14 @@ int conn_install_mux_be(struct connection *conn, void *ctx, struct session *sess
|
||||||
const struct mux_ops *force_mux_ops);
|
const struct mux_ops *force_mux_ops);
|
||||||
int conn_install_mux_chk(struct connection *conn, void *ctx, struct session *sess);
|
int conn_install_mux_chk(struct connection *conn, void *ctx, struct session *sess);
|
||||||
|
|
||||||
void conn_delete_from_tree(struct connection *conn, int thr);
|
void conn_delete_from_tree(struct connection *conn);
|
||||||
|
|
||||||
void conn_init(struct connection *conn, void *target);
|
void conn_init(struct connection *conn, void *target);
|
||||||
struct connection *conn_new(void *target);
|
struct connection *conn_new(void *target);
|
||||||
void conn_free(struct connection *conn);
|
void conn_free(struct connection *conn);
|
||||||
void conn_release(struct connection *conn);
|
void conn_release(struct connection *conn);
|
||||||
void conn_set_errno(struct connection *conn, int err);
|
void conn_set_errno(struct connection *conn, int err);
|
||||||
|
struct conn_hash_node *conn_alloc_hash_node(struct connection *conn);
|
||||||
struct sockaddr_storage *sockaddr_alloc(struct sockaddr_storage **sap, const struct sockaddr_storage *orig, socklen_t len);
|
struct sockaddr_storage *sockaddr_alloc(struct sockaddr_storage **sap, const struct sockaddr_storage *orig, socklen_t len);
|
||||||
void sockaddr_free(struct sockaddr_storage **sap);
|
void sockaddr_free(struct sockaddr_storage **sap);
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -33,22 +33,15 @@
|
||||||
struct { \
|
struct { \
|
||||||
uint16_t flags; /* COUNTERS_SHARED_F flags */\
|
uint16_t flags; /* COUNTERS_SHARED_F flags */\
|
||||||
};
|
};
|
||||||
|
|
||||||
/* /!\ any change performed here will impact shm-stats-file mapping because the
|
|
||||||
* struct is embedded in shm_stats_file_object struct, so proceed with caution
|
|
||||||
* and change shm stats file version if needed. Also please always keep this
|
|
||||||
* struct 64b-aligned.
|
|
||||||
*/
|
|
||||||
#define COUNTERS_SHARED_TG \
|
#define COUNTERS_SHARED_TG \
|
||||||
struct { \
|
struct { \
|
||||||
|
unsigned long last_state_change; /* last time, when the state was changed */\
|
||||||
long long srv_aborts; /* aborted responses during DATA phase caused by the server */\
|
long long srv_aborts; /* aborted responses during DATA phase caused by the server */\
|
||||||
long long cli_aborts; /* aborted responses during DATA phase caused by the client */\
|
long long cli_aborts; /* aborted responses during DATA phase caused by the client */\
|
||||||
long long internal_errors; /* internal processing errors */\
|
long long internal_errors; /* internal processing errors */\
|
||||||
long long failed_rewrites; /* failed rewrites (warning) */\
|
long long failed_rewrites; /* failed rewrites (warning) */\
|
||||||
long long req_in; /* number of bytes received from the client */\
|
long long bytes_out; /* number of bytes transferred from the server to the client */\
|
||||||
long long req_out; /* number of bytes sent to the server */\
|
long long bytes_in; /* number of bytes transferred from the client to the server */\
|
||||||
long long res_in; /* number of bytes received from the server */\
|
|
||||||
long long res_out; /* number of bytes sent to the client */\
|
|
||||||
long long denied_resp; /* blocked responses because of security concerns */\
|
long long denied_resp; /* blocked responses because of security concerns */\
|
||||||
long long denied_req; /* blocked requests because of security concerns */\
|
long long denied_req; /* blocked requests because of security concerns */\
|
||||||
long long cum_sess; /* cumulated number of accepted connections */\
|
long long cum_sess; /* cumulated number of accepted connections */\
|
||||||
|
|
@ -56,9 +49,7 @@
|
||||||
long long comp_in[2]; /* input bytes fed to the compressor */\
|
long long comp_in[2]; /* input bytes fed to the compressor */\
|
||||||
long long comp_out[2]; /* output bytes emitted by the compressor */\
|
long long comp_out[2]; /* output bytes emitted by the compressor */\
|
||||||
long long comp_byp[2]; /* input bytes that bypassed the compressor (cpu/ram/bw limitation) */\
|
long long comp_byp[2]; /* input bytes that bypassed the compressor (cpu/ram/bw limitation) */\
|
||||||
struct freq_ctr sess_per_sec; /* sessions per second on this server (3x32b) */\
|
struct freq_ctr sess_per_sec; /* sessions per second on this server */\
|
||||||
unsigned int last_state_change; /* last time, when the state was changed (32b) */\
|
|
||||||
/* we're still 64b-aligned here */ \
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// for convenience (generic pointer)
|
// for convenience (generic pointer)
|
||||||
|
|
@ -66,14 +57,10 @@ struct counters_shared {
|
||||||
COUNTERS_SHARED;
|
COUNTERS_SHARED;
|
||||||
struct {
|
struct {
|
||||||
COUNTERS_SHARED_TG;
|
COUNTERS_SHARED_TG;
|
||||||
} **tg;
|
} *tg[MAX_TGROUPS];
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/* counters used by listeners and frontends */
|
||||||
* /!\ any change performed here will impact shm-stats-file mapping because the
|
|
||||||
* struct is embedded in shm_stats_file_object struct, so proceed with caution
|
|
||||||
* and change shm stats file version if needed
|
|
||||||
*/
|
|
||||||
struct fe_counters_shared_tg {
|
struct fe_counters_shared_tg {
|
||||||
COUNTERS_SHARED_TG;
|
COUNTERS_SHARED_TG;
|
||||||
|
|
||||||
|
|
@ -97,14 +84,13 @@ struct fe_counters_shared_tg {
|
||||||
} p; /* protocol-specific stats */
|
} p; /* protocol-specific stats */
|
||||||
|
|
||||||
long long failed_req; /* failed requests (eg: invalid or timeout) */
|
long long failed_req; /* failed requests (eg: invalid or timeout) */
|
||||||
} ALIGNED(8);
|
};
|
||||||
|
|
||||||
struct fe_counters_shared {
|
struct fe_counters_shared {
|
||||||
COUNTERS_SHARED;
|
COUNTERS_SHARED;
|
||||||
struct fe_counters_shared_tg **tg;
|
struct fe_counters_shared_tg *tg[MAX_TGROUPS];
|
||||||
};
|
};
|
||||||
|
|
||||||
/* counters used by listeners and frontends */
|
|
||||||
struct fe_counters {
|
struct fe_counters {
|
||||||
struct fe_counters_shared shared; /* shared counters */
|
struct fe_counters_shared shared; /* shared counters */
|
||||||
unsigned int conn_max; /* max # of active sessions */
|
unsigned int conn_max; /* max # of active sessions */
|
||||||
|
|
@ -122,11 +108,6 @@ struct fe_counters {
|
||||||
} p; /* protocol-specific stats */
|
} p; /* protocol-specific stats */
|
||||||
};
|
};
|
||||||
|
|
||||||
/* /!\ any change performed here will impact shm-stats-file mapping because the
|
|
||||||
* struct is embedded in shm_stats_file_object struct, so proceed with caution
|
|
||||||
* and change shm stats file version if needed. Pay attention to keeping the
|
|
||||||
* struct 64b-aligned.
|
|
||||||
*/
|
|
||||||
struct be_counters_shared_tg {
|
struct be_counters_shared_tg {
|
||||||
COUNTERS_SHARED_TG;
|
COUNTERS_SHARED_TG;
|
||||||
|
|
||||||
|
|
@ -134,6 +115,7 @@ struct be_counters_shared_tg {
|
||||||
|
|
||||||
long long connect; /* number of connection establishment attempts */
|
long long connect; /* number of connection establishment attempts */
|
||||||
long long reuse; /* number of connection reuses */
|
long long reuse; /* number of connection reuses */
|
||||||
|
unsigned long last_sess; /* last session time */
|
||||||
|
|
||||||
long long failed_checks, failed_hana; /* failed health checks and health analyses for servers */
|
long long failed_checks, failed_hana; /* failed health checks and health analyses for servers */
|
||||||
long long down_trans; /* up->down transitions */
|
long long down_trans; /* up->down transitions */
|
||||||
|
|
@ -154,13 +136,11 @@ struct be_counters_shared_tg {
|
||||||
long long retries; /* retried and redispatched connections (BE only) */
|
long long retries; /* retried and redispatched connections (BE only) */
|
||||||
long long failed_resp; /* failed responses (BE only) */
|
long long failed_resp; /* failed responses (BE only) */
|
||||||
long long failed_conns; /* failed connect() attempts (BE only) */
|
long long failed_conns; /* failed connect() attempts (BE only) */
|
||||||
unsigned int last_sess; /* last session time */
|
};
|
||||||
/* 32-bit hole here */
|
|
||||||
} ALIGNED(8);
|
|
||||||
|
|
||||||
struct be_counters_shared {
|
struct be_counters_shared {
|
||||||
COUNTERS_SHARED;
|
COUNTERS_SHARED;
|
||||||
struct be_counters_shared_tg **tg;
|
struct be_counters_shared_tg *tg[MAX_TGROUPS];
|
||||||
};
|
};
|
||||||
|
|
||||||
/* counters used by servers and backends */
|
/* counters used by servers and backends */
|
||||||
|
|
@ -185,29 +165,6 @@ struct be_counters {
|
||||||
} p; /* protocol-specific stats */
|
} p; /* protocol-specific stats */
|
||||||
};
|
};
|
||||||
|
|
||||||
/* extra counters that are registered at boot by various modules */
|
|
||||||
enum counters_type {
|
|
||||||
COUNTERS_FE = 0,
|
|
||||||
COUNTERS_BE,
|
|
||||||
COUNTERS_SV,
|
|
||||||
COUNTERS_LI,
|
|
||||||
COUNTERS_RSLV,
|
|
||||||
|
|
||||||
COUNTERS_OFF_END /* must always be last */
|
|
||||||
};
|
|
||||||
|
|
||||||
struct extra_counters {
|
|
||||||
char **datap; /* points to pointer to heap containing counters allocated in a linear fashion */
|
|
||||||
size_t size; /* size of allocated data */
|
|
||||||
size_t tgrp_step; /* distance in words between two datap for consecutive tgroups, 0 for single */
|
|
||||||
uint nbtgrp; /* number of thread groups accessing these counters */
|
|
||||||
enum counters_type type; /* type of object containing the counters */
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
#define EXTRA_COUNTERS(name) \
|
|
||||||
struct extra_counters *name
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_COUNTERS_T_H */
|
#endif /* _HAPROXY_COUNTERS_T_H */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
||||||
|
|
@ -27,10 +27,8 @@
|
||||||
#include <haproxy/counters-t.h>
|
#include <haproxy/counters-t.h>
|
||||||
#include <haproxy/guid-t.h>
|
#include <haproxy/guid-t.h>
|
||||||
|
|
||||||
extern THREAD_LOCAL void *trash_counters;
|
int counters_fe_shared_prepare(struct fe_counters_shared *counters, const struct guid_node *guid);
|
||||||
|
int counters_be_shared_prepare(struct be_counters_shared *counters, const struct guid_node *guid);
|
||||||
int counters_fe_shared_prepare(struct fe_counters_shared *counters, const struct guid_node *guid, char **errmsg);
|
|
||||||
int counters_be_shared_prepare(struct be_counters_shared *counters, const struct guid_node *guid, char **errmsg);
|
|
||||||
|
|
||||||
void counters_fe_shared_drop(struct fe_counters_shared *counters);
|
void counters_fe_shared_drop(struct fe_counters_shared *counters);
|
||||||
void counters_be_shared_drop(struct be_counters_shared *counters);
|
void counters_be_shared_drop(struct be_counters_shared *counters);
|
||||||
|
|
@ -45,13 +43,11 @@ void counters_be_shared_drop(struct be_counters_shared *counters);
|
||||||
*/
|
*/
|
||||||
#define COUNTERS_SHARED_LAST_OFFSET(scounters, type, offset) \
|
#define COUNTERS_SHARED_LAST_OFFSET(scounters, type, offset) \
|
||||||
({ \
|
({ \
|
||||||
unsigned long last = 0; \
|
unsigned long last = HA_ATOMIC_LOAD((type *)((char *)scounters[0] + offset));\
|
||||||
unsigned long now_seconds = ns_to_sec(now_ns); \
|
unsigned long now_seconds = ns_to_sec(now_ns); \
|
||||||
int it; \
|
int it; \
|
||||||
\
|
\
|
||||||
if (scounters) \
|
for (it = 1; it < global.nbtgroups; it++) { \
|
||||||
last = HA_ATOMIC_LOAD((type *)((char *)scounters[0] + offset));\
|
|
||||||
for (it = 1; (it < global.nbtgroups && scounters); it++) { \
|
|
||||||
unsigned long cur = HA_ATOMIC_LOAD((type *)((char *)scounters[it] + offset));\
|
unsigned long cur = HA_ATOMIC_LOAD((type *)((char *)scounters[it] + offset));\
|
||||||
if ((now_seconds - cur) < (now_seconds - last)) \
|
if ((now_seconds - cur) < (now_seconds - last)) \
|
||||||
last = cur; \
|
last = cur; \
|
||||||
|
|
@ -78,7 +74,7 @@ void counters_be_shared_drop(struct be_counters_shared *counters);
|
||||||
uint64_t __ret = 0; \
|
uint64_t __ret = 0; \
|
||||||
int it; \
|
int it; \
|
||||||
\
|
\
|
||||||
for (it = 0; (it < global.nbtgroups && scounters); it++) \
|
for (it = 0; it < global.nbtgroups; it++) \
|
||||||
__ret += rfunc((type *)((char *)scounters[it] + offset)); \
|
__ret += rfunc((type *)((char *)scounters[it] + offset)); \
|
||||||
__ret; \
|
__ret; \
|
||||||
})
|
})
|
||||||
|
|
@ -98,105 +94,9 @@ void counters_be_shared_drop(struct be_counters_shared *counters);
|
||||||
uint64_t __ret = 0; \
|
uint64_t __ret = 0; \
|
||||||
int it; \
|
int it; \
|
||||||
\
|
\
|
||||||
for (it = 0; (it < global.nbtgroups && scounters); it++) \
|
for (it = 0; it < global.nbtgroups; it++) \
|
||||||
__ret += rfunc(&scounters[it]->elem, arg1, arg2); \
|
__ret += rfunc(&scounters[it]->elem, arg1, arg2); \
|
||||||
__ret; \
|
__ret; \
|
||||||
})
|
})
|
||||||
|
|
||||||
/* Manipulation of extra_counters, for boot-time registrable modules */
|
|
||||||
/* retrieve the base storage of extra counters (first tgroup if any) */
|
|
||||||
#define EXTRA_COUNTERS_BASE(counters, mod) \
|
|
||||||
(likely(counters) ? \
|
|
||||||
((void *)(*(counters)->datap + (mod)->counters_off[(counters)->type])) : \
|
|
||||||
(trash_counters))
|
|
||||||
|
|
||||||
/* retrieve the pointer to the extra counters storage for module <mod> for the
|
|
||||||
* current TGID.
|
|
||||||
*/
|
|
||||||
#define EXTRA_COUNTERS_GET(counters, mod) \
|
|
||||||
(likely(counters) ? \
|
|
||||||
((void *)(counters)->datap[(counters)->tgrp_step * (tgid - 1)] + \
|
|
||||||
(mod)->counters_off[(counters)->type]) : \
|
|
||||||
(trash_counters))
|
|
||||||
|
|
||||||
#define EXTRA_COUNTERS_REGISTER(counters, ctype, alloc_failed_label, storage, step) \
|
|
||||||
do { \
|
|
||||||
typeof(*counters) _ctr; \
|
|
||||||
_ctr = calloc(1, sizeof(*_ctr)); \
|
|
||||||
if (!_ctr) \
|
|
||||||
goto alloc_failed_label; \
|
|
||||||
_ctr->type = (ctype); \
|
|
||||||
_ctr->tgrp_step = (step); \
|
|
||||||
_ctr->datap = (storage); \
|
|
||||||
*(counters) = _ctr; \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
#define EXTRA_COUNTERS_ADD(mod, counters, new_counters, csize) \
|
|
||||||
do { \
|
|
||||||
typeof(counters) _ctr = (counters); \
|
|
||||||
(mod)->counters_off[_ctr->type] = _ctr->size; \
|
|
||||||
_ctr->size += (csize); \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
#define EXTRA_COUNTERS_ALLOC(counters, alloc_failed_label, nbtg) \
|
|
||||||
do { \
|
|
||||||
typeof(counters) _ctr = (counters); \
|
|
||||||
char **datap = _ctr->datap; \
|
|
||||||
uint tgrp; \
|
|
||||||
_ctr->nbtgrp = _ctr->tgrp_step ? (nbtg) : 1; \
|
|
||||||
for (tgrp = 0; tgrp < _ctr->nbtgrp; tgrp++) { \
|
|
||||||
*datap = malloc((_ctr)->size); \
|
|
||||||
if (!*_ctr->datap) \
|
|
||||||
goto alloc_failed_label; \
|
|
||||||
datap += _ctr->tgrp_step; \
|
|
||||||
} \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
#define EXTRA_COUNTERS_INIT(counters, mod, init_counters, init_counters_size) \
|
|
||||||
do { \
|
|
||||||
typeof(counters) _ctr = (counters); \
|
|
||||||
char **datap = _ctr->datap; \
|
|
||||||
uint tgrp; \
|
|
||||||
for (tgrp = 0; tgrp < _ctr->nbtgrp; tgrp++) { \
|
|
||||||
memcpy(*datap + mod->counters_off[_ctr->type], \
|
|
||||||
(init_counters), (init_counters_size)); \
|
|
||||||
datap += _ctr->tgrp_step; \
|
|
||||||
} \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
#define EXTRA_COUNTERS_FREE(counters) \
|
|
||||||
do { \
|
|
||||||
typeof(counters) _ctr = (counters); \
|
|
||||||
if (_ctr) { \
|
|
||||||
char **datap = _ctr->datap; \
|
|
||||||
uint tgrp; \
|
|
||||||
for (tgrp = 0; tgrp < _ctr->nbtgrp; tgrp++) { \
|
|
||||||
ha_free(datap); \
|
|
||||||
datap += _ctr->tgrp_step; \
|
|
||||||
} \
|
|
||||||
free(_ctr); \
|
|
||||||
} \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
/* aggregate all values of <metricp> over the thread groups handled by
|
|
||||||
* <counters>. <metricp> MUST correspond to an entry of the first tgrp of
|
|
||||||
* <counters>. The number of groups and the step are found in <counters>. The
|
|
||||||
* type of the return value is the same as <metricp>, and must be a scalar so
|
|
||||||
* that values are summed before being returned.
|
|
||||||
*/
|
|
||||||
#define EXTRA_COUNTERS_AGGR(counters, metricp) \
|
|
||||||
({ \
|
|
||||||
typeof(counters) _ctr = (counters); \
|
|
||||||
typeof(metricp) *valp, _ret = 0; \
|
|
||||||
if (_ctr) { \
|
|
||||||
size_t ofs = (char *)&metricp - _ctr->datap[0]; \
|
|
||||||
uint tgrp; \
|
|
||||||
for (tgrp = 0; tgrp < _ctr->nbtgrp; tgrp++) { \
|
|
||||||
valp = (typeof(valp))(_ctr->datap[tgrp * (counters)->tgrp_step] + ofs); \
|
|
||||||
_ret += HA_ATOMIC_LOAD(valp); \
|
|
||||||
} \
|
|
||||||
} \
|
|
||||||
_ret; \
|
|
||||||
})
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_COUNTERS_H */
|
#endif /* _HAPROXY_COUNTERS_H */
|
||||||
|
|
|
||||||
|
|
@ -366,13 +366,6 @@
|
||||||
#define STATS_VERSION_STRING " version " HAPROXY_VERSION ", released " HAPROXY_DATE
|
#define STATS_VERSION_STRING " version " HAPROXY_VERSION ", released " HAPROXY_DATE
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* specifies the default max number of object per thread group that the shm stats file
|
|
||||||
* will be able to handle
|
|
||||||
*/
|
|
||||||
#ifndef SHM_STATS_FILE_MAX_OBJECTS
|
|
||||||
#define SHM_STATS_FILE_MAX_OBJECTS 2000
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/* This is the default statistics URI */
|
/* This is the default statistics URI */
|
||||||
#ifdef CONFIG_STATS_DEFAULT_URI
|
#ifdef CONFIG_STATS_DEFAULT_URI
|
||||||
#define STATS_DEFAULT_URI CONFIG_STATS_DEFAULT_URI
|
#define STATS_DEFAULT_URI CONFIG_STATS_DEFAULT_URI
|
||||||
|
|
|
||||||
|
|
@ -24,12 +24,12 @@
|
||||||
|
|
||||||
#include <import/ebtree-t.h>
|
#include <import/ebtree-t.h>
|
||||||
|
|
||||||
#include <haproxy/buf-t.h>
|
|
||||||
#include <haproxy/connection-t.h>
|
#include <haproxy/connection-t.h>
|
||||||
#include <haproxy/counters-t.h>
|
#include <haproxy/buf-t.h>
|
||||||
#include <haproxy/dgram-t.h>
|
#include <haproxy/dgram-t.h>
|
||||||
#include <haproxy/dns_ring-t.h>
|
#include <haproxy/dns_ring-t.h>
|
||||||
#include <haproxy/obj_type-t.h>
|
#include <haproxy/obj_type-t.h>
|
||||||
|
#include <haproxy/stats-t.h>
|
||||||
#include <haproxy/task-t.h>
|
#include <haproxy/task-t.h>
|
||||||
#include <haproxy/thread.h>
|
#include <haproxy/thread.h>
|
||||||
|
|
||||||
|
|
@ -152,7 +152,6 @@ struct dns_nameserver {
|
||||||
struct dns_stream_server *stream; /* used for tcp dns */
|
struct dns_stream_server *stream; /* used for tcp dns */
|
||||||
|
|
||||||
EXTRA_COUNTERS(extra_counters);
|
EXTRA_COUNTERS(extra_counters);
|
||||||
char *extra_counters_storage; /* storage used for extra_counters above */
|
|
||||||
struct dns_counters *counters;
|
struct dns_counters *counters;
|
||||||
|
|
||||||
struct list list; /* nameserver chained list */
|
struct list list; /* nameserver chained list */
|
||||||
|
|
|
||||||
|
|
@ -36,7 +36,6 @@
|
||||||
#include <haproxy/pool.h>
|
#include <haproxy/pool.h>
|
||||||
|
|
||||||
extern struct pool_head *pool_head_buffer;
|
extern struct pool_head *pool_head_buffer;
|
||||||
extern struct pool_head *pool_head_large_buffer;
|
|
||||||
|
|
||||||
int init_buffer(void);
|
int init_buffer(void);
|
||||||
void buffer_dump(FILE *o, struct buffer *b, int from, int to);
|
void buffer_dump(FILE *o, struct buffer *b, int from, int to);
|
||||||
|
|
@ -54,30 +53,6 @@ static inline int buffer_almost_full(const struct buffer *buf)
|
||||||
return b_almost_full(buf);
|
return b_almost_full(buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Return 1 if <sz> is the default buffer size */
|
|
||||||
static inline int b_is_default_sz(size_t sz)
|
|
||||||
{
|
|
||||||
return (sz == pool_head_buffer->size);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Return 1 if <sz> is the size of a large buffer (alwoys false is large buffers are not configured) */
|
|
||||||
static inline int b_is_large_sz(size_t sz)
|
|
||||||
{
|
|
||||||
return (pool_head_large_buffer && sz == pool_head_large_buffer->size);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Return 1 if <bug> is a default buffer */
|
|
||||||
static inline int b_is_default(struct buffer *buf)
|
|
||||||
{
|
|
||||||
return b_is_default_sz(b_size(buf));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Return 1 if <buf> is a large buffer (alwoys 0 is large buffers are not configured) */
|
|
||||||
static inline int b_is_large(struct buffer *buf)
|
|
||||||
{
|
|
||||||
return b_is_large_sz(b_size(buf));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**************************************************/
|
/**************************************************/
|
||||||
/* Functions below are used for buffer allocation */
|
/* Functions below are used for buffer allocation */
|
||||||
/**************************************************/
|
/**************************************************/
|
||||||
|
|
@ -161,18 +136,13 @@ static inline char *__b_get_emergency_buf(void)
|
||||||
#define __b_free(_buf) \
|
#define __b_free(_buf) \
|
||||||
do { \
|
do { \
|
||||||
char *area = (_buf)->area; \
|
char *area = (_buf)->area; \
|
||||||
size_t sz = (_buf)->size; \
|
|
||||||
\
|
\
|
||||||
/* let's first clear the area to save an occasional "show sess all" \
|
/* let's first clear the area to save an occasional "show sess all" \
|
||||||
* glancing over our shoulder from getting a dangling pointer. \
|
* glancing over our shoulder from getting a dangling pointer. \
|
||||||
*/ \
|
*/ \
|
||||||
*(_buf) = BUF_NULL; \
|
*(_buf) = BUF_NULL; \
|
||||||
__ha_barrier_store(); \
|
__ha_barrier_store(); \
|
||||||
/* if enabled, large buffers are always strictly greater \
|
if (th_ctx->emergency_bufs_left < global.tune.reserved_bufs) \
|
||||||
* than the default buffers */ \
|
|
||||||
if (unlikely(b_is_large_sz(sz))) \
|
|
||||||
pool_free(pool_head_large_buffer, area); \
|
|
||||||
else if (th_ctx->emergency_bufs_left < global.tune.reserved_bufs) \
|
|
||||||
th_ctx->emergency_bufs[th_ctx->emergency_bufs_left++] = area; \
|
th_ctx->emergency_bufs[th_ctx->emergency_bufs_left++] = area; \
|
||||||
else \
|
else \
|
||||||
pool_free(pool_head_buffer, area); \
|
pool_free(pool_head_buffer, area); \
|
||||||
|
|
|
||||||
|
|
@ -1,13 +0,0 @@
|
||||||
/* SPDX-License-Identifier: LGPL-2.1-or-later */
|
|
||||||
#ifndef _HAPROXY_ECH_H
|
|
||||||
# define _HAPROXY_ECH_H
|
|
||||||
#ifdef USE_ECH
|
|
||||||
|
|
||||||
#include <openssl/ech.h>
|
|
||||||
|
|
||||||
int load_echkeys(SSL_CTX *ctx, char *dirname, int *loaded);
|
|
||||||
int conn_get_ech_status(struct connection *conn, struct buffer *buf);
|
|
||||||
int conn_get_ech_outer_sni(struct connection *conn, struct buffer *buf);
|
|
||||||
|
|
||||||
# endif /* USE_ECH */
|
|
||||||
#endif /* _HAPROXY_ECH_H */
|
|
||||||
|
|
@ -202,7 +202,7 @@ struct fdtab {
|
||||||
#ifdef DEBUG_FD
|
#ifdef DEBUG_FD
|
||||||
unsigned int event_count; /* number of events reported */
|
unsigned int event_count; /* number of events reported */
|
||||||
#endif
|
#endif
|
||||||
} THREAD_ALIGNED();
|
} THREAD_ALIGNED(64);
|
||||||
|
|
||||||
/* polled mask, one bit per thread and per direction for each FD */
|
/* polled mask, one bit per thread and per direction for each FD */
|
||||||
struct polled_mask {
|
struct polled_mask {
|
||||||
|
|
|
||||||
|
|
@ -232,28 +232,22 @@ struct filter {
|
||||||
* 0: request channel, 1: response channel */
|
* 0: request channel, 1: response channel */
|
||||||
unsigned int pre_analyzers; /* bit field indicating analyzers to pre-process */
|
unsigned int pre_analyzers; /* bit field indicating analyzers to pre-process */
|
||||||
unsigned int post_analyzers; /* bit field indicating analyzers to post-process */
|
unsigned int post_analyzers; /* bit field indicating analyzers to post-process */
|
||||||
struct list list; /* Filter list for the stream */
|
struct list list; /* Next filter for the same proxy/stream */
|
||||||
/* req_list and res_list are exactly equivalent, except the order may differ */
|
|
||||||
struct list req_list; /* Filter list for request channel */
|
|
||||||
struct list res_list; /* Filter list for response channel */
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Structure reprensenting the "global" state of filters attached to a stream.
|
* Structure reprensenting the "global" state of filters attached to a stream.
|
||||||
* Doesn't hold much information, as the channel themselves hold chn_flt struct
|
|
||||||
* which contains the per-channel members.
|
|
||||||
*/
|
*/
|
||||||
struct strm_flt {
|
struct strm_flt {
|
||||||
struct list filters; /* List of filters attached to a stream */
|
struct list filters; /* List of filters attached to a stream */
|
||||||
|
struct filter *current[2]; /* From which filter resume processing, for a specific channel.
|
||||||
|
* This is used for resumable callbacks only,
|
||||||
|
* If NULL, we start from the first filter.
|
||||||
|
* 0: request channel, 1: response channel */
|
||||||
unsigned short flags; /* STRM_FL_* */
|
unsigned short flags; /* STRM_FL_* */
|
||||||
};
|
unsigned char nb_req_data_filters; /* Number of data filters registered on the request channel */
|
||||||
|
unsigned char nb_rsp_data_filters; /* Number of data filters registered on the response channel */
|
||||||
/* structure holding filter state for some members that are channel oriented */
|
unsigned long long offset[2];
|
||||||
struct chn_flt {
|
|
||||||
struct list filters; /* List of filters attached to a channel */
|
|
||||||
struct filter *current; /* From which filter resume processing, for a specific channel. */
|
|
||||||
unsigned char nb_data_filters; /* Number of data filters registered on channel */
|
|
||||||
unsigned long long offset;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* _HAPROXY_FILTERS_T_H */
|
#endif /* _HAPROXY_FILTERS_T_H */
|
||||||
|
|
|
||||||
|
|
@ -40,13 +40,13 @@ extern const char *fcgi_flt_id;
|
||||||
/* Useful macros to access per-channel values. It can be safely used inside
|
/* Useful macros to access per-channel values. It can be safely used inside
|
||||||
* filters. */
|
* filters. */
|
||||||
#define CHN_IDX(chn) (((chn)->flags & CF_ISRESP) == CF_ISRESP)
|
#define CHN_IDX(chn) (((chn)->flags & CF_ISRESP) == CF_ISRESP)
|
||||||
#define FLT_STRM_OFF(s, chn) (chn->flt.offset)
|
#define FLT_STRM_OFF(s, chn) (strm_flt(s)->offset[CHN_IDX(chn)])
|
||||||
#define FLT_OFF(flt, chn) ((flt)->offset[CHN_IDX(chn)])
|
#define FLT_OFF(flt, chn) ((flt)->offset[CHN_IDX(chn)])
|
||||||
|
|
||||||
#define HAS_FILTERS(strm) ((strm)->strm_flt.flags & STRM_FLT_FL_HAS_FILTERS)
|
#define HAS_FILTERS(strm) ((strm)->strm_flt.flags & STRM_FLT_FL_HAS_FILTERS)
|
||||||
|
|
||||||
#define HAS_REQ_DATA_FILTERS(strm) ((strm)->req.flt.nb_data_filters != 0)
|
#define HAS_REQ_DATA_FILTERS(strm) ((strm)->strm_flt.nb_req_data_filters != 0)
|
||||||
#define HAS_RSP_DATA_FILTERS(strm) ((strm)->res.flt.nb_data_filters != 0)
|
#define HAS_RSP_DATA_FILTERS(strm) ((strm)->strm_flt.nb_rsp_data_filters != 0)
|
||||||
#define HAS_DATA_FILTERS(strm, chn) (((chn)->flags & CF_ISRESP) ? HAS_RSP_DATA_FILTERS(strm) : HAS_REQ_DATA_FILTERS(strm))
|
#define HAS_DATA_FILTERS(strm, chn) (((chn)->flags & CF_ISRESP) ? HAS_RSP_DATA_FILTERS(strm) : HAS_REQ_DATA_FILTERS(strm))
|
||||||
|
|
||||||
#define IS_REQ_DATA_FILTER(flt) ((flt)->flags & FLT_FL_IS_REQ_DATA_FILTER)
|
#define IS_REQ_DATA_FILTER(flt) ((flt)->flags & FLT_FL_IS_REQ_DATA_FILTER)
|
||||||
|
|
@ -137,11 +137,14 @@ static inline void
|
||||||
register_data_filter(struct stream *s, struct channel *chn, struct filter *filter)
|
register_data_filter(struct stream *s, struct channel *chn, struct filter *filter)
|
||||||
{
|
{
|
||||||
if (!IS_DATA_FILTER(filter, chn)) {
|
if (!IS_DATA_FILTER(filter, chn)) {
|
||||||
if (chn->flags & CF_ISRESP)
|
if (chn->flags & CF_ISRESP) {
|
||||||
filter->flags |= FLT_FL_IS_RSP_DATA_FILTER;
|
filter->flags |= FLT_FL_IS_RSP_DATA_FILTER;
|
||||||
else
|
strm_flt(s)->nb_rsp_data_filters++;
|
||||||
|
}
|
||||||
|
else {
|
||||||
filter->flags |= FLT_FL_IS_REQ_DATA_FILTER;
|
filter->flags |= FLT_FL_IS_REQ_DATA_FILTER;
|
||||||
chn->flt.nb_data_filters++;
|
strm_flt(s)->nb_req_data_filters++;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -150,64 +153,16 @@ static inline void
|
||||||
unregister_data_filter(struct stream *s, struct channel *chn, struct filter *filter)
|
unregister_data_filter(struct stream *s, struct channel *chn, struct filter *filter)
|
||||||
{
|
{
|
||||||
if (IS_DATA_FILTER(filter, chn)) {
|
if (IS_DATA_FILTER(filter, chn)) {
|
||||||
if (chn->flags & CF_ISRESP)
|
if (chn->flags & CF_ISRESP) {
|
||||||
filter->flags &= ~FLT_FL_IS_RSP_DATA_FILTER;
|
filter->flags &= ~FLT_FL_IS_RSP_DATA_FILTER;
|
||||||
else
|
strm_flt(s)->nb_rsp_data_filters--;
|
||||||
|
|
||||||
|
}
|
||||||
|
else {
|
||||||
filter->flags &= ~FLT_FL_IS_REQ_DATA_FILTER;
|
filter->flags &= ~FLT_FL_IS_REQ_DATA_FILTER;
|
||||||
chn->flt.nb_data_filters--;
|
strm_flt(s)->nb_req_data_filters--;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* flt_list_start() and flt_list_next() can be used to iterate over the list of filters
|
|
||||||
* for a given <strm> and <chn> combination. It will automatically choose the proper
|
|
||||||
* list to iterate from depending on the context.
|
|
||||||
*
|
|
||||||
* flt_list_start() has to be called exactly once to get the first value from the list
|
|
||||||
* to get the following values, use flt_list_next() until NULL is returned.
|
|
||||||
*
|
|
||||||
* Example:
|
|
||||||
*
|
|
||||||
* struct filter *filter;
|
|
||||||
*
|
|
||||||
* for (filter = flt_list_start(stream, channel); filter;
|
|
||||||
* filter = flt_list_next(stream, channel, filter)) {
|
|
||||||
* ...
|
|
||||||
* }
|
|
||||||
*/
|
|
||||||
static inline struct filter *flt_list_start(struct stream *strm, struct channel *chn)
|
|
||||||
{
|
|
||||||
struct filter *filter;
|
|
||||||
|
|
||||||
if (chn->flags & CF_ISRESP) {
|
|
||||||
filter = LIST_NEXT(&chn->flt.filters, struct filter *, res_list);
|
|
||||||
if (&filter->res_list == &chn->flt.filters)
|
|
||||||
filter = NULL; /* empty list */
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
filter = LIST_NEXT(&chn->flt.filters, struct filter *, req_list);
|
|
||||||
if (&filter->req_list == &chn->flt.filters)
|
|
||||||
filter = NULL; /* empty list */
|
|
||||||
}
|
|
||||||
|
|
||||||
return filter;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline struct filter *flt_list_next(struct stream *strm, struct channel *chn,
|
|
||||||
struct filter *filter)
|
|
||||||
{
|
|
||||||
if (chn->flags & CF_ISRESP) {
|
|
||||||
filter = LIST_NEXT(&filter->res_list, struct filter *, res_list);
|
|
||||||
if (&filter->res_list == &chn->flt.filters)
|
|
||||||
filter = NULL; /* end of list */
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
filter = LIST_NEXT(&filter->req_list, struct filter *, req_list);
|
|
||||||
if (&filter->req_list == &chn->flt.filters)
|
|
||||||
filter = NULL; /* end of list */
|
|
||||||
}
|
|
||||||
|
|
||||||
return filter;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* This function must be called when a filter alter payload data. It updates
|
/* This function must be called when a filter alter payload data. It updates
|
||||||
|
|
@ -222,8 +177,7 @@ flt_update_offsets(struct filter *filter, struct channel *chn, int len)
|
||||||
struct stream *s = chn_strm(chn);
|
struct stream *s = chn_strm(chn);
|
||||||
struct filter *f;
|
struct filter *f;
|
||||||
|
|
||||||
for (f = flt_list_start(s, chn); f;
|
list_for_each_entry(f, &strm_flt(s)->filters, list) {
|
||||||
f = flt_list_next(s, chn, f)) {
|
|
||||||
if (f == filter)
|
if (f == filter)
|
||||||
break;
|
break;
|
||||||
FLT_OFF(f, chn) += len;
|
FLT_OFF(f, chn) += len;
|
||||||
|
|
|
||||||
|
|
@ -31,7 +31,7 @@
|
||||||
ullong _freq_ctr_total_from_values(uint period, int pend, uint tick, ullong past, ullong curr);
|
ullong _freq_ctr_total_from_values(uint period, int pend, uint tick, ullong past, ullong curr);
|
||||||
ullong freq_ctr_total(const struct freq_ctr *ctr, uint period, int pend);
|
ullong freq_ctr_total(const struct freq_ctr *ctr, uint period, int pend);
|
||||||
ullong freq_ctr_total_estimate(const struct freq_ctr *ctr, uint period, int pend);
|
ullong freq_ctr_total_estimate(const struct freq_ctr *ctr, uint period, int pend);
|
||||||
uint freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq);
|
int freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq);
|
||||||
uint update_freq_ctr_period_slow(struct freq_ctr *ctr, uint period, uint inc);
|
uint update_freq_ctr_period_slow(struct freq_ctr *ctr, uint period, uint inc);
|
||||||
|
|
||||||
/* Only usable during single threaded startup phase. */
|
/* Only usable during single threaded startup phase. */
|
||||||
|
|
|
||||||
|
|
@ -67,7 +67,7 @@
|
||||||
#define GTUNE_USE_SYSTEMD (1<<10)
|
#define GTUNE_USE_SYSTEMD (1<<10)
|
||||||
|
|
||||||
#define GTUNE_BUSY_POLLING (1<<11)
|
#define GTUNE_BUSY_POLLING (1<<11)
|
||||||
#define GTUNE_PURGE_DEFAULTS (1<<12)
|
/* (1<<12) unused */
|
||||||
#define GTUNE_SET_DUMPABLE (1<<13)
|
#define GTUNE_SET_DUMPABLE (1<<13)
|
||||||
#define GTUNE_USE_EVPORTS (1<<14)
|
#define GTUNE_USE_EVPORTS (1<<14)
|
||||||
#define GTUNE_STRICT_LIMITS (1<<15)
|
#define GTUNE_STRICT_LIMITS (1<<15)
|
||||||
|
|
@ -80,7 +80,7 @@
|
||||||
#define GTUNE_DISABLE_ACTIVE_CLOSE (1<<22)
|
#define GTUNE_DISABLE_ACTIVE_CLOSE (1<<22)
|
||||||
#define GTUNE_QUICK_EXIT (1<<23)
|
#define GTUNE_QUICK_EXIT (1<<23)
|
||||||
/* (1<<24) unused */
|
/* (1<<24) unused */
|
||||||
/* (1<<25) unused */
|
#define GTUNE_NO_QUIC (1<<25)
|
||||||
#define GTUNE_USE_FAST_FWD (1<<26)
|
#define GTUNE_USE_FAST_FWD (1<<26)
|
||||||
#define GTUNE_LISTENER_MQ_FAIR (1<<27)
|
#define GTUNE_LISTENER_MQ_FAIR (1<<27)
|
||||||
#define GTUNE_LISTENER_MQ_OPT (1<<28)
|
#define GTUNE_LISTENER_MQ_OPT (1<<28)
|
||||||
|
|
@ -167,7 +167,6 @@ struct global {
|
||||||
char *server_state_base; /* path to a directory where server state files can be found */
|
char *server_state_base; /* path to a directory where server state files can be found */
|
||||||
char *server_state_file; /* path to the file where server states are loaded from */
|
char *server_state_file; /* path to the file where server states are loaded from */
|
||||||
char *stats_file; /* path to stats-file */
|
char *stats_file; /* path to stats-file */
|
||||||
char *shm_stats_file; /* path to shm-stats-file */
|
|
||||||
unsigned char cluster_secret[16]; /* 128 bits of an SHA1 digest of a secret defined as ASCII string */
|
unsigned char cluster_secret[16]; /* 128 bits of an SHA1 digest of a secret defined as ASCII string */
|
||||||
struct {
|
struct {
|
||||||
int maxpollevents; /* max number of poll events at once */
|
int maxpollevents; /* max number of poll events at once */
|
||||||
|
|
@ -179,7 +178,6 @@ struct global {
|
||||||
uint recv_enough; /* how many input bytes at once are "enough" */
|
uint recv_enough; /* how many input bytes at once are "enough" */
|
||||||
uint bufsize; /* buffer size in bytes, defaults to BUFSIZE */
|
uint bufsize; /* buffer size in bytes, defaults to BUFSIZE */
|
||||||
uint bufsize_small;/* small buffer size in bytes */
|
uint bufsize_small;/* small buffer size in bytes */
|
||||||
uint bufsize_large;/* large buffer size in bytes */
|
|
||||||
int maxrewrite; /* buffer max rewrite size in bytes, defaults to MAXREWRITE */
|
int maxrewrite; /* buffer max rewrite size in bytes, defaults to MAXREWRITE */
|
||||||
int reserved_bufs; /* how many buffers can only be allocated for response */
|
int reserved_bufs; /* how many buffers can only be allocated for response */
|
||||||
int buf_limit; /* if not null, how many total buffers may only be allocated */
|
int buf_limit; /* if not null, how many total buffers may only be allocated */
|
||||||
|
|
@ -215,6 +213,20 @@ struct global {
|
||||||
uint max_checks_per_thread; /* if >0, no more than this concurrent checks per thread */
|
uint max_checks_per_thread; /* if >0, no more than this concurrent checks per thread */
|
||||||
uint ring_queues; /* if >0, #ring queues, otherwise equals #thread groups */
|
uint ring_queues; /* if >0, #ring queues, otherwise equals #thread groups */
|
||||||
enum threadgroup_takeover tg_takeover; /* Policy for threadgroup takeover */
|
enum threadgroup_takeover tg_takeover; /* Policy for threadgroup takeover */
|
||||||
|
#ifdef USE_QUIC
|
||||||
|
unsigned int quic_backend_max_idle_timeout;
|
||||||
|
unsigned int quic_frontend_max_idle_timeout;
|
||||||
|
unsigned int quic_frontend_glitches_threshold;
|
||||||
|
unsigned int quic_frontend_max_data;
|
||||||
|
unsigned int quic_frontend_max_streams_bidi;
|
||||||
|
uint64_t quic_frontend_max_tx_mem;
|
||||||
|
size_t quic_frontend_max_window_size;
|
||||||
|
unsigned int quic_frontend_stream_data_ratio;
|
||||||
|
unsigned int quic_retry_threshold;
|
||||||
|
unsigned int quic_reorder_ratio;
|
||||||
|
unsigned int quic_max_frame_loss;
|
||||||
|
unsigned int quic_cubic_loss_tol;
|
||||||
|
#endif /* USE_QUIC */
|
||||||
} tune;
|
} tune;
|
||||||
struct {
|
struct {
|
||||||
char *prefix; /* path prefix of unix bind socket */
|
char *prefix; /* path prefix of unix bind socket */
|
||||||
|
|
@ -234,7 +246,6 @@ struct global {
|
||||||
* than 255 arguments
|
* than 255 arguments
|
||||||
*/
|
*/
|
||||||
/* 2-bytes hole */
|
/* 2-bytes hole */
|
||||||
int est_fd_usage; /* rough estimate of reserved FDs (listeners, pollers etc) */
|
|
||||||
int cfg_curr_line; /* line number currently being parsed */
|
int cfg_curr_line; /* line number currently being parsed */
|
||||||
const char *cfg_curr_file; /* config file currently being parsed or NULL */
|
const char *cfg_curr_file; /* config file currently being parsed or NULL */
|
||||||
char *cfg_curr_section; /* config section name currently being parsed or NULL */
|
char *cfg_curr_section; /* config section name currently being parsed or NULL */
|
||||||
|
|
@ -262,7 +273,6 @@ struct global {
|
||||||
unsigned int req_count; /* request counter (HTTP or TCP session) for logs and unique_id */
|
unsigned int req_count; /* request counter (HTTP or TCP session) for logs and unique_id */
|
||||||
int last_checks;
|
int last_checks;
|
||||||
uint32_t anon_key;
|
uint32_t anon_key;
|
||||||
int maxthrpertgroup; /* Maximum number of threads per thread group */
|
|
||||||
|
|
||||||
/* leave this at the end to make sure we don't share this cache line by accident */
|
/* leave this at the end to make sure we don't share this cache line by accident */
|
||||||
ALWAYS_ALIGN(64);
|
ALWAYS_ALIGN(64);
|
||||||
|
|
|
||||||
|
|
@ -24,7 +24,6 @@
|
||||||
|
|
||||||
#include <haproxy/api-t.h>
|
#include <haproxy/api-t.h>
|
||||||
#include <haproxy/global-t.h>
|
#include <haproxy/global-t.h>
|
||||||
#include <haproxy/cfgparse.h>
|
|
||||||
|
|
||||||
extern struct global global;
|
extern struct global global;
|
||||||
extern int pid; /* current process id */
|
extern int pid; /* current process id */
|
||||||
|
|
@ -54,9 +53,6 @@ extern char *progname;
|
||||||
extern char **old_argv;
|
extern char **old_argv;
|
||||||
extern const char *old_unixsocket;
|
extern const char *old_unixsocket;
|
||||||
extern int daemon_fd[2];
|
extern int daemon_fd[2];
|
||||||
extern int devnullfd;
|
|
||||||
extern int fileless_mode;
|
|
||||||
extern struct cfgfile fileless_cfg;
|
|
||||||
|
|
||||||
struct proxy;
|
struct proxy;
|
||||||
struct server;
|
struct server;
|
||||||
|
|
|
||||||
|
|
@ -1,15 +1,14 @@
|
||||||
#ifndef _HAPROXY_GUID_T_H
|
#ifndef _HAPROXY_GUID_T_H
|
||||||
#define _HAPROXY_GUID_T_H
|
#define _HAPROXY_GUID_T_H
|
||||||
|
|
||||||
#include <import/cebtree.h>
|
#include <import/ebtree-t.h>
|
||||||
#include <haproxy/obj_type-t.h>
|
#include <haproxy/obj_type-t.h>
|
||||||
|
|
||||||
/* Maximum GUID size excluding final '\0' */
|
/* Maximum GUID size excluding final '\0' */
|
||||||
#define GUID_MAX_LEN 127
|
#define GUID_MAX_LEN 127
|
||||||
|
|
||||||
struct guid_node {
|
struct guid_node {
|
||||||
struct ceb_node node; /* attach point into GUID global tree */
|
struct ebpt_node node; /* attach point into GUID global tree */
|
||||||
char *key; /* the key itself */
|
|
||||||
enum obj_type *obj_type; /* pointer to GUID obj owner */
|
enum obj_type *obj_type; /* pointer to GUID obj owner */
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -17,7 +17,7 @@ struct guid_node *guid_lookup(const char *uid);
|
||||||
*/
|
*/
|
||||||
static inline const char *guid_get(const struct guid_node *guid)
|
static inline const char *guid_get(const struct guid_node *guid)
|
||||||
{
|
{
|
||||||
return guid->key;
|
return guid->node.key;
|
||||||
}
|
}
|
||||||
|
|
||||||
int guid_is_valid_fmt(const char *uid, char **errmsg);
|
int guid_is_valid_fmt(const char *uid, char **errmsg);
|
||||||
|
|
|
||||||
|
|
@ -263,8 +263,6 @@ static inline int h1_parse_chunk_size(const struct buffer *buf, int start, int s
|
||||||
const char *ptr_old = ptr;
|
const char *ptr_old = ptr;
|
||||||
const char *end = b_wrap(buf);
|
const char *end = b_wrap(buf);
|
||||||
uint64_t chunk = 0;
|
uint64_t chunk = 0;
|
||||||
int backslash = 0;
|
|
||||||
int quote = 0;
|
|
||||||
|
|
||||||
stop -= start; // bytes left
|
stop -= start; // bytes left
|
||||||
start = stop; // bytes to transfer
|
start = stop; // bytes to transfer
|
||||||
|
|
@ -329,37 +327,13 @@ static inline int h1_parse_chunk_size(const struct buffer *buf, int start, int s
|
||||||
if (--stop == 0)
|
if (--stop == 0)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
/* The loop seeks the first CRLF or non-tab CTL char
|
while (!HTTP_IS_CRLF(*ptr)) {
|
||||||
* and stops there. If a backslash/quote is active,
|
|
||||||
* it's an error. If none, we assume it's the CRLF
|
|
||||||
* and go back to the top of the loop checking for
|
|
||||||
* CR then LF. This way CTLs, lone LF etc are handled
|
|
||||||
* in the fallback path. This allows to protect
|
|
||||||
* remotes against their own possibly non-compliant
|
|
||||||
* chunk-ext parser which could mistakenly skip a
|
|
||||||
* quoted CRLF. Chunk-ext are not used anyway, except
|
|
||||||
* by attacks.
|
|
||||||
*/
|
|
||||||
while (!HTTP_IS_CTL(*ptr) || HTTP_IS_SPHT(*ptr)) {
|
|
||||||
if (backslash)
|
|
||||||
backslash = 0; // escaped char
|
|
||||||
else if (*ptr == '\\' && quote)
|
|
||||||
backslash = 1;
|
|
||||||
else if (*ptr == '\\') // backslash not permitted outside quotes
|
|
||||||
goto error;
|
|
||||||
else if (*ptr == '"') // begin/end of quoted-pair
|
|
||||||
quote = !quote;
|
|
||||||
if (++ptr >= end)
|
if (++ptr >= end)
|
||||||
ptr = b_orig(buf);
|
ptr = b_orig(buf);
|
||||||
if (--stop == 0)
|
if (--stop == 0)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
/* we have a CRLF now, loop above */
|
||||||
/* mismatched quotes / backslashes end here */
|
|
||||||
if (quote || backslash)
|
|
||||||
goto error;
|
|
||||||
|
|
||||||
/* CTLs (CRLF) fall to the common check */
|
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
|
|
|
||||||
|
|
@ -222,7 +222,6 @@ struct hlua_proxy_list {
|
||||||
};
|
};
|
||||||
|
|
||||||
struct hlua_proxy_list_iterator_context {
|
struct hlua_proxy_list_iterator_context {
|
||||||
struct watcher px_watch; /* watcher to automatically update next pointer on backend deletion */
|
|
||||||
struct proxy *next;
|
struct proxy *next;
|
||||||
char capabilities;
|
char capabilities;
|
||||||
};
|
};
|
||||||
|
|
@ -256,7 +255,6 @@ struct hlua_patref_iterator_context {
|
||||||
struct hlua_patref *ref;
|
struct hlua_patref *ref;
|
||||||
struct bref bref; /* back-reference from the pat_ref_elt being accessed
|
struct bref bref; /* back-reference from the pat_ref_elt being accessed
|
||||||
* during listing */
|
* during listing */
|
||||||
struct pat_ref_gen *gen; /* the generation we are iterating over */
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#else /* USE_LUA */
|
#else /* USE_LUA */
|
||||||
|
|
|
||||||
|
|
@ -1,36 +0,0 @@
|
||||||
#ifndef _HAPROXY_HSTREAM_T_H
|
|
||||||
#define _HAPROXY_HSTREAM_T_H
|
|
||||||
|
|
||||||
#include <haproxy/dynbuf-t.h>
|
|
||||||
#include <haproxy/http-t.h>
|
|
||||||
#include <haproxy/obj_type-t.h>
|
|
||||||
|
|
||||||
/* hastream stream */
|
|
||||||
struct hstream {
|
|
||||||
enum obj_type obj_type;
|
|
||||||
struct session *sess;
|
|
||||||
|
|
||||||
struct stconn *sc;
|
|
||||||
struct task *task;
|
|
||||||
|
|
||||||
struct buffer req;
|
|
||||||
struct buffer res;
|
|
||||||
unsigned long long to_write; /* #of response data bytes to write after headers */
|
|
||||||
struct buffer_wait buf_wait; /* Wait list for buffer allocation */
|
|
||||||
|
|
||||||
int flags;
|
|
||||||
|
|
||||||
int ka; /* .0: keep-alive .1: forced .2: http/1.1, .3: was_reused */
|
|
||||||
int req_cache;
|
|
||||||
unsigned long long req_size; /* values passed in the URI to override the server's */
|
|
||||||
unsigned long long req_body; /* remaining body to be consumed from the request */
|
|
||||||
int req_code;
|
|
||||||
int res_wait; /* time to wait before replying in ms */
|
|
||||||
int res_time;
|
|
||||||
int req_chunked;
|
|
||||||
int req_random;
|
|
||||||
int req_after_res; /* Drain the request body after having sent the response */
|
|
||||||
enum http_meth_t req_meth;
|
|
||||||
};
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_HSTREAM_T_H */
|
|
||||||
|
|
@ -1,12 +0,0 @@
|
||||||
#ifndef _HAPROXY_HSTREAM_H
|
|
||||||
#define _HAPROXY_HSTREAM_H
|
|
||||||
|
|
||||||
#include <haproxy/cfgparse.h>
|
|
||||||
#include <haproxy/hstream-t.h>
|
|
||||||
|
|
||||||
struct task *sc_hstream_io_cb(struct task *t, void *ctx, unsigned int state);
|
|
||||||
int hstream_wake(struct stconn *sc);
|
|
||||||
void hstream_shutdown(struct stconn *sc);
|
|
||||||
void *hstream_new(struct session *sess, struct stconn *sc, struct buffer *input);
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_HSTREAM_H */
|
|
||||||
|
|
@ -184,7 +184,6 @@ enum {
|
||||||
PERSIST_TYPE_NONE = 0, /* no persistence */
|
PERSIST_TYPE_NONE = 0, /* no persistence */
|
||||||
PERSIST_TYPE_FORCE, /* force-persist */
|
PERSIST_TYPE_FORCE, /* force-persist */
|
||||||
PERSIST_TYPE_IGNORE, /* ignore-persist */
|
PERSIST_TYPE_IGNORE, /* ignore-persist */
|
||||||
PERSIST_TYPE_BE_SWITCH, /* force-be-switch */
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/* final results for http-request rules */
|
/* final results for http-request rules */
|
||||||
|
|
|
||||||
|
|
@ -49,7 +49,7 @@ int http_req_replace_stline(int action, const char *replace, int len,
|
||||||
int http_res_set_status(unsigned int status, struct ist reason, struct stream *s);
|
int http_res_set_status(unsigned int status, struct ist reason, struct stream *s);
|
||||||
void http_check_request_for_cacheability(struct stream *s, struct channel *req);
|
void http_check_request_for_cacheability(struct stream *s, struct channel *req);
|
||||||
void http_check_response_for_cacheability(struct stream *s, struct channel *res);
|
void http_check_response_for_cacheability(struct stream *s, struct channel *res);
|
||||||
enum rule_result http_wait_for_msg_body(struct stream *s, struct channel *chn, unsigned int time, unsigned int bytes, unsigned int large_buffer);
|
enum rule_result http_wait_for_msg_body(struct stream *s, struct channel *chn, unsigned int time, unsigned int bytes);
|
||||||
void http_perform_server_redirect(struct stream *s, struct stconn *sc);
|
void http_perform_server_redirect(struct stream *s, struct stconn *sc);
|
||||||
void http_server_error(struct stream *s, struct stconn *sc, int err, int finst, struct http_reply *msg);
|
void http_server_error(struct stream *s, struct stconn *sc, int err, int finst, struct http_reply *msg);
|
||||||
void http_reply_and_close(struct stream *s, short status, struct http_reply *msg);
|
void http_reply_and_close(struct stream *s, short status, struct http_reply *msg);
|
||||||
|
|
|
||||||
|
|
@ -177,7 +177,7 @@ static forceinline char *hsl_show_flags(char *buf, size_t len, const char *delim
|
||||||
#define HTX_FL_PARSING_ERROR 0x00000001 /* Set when a parsing error occurred */
|
#define HTX_FL_PARSING_ERROR 0x00000001 /* Set when a parsing error occurred */
|
||||||
#define HTX_FL_PROCESSING_ERROR 0x00000002 /* Set when a processing error occurred */
|
#define HTX_FL_PROCESSING_ERROR 0x00000002 /* Set when a processing error occurred */
|
||||||
#define HTX_FL_FRAGMENTED 0x00000004 /* Set when the HTX buffer is fragmented */
|
#define HTX_FL_FRAGMENTED 0x00000004 /* Set when the HTX buffer is fragmented */
|
||||||
#define HTX_FL_UNORDERED 0x00000008 /* Set when the HTX buffer are not ordered */
|
#define HTX_FL_ALTERED_PAYLOAD 0x00000008 /* The payload is altered, the extra value must not be trusted */
|
||||||
#define HTX_FL_EOM 0x00000010 /* Set when end-of-message is reached from the HTTP point of view
|
#define HTX_FL_EOM 0x00000010 /* Set when end-of-message is reached from the HTTP point of view
|
||||||
* (at worst, on the EOM block is missing)
|
* (at worst, on the EOM block is missing)
|
||||||
*/
|
*/
|
||||||
|
|
@ -192,7 +192,7 @@ static forceinline char *htx_show_flags(char *buf, size_t len, const char *delim
|
||||||
_(0);
|
_(0);
|
||||||
/* flags */
|
/* flags */
|
||||||
_(HTX_FL_PARSING_ERROR, _(HTX_FL_PROCESSING_ERROR,
|
_(HTX_FL_PARSING_ERROR, _(HTX_FL_PROCESSING_ERROR,
|
||||||
_(HTX_FL_FRAGMENTED, _(HTX_FL_UNORDERED, _(HTX_FL_EOM)))));
|
_(HTX_FL_FRAGMENTED, _(HTX_FL_EOM))));
|
||||||
/* epilogue */
|
/* epilogue */
|
||||||
_(~0U);
|
_(~0U);
|
||||||
return buf;
|
return buf;
|
||||||
|
|
@ -265,12 +265,13 @@ struct htx {
|
||||||
uint32_t head_addr; /* start address of the free space at the beginning */
|
uint32_t head_addr; /* start address of the free space at the beginning */
|
||||||
uint32_t end_addr; /* end address of the free space at the beginning */
|
uint32_t end_addr; /* end address of the free space at the beginning */
|
||||||
|
|
||||||
|
uint64_t extra; /* known bytes amount remaining to receive */
|
||||||
uint32_t flags; /* HTX_FL_* */
|
uint32_t flags; /* HTX_FL_* */
|
||||||
|
|
||||||
/* XXX 4 bytes unused */
|
/* XXX 4 bytes unused */
|
||||||
|
|
||||||
/* Blocks representing the HTTP message itself */
|
/* Blocks representing the HTTP message itself */
|
||||||
char blocks[VAR_ARRAY] ALIGNED(8);
|
char blocks[VAR_ARRAY] __attribute__((aligned(8)));
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* _HAPROXY_HTX_T_H */
|
#endif /* _HAPROXY_HTX_T_H */
|
||||||
|
|
|
||||||
|
|
@ -30,6 +30,11 @@
|
||||||
#include <haproxy/http-t.h>
|
#include <haproxy/http-t.h>
|
||||||
#include <haproxy/htx-t.h>
|
#include <haproxy/htx-t.h>
|
||||||
|
|
||||||
|
/* ->extra field value when the payload length is unknown (non-chunked message
|
||||||
|
* with no "Content-length" header)
|
||||||
|
*/
|
||||||
|
#define HTX_UNKOWN_PAYLOAD_LENGTH ULLONG_MAX
|
||||||
|
|
||||||
extern struct htx htx_empty;
|
extern struct htx htx_empty;
|
||||||
|
|
||||||
struct htx_blk *htx_defrag(struct htx *htx, struct htx_blk *blk, uint32_t info);
|
struct htx_blk *htx_defrag(struct htx *htx, struct htx_blk *blk, uint32_t info);
|
||||||
|
|
@ -474,12 +479,11 @@ static inline struct htx_sl *htx_add_stline(struct htx *htx, enum htx_blk_type t
|
||||||
static inline struct htx_blk *htx_add_header(struct htx *htx, const struct ist name,
|
static inline struct htx_blk *htx_add_header(struct htx *htx, const struct ist name,
|
||||||
const struct ist value)
|
const struct ist value)
|
||||||
{
|
{
|
||||||
struct htx_blk *blk, *tailblk;
|
struct htx_blk *blk;
|
||||||
|
|
||||||
if (name.len > 255 || value.len > 1048575)
|
if (name.len > 255 || value.len > 1048575)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
tailblk = htx_get_tail_blk(htx);
|
|
||||||
blk = htx_add_blk(htx, HTX_BLK_HDR, name.len + value.len);
|
blk = htx_add_blk(htx, HTX_BLK_HDR, name.len + value.len);
|
||||||
if (!blk)
|
if (!blk)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
@ -487,8 +491,6 @@ static inline struct htx_blk *htx_add_header(struct htx *htx, const struct ist n
|
||||||
blk->info += (value.len << 8) + name.len;
|
blk->info += (value.len << 8) + name.len;
|
||||||
ist2bin_lc(htx_get_blk_ptr(htx, blk), name);
|
ist2bin_lc(htx_get_blk_ptr(htx, blk), name);
|
||||||
memcpy(htx_get_blk_ptr(htx, blk) + name.len, value.ptr, value.len);
|
memcpy(htx_get_blk_ptr(htx, blk) + name.len, value.ptr, value.len);
|
||||||
if (tailblk && htx_get_blk_type(tailblk) >= HTX_BLK_EOH)
|
|
||||||
htx->flags |= HTX_FL_UNORDERED;
|
|
||||||
return blk;
|
return blk;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -498,12 +500,11 @@ static inline struct htx_blk *htx_add_header(struct htx *htx, const struct ist n
|
||||||
static inline struct htx_blk *htx_add_trailer(struct htx *htx, const struct ist name,
|
static inline struct htx_blk *htx_add_trailer(struct htx *htx, const struct ist name,
|
||||||
const struct ist value)
|
const struct ist value)
|
||||||
{
|
{
|
||||||
struct htx_blk *blk, *tailblk;
|
struct htx_blk *blk;
|
||||||
|
|
||||||
if (name.len > 255 || value.len > 1048575)
|
if (name.len > 255 || value.len > 1048575)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
tailblk = htx_get_tail_blk(htx);
|
|
||||||
blk = htx_add_blk(htx, HTX_BLK_TLR, name.len + value.len);
|
blk = htx_add_blk(htx, HTX_BLK_TLR, name.len + value.len);
|
||||||
if (!blk)
|
if (!blk)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
@ -511,8 +512,6 @@ static inline struct htx_blk *htx_add_trailer(struct htx *htx, const struct ist
|
||||||
blk->info += (value.len << 8) + name.len;
|
blk->info += (value.len << 8) + name.len;
|
||||||
ist2bin_lc(htx_get_blk_ptr(htx, blk), name);
|
ist2bin_lc(htx_get_blk_ptr(htx, blk), name);
|
||||||
memcpy(htx_get_blk_ptr(htx, blk) + name.len, value.ptr, value.len);
|
memcpy(htx_get_blk_ptr(htx, blk) + name.len, value.ptr, value.len);
|
||||||
if (tailblk && htx_get_blk_type(tailblk) >= HTX_BLK_EOT)
|
|
||||||
htx->flags |= HTX_FL_UNORDERED;
|
|
||||||
return blk;
|
return blk;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -661,6 +660,7 @@ static inline void htx_reset(struct htx *htx)
|
||||||
htx->tail = htx->head = htx->first = -1;
|
htx->tail = htx->head = htx->first = -1;
|
||||||
htx->data = 0;
|
htx->data = 0;
|
||||||
htx->tail_addr = htx->head_addr = htx->end_addr = 0;
|
htx->tail_addr = htx->head_addr = htx->end_addr = 0;
|
||||||
|
htx->extra = 0;
|
||||||
htx->flags = HTX_FL_NONE;
|
htx->flags = HTX_FL_NONE;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -700,6 +700,8 @@ static inline struct htx *htxbuf(const struct buffer *buf)
|
||||||
htx->size = buf->size - sizeof(*htx);
|
htx->size = buf->size - sizeof(*htx);
|
||||||
htx_reset(htx);
|
htx_reset(htx);
|
||||||
}
|
}
|
||||||
|
if (htx->flags & HTX_FL_ALTERED_PAYLOAD)
|
||||||
|
htx->extra = 0;
|
||||||
return htx;
|
return htx;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -835,10 +837,10 @@ static inline void htx_dump(struct buffer *chunk, const struct htx *htx, int ful
|
||||||
{
|
{
|
||||||
int32_t pos;
|
int32_t pos;
|
||||||
|
|
||||||
chunk_appendf(chunk, " htx=%p(size=%u,data=%u,used=%u,wrap=%s,flags=0x%08x,"
|
chunk_appendf(chunk, " htx=%p(size=%u,data=%u,used=%u,wrap=%s,flags=0x%08x,extra=%llu,"
|
||||||
"first=%d,head=%d,tail=%d,tail_addr=%d,head_addr=%d,end_addr=%d)",
|
"first=%d,head=%d,tail=%d,tail_addr=%d,head_addr=%d,end_addr=%d)",
|
||||||
htx, htx->size, htx->data, htx_nbblks(htx), (!htx->head_addr) ? "NO" : "YES",
|
htx, htx->size, htx->data, htx_nbblks(htx), (!htx->head_addr) ? "NO" : "YES",
|
||||||
htx->flags, htx->first, htx->head, htx->tail,
|
htx->flags, (unsigned long long)htx->extra, htx->first, htx->head, htx->tail,
|
||||||
htx->tail_addr, htx->head_addr, htx->end_addr);
|
htx->tail_addr, htx->head_addr, htx->end_addr);
|
||||||
|
|
||||||
if (!full || !htx_nbblks(htx))
|
if (!full || !htx_nbblks(htx))
|
||||||
|
|
|
||||||
|
|
@ -67,7 +67,6 @@ enum init_stage {
|
||||||
STG_ALLOC, // allocate required structures
|
STG_ALLOC, // allocate required structures
|
||||||
STG_POOL, // create pools
|
STG_POOL, // create pools
|
||||||
STG_INIT, // subsystems normal initialization
|
STG_INIT, // subsystems normal initialization
|
||||||
STG_INIT_2, // runs after step_init_2, to have global.nbthread
|
|
||||||
STG_SIZE // size of the stages array, must be last
|
STG_SIZE // size of the stages array, must be last
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
@ -147,14 +146,14 @@ __attribute__((constructor)) static void __initcb_##linenum() \
|
||||||
#define _DECLARE_INITCALL(...) \
|
#define _DECLARE_INITCALL(...) \
|
||||||
__DECLARE_INITCALL(__VA_ARGS__)
|
__DECLARE_INITCALL(__VA_ARGS__)
|
||||||
|
|
||||||
/* This requires that function <function> is called without arguments
|
/* This requires that function <function> is called with pointer argument
|
||||||
* during init stage <stage> which must be one of init_stage.
|
* <argument> during init stage <stage> which must be one of init_stage.
|
||||||
*/
|
*/
|
||||||
#define INITCALL0(stage, function) \
|
#define INITCALL0(stage, function) \
|
||||||
_DECLARE_INITCALL(stage, __LINE__, function, 0, 0, 0)
|
_DECLARE_INITCALL(stage, __LINE__, function, 0, 0, 0)
|
||||||
|
|
||||||
/* This requires that function <function> is called with pointer argument
|
/* This requires that function <function> is called with pointer argument
|
||||||
* <arg1> during init stage <stage> which must be one of init_stage.
|
* <argument> during init stage <stage> which must be one of init_stage.
|
||||||
*/
|
*/
|
||||||
#define INITCALL1(stage, function, arg1) \
|
#define INITCALL1(stage, function, arg1) \
|
||||||
_DECLARE_INITCALL(stage, __LINE__, function, arg1, 0, 0)
|
_DECLARE_INITCALL(stage, __LINE__, function, arg1, 0, 0)
|
||||||
|
|
@ -203,7 +202,6 @@ DECLARE_INIT_SECTION(STG_REGISTER);
|
||||||
DECLARE_INIT_SECTION(STG_ALLOC);
|
DECLARE_INIT_SECTION(STG_ALLOC);
|
||||||
DECLARE_INIT_SECTION(STG_POOL);
|
DECLARE_INIT_SECTION(STG_POOL);
|
||||||
DECLARE_INIT_SECTION(STG_INIT);
|
DECLARE_INIT_SECTION(STG_INIT);
|
||||||
DECLARE_INIT_SECTION(STG_INIT_2);
|
|
||||||
|
|
||||||
// for use in the main haproxy.c file
|
// for use in the main haproxy.c file
|
||||||
#define DECLARE_INIT_STAGES asm("")
|
#define DECLARE_INIT_STAGES asm("")
|
||||||
|
|
|
||||||
|
|
@ -6,13 +6,13 @@
|
||||||
#include <haproxy/openssl-compat.h>
|
#include <haproxy/openssl-compat.h>
|
||||||
#include <haproxy/jwt-t.h>
|
#include <haproxy/jwt-t.h>
|
||||||
|
|
||||||
size_t bn2base64url(const BIGNUM *bn, char *dst, size_t dsize);
|
int bn2base64url(const BIGNUM *bn, char *dst, size_t dsize);
|
||||||
size_t EVP_PKEY_to_pub_jwk(EVP_PKEY *pkey, char *dst, size_t dsize);
|
int EVP_PKEY_to_pub_jwk(EVP_PKEY *pkey, char *dst, size_t dsize);
|
||||||
enum jwt_alg EVP_PKEY_to_jws_alg(EVP_PKEY *pkey);
|
enum jwt_alg EVP_PKEY_to_jws_alg(EVP_PKEY *pkey);
|
||||||
size_t jws_b64_payload(char *payload, char *dst, size_t dsize);
|
int jws_b64_payload(char *payload, char *dst, size_t dsize);
|
||||||
size_t jws_b64_protected(enum jwt_alg alg, char *kid, char *jwk, char *nonce, char *url, char *dst, size_t dsize);
|
int jws_b64_protected(enum jwt_alg alg, char *kid, char *jwk, char *nonce, char *url, char *dst, size_t dsize);
|
||||||
size_t jws_b64_signature(EVP_PKEY *pkey, enum jwt_alg alg, char *b64protected, char *b64payload, char *dst, size_t dsize);
|
int jws_b64_signature(EVP_PKEY *pkey, enum jwt_alg alg, char *b64protected, char *b64payload, char *dst, size_t dsize);
|
||||||
size_t jws_flattened(char *protected, char *payload, char *signature, char *dst, size_t dsize);
|
int jws_flattened(char *protected, char *payload, char *signature, char *dst, size_t dsize);
|
||||||
size_t jws_thumbprint(EVP_PKEY *pkey, char *dst, size_t dsize);
|
int jws_thumbprint(EVP_PKEY *pkey, char *dst, size_t dsize);
|
||||||
|
|
||||||
#endif /* ! _HAPROXY_JWK_H_ */
|
#endif /* ! _HAPROXY_JWK_H_ */
|
||||||
|
|
|
||||||
|
|
@ -55,7 +55,6 @@ struct jwt_ctx {
|
||||||
struct jwt_item signature;
|
struct jwt_item signature;
|
||||||
char *key;
|
char *key;
|
||||||
unsigned int key_length;
|
unsigned int key_length;
|
||||||
int is_x509; /* 1 if 'key' field is a certificate, 0 otherwise */
|
|
||||||
};
|
};
|
||||||
|
|
||||||
enum jwt_elt {
|
enum jwt_elt {
|
||||||
|
|
@ -65,8 +64,17 @@ enum jwt_elt {
|
||||||
JWT_ELT_MAX
|
JWT_ELT_MAX
|
||||||
};
|
};
|
||||||
|
|
||||||
|
enum jwt_entry_type {
|
||||||
|
JWT_ENTRY_DFLT,
|
||||||
|
JWT_ENTRY_STORE,
|
||||||
|
JWT_ENTRY_PKEY,
|
||||||
|
JWT_ENTRY_INVALID, /* already tried looking into ckch_store tree (unsuccessful) */
|
||||||
|
};
|
||||||
|
|
||||||
struct jwt_cert_tree_entry {
|
struct jwt_cert_tree_entry {
|
||||||
EVP_PKEY *pubkey;
|
EVP_PKEY *pubkey;
|
||||||
|
struct ckch_store *ckch_store;
|
||||||
|
int type; /* jwt_entry_type */
|
||||||
struct ebmb_node node;
|
struct ebmb_node node;
|
||||||
char path[VAR_ARRAY];
|
char path[VAR_ARRAY];
|
||||||
};
|
};
|
||||||
|
|
@ -80,8 +88,7 @@ enum jwt_vrfy_status {
|
||||||
JWT_VRFY_INVALID_TOKEN = -3,
|
JWT_VRFY_INVALID_TOKEN = -3,
|
||||||
JWT_VRFY_OUT_OF_MEMORY = -4,
|
JWT_VRFY_OUT_OF_MEMORY = -4,
|
||||||
JWT_VRFY_UNKNOWN_CERT = -5,
|
JWT_VRFY_UNKNOWN_CERT = -5,
|
||||||
JWT_VRFY_INTERNAL_ERR = -6,
|
JWT_VRFY_INTERNAL_ERR = -6
|
||||||
JWT_VRFY_UNAVAIL_CERT = -7,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* USE_OPENSSL */
|
#endif /* USE_OPENSSL */
|
||||||
|
|
|
||||||
|
|
@ -28,10 +28,12 @@
|
||||||
#ifdef USE_OPENSSL
|
#ifdef USE_OPENSSL
|
||||||
enum jwt_alg jwt_parse_alg(const char *alg_str, unsigned int alg_len);
|
enum jwt_alg jwt_parse_alg(const char *alg_str, unsigned int alg_len);
|
||||||
int jwt_tokenize(const struct buffer *jwt, struct jwt_item *items, unsigned int *item_num);
|
int jwt_tokenize(const struct buffer *jwt, struct jwt_item *items, unsigned int *item_num);
|
||||||
int jwt_tree_load_cert(char *path, int pathlen, int tryload_cert, const char *file, int line, char **err);
|
int jwt_tree_load_cert(char *path, int pathlen, const char *file, int line, char **err);
|
||||||
|
|
||||||
enum jwt_vrfy_status jwt_verify(const struct buffer *token, const struct buffer *alg,
|
enum jwt_vrfy_status jwt_verify(const struct buffer *token, const struct buffer *alg,
|
||||||
const struct buffer *key, int is_x509);
|
const struct buffer *key);
|
||||||
|
|
||||||
|
void jwt_replace_ckch_store(struct ckch_store *old_ckchs, struct ckch_store *new_ckchs);
|
||||||
|
|
||||||
#endif /* USE_OPENSSL */
|
#endif /* USE_OPENSSL */
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -28,13 +28,13 @@
|
||||||
#include <import/ebtree-t.h>
|
#include <import/ebtree-t.h>
|
||||||
|
|
||||||
#include <haproxy/api-t.h>
|
#include <haproxy/api-t.h>
|
||||||
#include <haproxy/counters-t.h>
|
|
||||||
#include <haproxy/guid-t.h>
|
#include <haproxy/guid-t.h>
|
||||||
#include <haproxy/obj_type-t.h>
|
#include <haproxy/obj_type-t.h>
|
||||||
#include <haproxy/quic_cc-t.h>
|
#include <haproxy/quic_cc-t.h>
|
||||||
#include <haproxy/quic_sock-t.h>
|
#include <haproxy/quic_sock-t.h>
|
||||||
#include <haproxy/quic_tp-t.h>
|
#include <haproxy/quic_tp-t.h>
|
||||||
#include <haproxy/receiver-t.h>
|
#include <haproxy/receiver-t.h>
|
||||||
|
#include <haproxy/stats-t.h>
|
||||||
#include <haproxy/thread.h>
|
#include <haproxy/thread.h>
|
||||||
|
|
||||||
/* Some pointer types reference below */
|
/* Some pointer types reference below */
|
||||||
|
|
@ -152,9 +152,6 @@ struct ssl_bind_conf {
|
||||||
char *client_sigalgs; /* Client Signature algorithms */
|
char *client_sigalgs; /* Client Signature algorithms */
|
||||||
struct tls_version_filter ssl_methods_cfg; /* original ssl methods found in configuration */
|
struct tls_version_filter ssl_methods_cfg; /* original ssl methods found in configuration */
|
||||||
struct tls_version_filter ssl_methods; /* actual ssl methods used at runtime */
|
struct tls_version_filter ssl_methods; /* actual ssl methods used at runtime */
|
||||||
#ifdef USE_ECH
|
|
||||||
char *ech_filedir; /* ECH config, file/directory name */
|
|
||||||
#endif
|
|
||||||
#endif
|
#endif
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
@ -186,7 +183,7 @@ struct bind_conf {
|
||||||
#endif
|
#endif
|
||||||
#ifdef USE_QUIC
|
#ifdef USE_QUIC
|
||||||
struct quic_transport_params quic_params; /* QUIC transport parameters. */
|
struct quic_transport_params quic_params; /* QUIC transport parameters. */
|
||||||
const struct quic_cc_algo *quic_cc_algo; /* QUIC control congestion algorithm */
|
struct quic_cc_algo *quic_cc_algo; /* QUIC control congestion algorithm */
|
||||||
size_t max_cwnd; /* QUIC maximumu congestion control window size (kB) */
|
size_t max_cwnd; /* QUIC maximumu congestion control window size (kB) */
|
||||||
enum quic_sock_mode quic_mode; /* QUIC socket allocation strategy */
|
enum quic_sock_mode quic_mode; /* QUIC socket allocation strategy */
|
||||||
#endif
|
#endif
|
||||||
|
|
@ -198,13 +195,11 @@ struct bind_conf {
|
||||||
int maxseg; /* for TCP, advertised MSS */
|
int maxseg; /* for TCP, advertised MSS */
|
||||||
int tcp_ut; /* for TCP, user timeout */
|
int tcp_ut; /* for TCP, user timeout */
|
||||||
char *tcp_md5sig; /* TCP MD5 signature password (RFC2385) */
|
char *tcp_md5sig; /* TCP MD5 signature password (RFC2385) */
|
||||||
char *cc_algo; /* TCP congestion control algorithm ("cc" parameter) */
|
|
||||||
int idle_ping; /* MUX idle-ping interval in ms */
|
int idle_ping; /* MUX idle-ping interval in ms */
|
||||||
int maxaccept; /* if set, max number of connections accepted at once (-1 when disabled) */
|
int maxaccept; /* if set, max number of connections accepted at once (-1 when disabled) */
|
||||||
unsigned int backlog; /* if set, listen backlog */
|
unsigned int backlog; /* if set, listen backlog */
|
||||||
int maxconn; /* maximum connections allowed on this listener */
|
int maxconn; /* maximum connections allowed on this listener */
|
||||||
int (*accept)(struct connection *conn); /* upper layer's accept() */
|
int (*accept)(struct connection *conn); /* upper layer's accept() */
|
||||||
int tcp_ss; /* for TCP, Save SYN */
|
|
||||||
int level; /* stats access level (ACCESS_LVL_*) */
|
int level; /* stats access level (ACCESS_LVL_*) */
|
||||||
int severity_output; /* default severity output format in cli feedback messages */
|
int severity_output; /* default severity output format in cli feedback messages */
|
||||||
short int nice; /* nice value to assign to the instantiated tasks */
|
short int nice; /* nice value to assign to the instantiated tasks */
|
||||||
|
|
@ -243,7 +238,7 @@ struct listener {
|
||||||
enum obj_type obj_type; /* object type = OBJ_TYPE_LISTENER */
|
enum obj_type obj_type; /* object type = OBJ_TYPE_LISTENER */
|
||||||
enum li_state state; /* state: NEW, INIT, ASSIGNED, LISTEN, READY, FULL */
|
enum li_state state; /* state: NEW, INIT, ASSIGNED, LISTEN, READY, FULL */
|
||||||
uint16_t flags; /* listener flags: LI_F_* */
|
uint16_t flags; /* listener flags: LI_F_* */
|
||||||
int luid; /* listener universally unique ID, used for SNMP, indexed by <luid_node> below */
|
int luid; /* listener universally unique ID, used for SNMP */
|
||||||
int nbconn; /* current number of connections on this listener */
|
int nbconn; /* current number of connections on this listener */
|
||||||
unsigned long thr_idx; /* thread indexes for queue distribution (see listener_accept()) */
|
unsigned long thr_idx; /* thread indexes for queue distribution (see listener_accept()) */
|
||||||
__decl_thread(HA_RWLOCK_T lock);
|
__decl_thread(HA_RWLOCK_T lock);
|
||||||
|
|
@ -258,12 +253,14 @@ struct listener {
|
||||||
struct list by_bind; /* chaining in bind_conf's list of listeners */
|
struct list by_bind; /* chaining in bind_conf's list of listeners */
|
||||||
struct bind_conf *bind_conf; /* "bind" line settings, include SSL settings among other things */
|
struct bind_conf *bind_conf; /* "bind" line settings, include SSL settings among other things */
|
||||||
struct receiver rx; /* network receiver parts */
|
struct receiver rx; /* network receiver parts */
|
||||||
struct ceb_node luid_node; /* place in the tree of used IDs, indexes <luid> above */
|
struct {
|
||||||
|
struct eb32_node id; /* place in the tree of used IDs */
|
||||||
|
} conf; /* config information */
|
||||||
|
|
||||||
struct guid_node guid; /* GUID global tree node */
|
struct guid_node guid; /* GUID global tree node */
|
||||||
|
|
||||||
struct li_per_thread *per_thr; /* per-thread fields (one per thread in the group) */
|
struct li_per_thread *per_thr; /* per-thread fields (one per thread in the group) */
|
||||||
|
|
||||||
char *extra_counters_storage; /* storage for extra_counters */
|
|
||||||
EXTRA_COUNTERS(extra_counters);
|
EXTRA_COUNTERS(extra_counters);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
@ -311,7 +308,7 @@ struct bind_kw_list {
|
||||||
struct accept_queue_ring {
|
struct accept_queue_ring {
|
||||||
uint32_t idx; /* (head << 16) | tail */
|
uint32_t idx; /* (head << 16) | tail */
|
||||||
struct tasklet *tasklet; /* tasklet of the thread owning this ring */
|
struct tasklet *tasklet; /* tasklet of the thread owning this ring */
|
||||||
struct connection *entry[ACCEPT_QUEUE_SIZE] THREAD_ALIGNED();
|
struct connection *entry[ACCEPT_QUEUE_SIZE] __attribute((aligned(64)));
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -25,11 +25,8 @@
|
||||||
#include <stdlib.h>
|
#include <stdlib.h>
|
||||||
#include <string.h>
|
#include <string.h>
|
||||||
|
|
||||||
#include <import/ceb32_tree.h>
|
|
||||||
|
|
||||||
#include <haproxy/api.h>
|
#include <haproxy/api.h>
|
||||||
#include <haproxy/listener-t.h>
|
#include <haproxy/listener-t.h>
|
||||||
#include <haproxy/proxy-t.h>
|
|
||||||
|
|
||||||
struct proxy;
|
struct proxy;
|
||||||
struct task;
|
struct task;
|
||||||
|
|
@ -85,12 +82,6 @@ int relax_listener(struct listener *l, int lpx, int lli);
|
||||||
*/
|
*/
|
||||||
void stop_listener(struct listener *l, int lpx, int lpr, int lli);
|
void stop_listener(struct listener *l, int lpx, int lpr, int lli);
|
||||||
|
|
||||||
/* This function returns the first unused listener ID greater than or equal to
|
|
||||||
* <from> in the proxy <px>. Zero is returned if no spare one is found (should
|
|
||||||
* never happen).
|
|
||||||
*/
|
|
||||||
uint listener_get_next_id(const struct proxy *px, uint from);
|
|
||||||
|
|
||||||
/* This function adds the specified listener's file descriptor to the polling
|
/* This function adds the specified listener's file descriptor to the polling
|
||||||
* lists if it is in the LI_LISTEN state. The listener enters LI_READY or
|
* lists if it is in the LI_LISTEN state. The listener enters LI_READY or
|
||||||
* LI_FULL state depending on its number of connections. In daemon mode, we
|
* LI_FULL state depending on its number of connections. In daemon mode, we
|
||||||
|
|
@ -231,7 +222,7 @@ const char *listener_state_str(const struct listener *l);
|
||||||
struct task *accept_queue_process(struct task *t, void *context, unsigned int state);
|
struct task *accept_queue_process(struct task *t, void *context, unsigned int state);
|
||||||
struct task *manage_global_listener_queue(struct task *t, void *context, unsigned int state);
|
struct task *manage_global_listener_queue(struct task *t, void *context, unsigned int state);
|
||||||
|
|
||||||
extern struct accept_queue_ring accept_queue_rings[MAX_THREADS] THREAD_ALIGNED();
|
extern struct accept_queue_ring accept_queue_rings[MAX_THREADS] __attribute__((aligned(64)));
|
||||||
|
|
||||||
extern const char* li_status_st[LI_STATE_COUNT];
|
extern const char* li_status_st[LI_STATE_COUNT];
|
||||||
enum li_status get_li_status(struct listener *l);
|
enum li_status get_li_status(struct listener *l);
|
||||||
|
|
@ -239,12 +230,6 @@ enum li_status get_li_status(struct listener *l);
|
||||||
/* number of times an accepted connection resulted in maxconn being reached */
|
/* number of times an accepted connection resulted in maxconn being reached */
|
||||||
extern ullong maxconn_reached;
|
extern ullong maxconn_reached;
|
||||||
|
|
||||||
/* index listener <li>'s id into proxy <px>'s used_listener_id */
|
|
||||||
static inline void listener_index_id(struct proxy *px, struct listener *li)
|
|
||||||
{
|
|
||||||
ceb32_item_insert(&px->conf.used_listener_id, luid_node, luid, li);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline uint accept_queue_ring_len(const struct accept_queue_ring *ring)
|
static inline uint accept_queue_ring_len(const struct accept_queue_ring *ring)
|
||||||
{
|
{
|
||||||
uint idx, head, tail, len;
|
uint idx, head, tail, len;
|
||||||
|
|
@ -258,12 +243,6 @@ static inline uint accept_queue_ring_len(const struct accept_queue_ring *ring)
|
||||||
return len;
|
return len;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Returns a pointer to the first bind_conf matching either name <name>, or
|
|
||||||
* filename:linenum in <name> if <name> begins with a '@'. NULL is returned if
|
|
||||||
* no match is found.
|
|
||||||
*/
|
|
||||||
struct bind_conf *bind_conf_find_by_name(struct proxy *front, const char *name);
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_LISTENER_H */
|
#endif /* _HAPROXY_LISTENER_H */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
||||||
|
|
@ -335,13 +335,6 @@ struct log_profile {
|
||||||
struct eb_root extra; // extra log profile steps (if any)
|
struct eb_root extra; // extra log profile steps (if any)
|
||||||
};
|
};
|
||||||
|
|
||||||
/* add additional bitmasks in this struct if needed but don't
|
|
||||||
* forget to update px_parse_log_steps() and log_orig_proxy() accordingly
|
|
||||||
*/
|
|
||||||
struct log_steps {
|
|
||||||
uint64_t steps_1; // first 64 steps
|
|
||||||
};
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_LOG_T_H */
|
#endif /* _HAPROXY_LOG_T_H */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
||||||
|
|
@ -62,7 +62,6 @@
|
||||||
#define H2_CF_RCVD_SHUT 0x00020000 // a recv() attempt already failed on a shutdown
|
#define H2_CF_RCVD_SHUT 0x00020000 // a recv() attempt already failed on a shutdown
|
||||||
#define H2_CF_END_REACHED 0x00040000 // pending data too short with RCVD_SHUT present
|
#define H2_CF_END_REACHED 0x00040000 // pending data too short with RCVD_SHUT present
|
||||||
|
|
||||||
#define H2_CF_SETTINGS_NEEDED 0x00080000 // can't proceed without knowing settings (frontend or extensions)
|
|
||||||
#define H2_CF_RCVD_RFC8441 0x00100000 // settings from RFC8441 has been received indicating support for Extended CONNECT
|
#define H2_CF_RCVD_RFC8441 0x00100000 // settings from RFC8441 has been received indicating support for Extended CONNECT
|
||||||
#define H2_CF_SHTS_UPDATED 0x00200000 // SETTINGS_HEADER_TABLE_SIZE updated
|
#define H2_CF_SHTS_UPDATED 0x00200000 // SETTINGS_HEADER_TABLE_SIZE updated
|
||||||
#define H2_CF_DTSU_EMITTED 0x00400000 // HPACK Dynamic Table Size Update opcode emitted
|
#define H2_CF_DTSU_EMITTED 0x00400000 // HPACK Dynamic Table Size Update opcode emitted
|
||||||
|
|
|
||||||
|
|
@ -41,7 +41,6 @@ struct qcc {
|
||||||
struct connection *conn;
|
struct connection *conn;
|
||||||
uint64_t nb_sc; /* number of attached stream connectors */
|
uint64_t nb_sc; /* number of attached stream connectors */
|
||||||
uint64_t nb_hreq; /* number of in-progress http requests */
|
uint64_t nb_hreq; /* number of in-progress http requests */
|
||||||
uint64_t tot_sc; /* total number of stream connectors seen since conn init */
|
|
||||||
uint32_t flags; /* QC_CF_* */
|
uint32_t flags; /* QC_CF_* */
|
||||||
enum qcc_app_st app_st; /* application layer state */
|
enum qcc_app_st app_st; /* application layer state */
|
||||||
int glitches; /* total number of glitches on this connection */
|
int glitches; /* total number of glitches on this connection */
|
||||||
|
|
|
||||||
|
|
@ -1,32 +0,0 @@
|
||||||
#ifndef _HAPROXY_NCBMBUF_T_H
|
|
||||||
#define _HAPROXY_NCBMBUF_T_H
|
|
||||||
|
|
||||||
#include <haproxy/ncbuf_common-t.h>
|
|
||||||
|
|
||||||
/* Non-contiguous bitmap buffer
|
|
||||||
*
|
|
||||||
* This module is an alternative implementation to ncbuf type. Its main
|
|
||||||
* difference is that filled blocks and gaps are encoded via a bitmap.
|
|
||||||
*
|
|
||||||
* The main advantage of the bitmap is that contrary to ncbuf type there is no
|
|
||||||
* limitation on the minimal size of gaps. Thus, operation such as add and
|
|
||||||
* advance are guaranteed to succeed.
|
|
||||||
*
|
|
||||||
* Storage is reserved for the bitmap at the end of the buffer area,
|
|
||||||
* representing roughly 1/9 of the total space. Thus, usable buffer storage is
|
|
||||||
* smaller than the default ncbuf type.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#define NCBMBUF_NULL ((struct ncbmbuf){ })
|
|
||||||
|
|
||||||
struct ncbmbuf {
|
|
||||||
char *area; /* allocated area used for both data and bitmap storage */
|
|
||||||
unsigned char *bitmap; /* bitmap storage located at the end of allocated area */
|
|
||||||
|
|
||||||
ncb_sz_t size; /* size usable for data storage */
|
|
||||||
ncb_sz_t size_bm; /* size of bitmap storage */
|
|
||||||
|
|
||||||
ncb_sz_t head;
|
|
||||||
};
|
|
||||||
|
|
||||||
#endif /* _HAPROXY_NCBMBUF_T_H */
|
|
||||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue