Compare commits

master..v3.2-dev3

No commits in common. "master" and "v3.2-dev3" have entirely different histories.

865 changed files with 25,474 additions and 70,713 deletions

@@ -1,7 +1,7 @@
FreeBSD_task:
freebsd_instance:
matrix:
image_family: freebsd-14-3
image_family: freebsd-14-1
only_if: $CIRRUS_BRANCH =~ 'master|next'
install_script:
- pkg update -f && pkg upgrade -y && pkg install -y openssl git gmake lua54 socat pcre2

@@ -1,34 +0,0 @@
name: 'setup VTest'
description: 'ssss'
runs:
using: "composite"
steps:
- name: Setup coredumps
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
shell: bash
run: |
sudo sysctl -w fs.suid_dumpable=1
sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
- name: Setup ulimit for core dumps
shell: bash
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
ulimit -c unlimited
- name: Install VTest
shell: bash
run: |
scripts/build-vtest.sh
- name: Install problem matcher for VTest
shell: bash
# This allows one to more easily see which tests fail.
run: echo "::add-matcher::.github/vtest.json"

@@ -19,9 +19,9 @@ defaults
frontend h2
mode http
bind 127.0.0.1:8443 ssl crt reg-tests/ssl/certs/common.pem alpn h2,http/1.1
default_backend h2b
bind 127.0.0.1:8443 ssl crt reg-tests/ssl/common.pem alpn h2,http/1.1
default_backend h2
backend h2b
backend h2
errorfile 200 .github/errorfile
http-request deny deny_status 200

.github/matrix.py

@@ -125,11 +125,9 @@ def main(ref_name):
# Ubuntu
if "haproxy-" in ref_name:
os = "ubuntu-24.04" # stable branch
os_arm = "ubuntu-24.04-arm" # stable branch
os = "ubuntu-22.04" # stable branch
else:
os = "ubuntu-24.04" # development branch
os_arm = "ubuntu-24.04-arm" # development branch
os = "ubuntu-24.04" # development branch
TARGET = "linux-glibc"
for CC in ["gcc", "clang"]:
@@ -174,37 +172,36 @@ def main(ref_name):
# ASAN
for os_asan in [os, os_arm]:
matrix.append(
{
"name": "{}, {}, ASAN, all features".format(os_asan, CC),
"os": os_asan,
"TARGET": TARGET,
"CC": CC,
"FLAGS": [
"USE_OBSOLETE_LINKER=1",
'ARCH_FLAGS="-g -fsanitize=address"',
'OPT_CFLAGS="-O1"',
"USE_ZLIB=1",
"USE_OT=1",
"OT_INC=${HOME}/opt-ot/include",
"OT_LIB=${HOME}/opt-ot/lib",
"OT_RUNPATH=1",
"USE_PCRE2=1",
"USE_PCRE2_JIT=1",
"USE_LUA=1",
"USE_OPENSSL=1",
"USE_WURFL=1",
"WURFL_INC=addons/wurfl/dummy",
"WURFL_LIB=addons/wurfl/dummy",
"USE_DEVICEATLAS=1",
"DEVICEATLAS_SRC=addons/deviceatlas/dummy",
"USE_PROMEX=1",
"USE_51DEGREES=1",
"51DEGREES_SRC=addons/51degrees/dummy/pattern",
],
}
)
matrix.append(
{
"name": "{}, {}, ASAN, all features".format(os, CC),
"os": os,
"TARGET": TARGET,
"CC": CC,
"FLAGS": [
"USE_OBSOLETE_LINKER=1",
'ARCH_FLAGS="-g -fsanitize=address"',
'OPT_CFLAGS="-O1"',
"USE_ZLIB=1",
"USE_OT=1",
"OT_INC=${HOME}/opt-ot/include",
"OT_LIB=${HOME}/opt-ot/lib",
"OT_RUNPATH=1",
"USE_PCRE2=1",
"USE_PCRE2_JIT=1",
"USE_LUA=1",
"USE_OPENSSL=1",
"USE_WURFL=1",
"WURFL_INC=addons/wurfl/dummy",
"WURFL_LIB=addons/wurfl/dummy",
"USE_DEVICEATLAS=1",
"DEVICEATLAS_SRC=addons/deviceatlas/dummy",
"USE_PROMEX=1",
"USE_51DEGREES=1",
"51DEGREES_SRC=addons/51degrees/dummy/pattern",
],
}
)
for compression in ["USE_ZLIB=1"]:
matrix.append(
@@ -221,8 +218,7 @@ def main(ref_name):
"stock",
"OPENSSL_VERSION=1.0.2u",
"OPENSSL_VERSION=1.1.1s",
"OPENSSL_VERSION=3.5.1",
"QUICTLS_VERSION=OpenSSL_1_1_1w-quic1",
"QUICTLS=yes",
"WOLFSSL_VERSION=5.7.0",
"AWS_LC_VERSION=1.39.0",
# "BORINGSSL=yes",
@@ -236,7 +232,8 @@ def main(ref_name):
for ssl in ssl_versions:
flags = ["USE_OPENSSL=1"]
skipdup=0
if ssl == "BORINGSSL=yes" or ssl == "QUICTLS=yes" or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl:
flags.append("USE_QUIC=1")
if "WOLFSSL" in ssl:
flags.append("USE_OPENSSL_WOLFSSL=1")
if "AWS_LC" in ssl:
@@ -246,23 +243,8 @@ def main(ref_name):
flags.append("SSL_INC=${HOME}/opt/include")
if "LIBRESSL" in ssl and "latest" in ssl:
ssl = determine_latest_libressl(ssl)
skipdup=1
if "OPENSSL" in ssl and "latest" in ssl:
ssl = determine_latest_openssl(ssl)
skipdup=1
# if "latest" equals a version already in the list
if ssl in ssl_versions and skipdup == 1:
continue
openssl_supports_quic = False
try:
openssl_supports_quic = version.Version(ssl.split("OPENSSL_VERSION=",1)[1]) >= version.Version("3.5.0")
except:
pass
if ssl == "BORINGSSL=yes" or "QUICTLS" in ssl or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl or openssl_supports_quic:
flags.append("USE_QUIC=1")
matrix.append(
{
@@ -280,7 +262,7 @@ def main(ref_name):
if "haproxy-" in ref_name:
os = "macos-13" # stable branch
else:
os = "macos-26" # development branch
os = "macos-15" # development branch
TARGET = "osx"
for CC in ["clang"]:
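These helpers can also be exercised outside of CI. A minimal sketch, assuming the same imports the workflows in this diff use (the determine_latest_aws_lc call is copied verbatim from the AWS-LC workflow; calling main('master') directly and relying on it to print the generated matrix is an assumption):

    $ cd .github
    $ python3 -c "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))"
    $ python3 -c "from matrix import main; main('master')"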

@@ -5,8 +5,82 @@ on:
- cron: "0 0 * * 4"
workflow_dispatch:
permissions:
contents: read
jobs:
test:
uses: ./.github/workflows/aws-lc-template.yml
with:
command: "from matrix import determine_latest_aws_lc_fips; print(determine_latest_aws_lc_fips(''))"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Determine latest AWS-LC release
id: get_aws_lc_release
run: |
result=$(cd .github && python3 -c "from matrix import determine_latest_aws_lc_fips; print(determine_latest_aws_lc_fips(''))")
echo $result
echo "result=$result" >> $GITHUB_OUTPUT
- name: Cache AWS-LC
id: cache_aws_lc
uses: actions/cache@v4
with:
path: '~/opt/'
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb
- name: Install AWS-LC
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi
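Note that both the old and new variants of this workflow keep the workflow_dispatch trigger, so a run can also be started by hand. A hypothetical invocation via the GitHub CLI (the workflow file name aws-lc-fips.yml is an assumption, it is not shown in this diff):

    $ gh workflow run aws-lc-fips.yml --ref master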

@@ -1,94 +0,0 @@
name: AWS-LC template
on:
workflow_call:
inputs:
command:
required: true
type: string
permissions:
contents: read
jobs:
test:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- name: Determine latest AWS-LC release
id: get_aws_lc_release
run: |
result=$(cd .github && python3 -c "${{ inputs.command }}")
echo $result
echo "result=$result" >> $GITHUB_OUTPUT
- name: Cache AWS-LC
id: cache_aws_lc
uses: actions/cache@v4
with:
path: '~/opt/'
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb jose
- name: Install AWS-LC
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-vtest
- name: Run VTest for HAProxy
id: vtest
run: |
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi
- name: Show Unit-Tests results
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
run: |
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
printf "::group::"
cat $result
echo "::endgroup::"
done
exit 1

@@ -5,8 +5,82 @@ on:
- cron: "0 0 * * 4"
workflow_dispatch:
permissions:
contents: read
jobs:
test:
uses: ./.github/workflows/aws-lc-template.yml
with:
command: "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Determine latest AWS-LC release
id: get_aws_lc_release
run: |
result=$(cd .github && python3 -c "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))")
echo $result
echo "result=$result" >> $GITHUB_OUTPUT
- name: Cache AWS-LC
id: cache_aws_lc
uses: actions/cache@v4
with:
path: '~/opt/'
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb
- name: Install AWS-LC
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi

@@ -3,7 +3,6 @@ name: Spelling Check
on:
schedule:
- cron: "0 0 * * 2"
workflow_dispatch:
permissions:
contents: read
@@ -11,12 +10,12 @@ permissions:
jobs:
codespell:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
if: ${{ github.repository_owner == 'haproxy' }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- uses: codespell-project/codespell-problem-matcher@v1.2.0
- uses: codespell-project/actions-codespell@master
with:
skip: CHANGELOG,Makefile,*.fig,*.pem,./doc/design-thoughts,./doc/internals
ignore_words_list: pres,ist,ists,hist,wan,ca,cas,que,ans,te,nd,referer,ot,uint,iif,fo,keep-alives,dosen,ifset,thrid,strack,ba,chck,hel,unx,mor,clen,collet,bu,htmp,siz,experim
ignore_words_list: ist,ists,hist,wan,ca,cas,que,ans,te,nd,referer,ot,uint,iif,fo,keep-alives,dosen,ifset,thrid,strack,ba,chck,hel,unx,mor,clen,collet,bu,htmp,siz,experim
uri_ignore_words_list: trafic,ressources

@@ -11,10 +11,15 @@ permissions:
jobs:
h2spec:
name: h2spec
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
include:
- TARGET: linux-glibc
CC: gcc
os: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Install h2spec
id: install-h2spec
run: |
@@ -23,12 +28,12 @@ jobs:
tar xvf h2spec.tar.gz
sudo install -m755 h2spec /usr/local/bin/h2spec
echo "version=${H2SPEC_VERSION}" >> $GITHUB_OUTPUT
- name: Compile HAProxy with gcc
- name: Compile HAProxy with ${{ matrix.CC }}
run: |
make -j$(nproc) all \
ERR=1 \
TARGET=linux-glibc \
CC=gcc \
TARGET=${{ matrix.TARGET }} \
CC=${{ matrix.CC }} \
DEBUG="-DDEBUG_POOL_INTEGRITY" \
USE_OPENSSL=1
sudo make install

@@ -10,7 +10,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Compile admin/halog/halog
run: |
make admin/halog/halog

@@ -15,9 +15,9 @@ permissions:
jobs:
scan:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
if: ${{ github.repository_owner == 'haproxy' }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
@@ -27,7 +27,7 @@ jobs:
libsystemd-dev
- name: Install QUICTLS
run: |
QUICTLS_VERSION=OpenSSL_1_1_1w-quic1 scripts/build-ssl.sh
QUICTLS=yes scripts/build-ssl.sh
- name: Download Coverity build tool
run: |
wget -c -N https://scan.coverity.com/download/linux64 --post-data "token=${{ secrets.COVERITY_SCAN_TOKEN }}&project=Haproxy" -O coverity_tool.tar.gz
@@ -38,7 +38,7 @@ jobs:
- name: Build with Coverity build tool
run: |
export PATH=`pwd`/coverity_tool/bin:$PATH
cov-build --dir cov-int make CC=clang TARGET=linux-glibc USE_ZLIB=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=2 DEBUG+=-DDEBUG_USE_ABORT=1
cov-build --dir cov-int make CC=clang TARGET=linux-glibc USE_ZLIB=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=1 DEBUG+=-DDEBUG_USE_ABORT=1
- name: Submit build result to Coverity Scan
run: |
tar czvf cov.tar.gz cov-int

@@ -91,7 +91,7 @@ jobs:
}
]
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
if: ${{ github.repository_owner == 'haproxy' }}
steps:
- name: install packages
run: |
@@ -99,12 +99,12 @@ jobs:
sudo apt-get -yq --force-yes install \
gcc-${{ matrix.platform.arch }} \
${{ matrix.platform.libs }}
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: install quictls
run: |
QUICTLS_EXTRA_ARGS="--cross-compile-prefix=${{ matrix.platform.arch }}- ${{ matrix.platform.target }}" QUICTLS_VERSION=OpenSSL_1_1_1w-quic1 scripts/build-ssl.sh
QUICTLS_EXTRA_ARGS="--cross-compile-prefix=${{ matrix.platform.arch }}- ${{ matrix.platform.target }}" QUICTLS=yes scripts/build-ssl.sh
- name: Build
run: |

@@ -1,4 +1,4 @@
name: Fedora/Rawhide/OpenSSL
name: Fedora/Rawhide/QuicTLS
on:
schedule:
@@ -13,24 +13,26 @@ jobs:
strategy:
matrix:
platform: [
{ name: x64, cc: gcc, ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
{ name: x64, cc: clang, ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
{ name: x86, cc: gcc, ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" },
{ name: x86, cc: clang, ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }
{ name: x64, cc: gcc, QUICTLS_EXTRA_ARGS: "", ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
{ name: x64, cc: clang, QUICTLS_EXTRA_ARGS: "", ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
{ name: x86, cc: gcc, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" },
{ name: x86, cc: clang, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }
]
fail-fast: false
name: ${{ matrix.platform.cc }}.${{ matrix.platform.name }}
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
if: ${{ github.repository_owner == 'haproxy' }}
container:
image: fedora:rawhide
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Install dependencies
run: |
dnf -y install awk diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang openssl-devel.x86_64
dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686 openssl-devel.i686
- uses: ./.github/actions/setup-vtest
dnf -y install diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang
dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686
- name: Install VTest
run: scripts/build-vtest.sh
- name: Install QuicTLS
run: QUICTLS=yes QUICTLS_EXTRA_ARGS="${{ matrix.platform.QUICTLS_EXTRA_ARGS }}" scripts/build-ssl.sh
- name: Build contrib tools
run: |
make admin/halog/halog
@@ -39,7 +41,7 @@ jobs:
make dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
- name: Compile HAProxy with ${{ matrix.platform.cc }}
run: |
make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" USE_PROMEX=1 USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }}" ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }} -Wl,-rpath,${HOME}/opt/lib" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
make install
- name: Show HAProxy version
id: show-version
@@ -49,13 +51,6 @@ jobs:
echo "::endgroup::"
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
#
# TODO: review this workaround later
- name: relax crypto policies
run: |
dnf -y install crypto-policies-scripts
echo LEGACY > /etc/crypto-policies/config
update-crypto-policies
- name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
id: vtest
run: |
@@ -69,7 +64,3 @@ jobs:
cat $folder/LOG
echo "::endgroup::"
done
- name: Run Unit tests
id: unittests
run: |
make unit-tests
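Since this job runs in the fedora:rawhide container, it can be reproduced locally with any OCI runtime before pushing. A rough sketch, assuming Docker and a HAProxy checkout in the current directory (the dnf and make commands are then the ones from the steps above):

    $ docker run --rm -it -v "$PWD:/src" -w /src fedora:rawhide bash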

@@ -8,12 +8,12 @@ on:
jobs:
gcc:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
if: ${{ github.repository_owner == 'haproxy' }}
permissions:
contents: read
steps:
- name: "Checkout repository"
uses: actions/checkout@v5
uses: actions/checkout@v4
- name: "Build on VM"
uses: vmactions/solaris-vm@v1

@@ -20,13 +20,13 @@ jobs:
run: |
ulimit -c unlimited
echo '/tmp/core/core.%h.%e.%t' > /proc/sys/kernel/core_pattern
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Install dependencies
run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg jose
run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg
- name: Install VTest
run: scripts/build-vtest.sh
- name: Build
run: make -j$(nproc) TARGET=linux-musl DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
run: make -j$(nproc) TARGET=linux-musl ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
- name: Show version
run: ./haproxy -vv
- name: Show linked libraries
@@ -37,10 +37,6 @@ jobs:
- name: Run VTest
id: vtest
run: make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
@@ -64,13 +60,3 @@ jobs:
cat $folder/LOG
echo "::endgroup::"
done
- name: Show Unit-Tests results
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
run: |
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
printf "::group::"
cat $result
echo "::endgroup::"
done
exit 1

@@ -8,12 +8,12 @@ on:
jobs:
gcc:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
if: ${{ github.repository_owner == 'haproxy' }}
permissions:
contents: read
steps:
- name: "Checkout repository"
uses: actions/checkout@v5
uses: actions/checkout@v4
- name: "Build on VM"
uses: vmactions/netbsd-vm@v1

@@ -1,82 +0,0 @@
name: openssl ECH
on:
schedule:
- cron: "0 3 * * *"
workflow_dispatch:
permissions:
contents: read
jobs:
test:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb
sudo apt-get --no-install-recommends -y install libpsl-dev
- name: Install OpenSSL+ECH
run: env OPENSSL_VERSION="git-feature/ech" GIT_TYPE="branch" scripts/build-ssl.sh
- name: Install curl+ECH
run: env SSL_LIB=${HOME}/opt/ scripts/build-curl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) CC=gcc TARGET=linux-glibc \
USE_QUIC=1 USE_OPENSSL=1 USE_ECH=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
ARCH_FLAGS="-ggdb3 -fsanitize=address"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi

@@ -1,77 +0,0 @@
name: openssl master
on:
schedule:
- cron: "0 3 * * *"
workflow_dispatch:
permissions:
contents: read
jobs:
test:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb
sudo apt-get --no-install-recommends -y install libpsl-dev
- uses: ./.github/actions/setup-vtest
- name: Install OpenSSL master
run: env OPENSSL_VERSION="git-master" GIT_TYPE="branch" scripts/build-ssl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
USE_QUIC=1 USE_OPENSSL=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi

@@ -0,0 +1,34 @@
#
# special purpose CI: test against OpenSSL built in "no-deprecated" mode
# let us run those builds weekly
#
# for example, OpenWRT uses such OpenSSL builds (those builds are smaller)
#
#
# some details might be found at NL: https://www.mail-archive.com/haproxy@formilux.org/msg35759.html
# GH: https://github.com/haproxy/haproxy/issues/367
name: openssl no-deprecated
on:
schedule:
- cron: "0 0 * * 4"
workflow_dispatch:
permissions:
contents: read
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Compile HAProxy
run: |
make DEFINE="-DOPENSSL_API_COMPAT=0x10100000L -DOPENSSL_NO_DEPRECATED" -j3 CC=gcc ERR=1 TARGET=linux-glibc USE_OPENSSL=1
- name: Run VTest
run: |
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel

@@ -13,13 +13,13 @@ on:
jobs:
build:
runs-on: ubuntu-24.04
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
if: ${{ github.repository_owner == 'haproxy' }}
permissions:
contents: read
packages: write
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Log in to the Container registry
uses: docker/login-action@v3
@@ -35,18 +35,9 @@ jobs:
context: https://github.com/haproxytech/haproxy-qns.git
push: true
build-args: |
SSLLIB=AWS-LC
SSLLIB: AWS-LC
tags: ghcr.io/${{ github.repository }}:aws-lc
- name: Cleanup registry
uses: actions/delete-package-versions@v5
with:
owner: ${{ github.repository_owner }}
package-name: 'haproxy'
package-type: container
min-versions-to-keep: 1
delete-only-untagged-versions: 'true'
run:
needs: build
strategy:
@@ -61,10 +52,10 @@ jobs:
name: ${{ matrix.suite.client }}
runs-on: ubuntu-24.04
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
if: ${{ github.repository_owner == 'haproxy' }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Log in to the Container registry
uses: docker/login-action@v3

@@ -13,13 +13,13 @@ on:
jobs:
build:
runs-on: ubuntu-24.04
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
if: ${{ github.repository_owner == 'haproxy' }}
permissions:
contents: read
packages: write
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Log in to the Container registry
uses: docker/login-action@v3
@@ -35,18 +35,9 @@ jobs:
context: https://github.com/haproxytech/haproxy-qns.git
push: true
build-args: |
SSLLIB=LibreSSL
SSLLIB: LibreSSL
tags: ghcr.io/${{ github.repository }}:libressl
- name: Cleanup registry
uses: actions/delete-package-versions@v5
with:
owner: ${{ github.repository_owner }}
package-name: 'haproxy'
package-type: container
min-versions-to-keep: 1
delete-only-untagged-versions: 'true'
run:
needs: build
strategy:
@@ -59,10 +50,10 @@ jobs:
name: ${{ matrix.suite.client }}
runs-on: ubuntu-24.04
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
if: ${{ github.repository_owner == 'haproxy' }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Log in to the Container registry
uses: docker/login-action@v3

@@ -1,74 +0,0 @@
#
# weekly run against modern QuicTLS branch, i.e. https://github.com/quictls/quictls
#
name: QuicTLS
on:
schedule:
- cron: "0 0 * * 4"
workflow_dispatch:
permissions:
contents: read
jobs:
test:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb
- name: Install QuicTLS
run: env QUICTLS_VERSION=main QUICTLS_URL=https://github.com/quictls/quictls scripts/build-ssl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
USE_QUIC=1 USE_OPENSSL=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
ARCH_FLAGS="-ggdb3 -fsanitize=address"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-vtest
- name: Run VTest for HAProxy
id: vtest
run: |
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi

@@ -23,7 +23,7 @@ jobs:
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Generate Build Matrix
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -44,10 +44,16 @@ jobs:
TMPDIR: /tmp
OT_CPP_VERSION: 1.6.0
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
with:
fetch-depth: 100
- name: Setup coredumps
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
run: |
sudo sysctl -w fs.suid_dumpable=1
sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
#
# Github Action cache key cannot contain comma, so we calculate it based on job name
#
@@ -57,7 +63,7 @@ jobs:
echo "key=$(echo ${{ matrix.name }} | sha256sum | awk '{print $1}')" >> $GITHUB_OUTPUT
- name: Cache SSL libs
if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 'BORINGSSL=yes' && !contains(matrix.ssl, 'QUICTLS') }}
if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 'BORINGSSL=yes' && matrix.ssl != 'QUICTLS=yes' }}
id: cache_ssl
uses: actions/cache@v4
with:
@@ -70,7 +76,7 @@ jobs:
uses: actions/cache@v4
with:
path: '~/opt-ot/'
key: ${{ matrix.os }}-ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
key: ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
- name: Install apt dependencies
if: ${{ startsWith(matrix.os, 'ubuntu-') }}
run: |
@@ -80,14 +86,15 @@ jobs:
${{ contains(matrix.FLAGS, 'USE_PCRE2=1') && 'libpcre2-dev' || '' }} \
${{ contains(matrix.ssl, 'BORINGSSL=yes') && 'ninja-build' || '' }} \
socat \
gdb \
jose
gdb
- name: Install brew dependencies
if: ${{ startsWith(matrix.os, 'macos-') }}
run: |
brew install socat
brew install lua
- uses: ./.github/actions/setup-vtest
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Install SSL ${{ matrix.ssl }}
if: ${{ matrix.ssl && matrix.ssl != 'stock' && steps.cache_ssl.outputs.cache-hit != 'true' }}
run: env ${{ matrix.ssl }} scripts/build-ssl.sh
@@ -110,19 +117,10 @@ jobs:
ERR=1 \
TARGET=${{ matrix.TARGET }} \
CC=${{ matrix.CC }} \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
${{ join(matrix.FLAGS, ' ') }} \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install-bin
- name: Compile admin/halog/halog
run: |
make -j$(nproc) admin/halog/halog \
ERR=1 \
TARGET=${{ matrix.TARGET }} \
CC=${{ matrix.CC }} \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
DEBUG="-DDEBUG_POOL_INTEGRITY" \
${{ join(matrix.FLAGS, ' ') }} \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
@@ -137,10 +135,25 @@ jobs:
echo "::endgroup::"
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
# This allows one to more easily see which tests fail.
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Config syntax check memleak smoke testing
if: ${{ contains(matrix.name, 'ASAN') }}
run: |
./haproxy -dI -f .github/h2spec.config -c
./haproxy -dI -f examples/content-sw-sample.cfg -c
./haproxy -dI -f examples/option-http_proxy.cfg -c
./haproxy -dI -f examples/quick-test.cfg -c
./haproxy -dI -f examples/transparent_proxy.cfg -c
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
@@ -151,19 +164,7 @@ jobs:
echo "::endgroup::"
done
exit 1
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show Unit-Tests results
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
run: |
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
printf "::group::"
cat $result
echo "::endgroup::"
done
exit 1
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |

@@ -35,7 +35,7 @@ jobs:
- USE_THREAD=1
- USE_ZLIB=1
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- uses: msys2/setup-msys2@v2
with:
install: >-

@@ -11,13 +11,15 @@ permissions:
jobs:
test:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Install VTest
run: |
scripts/build-vtest.sh
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb jose
sudo apt-get --no-install-recommends -y install socat gdb
- name: Install WolfSSL
run: env WOLFSSL_VERSION=git-master WOLFSSL_DEBUG=1 scripts/build-ssl.sh
- name: Compile HAProxy
@@ -25,7 +27,7 @@ jobs:
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
USE_OPENSSL_WOLFSSL=1 USE_QUIC=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
DEBUG="-DDEBUG_POOL_INTEGRITY" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
ARCH_FLAGS="-ggdb3 -fsanitize=address"
sudo make install
@@ -35,15 +37,17 @@ jobs:
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- uses: ./.github/actions/setup-vtest
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
@@ -68,13 +72,3 @@ jobs:
if [ "$failed" = true ]; then
exit 1;
fi
- name: Show Unit-Tests results
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
run: |
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
printf "::group::"
cat $result
echo "::endgroup::"
done
exit 1

.gitignore

@@ -57,4 +57,3 @@ dev/udp/udp-perturb
/src/dlmalloc.c
/tests/test_hashes
doc/lua-api/_build
dev/term_events/term_events

@@ -171,17 +171,7 @@ feedback for developers:
as the previous releases that had 6 months to stabilize. In terms of
stability it really means that the point zero version already accumulated
6 months of fixes and that it is much safer to use even just after it is
released. There is one exception though, features marked as "experimental"
are not guaranteed to be maintained beyond the release of the next LTS
branch. The rationale here is that the experimental status is made to
expose an early preview of a feature, that is often incomplete, not always
in its definitive form regarding configuration, and for which developers
are seeking feedback from the users. It is even possible that changes will
be brought within the stable branch and it may happen that the feature
breaks. It is not imaginable to always be able to backport bug fixes too
far in this context since the code and configuration may change quite a
bit. Users who want to try experimental features are expected to upgrade
quickly to benefit from the improvements made to that feature.
released.
- for developers, given that the odd versions are solely used by highly
skilled users, it's easier to get advanced traces and captures, and there

CHANGELOG

File diff suppressed because it is too large

@@ -1010,7 +1010,7 @@ you notice you're already practising some of them:
- continue to send pull requests after having been explained why they are not
welcome.
- give wrong advice to people asking for help, or sending them patches to
- give wrong advices to people asking for help, or sending them patches to
try which make no sense, waste their time, and give them a bad impression
of the people working on the project.

INSTALL

@@ -111,22 +111,20 @@ HAProxy requires a working GCC or Clang toolchain and GNU make :
may want to retry with "gmake" which is the name commonly used for GNU make
on BSD systems.
- GCC >= 4.7 (up to 15 tested). Older versions are no longer supported due to
the latest mt_list update which only uses c11-like atomics. Newer versions
may sometimes break due to compiler regressions or behaviour changes. The
version shipped with your operating system is very likely to work with no
trouble. Clang >= 3.0 is also known to work as an alternative solution, and
versions up to 19 were successfully tested. Recent versions may emit a bit
more warnings that are worth reporting as they may reveal real bugs. TCC
(https://repo.or.cz/tinycc.git) is also usable for developers but will not
support threading and was found at least once to produce bad code in some
rare corner cases (since fixed). But it builds extremely quickly (typically
half a second for the whole project) and is very convenient to run quick
tests during API changes or code refactoring.
- GCC >= 4.2 (up to 14 tested). Older versions can be made to work with a
few minor adaptations if really needed. Newer versions may sometimes break
due to compiler regressions or behaviour changes. The version shipped with
your operating system is very likely to work with no trouble. Clang >= 3.0
is also known to work as an alternative solution. Recent versions may emit
a bit more warnings that are worth reporting as they may reveal real bugs.
TCC (https://repo.or.cz/tinycc.git) is also usable for developers but will
not support threading and was found at least once to produce bad code in
some rare corner cases (since fixed). But it builds extremely quickly
(typically half a second for the whole project) and is very convenient to
run quick tests during API changes or code refactoring.
- GNU ld (binutils package), with no particular version. Other linkers might
work but were not tested. The default one from your operating system will
normally work.
work but were not tested.
On debian or Ubuntu systems and their derivatives, you may get all these tools
at once by issuing the two following commands :
@@ -237,7 +235,7 @@ to forcefully enable it using "USE_LIBCRYPT=1".
-----------------
For SSL/TLS, it is necessary to use a cryptography library. HAProxy currently
supports the OpenSSL library, and is known to build and work with branches
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.6. It is recommended to use
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.4. It is recommended to use
at least OpenSSL 1.1.1 to have support for all SSL keywords and configuration
in HAProxy. OpenSSL follows a long-term support cycle similar to HAProxy's,
and each of the branches above receives its own fixes, without forcing you to
@@ -259,15 +257,11 @@ reported to work as well. While there are some efforts from the community to
ensure they work well, OpenSSL remains the primary target and this means that
in case of conflicting choices, OpenSSL support will be favored over other
options. Note that QUIC is not fully supported when haproxy is built with
OpenSSL < 3.5.2 version. In this case, QUICTLS or AWS-LC are the preferred
alternatives. As of writing this, the QuicTLS project follows OpenSSL very
closely and provides update simultaneously, but being a volunteer-driven
project, its long-term future does not look certain enough to convince
operating systems to package it, so it needs to be build locally. Recent
versions of AWS-LC (>= 1.22 and the FIPS branches) are pretty complete and
generally more performant than other OpenSSL derivatives, but may behave
slightly differently, particularly when dealing with outdated setups. See
the section about QUIC in this document.
OpenSSL. In this case, QUICTLS is the preferred alternative. As of writing
this, the QuicTLS project follows OpenSSL very closely and provides update
simultaneously, but being a volunteer-driven project, its long-term future does
not look certain enough to convince operating systems to package it, so it
needs to be build locally. See the section about QUIC in this document.
A fifth option is wolfSSL (https://github.com/wolfSSL/wolfssl). It is the only
supported alternative stack not based on OpenSSL, yet which implements almost
@@ -494,8 +488,8 @@ target. Common issues may include:
other supported compatible library.
- many "dereferencing pointer 'sa.985' does break strict-aliasing rules"
=> these warnings happen on old compilers (typically gcc before 7.x),
and may safely be ignored; newer ones are better on these.
=> these warnings happen on old compilers (typically gcc-4.4), and may
safely be ignored; newer ones are better on these.
4.11) QUIC
@@ -504,11 +498,10 @@ QUIC is the new transport layer protocol and is required for HTTP/3. This
protocol stack is currently supported as an experimental feature in haproxy on
the frontend side. In order to enable it, use "USE_QUIC=1 USE_OPENSSL=1".
Note that QUIC is not always fully supported by the OpenSSL library depending on
its version. Indeed QUIC 0-RTT cannot be supported by OpenSSL for versions before
3.5 contrary to others libraries with full QUIC support. The preferred option is
to use QUICTLS. This is a fork of OpenSSL with a QUIC-compatible API. Its
repository is available at this location:
Note that QUIC is not fully supported by the OpenSSL library. Indeed QUIC 0-RTT
cannot be supported by OpenSSL contrary to others libraries with full QUIC
support. The preferred option is to use QUICTLS. This is a fork of OpenSSL with
a QUIC-compatible API. Its repository is available at this location:
https://github.com/quictls/openssl
@@ -536,18 +529,14 @@ way assuming that wolfSSL was installed in /opt/wolfssl-5.6.0 as shown in 4.5:
SSL_INC=/opt/wolfssl-5.6.0/include SSL_LIB=/opt/wolfssl-5.6.0/lib
LDFLAGS="-Wl,-rpath,/opt/wolfssl-5.6.0/lib"
As last resort, haproxy may be compiled against OpenSSL as follows from 3.5
version with 0-RTT support:
$ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1
or as follows for all OpenSSL versions but without O-RTT support:
As last resort, haproxy may be compiled against OpenSSL as follows:
$ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1 USE_QUIC_OPENSSL_COMPAT=1
In addition to this requirements, the QUIC listener bindings must be explicitly
enabled with a specific QUIC tuning parameter. (see "limited-quic" global
parameter of haproxy Configuration Manual).
Note that QUIC 0-RTT is not supported by haproxy QUIC stack when built against
OpenSSL. In addition to this compilation requirements, the QUIC listener
bindings must be explicitly enabled with a specific QUIC tuning parameter.
(see "limited-quic" global parameter of haproxy Configuration Manual).
5) How to build HAProxy
@@ -563,9 +552,9 @@ It goes into more details with the main options.
To build haproxy, you have to choose your target OS amongst the following ones
and assign it to the TARGET variable :
- linux-glibc for Linux kernel 4.17 and above
- linux-glibc for Linux kernel 2.6.28 and above
- linux-glibc-legacy for Linux kernel 2.6.28 and above without new features
- linux-musl for Linux kernel 4.17 and above with musl libc
- linux-musl for Linux kernel 2.6.28 and above with musl libc
- solaris for Solaris 10 and above
- freebsd for FreeBSD 10 and above
- dragonfly for DragonFlyBSD 4.3 and above
@@ -765,8 +754,8 @@ forced to produce final binaries, and must not be used during bisect sessions,
as it will often lead to the wrong commit.
Examples:
# silence strict-aliasing warnings with old gcc-5.5:
$ make -j$(nproc) TARGET=linux-glibc CC=gcc-55 CFLAGS=-fno-strict-aliasing
# silence strict-aliasing warnings with old gcc-4.4:
$ make -j$(nproc) TARGET=linux-glibc CC=gcc-44 CFLAGS=-fno-strict-aliasing
# disable all warning options:
$ make -j$(nproc) TARGET=linux-glibc CC=mycc WARN_CFLAGS= NOWARN_CFLAGS=

@@ -35,7 +35,6 @@
# USE_OPENSSL : enable use of OpenSSL. Recommended, but see below.
# USE_OPENSSL_AWSLC : enable use of AWS-LC
# USE_OPENSSL_WOLFSSL : enable use of wolfSSL with the OpenSSL API
# USE_ECH : enable use of ECH with the OpenSSL API
# USE_QUIC : enable use of QUIC with the quictls API (quictls, libressl, boringssl)
# USE_QUIC_OPENSSL_COMPAT : enable use of QUIC with the standard openssl API (limited features)
# USE_ENGINE : enable use of OpenSSL Engine.
@@ -63,8 +62,6 @@
# USE_MEMORY_PROFILING : enable the memory profiler. Linux-glibc only.
# USE_LIBATOMIC : force to link with/without libatomic. Automatic.
# USE_PTHREAD_EMULATION : replace pthread's rwlocks with ours
# USE_SHM_OPEN : use shm_open() for features that can make use of shared memory
# USE_KTLS : use kTLS.(requires at least Linux 4.17).
#
# Options can be forced by specifying "USE_xxx=1" or can be disabled by using
# "USE_xxx=" (empty string). The list of enabled and disabled options for a
@@ -204,7 +201,6 @@ endif
#### May be used to force running a specific set of reg-tests
REG_TEST_FILES =
REG_TEST_SCRIPT=./scripts/run-regtests.sh
UNIT_TEST_SCRIPT=./scripts/run-unittests.sh
#### Standard C definition
# Compiler-specific flags that may be used to set the standard behavior we
@@ -214,8 +210,7 @@ UNIT_TEST_SCRIPT=./scripts/run-unittests.sh
# undefined behavior to silently produce invalid code. For this reason we have
# to use -fwrapv or -fno-strict-overflow to guarantee the intended behavior.
# It is preferable not to change this option in order to avoid breakage.
STD_CFLAGS := $(call cc-opt-alt,-fwrapv,-fno-strict-overflow) \
$(call cc-opt,-fvect-cost-model=very-cheap)
STD_CFLAGS := $(call cc-opt-alt,-fwrapv,-fno-strict-overflow)
#### Compiler-specific flags to enable certain classes of warnings.
# Some are hard-coded, others are enabled only if supported.
@@ -265,9 +260,9 @@ endif
# without appearing here. Currently defined DEBUG macros include DEBUG_FULL,
# DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_FD, DEBUG_POOL_INTEGRITY,
# DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_ACTION=[0-3], DEBUG_HPACK,
# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD=0-2, DEBUG_STRICT, DEBUG_DEV,
# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD, DEBUG_STRICT, DEBUG_DEV,
# DEBUG_TASK, DEBUG_MEMORY_POOLS, DEBUG_POOL_TRACING, DEBUG_QPACK, DEBUG_LIST,
# DEBUG_COUNTERS=[0-2], DEBUG_STRESS, DEBUG_UNIT.
# DEBUG_GLITCHES, DEBUG_STRESS.
DEBUG =
#### Trace options
@@ -342,16 +337,14 @@ use_opts = USE_EPOLL USE_KQUEUE USE_NETFILTER USE_POLL \
USE_TPROXY USE_LINUX_TPROXY USE_LINUX_CAP \
USE_LINUX_SPLICE USE_LIBCRYPT USE_CRYPT_H USE_ENGINE \
USE_GETADDRINFO USE_OPENSSL USE_OPENSSL_WOLFSSL USE_OPENSSL_AWSLC \
USE_ECH \
USE_SSL USE_LUA USE_ACCEPT4 USE_CLOSEFROM USE_ZLIB USE_SLZ \
USE_CPU_AFFINITY USE_TFO USE_NS USE_DL USE_RT USE_LIBATOMIC \
USE_MATH USE_DEVICEATLAS USE_51DEGREES \
USE_WURFL USE_OBSOLETE_LINKER USE_PRCTL USE_PROCCTL \
USE_THREAD_DUMP USE_EVPORTS USE_OT USE_QUIC USE_PROMEX \
USE_MEMORY_PROFILING USE_SHM_OPEN \
USE_MEMORY_PROFILING \
USE_STATIC_PCRE USE_STATIC_PCRE2 \
USE_PCRE USE_PCRE_JIT USE_PCRE2 USE_PCRE2_JIT \
USE_QUIC_OPENSSL_COMPAT USE_KTLS
USE_PCRE USE_PCRE_JIT USE_PCRE2 USE_PCRE2_JIT USE_QUIC_OPENSSL_COMPAT
# preset all variables for all supported build options among use_opts
$(reset_opts_vars)
@@ -382,13 +375,13 @@ ifeq ($(TARGET),haiku)
set_target_defaults = $(call default_opts,USE_POLL USE_TPROXY USE_OBSOLETE_LINKER)
endif
# For linux >= 4.17 and glibc
# For linux >= 2.6.28 and glibc
ifeq ($(TARGET),linux-glibc)
set_target_defaults = $(call default_opts, \
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_KTLS)
USE_GETADDRINFO USE_BACKTRACE)
INSTALL = install -v
endif
@@ -401,13 +394,13 @@ ifeq ($(TARGET),linux-glibc-legacy)
INSTALL = install -v
endif
# For linux >= 4.17 and musl
# For linux >= 2.6.28 and musl
ifeq ($(TARGET),linux-musl)
set_target_defaults = $(call default_opts, \
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_KTLS)
USE_GETADDRINFO)
INSTALL = install -v
endif
@@ -598,16 +591,10 @@ endif
ifneq ($(USE_BACKTRACE:0=),)
BACKTRACE_LDFLAGS = -Wl,$(if $(EXPORT_SYMBOL),$(EXPORT_SYMBOL),--export-dynamic)
BACKTRACE_CFLAGS = -fno-omit-frame-pointer
endif
ifneq ($(USE_MEMORY_PROFILING:0=),)
MEMORY_PROFILING_CFLAGS = -fno-optimize-sibling-calls
endif
ifneq ($(USE_CPU_AFFINITY:0=),)
OPTIONS_OBJS += src/cpuset.o
OPTIONS_OBJS += src/cpu_topo.o
endif
# OpenSSL is packaged in various forms and with various dependencies.
@@ -640,10 +627,7 @@ ifneq ($(USE_OPENSSL:0=),)
SSL_LDFLAGS := $(if $(SSL_LIB),-L$(SSL_LIB)) -lssl -lcrypto
endif
USE_SSL := $(if $(USE_SSL:0=),$(USE_SSL:0=),implicit)
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o \
src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o \
src/ssl_utils.o src/jwt.o src/ssl_clienthello.o src/jws.o src/acme.o \
src/ssl_trace.o src/jwe.o
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o src/ssl_utils.o src/jwt.o src/ssl_clienthello.o
endif
ifneq ($(USE_ENGINE:0=),)
@@ -670,7 +654,7 @@ OPTIONS_OBJS += src/mux_quic.o src/h3.o src/quic_rx.o src/quic_tx.o \
src/quic_cc_nocc.o src/quic_cc.o src/quic_pacing.o \
src/h3_stats.o src/quic_stats.o src/qpack-enc.o \
src/qpack-tbl.o src/quic_cc_drs.o src/quic_fctl.o \
src/quic_enc.o
src/cbuf.o
endif
ifneq ($(USE_QUIC_OPENSSL_COMPAT:0=),)
@@ -969,15 +953,15 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
src/cache.o src/stconn.o src/http_htx.o src/debug.o \
src/check.o src/stats-html.o src/haproxy.o src/listener.o \
src/applet.o src/pattern.o src/cfgparse-listen.o \
src/flt_spoe.o src/cebis_tree.o src/http_ext.o \
src/http_act.o src/http_fetch.o src/cebs_tree.o \
src/cebib_tree.o src/http_client.o src/dns.o \
src/cebb_tree.o src/vars.o src/event_hdl.o src/tcp_rules.o \
src/flt_spoe.o src/cebuis_tree.o src/http_ext.o \
src/http_act.o src/http_fetch.o src/cebus_tree.o \
src/cebuib_tree.o src/http_client.o src/dns.o \
src/cebub_tree.o src/vars.o src/event_hdl.o src/tcp_rules.o \
src/trace.o src/stats-proxy.o src/pool.o src/stats.o \
src/cfgparse-global.o src/filters.o src/mux_pt.o \
src/flt_http_comp.o src/sock.o src/h1.o src/sink.o \
src/ceba_tree.o src/session.o src/payload.o src/htx.o \
src/cebl_tree.o src/ceb32_tree.o src/ceb64_tree.o \
src/cebua_tree.o src/session.o src/payload.o src/htx.o \
src/cebul_tree.o src/cebu32_tree.o src/cebu64_tree.o \
src/server_state.o src/proto_rhttp.o src/flt_trace.o src/fd.o \
src/task.o src/map.o src/fcgi-app.o src/h2.o src/mworker.o \
src/tcp_sample.o src/mjson.o src/h1_htx.o src/tcp_act.o \
@@ -992,9 +976,9 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
src/cfgcond.o src/proto_udp.o src/lb_fwlc.o src/ebmbtree.o \
src/proto_uxdg.o src/cfgdiag.o src/sock_unix.o src/sha1.o \
src/lb_fas.o src/clock.o src/sock_inet.o src/ev_select.o \
src/lb_map.o src/shctx.o src/hpack-dec.o src/net_helper.o \
src/lb_map.o src/shctx.o src/mworker-prog.o src/hpack-dec.o \
src/arg.o src/signal.o src/fix.o src/dynbuf.o src/guid.o \
src/cfgparse-tcp.o src/lb_ss.o src/chunk.o src/counters.o \
src/cfgparse-tcp.o src/lb_ss.o src/chunk.o \
src/cfgparse-unix.o src/regex.o src/fcgi.o src/uri_auth.o \
src/eb64tree.o src/eb32tree.o src/eb32sctree.o src/lru.o \
src/limits.o src/ebimtree.o src/wdt.o src/hpack-tbl.o \
@@ -1002,8 +986,7 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
src/ebsttree.o src/freq_ctr.o src/systemd.o src/init.o \
src/http_acl.o src/dict.o src/dgram.o src/pipe.o \
src/hpack-huff.o src/hpack-enc.o src/ebtree.o src/hash.o \
src/httpclient_cli.o src/version.o src/ncbmbuf.o src/ech.o \
src/cfgparse-peers.o
src/version.o
ifneq ($(TRACE),)
OBJS += src/calltrace.o
@@ -1038,9 +1021,8 @@ help:
# TARGET variable is not set since we're not building, by definition.
IGNORE_OPTS=help install install-man install-doc install-bin \
uninstall clean tags cscope tar git-tar version update-version \
opts reg-tests reg-tests-help unit-tests admin/halog/halog dev/flags/flags \
dev/haring/haring dev/ncpu/ncpu dev/poll/poll dev/tcploop/tcploop \
dev/term_events/term_events
opts reg-tests reg-tests-help admin/halog/halog dev/flags/flags \
dev/haring/haring dev/ncpu/ncpu dev/poll/poll dev/tcploop/tcploop
ifneq ($(TARGET),)
ifeq ($(filter $(firstword $(MAKECMDGOALS)),$(IGNORE_OPTS)),)
@@ -1092,16 +1074,13 @@ dev/tcploop/tcploop:
dev/udp/udp-perturb: dev/udp/udp-perturb.o
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
dev/term_events/term_events: dev/term_events/term_events.o
$(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)
# rebuild it every time
.PHONY: src/version.c dev/ncpu/ncpu dev/poll/poll dev/tcploop/tcploop
src/calltrace.o: src/calltrace.c $(DEP)
$(cmd_CC) $(TRACE_COPTS) -c -o $@ $<
src/version.o: src/version.c $(DEP)
src/haproxy.o: src/haproxy.c $(DEP)
$(cmd_CC) $(COPTS) \
-DBUILD_TARGET='"$(strip $(TARGET))"' \
-DBUILD_CC='"$(strip $(CC))"' \
@ -1124,11 +1103,6 @@ install-doc:
$(INSTALL) -m 644 doc/$$x.txt "$(DESTDIR)$(DOCDIR)" ; \
done
install-admin:
$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
$(Q)$(INSTALL) admin/cli/haproxy-dump-certs "$(DESTDIR)$(SBINDIR)"
$(Q)$(INSTALL) admin/cli/haproxy-reload "$(DESTDIR)$(SBINDIR)"
install-bin:
$(Q)for i in haproxy $(EXTRA); do \
if ! [ -e "$$i" ]; then \
@ -1139,7 +1113,7 @@ install-bin:
$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
$(Q)$(INSTALL) haproxy $(EXTRA) "$(DESTDIR)$(SBINDIR)"
install: install-bin install-admin install-man install-doc
install: install-bin install-man install-doc
uninstall:
$(Q)rm -f "$(DESTDIR)$(MANDIR)"/man1/haproxy.1
@ -1287,17 +1261,10 @@ reg-tests-help:
.PHONY: reg-tests reg-tests-help
unit-tests:
$(Q)$(UNIT_TEST_SCRIPT)
.PHONY: unit-tests
# "make range" iteratively builds using "make all" and the exact same build
# options for all commits within RANGE. RANGE may be either a git range
# such as ref1..ref2 or a single commit, in which case all commits from
# the master branch to this one will be tested.
# Will execute TEST_CMD for each commit if defined, and will stop in case of
# failure.
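# Example (illustrative refs and test command):
#   $ make range RANGE=v3.1.0..master TEST_CMD='make reg-tests'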
range:
$(Q)[ -d .git/. ] || { echo "## Fatal: \"make $@\" may only be used inside a Git repository."; exit 1; }
@ -1323,7 +1290,6 @@ range:
echo "[ $$index/$$count ] $$commit #############################"; \
git checkout -q $$commit || die 1; \
$(MAKE) all || die 1; \
[ -z "$(TEST_CMD)" ] || $(TEST_CMD) || die 1; \
index=$$((index + 1)); \
done; \
echo;echo "Done! $${count} commit(s) built successfully for RANGE $${RANGE}" ; \

View file

@ -1,2 +1,2 @@
$Format:%ci$
2026/01/22
2025/01/09

View file

@ -1 +1 @@
3.4-dev3
3.2-dev3

View file

@ -5,8 +5,7 @@ CXX := c++
CXXLIB := -lstdc++
ifeq ($(DEVICEATLAS_SRC),)
OPTIONS_CFLAGS += -I$(DEVICEATLAS_INC)
OPTIONS_LDFLAGS += -Wl,-rpath,$(DEVICEATLAS_LIB) -L$(DEVICEATLAS_LIB) -lda
OPTIONS_LDFLAGS += -lda
else
DEVICEATLAS_INC = $(DEVICEATLAS_SRC)
DEVICEATLAS_LIB = $(DEVICEATLAS_SRC)

View file

@ -212,7 +212,7 @@ da_status_t da_atlas_compile(void *ctx, da_read_fn readfn, da_setpos_fn setposfn
* da_getpropid on the atlas, and if generated by the search, the ID will be consistent across
* different calls to search.
* Properties added by a search that are neither in the compiled atlas, nor in the extra_props list
* Are assigned an ID within the context that is not transferable through different search results
* Are assigned an ID within the context that is not transferrable through different search results
* within the same atlas.
* @param atlas Atlas instance
* @param extra_props properties

View file

@ -718,7 +718,7 @@ static void flt_ot_check_timeouts(struct stream *s, struct filter *f)
if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, -1)))
FLT_OT_RETURN();
s->pending_events |= STRM_EVT_MSG;
s->pending_events |= TASK_WOKEN_MSG;
flt_ot_return_void(f, &err);

View file

@ -94,7 +94,7 @@ name must be preceded by a minus character ('-'). Here are examples:
* Add section description as label for all metrics
It is possible to set a description in global and proxy sections, via the
"description" directive. The global description is exposed if it is define via
"description" directive. The global descrption is exposed if it is define via
the "haproxy_process_description" metric. But the descriptions provided in proxy
sections are not dumped. However, it is possible to add it as a label for all
metrics of the corresponding section, including the global one. To do so,
@ -389,9 +389,6 @@ listed below. Metrics from extra counters are not listed.
| haproxy_server_max_connect_time_seconds |
| haproxy_server_max_response_time_seconds |
| haproxy_server_max_total_time_seconds |
| haproxy_server_agent_status |
| haproxy_server_agent_code |
| haproxy_server_agent_duration_seconds |
| haproxy_server_internal_errors_total |
| haproxy_server_unsafe_idle_connections_current |
| haproxy_server_safe_idle_connections_current |

View file

@ -32,11 +32,11 @@
/* Prometheus exporter flags (ctx->flags) */
#define PROMEX_FL_METRIC_HDR 0x00000001
#define PROMEX_FL_BODYLESS_RESP 0x00000002
/* unused: 0x00000004 */
/* unused: 0x00000008 */
/* unused: 0x00000010 */
/* unused: 0x00000020 */
#define PROMEX_FL_INFO_METRIC 0x00000002
#define PROMEX_FL_FRONT_METRIC 0x00000004
#define PROMEX_FL_BACK_METRIC 0x00000008
#define PROMEX_FL_SRV_METRIC 0x00000010
#define PROMEX_FL_LI_METRIC 0x00000020
#define PROMEX_FL_MODULE_METRIC 0x00000040
#define PROMEX_FL_SCOPE_GLOBAL 0x00000080
#define PROMEX_FL_SCOPE_FRONT 0x00000100

File diff suppressed because it is too large

View file

@ -1,235 +0,0 @@
#!/bin/bash
#
# Dump certificates from the HAProxy stats or master socket to the filesystem
# Experimental script
#
set -e
export BASEPATH=${BASEPATH:-/etc/haproxy}/
export SOCKET=${SOCKET:-/var/run/haproxy-master.sock}
export DRY_RUN=0
export DEBUG=
export VERBOSE=
export M="@1 "
export TMP
vecho() {
[ -n "$VERBOSE" ] && echo "$@"
return 0
}
read_certificate() {
name=$1
crt_filename=
key_filename=
OFS=$IFS
IFS=":"
while read -r key value; do
case "$key" in
"Crt filename")
crt_filename="${value# }"
key_filename="${value# }"
;;
"Key filename")
key_filename="${value# }"
;;
esac
done < <(echo "${M}show ssl cert ${name}" | socat "${SOCKET}" -)
IFS=$OFS
if [ -z "$crt_filename" ] || [ -z "$key_filename" ]; then
return 1
fi
# handle fields without a crt-base/key-base
[ "${crt_filename:0:1}" != "/" ] && crt_filename="${BASEPATH}${crt_filename}"
[ "${key_filename:0:1}" != "/" ] && key_filename="${BASEPATH}${key_filename}"
vecho "name:$name"
vecho "crt:$crt_filename"
vecho "key:$key_filename"
export NAME="$name"
export CRT_FILENAME="$crt_filename"
export KEY_FILENAME="$key_filename"
return 0
}
cmp_certkey() {
prev=$1
new=$2
if [ ! -f "$prev" ]; then
return 1;
fi
if ! cmp -s <(openssl x509 -in "$prev" -noout -fingerprint -sha256) <(openssl x509 -in "$new" -noout -fingerprint -sha256); then
return 1
fi
return 0
}
dump_certificate() {
name=$1
prev_crt=$2
prev_key=$3
r="tmp.${RANDOM}"
d="old.$(date +%s)"
new_crt="$TMP/$(basename "$prev_crt").${r}"
new_key="$TMP/$(basename "$prev_key").${r}"
if ! touch "${new_crt}" || ! touch "${new_key}"; then
echo "[ALERT] ($$) : can't dump \"$name\", can't create tmp files" >&2
return 1
fi
echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl pkey >> "${new_key}"
# use crl2pkcs7 as a way to dump multiple x509, storeutl could be used in modern versions of openssl
echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl crl2pkcs7 -nocrl -certfile /dev/stdin | openssl pkcs7 -print_certs >> "${new_crt}"
if ! cmp -s <(openssl x509 -in "${new_crt}" -pubkey -noout) <(openssl pkey -in "${new_key}" -pubout); then
echo "[ALERT] ($$) : Private key \"${new_key}\" and public key \"${new_crt}\" don't match" >&2
return 1
fi
if cmp_certkey "${prev_crt}" "${new_crt}"; then
echo "[NOTICE] ($$) : ${crt_filename} is already up to date" >&2
return 0
fi
# dry run will just return before trying to move the files
if [ "${DRY_RUN}" != "0" ]; then
return 0
fi
# move the current certificates to ".old.timestamp"
if [ -f "${prev_crt}" ] && [ -f "${prev_key}" ]; then
mv "${prev_crt}" "${prev_crt}.${d}"
[ "${prev_crt}" != "${prev_key}" ] && mv "${prev_key}" "${prev_key}.${d}"
fi
# move the new certificates to old place
mv "${new_crt}" "${prev_crt}"
[ "${prev_crt}" != "${prev_key}" ] && mv "${new_key}" "${prev_key}"
return 0
}
dump_all_certificates() {
echo "${M}show ssl cert" | socat "${SOCKET}" - | grep -v '^#' | grep -v '^$' | while read -r line; do
export NAME
export CRT_FILENAME
export KEY_FILENAME
if read_certificate "$line"; then
dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
else
echo "[WARNING] ($$) : can't dump \"$name\", crt/key filename details not found in \"show ssl cert\"" >&2
fi
done
}
usage() {
echo "Usage:"
echo " $0 [options]* [cert]*"
echo ""
echo " Dump certificates from the HAProxy stats or master socket to the filesystem"
echo " Require socat and openssl"
echo " EXPERIMENTAL script, backup your files!"
echo " The script will move your previous files to FILE.old.unixtimestamp (ex: foo.com.pem.old.1759044998)"
echo ""
echo "Options:"
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${SOCKET})"
echo " -s, --socket <path> Use the stats socket at <path>"
echo " -p, --path <path> Specifiy a base path for relative files (default: ${BASEPATH})"
echo " -n, --dry-run Read certificates on the socket but don't dump them"
echo " -d, --debug Debug mode, set -x"
echo " -v, --verbose Verbose mode"
echo " -h, --help This help"
echo " -- End of options"
echo ""
echo "Examples:"
echo " $0 -v -p ${BASEPATH} -S ${SOCKET}"
echo " $0 -v -p ${BASEPATH} -S ${SOCKET} bar.com.rsa.pem"
echo " $0 -v -p ${BASEPATH} -S ${SOCKET} -- foo.com.ecdsa.pem bar.com.rsa.pem"
}
main() {
while [ -n "$1" ]; do
case "$1" in
-S|--master-socket)
SOCKET="$2"
M="@1 "
shift 2
;;
-s|--socket)
SOCKET="$2"
M=
shift 2
;;
-p|--path)
BASEPATH="$2/"
shift 2
;;
-n|--dry-run)
DRY_RUN=1
shift
;;
-d|--debug)
DEBUG=1
shift
;;
-v|--verbose)
VERBOSE=1
shift
;;
-h|--help)
usage "$@"
exit 0
;;
--)
shift
break
;;
-*)
echo "[ALERT] ($$) : Unknown option '$1'" >&2
usage "$@"
exit 1
;;
*)
break
;;
esac
done
if [ -n "$DEBUG" ]; then
set -x
fi
TMP=${TMP:-$(mktemp -d)}
if [ -z "$1" ]; then
dump_all_certificates
else
# compute the certificates names at the end of the command
while [ -n "$1" ]; do
if ! read_certificate "$1"; then
echo "[ALERT] ($$) : can't dump \"$1\", crt/key filename details not found in \"show ssl cert\"" >&2
exit 1
fi
[ "${DRY_RUN}" = "0" ] && dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
shift
done
fi
}
trap 'rm -rf -- "$TMP"' EXIT
main "$@"

View file

@ -1,113 +0,0 @@
#!/bin/bash
set -e
export VERBOSE=1
export TIMEOUT=90
export MASTER_SOCKET=${MASTER_SOCKET:-/var/run/haproxy-master.sock}
export RET=
alert() {
if [ "$VERBOSE" -ge "1" ]; then
echo "[ALERT] $*" >&2
fi
}
reload() {
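# parse the output of the master CLI "reload" command: a "Success=1" line
# means the reload succeeded, "Success=0" that it failed; the remaining
# lines are haproxy's own output, echoed or not depending on verbosity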
while read -r line; do
if [ "$line" = "Success=0" ]; then
RET=1
elif [ "$line" = "Success=1" ]; then
RET=0
elif [ "$line" = "Another reload is still in progress." ]; then
alert "$line"
elif [ "$line" = "--" ]; then
continue;
else
if [ "$RET" = 1 ] && [ "$VERBOSE" = "2" ]; then
echo "$line" >&2
elif [ "$VERBOSE" = "3" ]; then
echo "$line" >&2
fi
fi
done < <(echo "reload" | socat -t"${TIMEOUT}" "${MASTER_SOCKET}" -)
if [ -z "$RET" ]; then
alert "Couldn't finish the reload before the timeout (${TIMEOUT})."
return 1
fi
return "$RET"
}
usage() {
echo "Usage:"
echo " $0 [options]*"
echo ""
echo " Trigger a reload from the master socket"
echo " Require socat"
echo " EXPERIMENTAL script!"
echo ""
echo "Options:"
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${MASTER_SOCKET})"
echo " -d, --debug Debug mode, set -x"
echo " -t, --timeout Timeout (socat -t) (default: ${TIMEOUT})"
echo " -s, --silent Silent mode (no output)"
echo " -v, --verbose Verbose output (output from haproxy on failure)"
echo " -vv Even more verbose output (output from haproxy on success and failure)"
echo " -h, --help This help"
echo ""
echo "Examples:"
echo " $0 -S ${MASTER_SOCKET} -d ${TIMEOUT}"
}
main() {
while [ -n "$1" ]; do
case "$1" in
-S|--master-socket)
MASTER_SOCKET="$2"
shift 2
;;
-t|--timeout)
TIMEOUT="$2"
shift 2
;;
-s|--silent)
VERBOSE=0
shift
;;
-v|--verbose)
VERBOSE=2
shift
;;
-vv)
VERBOSE=3
shift
;;
-d|--debug)
DEBUG=1
shift
;;
-h|--help)
usage "$@"
exit 0
;;
*)
echo "[ALERT] ($$) : Unknown option '$1'" >&2
usage "$@"
exit 1
;;
esac
done
if [ -n "$DEBUG" ]; then
set -x
fi
}
main "$@"
reload

View file

@ -123,22 +123,6 @@ struct url_stat {
#define FILT2_PRESERVE_QUERY 0x02
#define FILT2_EXTRACT_CAPTURE 0x04
#define FILT_OUTPUT_FMT (FILT_COUNT_ONLY| \
FILT_COUNT_STATUS| \
FILT_COUNT_SRV_STATUS| \
FILT_COUNT_COOK_CODES| \
FILT_COUNT_TERM_CODES| \
FILT_COUNT_URL_ONLY| \
FILT_COUNT_URL_COUNT| \
FILT_COUNT_URL_ERR| \
FILT_COUNT_URL_TAVG| \
FILT_COUNT_URL_TTOT| \
FILT_COUNT_URL_TAVGO| \
FILT_COUNT_URL_TTOTO| \
FILT_COUNT_URL_BAVG| \
FILT_COUNT_URL_BTOT| \
FILT_COUNT_IP_COUNT)
unsigned int filter = 0;
unsigned int filter2 = 0;
unsigned int filter_invert = 0;
@ -208,7 +192,7 @@ void help()
" you can also use -n to start from earlier then field %d\n"
" -query preserve the query string for per-URL (-u*) statistics\n"
"\n"
"Output format - **only one** may be used at a time\n"
"Output format - only one may be used at a time\n"
" -c only report the number of lines that would have been printed\n"
" -pct output connect and response times percentiles\n"
" -st output number of requests per HTTP status code\n"
@ -914,9 +898,6 @@ int main(int argc, char **argv)
if (!filter && !filter2)
die("No action specified.\n");
if ((filter & FILT_OUTPUT_FMT) & ((filter & FILT_OUTPUT_FMT) - 1))
die("Please, set only one output filter.\n");
if (filter & FILT_ACC_COUNT && !filter_acc_count)
filter_acc_count=1;
@ -1571,10 +1552,6 @@ void filter_count_srv_status(const char *accept_field, const char *time_field, s
if (!srv_node) {
/* server not yet in the tree, let's create it */
srv = (void *)calloc(1, sizeof(struct srv_st) + e - b + 1);
if (unlikely(!srv)) {
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
exit(1);
}
srv_node = &srv->node;
memcpy(&srv_node->key, b, e - b);
srv_node->key[e - b] = '\0';
@ -1684,10 +1661,6 @@ void filter_count_url(const char *accept_field, const char *time_field, struct t
*/
if (unlikely(!ustat))
ustat = calloc(1, sizeof(*ustat));
if (unlikely(!ustat)) {
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
exit(1);
}
ustat->nb_err = err;
ustat->nb_req = 1;

View file

@ -6,9 +6,9 @@ Wants=network-online.target
[Service]
EnvironmentFile=-/etc/default/haproxy
EnvironmentFile=-/etc/sysconfig/haproxy
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "CFGDIR=/etc/haproxy/conf.d" "EXTRAOPTS=-S /run/haproxy-master.sock"
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -p $PIDFILE $EXTRAOPTS
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -c $EXTRAOPTS
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "EXTRAOPTS=-S /run/haproxy-master.sock"
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -p $PIDFILE $EXTRAOPTS
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -c $EXTRAOPTS
ExecReload=/bin/kill -USR2 $MAINPID
KillMode=mixed
Restart=always

View file

@ -25,7 +25,7 @@ end
# returns $node filled with the first node of ebroot $arg0
define ebtree_first
# browse ebtree left until encountering leaf
# browse ebtree left until encoutering leaf
set $node = (struct eb_node *)$arg0->b[0]
while 1
_ebtree_set_tag_node $node
@ -76,7 +76,7 @@ end
# returns $node filled with the first node of ebroot $arg0
define ebsctree_first
# browse ebsctree left until encountering leaf
# browse ebsctree left until encoutering leaf
set $node = (struct eb32sc_node *)$arg0->b[0]
while 1
_ebsctree_set_tag_node $node

View file

@ -1,19 +0,0 @@
# show non-null memprofile entries with method, alloc/free counts/tot and caller
define memprof_dump
set $i = 0
set $meth={ "UNKN", "MALL", "CALL", "REAL", "STRD", "FREE", "P_AL", "P_FR", "STND", "VALL", "ALAL", "PALG", "MALG", "PVAL" }
while $i < sizeof(memprof_stats) / sizeof(memprof_stats[0])
if memprof_stats[$i].alloc_calls || memprof_stats[$i].free_calls
set $m = memprof_stats[$i].method
printf "m:%s ac:%u fc:%u at:%u ft:%u ", $meth[$m], \
memprof_stats[$i].alloc_calls, memprof_stats[$i].free_calls, \
memprof_stats[$i].alloc_tot, memprof_stats[$i].free_tot
output/a memprof_stats[$i].caller
printf "\n"
end
set $i = $i + 1
end
end

View file

@ -1,247 +0,0 @@
-- This is an HTTP/2 tracer for a TCP proxy. It will decode the frames that are
-- exchanged between the client and the server and indicate their direction,
-- types, flags and lengths. Lines are prefixed with a connection number modulo
-- 4096 that makes it possible to sort out multiplexed exchanges. In order to use this,
-- simply load this file in the global section and use it from a TCP proxy:
--
-- global
-- lua-load "dev/h2/h2-tracer.lua"
--
-- listen h2_sniffer
-- mode tcp
-- bind :8002
-- filter lua.h2-tracer #hex
-- server s1 127.0.0.1:8003
--
-- define the decoder's class here
Dec = {}
Dec.id = "Lua H2 tracer"
Dec.flags = 0
Dec.__index = Dec
Dec.args = {} -- args passed by the filter's declaration
Dec.cid = 0 -- next connection ID
-- prefix to indent responses
res_pfx = " | "
-- H2 frame types
h2ft = {
[0] = "DATA",
[1] = "HEADERS",
[2] = "PRIORITY",
[3] = "RST_STREAM",
[4] = "SETTINGS",
[5] = "PUSH_PROMISE",
[6] = "PING",
[7] = "GOAWAY",
[8] = "WINDOW_UPDATE",
[9] = "CONTINUATION",
}
h2ff = {
[0] = { [0] = "ES", [3] = "PADDED" }, -- data
[1] = { [0] = "ES", [2] = "EH", [3] = "PADDED", [5] = "PRIORITY" }, -- headers
[2] = { }, -- priority
[3] = { }, -- rst_stream
[4] = { [0] = "ACK" }, -- settings
[5] = { [2] = "EH", [3] = "PADDED" }, -- push_promise
[6] = { [0] = "ACK" }, -- ping
[7] = { }, -- goaway
[8] = { }, -- window_update
[9] = { [2] = "EH" }, -- continuation
}
function Dec:new()
local dec = {}
setmetatable(dec, Dec)
dec.do_hex = false
if (Dec.args[1] == "hex") then
dec.do_hex = true
end
Dec.cid = Dec.cid+1
-- mix the thread number when multithreading.
dec.cid = Dec.cid + 64 * core.thread
-- state per dir. [1]=req [2]=res
dec.st = {
[1] = {
hdr = { 0, 0, 0, 0, 0, 0, 0, 0, 0 },
fofs = 0,
flen = 0,
ftyp = 0,
fflg = 0,
sid = 0,
tot = 0,
},
[2] = {
hdr = { 0, 0, 0, 0, 0, 0, 0, 0, 0 },
fofs = 0,
flen = 0,
ftyp = 0,
fflg = 0,
sid = 0,
tot = 0,
},
}
return dec
end
function Dec:start_analyze(txn, chn)
if chn:is_resp() then
io.write(string.format("[%03x] ", self.cid % 4096) .. res_pfx .. "### res start\n")
else
io.write(string.format("[%03x] ", self.cid % 4096) .. "### req start\n")
end
filter.register_data_filter(self, chn)
end
function Dec:end_analyze(txn, chn)
if chn:is_resp() then
io.write(string.format("[%03x] ", self.cid % 4096) .. res_pfx .. "### res end: " .. self.st[2].tot .. " bytes total\n")
else
io.write(string.format("[%03x] ", self.cid % 4096) .. "### req end: " ..self.st[1].tot.. " bytes total\n")
end
end
function Dec:tcp_payload(txn, chn)
local data = { }
local dofs = 1
local pfx = ""
local dir = 1
local sofs = 0
local ft = ""
local ff = ""
if chn:is_resp() then
pfx = res_pfx
dir = 2
end
pfx = string.format("[%03x] ", self.cid % 4096) .. pfx
-- stream offset before processing
sofs = self.st[dir].tot
if (chn:input() > 0) then
data = chn:data()
self.st[dir].tot = self.st[dir].tot + chn:input()
end
if (chn:input() > 0 and self.do_hex ~= false) then
io.write("\n" .. pfx .. "Hex:\n")
for i = 1, #data do
if ((i & 7) == 1) then io.write(pfx) end
io.write(string.format("0x%02x ", data:sub(i, i):byte()))
if ((i & 7) == 0 or i == #data) then io.write("\n") end
end
end
-- start at byte 1 in the <data> string
dofs = 1
-- the first 24 bytes are expected to be an H2 preface on the request
if (dir == 1 and sofs < 24) then
-- let's not check it for now
local bytes = self.st[dir].tot - sofs
if (sofs + self.st[dir].tot >= 24) then
-- skip what was missing from the preface
dofs = dofs + 24 - sofs
sofs = 24
io.write(pfx .. "[PREFACE len=24]\n")
else
-- consume more preface bytes
sofs = sofs + self.st[dir].tot
return
end
end
-- parse contents as long as there are pending data
while true do
-- check if we need to consume data from the current frame
-- flen is the number of bytes left before the frame's end.
if (self.st[dir].flen > 0) then
if dofs > #data then return end -- missing data
if (#data - dofs + 1 < self.st[dir].flen) then
-- insufficient data
self.st[dir].flen = self.st[dir].flen - (#data - dofs + 1)
io.write(pfx .. string.format("%32s\n", "... -" .. (#data - dofs + 1) .. " = " .. self.st[dir].flen))
dofs = #data + 1
return
else
-- enough data to finish
if (dofs == 1) then
-- only print a partial size if the frame was interrupted
io.write(pfx .. string.format("%32s\n", "... -" .. self.st[dir].flen .. " = 0"))
end
dofs = dofs + self.st[dir].flen
self.st[dir].flen = 0
end
end
-- here, flen = 0, we're at the beginning of a new frame --
-- read possibly missing header bytes until dec.fofs == 9
while self.st[dir].fofs < 9 do
if dofs > #data then return end -- missing data
self.st[dir].hdr[self.st[dir].fofs + 1] = data:sub(dofs, dofs):byte()
dofs = dofs + 1
self.st[dir].fofs = self.st[dir].fofs + 1
end
-- we have a full frame header here
if (self.do_hex ~= false) then
io.write("\n" .. pfx .. string.format("hdr=%02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
self.st[dir].hdr[1], self.st[dir].hdr[2], self.st[dir].hdr[3],
self.st[dir].hdr[4], self.st[dir].hdr[5], self.st[dir].hdr[6],
self.st[dir].hdr[7], self.st[dir].hdr[8], self.st[dir].hdr[9]))
end
-- we have a full frame header, we'll be ready
-- for a new frame once the data is gone
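-- H2 frame header layout (RFC 7540 §4.1): 3-byte length, 1-byte type,
-- 1-byte flags, then 4 bytes holding the reserved bit and the stream id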
self.st[dir].flen = self.st[dir].hdr[1] * 65536 +
self.st[dir].hdr[2] * 256 +
self.st[dir].hdr[3]
self.st[dir].ftyp = self.st[dir].hdr[4]
self.st[dir].fflg = self.st[dir].hdr[5]
self.st[dir].sid = self.st[dir].hdr[6] * 16777216 +
self.st[dir].hdr[7] * 65536 +
self.st[dir].hdr[8] * 256 +
self.st[dir].hdr[9]
self.st[dir].fofs = 0
-- decode frame type
if self.st[dir].ftyp <= 9 then
ft = h2ft[self.st[dir].ftyp]
else
ft = string.format("TYPE_0x%02x\n", self.st[dir].ftyp)
end
-- decode frame flags for frame type <ftyp>
ff = ""
for i = 7, 0, -1 do
if (((self.st[dir].fflg >> i) & 1) ~= 0) then
if self.st[dir].ftyp <= 9 and h2ff[self.st[dir].ftyp][i] ~= nil then
ff = ff .. ((ff == "") and "" or "+")
ff = ff .. h2ff[self.st[dir].ftyp][i]
else
ff = ff .. ((ff == "") and "" or "+")
ff = ff .. string.format("0x%02x", 1<<i)
end
end
end
io.write(pfx .. string.format("[%s %ssid=%u len=%u (bytes=%u)]\n",
ft, (ff == "") and "" or ff .. " ",
self.st[dir].sid, self.st[dir].flen,
(#data - dofs + 1)))
end
end
core.register_filter("h2-tracer", Dec, function(dec, args)
Dec.args = args
return dec
end)

View file

@ -59,9 +59,9 @@ struct ring_v2 {
struct ring_v2a {
size_t size; // storage size
size_t rsvd; // header length (used for file-backed maps)
size_t tail ALIGNED(64); // storage tail
size_t head ALIGNED(64); // storage head
char area[0] ALIGNED(64); // storage area begins immediately here
size_t tail __attribute__((aligned(64))); // storage tail
size_t head __attribute__((aligned(64))); // storage head
char area[0] __attribute__((aligned(64))); // storage area begins immediately here
};
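/* Note: ALIGNED(x) is presumably haproxy's portability wrapper around the
 * GCC attribute, i.e. #define ALIGNED(x) __attribute__((aligned(x))); both
 * variants above thus describe the same layout, with head and tail kept on
 * separate 64-byte cache lines to avoid false sharing between readers and
 * writers.
 */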
/* display the message and exit with the code */

View file

@ -1,5 +1,4 @@
#define _GNU_SOURCE
#include <errno.h>
#include <limits.h>
#include <sched.h>
#include <stdio.h>
@ -12,22 +11,6 @@
static char prog_full_path[PATH_MAX];
long sysconf(int name)
{
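/* pretend the machine has $NCPU processors (capped to CPU_SETSIZE); any
 * other sysconf() request is rejected with EINVAL */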
if (name == _SC_NPROCESSORS_ONLN ||
name == _SC_NPROCESSORS_CONF) {
const char *ncpu = getenv("NCPU");
int n;
n = ncpu ? atoi(ncpu) : CPU_SETSIZE;
if (n < 0 || n > CPU_SETSIZE)
n = CPU_SETSIZE;
return n;
}
errno = EINVAL;
return -1;
}
/* return a cpu_set having the first $NCPU set */
int sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask)
{

View file

@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT
HAProxy's development cycle consists of one development branch and multiple
maintenance branches.
All the development is made into the development branch exclusively. This
includes mostly new features, doc updates, cleanups and, of course, fixes.
The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.
Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development branch
is created with a new, higher version. The current development branch is
3.3-dev, and maintenance branches are 3.2 and below.
Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version till
that branch that introduced the issue: 3.2 first, then 3.1, then 3.0, then 2.9
and so on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regression in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.
Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
interchangeably talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.
It seldom happens that some fixes depend on changes that were brought by other
patches that were not in some branches and that will need to be backported as
well for the fix to work. In this case, such information is explicitly provided
in the commit message by the patch's author in natural language.
Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch is
"needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.
ENDCONTEXT
A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already expert
on HAProxy and everything related to Git, patch management, and the risks
associated with backports, so he doesn't want to be told how to proceed nor to
review the contents of the patch.
The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job, he
needs an accurate summary of the information and instructions found in each
commit message. Specifically he needs to figure if the patch fixes a problem
affecting an older branch or not, if it needs to be backported, if so to which
branches, and if other patches need to be backported along with it.
The indented text block below after an "id" line and starting with a Subject line
is a commit message from the HAProxy development branch that describes a patch
applied to that branch, starting with its subject line; please read it carefully.

View file

@ -1,29 +0,0 @@
ENDINPUT
BEGININSTRUCTION
You are an AI assistant that follows instructions extremely well. Help as much
as you can, responding to a single question using a single response.
The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies might
be mentioned in the commit message. Carefully study the commit message and its
backporting instructions if any (otherwise it should probably not be backported),
then provide a very concise and short summary that will help the developer decide
to backport it, or simply to skip it.
Start by explaining in one or two sentences what you recommend for this one and why.
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
where X is a single word among:
- "yes", if you recommend to backport the patch right now either because
it explicitly states this or because it's a fix for a bug that affects
a maintenance branch (3.2 or lower);
- "wait", if this patch explicitly mentions that it must be backported, but
only after waiting some time.
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
lack of explicit backport instructions, or it's just an improvement);
- "uncertain" otherwise for cases not covered above
ENDINSTRUCTION
Explanation:

View file

@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT
HAProxy's development cycle consists of one development branch and multiple
maintenance branches.
All the development is made into the development branch exclusively. This
includes mostly new features, doc updates, cleanups and, of course, fixes.
The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.
Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development branch
is created with a new, higher version. The current development branch is
3.4-dev, and maintenance branches are 3.3 and below.
Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version till
that branch that introduced the issue: 3.3 first, then 3.2, then 3.1, then 3.0
and so on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regression in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.
Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
interchangeably talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.
It seldom happens that some fixes depend on changes that were brought by other
patches that were not in some branches and that will need to be backported as
well for the fix to work. In this case, such information is explicitly provided
in the commit message by the patch's author in natural language.
Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch is
"needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.
ENDCONTEXT
A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already expert
on HAProxy and everything related to Git, patch management, and the risks
associated with backports, so he doesn't want to be told how to proceed nor to
review the contents of the patch.
The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job, he
needs an accurate summary of the information and instructions found in each
commit message. Specifically he needs to figure if the patch fixes a problem
affecting an older branch or not, if it needs to be backported, if so to which
branches, and if other patches need to be backported along with it.
The indented text block below after an "id" line and starting with a Subject line
is a commit message from the HAProxy development branch that describes a patch
applied to that branch, starting with its subject line; please read it carefully.

View file

@ -1,29 +0,0 @@
ENDINPUT
BEGININSTRUCTION
You are an AI assistant that follows instructions extremely well. Help as much
as you can, responding to a single question using a single response.
The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies might
be mentioned in the commit message. Carefully study the commit message and its
backporting instructions if any (otherwise it should probably not be backported),
then provide a very concise and short summary that will help the developer decide
to backport it, or simply to skip it.
Start by explaining in one or two sentences what you recommend for this one and why.
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
where X is a single word among:
- "yes", if you recommend to backport the patch right now either because
it explicitly states this or because it's a fix for a bug that affects
a maintenance branch (3.3 or lower);
- "wait", if this patch explicitly mentions that it must be backported, but
only after waiting some time.
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
lack of explicit backport instructions, or it's just an improvement);
- "uncertain" otherwise for cases not covered above
ENDINSTRUCTION
Explanation:

View file

@ -22,8 +22,7 @@ STABLE=$(cd "$HAPROXY_DIR" && git describe --tags "v${BRANCH}-dev0^" |cut -f1,2
PATCHES_DIR="$PATCHES_PFX"-"$BRANCH"
(cd "$HAPROXY_DIR"
# avoid git pull, it chokes on forced push
git remote update origin; git reset origin/master;git checkout -f
git pull
last_file=$(ls -1 "$PATCHES_DIR"/*.patch 2>/dev/null | tail -n1)
if [ -n "$last_file" ]; then
restart=$(head -n1 "$last_file" | cut -f2 -d' ')

BIN
dev/phash/a.out Executable file

Binary file not shown.

View file

@ -1,233 +0,0 @@
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <haproxy/connection-t.h>
#include <haproxy/intops.h>
struct tevt_info {
const char *loc;
const char **types;
};
/* will be sufficient for even largest flag names */
static char buf[4096];
static size_t bsz = sizeof(buf);
static const char *tevt_unknown_types[16] = {
[ 0] = "-", [ 1] = "-", [ 2] = "-", [ 3] = "-",
[ 4] = "-", [ 5] = "-", [ 6] = "-", [ 7] = "-",
[ 8] = "-", [ 9] = "-", [10] = "-", [11] = "-",
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
};
static const char *tevt_fd_types[16] = {
[ 0] = "-", [ 1] = "shutw", [ 2] = "shutr", [ 3] = "rcv_err",
[ 4] = "snd_err", [ 5] = "-", [ 6] = "-", [ 7] = "conn_err",
[ 8] = "intercepted", [ 9] = "conn_poll_err", [10] = "poll_err", [11] = "poll_hup",
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
};
static const char *tevt_hs_types[16] = {
[ 0] = "-", [ 1] = "-", [ 2] = "-", [ 3] = "rcv_err",
[ 4] = "snd_err", [ 5] = "-", [ 6] = "-", [ 7] = "-",
[ 8] = "-", [ 9] = "-", [10] = "-", [11] = "-",
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
};
static const char *tevt_xprt_types[16] = {
[ 0] = "-", [ 1] = "shutw", [ 2] = "shutr", [ 3] = "rcv_err",
[ 4] = "snd_err", [ 5] = "-", [ 6] = "-", [ 7] = "-",
[ 8] = "-", [ 9] = "-", [10] = "-", [11] = "-",
[12] = "-", [13] = "-", [14] = "-", [15] = "-",
};
static const char *tevt_muxc_types[16] = {
[ 0] = "-", [ 1] = "shutw", [ 2] = "shutr", [ 3] = "rcv_err",
[ 4] = "snd_err", [ 5] = "truncated_shutr", [ 6] = "truncated_rcv_err", [ 7] = "tout",
[ 8] = "goaway_rcvd", [ 9] = "proto_err", [10] = "internal_err", [11] = "other_err",
[12] = "graceful_shut", [13] = "-", [14] = "-", [15] = "-",
};
static const char *tevt_se_types[16] = {
[ 0] = "-", [ 1] = "shutw", [ 2] = "eos", [ 3] = "rcv_err",
[ 4] = "snd_err", [ 5] = "truncated_eos", [ 6] = "truncated_rcv_err", [ 7] = "-",
[ 8] = "rst_rcvd", [ 9] = "proto_err", [10] = "internal_err", [11] = "other_err",
[12] = "cancelled", [13] = "-", [14] = "-", [15] = "-",
};
static const char *tevt_strm_types[16] = {
[ 0] = "-", [ 1] = "shutw", [ 2] = "eos", [ 3] = "rcv_err",
[ 4] = "snd_err", [ 5] = "truncated_eos", [ 6] = "truncated_rcv_err", [ 7] = "tout",
[ 8] = "intercepted", [ 9] = "proto_err", [10] = "internal_err", [11] = "other_err",
[12] = "aborted", [13] = "-", [14] = "-", [15] = "-",
};
static const struct tevt_info tevt_location[26] = {
[ 0] = {.loc = "-", .types = tevt_unknown_types}, [ 1] = {.loc = "-", .types = tevt_unknown_types},
[ 2] = {.loc = "-", .types = tevt_unknown_types}, [ 3] = {.loc = "-", .types = tevt_unknown_types},
[ 4] = {.loc = "se", .types = tevt_se_types}, [ 5] = {.loc = "fd", .types = tevt_fd_types},
[ 6] = {.loc = "-", .types = tevt_unknown_types}, [ 7] = {.loc = "hs", .types = tevt_hs_types},
[ 8] = {.loc = "-", .types = tevt_unknown_types}, [ 9] = {.loc = "-", .types = tevt_unknown_types},
[10] = {.loc = "-", .types = tevt_unknown_types}, [11] = {.loc = "-", .types = tevt_unknown_types},
[12] = {.loc = "muxc", .types = tevt_muxc_types}, [13] = {.loc = "-", .types = tevt_unknown_types},
[14] = {.loc = "-", .types = tevt_unknown_types}, [15] = {.loc = "-", .types = tevt_unknown_types},
[16] = {.loc = "-", .types = tevt_unknown_types}, [17] = {.loc = "-", .types = tevt_unknown_types},
[18] = {.loc = "strm", .types = tevt_strm_types}, [19] = {.loc = "-", .types = tevt_unknown_types},
[20] = {.loc = "-", .types = tevt_unknown_types}, [21] = {.loc = "-", .types = tevt_unknown_types},
[22] = {.loc = "-", .types = tevt_unknown_types}, [23] = {.loc = "xprt", .types = tevt_xprt_types},
[24] = {.loc = "-", .types = tevt_unknown_types}, [25] = {.loc = "-", .types = tevt_unknown_types},
};
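/* input values are strings of [location letter][type hex digit] pairs,
 * e.g. "m9" decodes to "muxc:proto_err"; an uppercase location letter
 * uppercases the location name in the output
 */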
void usage_exit(const char *name)
{
fprintf(stderr, "Usage: %s { value* | - }\n", name);
exit(1);
}
char *to_upper(char *dst, const char *src)
{
int i;
for (i = 0; src[i]; i++)
dst[i] = toupper(src[i]);
dst[i] = 0;
return dst;
}
char *tevt_show_events(char *buf, size_t len, const char *delim, const char *value)
{
char loc[5];
int ret;
if (!value || !*value) {
snprintf(buf, len, "##NONE");
goto end;
}
if (strcmp(value, "-") == 0) {
snprintf(buf, len, "##UNK");
goto end;
}
if (strlen(value) % 2 != 0) {
snprintf(buf, len, "##INV");
goto end;
}
while (*value) {
struct tevt_info info;
char l = value[0];
char t = value[1];
if (!isalpha(l) || !isxdigit(t)) {
snprintf(buf, len, "##INV");
goto end;
}
info = tevt_location[tolower(l) - 'a'];
ret = snprintf(buf, len, "%s:%s%s",
isupper(l) ? to_upper(loc, info.loc) : info.loc,
info.types[hex2i(t)],
value[2] != 0 ? delim : "");
if (ret < 0)
break;
len -= ret;
buf += ret;
value += 2;
}
end:
return buf;
}
char *tevt_show_tuple_events(char *buf, size_t len, char *value)
{
char *p = value;
/* skip '{' */
p++;
while (*p) {
char *v;
char c;
while (*p == ' ' || *p == '\t')
p++;
v = p;
while (*p && *p != ',' && *p != '}')
p++;
c = *p;
*p = 0;
tevt_show_events(buf, len, " > ", v);
printf("\t- %s\n", buf);
*p = c;
if (*p == ',')
p++;
else if (*p == '}')
break;
else {
printf("\t- ##INV\n");
break;
}
}
*buf = 0;
return buf;
}
int main(int argc, char **argv)
{
const char *name = argv[0];
char line[128];
char *value;
int multi = 0;
int use_stdin = 0;
char *err;
while (argc == 1)
usage_exit(name);
argv++; argc--;
if (argc > 1)
multi = 1;
if (strcmp(argv[0], "-") == 0)
use_stdin = 1;
while (argc > 0) {
if (use_stdin) {
value = fgets(line, sizeof(line), stdin);
if (!value)
break;
/* skip common leading delimiters that slip from copy-paste */
while (*value == ' ' || *value == '\t' || *value == ':' || *value == '=')
value++;
err = value;
while (*err && *err != '\n')
err++;
*err = 0;
}
else {
value = argv[0];
argv++; argc--;
}
if (multi)
printf("### %-8s : ", value);
if (*value == '{') {
if (!use_stdin)
printf("\n");
tevt_show_tuple_events(buf, bsz, value);
}
else
tevt_show_events(buf, bsz, " > ", value);
printf("%s\n", buf);
}
return 0;
}

View file

@ -3,9 +3,7 @@ DeviceAtlas Device Detection
In order to add DeviceAtlas Device Detection support, you would need to download
the API source code from https://deviceatlas.com/deviceatlas-haproxy-module.
Once extracted, two modes are supported :
1/ Build HAProxy and DeviceAtlas in one command
Once extracted :
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder>
@ -16,6 +14,10 @@ directory. Also, in the case the api cache support is not needed and/or a C++ to
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder> DEVICEATLAS_NOCACHE=1
However, if the API had been installed beforehand, DEVICEATLAS_SRC
can be omitted. Note that the DeviceAtlas C API version supported is from the 3.x
release series (3.2.1 minimum recommended).
For HAProxy developers who need to verify that their changes didn't accidentally
break the DeviceAtlas code, it is possible to build a dummy library provided in
the addons/deviceatlas/dummy directory and to use it as an alternative for the
@ -25,29 +27,6 @@ validate API changes :
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=$PWD/addons/deviceatlas/dummy
2/ Build and install DeviceAtlas according to https://docs.deviceatlas.com/apis/enterprise/c/<release version>/README.html
For example :
In the deviceatlas library folder :
$ cmake .
$ make
$ sudo make install
In the HAProxy folder :
$ make TARGET=<target> USE_DEVICEATLAS=1
Note that if the -DCMAKE_INSTALL_PREFIX cmake option had been used, it is necessary to also set DEVICEATLAS_LIB and
DEVICEATLAS_INC as follows :
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_INC=<CMAKE_INSTALL_PREFIX value>/include DEVICEATLAS_LIB=<CMAKE_INSTALL_PREFIX value>/lib
For example :
$ cmake -DCMAKE_INSTALL_PREFIX=/opt/local
$ make
$ sudo make install
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_INC=/opt/local/include DEVICEATLAS_LIB=/opt/local/lib
Note that DEVICEATLAS_SRC is omitted in this case.
These are supported DeviceAtlas directives (see doc/configuration.txt) :
- deviceatlas-json-file <path to the DeviceAtlas JSON data file>.
- deviceatlas-log-level <number> (0 to 3, level of information returned by

View file

@ -362,7 +362,7 @@ option set-process-time <var name>
latency added by the SPOE processing for the last handled event or group.
If several events or groups are processed for the same stream, this value
will be overridden.
will be overrideen.
See also: "option set-total-time".

View file

@ -3,7 +3,7 @@
A number of contributors are often embarrassed with coding style issues, they
don't always know if they're doing it right, especially since the coding style
has evolved along the years. What is explained here is not necessarily what is
has elvoved along the years. What is explained here is not necessarily what is
applied in the code, but new code should as much as possible conform to this
style. Coding style fixes happen when code is replaced. It is useless to send
patches to fix coding style only; they will be rejected, unless they belong to

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -5,7 +5,7 @@
The buffer list API allows one to share a certain amount of buffers between
multiple entities, which will each see their own as lists of buffers, while
keeping a shared free list. The immediate use case is for muxes, which may
keeping a sharedd free list. The immediate use case is for muxes, which may
want to allocate up to a certain number of buffers per connection, shared
among all streams. In this case, each stream will first request a new list
for its own use, then may request extra entries from the free list. At any

View file

@ -540,15 +540,14 @@ message. These functions are used by HTX analyzers or by multiplexers.
the amount of data drained.
- htx_xfer_blks() transfers HTX blocks from an HTX message to another,
stopping after the first block of a specified type is transferred or when
a specific amount of bytes, including meta-data, was moved. If the tail
block is a DATA block, it may be partially moved. All other blocks are
transferred at once or kept. This function returns a mixed value, with the
last block moved, or NULL if nothing was moved, and the amount of data
transferred. When HEADERS or TRAILERS blocks must be transferred, this
function transfers all of them. Otherwise, if it is not possible, it
triggers an error. It is the caller's responsibility to transfer all headers
or trailers at once.
stopping on the first block of a specified type or when a specific amount
of bytes, including meta-data, was moved. If the tail block is a DATA
block, it may be partially moved. All other blocks are transferred at once
or kept. This function returns a mixed value, with the last block moved,
or NULL if nothing was moved, and the amount of data transferred. When
HEADERS or TRAILERS blocks must be transferred, this function transfers
all of them. Otherwise, if it is not possible, it triggers an error. It is
the caller's responsibility to transfer all headers or trailers at once.
- htx_append_msg() append an HTX message to another one. All the message is
copied or nothing. So, if an error occurred, a rollback is performed. This
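As a quick illustration of the htx_xfer_blks() behaviour described above, a
minimal calling sketch (assuming the usual prototype from htx.c, which is not
shown in this diff):

    /* move up to <count> bytes of blocks from <src> to <dst>, stopping
     * after the first end-of-trailers block */
    struct htx_ret ret = htx_xfer_blks(dst, src, count, HTX_BLK_EOT);

    /* ret.blk is the last block moved (NULL if nothing was moved) and
     * ret.ret the amount of data transferred, meta-data included */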

View file

@ -314,16 +314,6 @@ alphanumerically ordered:
call to cfg_register_section() with the three arguments at stage
STG_REGISTER.
You can only register a section once, but you can register post callbacks
multiple times for this section with REGISTER_CONFIG_POST_SECTION().
- REGISTER_CONFIG_POST_SECTION(name, post)
Registers a function which will be called after a section is parsed. This is
the same as the <post> argument in REGISTER_CONFIG_SECTION(), the difference
is that it allows one to register multiple <post> callbacks and to register them
elsewhere in the code.
- REGISTER_PER_THREAD_ALLOC(fct)
Registers a call to register_per_thread_alloc(fct) at stage STG_REGISTER.

View file

@ -1,86 +0,0 @@
2025-08-13 - Memory allocation in HAProxy 3.3
The vast majority of dynamic memory allocations are performed from pools. Pools
are optimized to store pre-calibrated objects of the right size for a given
usage, try to favor locality and hot objects as much as possible, and are
heavily instrumented to detect and help debug a wide class of bugs including
buffer overflows, use-after-free, etc.
For objects of random sizes, or those used only at configuration time, pools
are not suited, and the regular malloc/free family is available, in addition to
a few others.
The standard allocation calls are intercepted at the code level (#define) when
the code is compiled with -DDEBUG_MEM_STATS. For this reason, these calls are
redefined as macros in "bug.h", and one must not try to use the pointers to
such functions, as this may break DEBUG_MEM_STATS. This provides fine-grained
stats about allocation/free per line of source code using locally implemented
counters that can be consulted by "debug dev memstats". The calls are
categorized into one of "calloc", "free", "malloc", "realloc", "strdup",
"p_alloc", "p_free", the latter two designating pools. Extra calls such as
memalign() and similar are also intercepted and counted as malloc.
Due to the nature of this replacement, DEBUG_MEM_STATS cannot see operations
performed in libraries or dependencies.
In addition to DEBUG_MEM_STATS, when haproxy is built with USE_MEMORY_PROFILING
the standard functions are wrapped by new ones defined in "activity.c", which
also hold counters by call place. These ones are able to trace activity in
libraries because the functions check the return pointer to figure out where
the call was made. The approach is different and relies on a large hash table.
The files, function names and line numbers are not known, but by passing the pointer
to dladdr(), we can often resolve most of these symbols. These operations are
consulted via "show profiling memory". It must first be enabled either in the
global config "profiling.memory on" or the CLI using "set profiling memory on".
Memory profiling can also track pool allocations and frees thanks to knowing
the size of the element and knowing a place where to store it. Some future
evolutions might consider making this possible as well for pure malloc/free
too by leveraging malloc_usable_size() a bit more.
Finally, 3.3 brought aligned allocations. These are made available via a new
family of functions around ha_aligned_alloc() that simply map to either
posix_memalign(), memalign() or _aligned_malloc() for CYGWIN, depending on
which one is available. The latter requires passing the pointer to
_aligned_free() instead of free(), so for this reason, all aligned allocations
have to be released using ha_aligned_free(). Since this mostly happens on
configuration elements, in practice it's not as inconvenient as it may sound.
These functions are in reality macros handled in "bug.h" like the previous
ones in order to deal with DEBUG_MEM_STATS. All "alloc" variants are reported
in memstats as "malloc". All "zalloc" variants are reported in memstats as
"calloc".
The currently available allocators are the following:
- void *ha_aligned_alloc(size_t align, size_t size)
- void *ha_aligned_zalloc(size_t align, size_t size)
Equivalent of malloc() but aligned to <align> bytes. The alignment MUST be
at least as large as one word and MUST be a power of two. The "zalloc"
variant also zeroes the area on success. Both return NULL on failure.
- void *ha_aligned_alloc_safe(size_t align, size_t size)
- void *ha_aligned_zalloc_safe(size_t align, size_t size)
Equivalent of malloc() but aligned to <align> bytes. The alignment is
automatically adjusted to the nearest larger power of two that is at least
as large as a word. The "zalloc" variant also zeroes the area on
success. Both return NULL on failure.
- (type *)ha_aligned_alloc_typed(size_t count, type)
(type *)ha_aligned_zalloc_typed(size_t count, type)
This macro returns an area aligned to the required alignment for type
<type>, large enough for <count> objects of this type, and the result is a
pointer of this type. The goal is to ease allocation of known structures
whose alignment is not necessarily known to the developer (and to avoid
encouraging hard-coded alignment). The cast in return also provides a
last-minute check in case a wrong type is mistakenly used due to a poor
copy-paste or an extra "*" after the type. When DEBUG_MEM_STATS is in use,
the type is stored as a string in the ".extra" field so that it can be
displayed in "debug dev memstats". The "zalloc" variant also zeroes the
area on success. Both return NULL on failure.
- void ha_aligned_free(void *ptr)
Frees the area pointed to by ptr. It is the equivalent of free() but for
objects allocated using one of the functions above.
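A minimal usage sketch of the typed variants described above ("struct item"
being a purely illustrative type name):

    struct item *tab;

    /* 16 zeroed objects, aligned as required by the type */
    tab = ha_aligned_zalloc_typed(16, struct item);
    if (!tab)
        return -1;
    /* ... use tab[0..15] ... */
    ha_aligned_free(tab); /* never plain free() on an aligned area */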

View file

@ -245,30 +245,6 @@ mt_list_pop(l)
#=========#
mt_list_pop_locked(l)
Removes the list's first element, returns it locked. If the list was empty,
NULL is returned. A macro MT_LIST_POP_LOCKED() is provided for a
more convenient use; instead of returning the list element, it will return
the structure holding the element, taking care of preserving the NULL.
before:
+---+ +---+ +---+ +---+ +---+ +---+ +---+
#=>| L |<===>| A |<===>| B |<===>| C |<===>| D |<===>| E |<===>| F |<=#
# +---+ +---+ +---+ +---+ +---+ +---+ +---+ #
#=====================================================================#
after:
+---+ +---+ +---+ +---+ +---+ +---+
#=>| L |<===>| B |<===>| C |<===>| D |<===>| E |<===>| F |<=#
# +---+ +---+ +---+ +---+ +---+ +---+ #
#===========================================================#
+---+
# x| A |x #
# +---+ #
#=========#
_mt_list_lock_next(elt)
Locks the link that starts at the next pointer of the designated element.
The link is replaced by two locked pointers, and a pointer to the next
@ -400,9 +376,6 @@ mt_list_lock_prev(elt)
Return A elt
value: <===>
mt_list_try_lock_prev(elt)
Does the same thing as mt_list_lock_prev(), except if the list is
locked already, it returns { NULL, NULL } instead of waiting.
mt_list_lock_elem(elt)
Locks the element only. Both of its pointers are replaced by two locked

View file

@ -1,4 +1,4 @@
2025-08-11 - Pools structure and API
2022-02-24 - Pools structure and API
1. Background
-------------
@ -204,14 +204,6 @@ the cache, when this option is set, objects are picked from the cache from the
oldest one instead of the freshest one. This way even late memory corruptions
have a chance to be detected.
Another non-destructive approach is to use "-dMbackup". A full copy of the
object is made after its end, which eases inspection (e.g. of the parts
scratched by the pool_item elements), and a comparison is made upon allocation
of that object, just like with "-dMintegrity", causing a crash on mismatch. The
initial 4 words corresponding to the list are ignored as well. Note that when
both "-dMbackup" and "-dMintegrity" are used, the copy is performed before
being scratched, and the comparison is done by "-dMintegrity" only.
When build option DEBUG_MEMORY_POOLS is set, or the boot-time option "-dMtag"
is passed on the executable's command line, pool objects are allocated with
one extra pointer compared to the requested size, so that the bytes that follow
@ -239,6 +231,10 @@ currently in use:
+------------+ +------------+ / is set at build time
or -dMtag at boot time
Right now no provisions are made to return objects aligned on larger boundaries
than those currently covered by malloc() (i.e. two pointers). This need appears
from time to time and the layout above might evolve a little bit if needed.
4. Storage in the process-wide shared pool
------------------------------------------
@ -346,25 +342,7 @@ struct pool_head *create_pool(char *name, uint size, uint flags)
"-dMno-merge" is passed on the executable's command line, the pools
also need to have the exact same name to be merged. In addition, unless
MEM_F_EXACT is set in <flags>, the object size will usually be rounded
up to the size of pointers (16 or 32 bytes). MEM_F_UAF may be set on a
per-pool basis to enable the UAF detection only for this specific pool,
saving the massive overhead of global usage. The name that will appear
in the pool upon merging is the name of the first created pool. The
returned pointer is the new (or reused) pool head, or NULL upon error.
Pools created this way must be destroyed using pool_destroy().
struct pool_head *create_aligned_pool(char *name, uint size, uint align, uint flags)
Create a new pool named <name> for objects of size <size> bytes and
aligned to <align> bytes (0 meaning use the platform's default). Pool
names are truncated to their first 11 characters. Pools of very similar
size will usually be merged if both have set the flag MEM_F_SHARED in
<flags>. When DEBUG_DONT_SHARE_POOLS was set at build time, or
"-dMno-merge" is passed on the executable's command line, the pools
also need to have the exact same name to be merged. In addition, unless
MEM_F_EXACT is set in <flags>, the object size will usually be rounded
up to the size of pointers (16 or 32 bytes). MEM_F_UAF may be set on a
per-pool basis to enable the UAF detection only for this specific pool,
saving the massive overhead of global usage. The name that will appear
up to the size of pointers (16 or 32 bytes). The name that will appear
in the pool upon merging is the name of the first created pool. The
returned pointer is the new (or reused) pool head, or NULL upon error.
Pools created this way must be destroyed using pool_destroy().
@ -482,20 +460,6 @@ complicate maintenance.
A few macros exist to ease the declaration of pools:
DECLARE_ALIGNED_POOL(ptr, name, size, align)
Placed at the top level of a file, this declares a global memory pool
as variable <ptr>, name <name> and size <size> bytes per element, all
of which will be aligned to <align> bytes. The alignment will be
rounded up to the next power of two and will be at least as large as a
word on the platform. This is made via a call to REGISTER_ALIGNED_POOL()
and by assigning the resulting pointer to variable <ptr>. <ptr> will be
created of type "struct pool_head *". If the pool needs to be visible
outside of the function (which is likely), it will also need to be
declared somewhere as "extern struct pool_head *<ptr>;". It is
recommended to place such declarations very early in the source file so
that the variable is already known to all subsequent functions which
may use it.
DECLARE_POOL(ptr, name, size)
Placed at the top level of a file, this declares a global memory pool
as variable <ptr>, name <name> and size <size> bytes per element. This
@ -507,17 +471,6 @@ DECLARE_POOL(ptr, name, size)
declarations very early in the source file so that the variable is
already known to all subsequent functions which may use it.
DECLARE_STATIC_ALIGNED_POOL(ptr, name, size, align)
Placed at the top level of a file, this declares a static memory pool
as variable <ptr>, name <name> and size <size> bytes per element, all
of which will be aligned to <align> bytes. The alignment will be
rounded up to the next power of two and will be at least as large as a
word on the platform. This is made via a call to REGISTER_ALIGNED_POOL()
and by assigning the resulting pointer to local variable <ptr>. <ptr>
will be created of type "static struct pool_head *". It is recommended
to place such declarations very early in the source file so that the
variable is already known to all subsequent functions which may use it.
DECLARE_STATIC_POOL(ptr, name, size)
Placed at the top level of a file, this declares a static memory pool
as variable <ptr>, name <name> and size <size> bytes per element. This
@ -527,42 +480,6 @@ DECLARE_STATIC_POOL(ptr, name, size)
early in the source file so that the variable is already known to all
subsequent functions which may use it.
DECLARE_STATIC_TYPED_POOL(ptr, name, type[, extra[, align]])
Placed at the top level of a file, this declares a static memory pool
as variable <ptr>, name <name>, and configured to allocate objects of
type <type>. It is optionally possible to grow these objects by <extra>
bytes (e.g. if they contain some variable length data at the end), and
to force them to be aligned to <align> bytes. If only alignment is
desired without extra data, pass 0 as <extra>. Alignment must be at
least as large as the type's, and a control is enforced at declaration
time so that objects cannot be less aligned than what is promised to
the compiler. The default alignment of zero indicates that the default
one (from the type) should be used. This is made via a call to
REGISTER_ALIGNED_POOL() and by assigning the resulting pointer to local
variable <ptr>. <ptr> will be created of type "static struct pool_head
*". It is recommended to place such declarations very early in the
source file so that the variable is already known to all subsequent
functions which may use it.
DECLARE_TYPED_POOL(ptr, name, type[, extra[, align]])
Placed at the top level of a file, this declares a global memory pool
as variable <ptr>, name <name>, and configured to allocate objects of
type <type>. It is optionally possible to grow these objects by <extra>
bytes (e.g. if they contain some variable length data at the end), and
to force them to be aligned to <align> bytes. If only alignment is
desired without extra data, pass 0 as <extra>. Alignment must be at
least as large as the type's, and a control is enforced at declaration
time so that objects cannot be less aligned than what is promised to
the compiler. The default alignment of zero indicates that the default
one (from the type) should be used. This is made via a call to
REGISTER_ALIGNED_POOL() and by assigning the resulting pointer to
variable <ptr>. <ptr> will be created of type "struct pool_head *". If
the pool needs to be visible outside of the function (which is likely),
it will also need to be declared somewhere as "extern struct pool_head
*<ptr>;". It is recommended to place such declarations very early in
the source file so that the variable is already known to all subsequent
functions which may use it.
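For example, here is a minimal hedged sketch of the usual pattern with a
static pool; the struct and its fields are hypothetical, and pool_alloc()/
pool_free() are used the way they are elsewhere in the tree:

    #include <haproxy/pool.h>

    struct foo {
            int refcount;
            char name[32];
    };

    /* private pool, known only to this file */
    DECLARE_STATIC_POOL(pool_head_foo, "foo", sizeof(struct foo));

    static struct foo *foo_new(void)
    {
            struct foo *f = pool_alloc(pool_head_foo);

            if (f)
                    f->refcount = 1;
            return f;
    }

    static void foo_release(struct foo *f)
    {
            pool_free(pool_head_foo, f);
    }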
6. Build options
----------------

View file

@ -221,9 +221,6 @@ state field before the call to ->process()
- TASK_F_UEVT2 one-shot user-defined event type 2. This is application
specific, and reset to 0 when the handler is called.
- TASK_F_UEVT3 one-shot user-defined event type 3. This is application
specific, and reset to 0 when the handler is called.
In addition, a few persistent flags may be observed or manipulated by the
application, both for tasks and tasklets:

View file

@ -11,7 +11,7 @@ default init, this was controversial but fedora and archlinux already uses it.
At this time HAProxy still had a multi-process model, and the way haproxy was
working was incompatible with the daemon mode.
Systemd is compatible with traditional forking services, but somehow HAProxy
is different. To work correctly, systemd needs a main PID, this is the PID of
the process that systemd will supervise.
@ -45,7 +45,7 @@ However the wrapper suffered from several problems:
### mworker V1
HAProxy 1.8 got rid of the wrapper which was replaced by the master worker
mode. This first version was basically a reintegration of the wrapper features
within HAProxy. HAProxy is launched with the -W flag, reads the configuration and
then forks. In mworker mode, the master is usually launched as a root process,
@ -86,7 +86,7 @@ retrieved automatically.
The master is supervising the workers: when a current worker (not a previous one
from before the reload) is exiting without being asked for a reload, the master
will emit an "exit-on-failure" error and will kill every worker with a SIGTERM
and exits with the same error code as the failed worker; this behavior can be
changed by using the "no exit-on-failure" option in the global section.
While the master is supervising the workers using the wait() function, the
@ -186,8 +186,8 @@ number that can be found in HAPROXY_PROCESSES. With this change the stats socket
in the configuration is less useful and everything can be done from the master
CLI.
With 2.7, the reload mechanism of the master CLI evolved. With previous
versions, this mechanism was asynchronous, so once the `reload` command was
received, the
master would reload, the active master CLI connection was closed, and there was
no way to return a status as a response to the `reload` command. To achieve a
synchronous reload, a dedicated sockpair is used, one side uses a master CLI

View file

@ -1,53 +0,0 @@
2025/09/16 - SHM stats file storage description and hints
Shm stats file (used to share thread-groupable statistics over multiple
processes through the "shm-stats-file" directive) is made of:
- a main header which describes the file version, the processes making
use of it, the common clock source and hints about the number of
objects that are currently stored or provisioned in the file.
- an indefinite number of "objects" blocks coming right after the
main header, all blocks have the same size which is the size of the
maximum underlying object that may be stored. The main header tells
how many objects are stored in the file.
File header looks like this (32/64 bits systems):
0 8 16 32 48 64
+-------+---------+----------------+-------------------+-------------------+
| VERSION | 2 bytes | global_now_ms (global mono date in ms)|
|MAJOR | MINOR | hole | |
+----------------------------------+---------------------------------------+
| global_now_ns (global mono date in ns) |
+--------------------------------------------------------------------------+
| now_offset (offset applied to global monotonic date |
| on startup) |
+--------------------------------------------------------------------------+
| Process slot : | 1byte x 64
| pid | heartbeat (ticks) |
+----------------------------------+---------------------------------------+
| objects | objects slots |
| (used objects) | (available for use) |
+----------------------------------+---------------------------------------+
| padding (for future use) | 128 bytes
+--------------------------------------------------------------------------+
Object block looks like this:
0 8 16 32 48 64
+-------+---------+----------------+-------------------+-------------------+
| GUID | 128 bytes
+ (zero terminated) +
| |
+-------+---------+--------------------------------------------------------+
| tgid | type | padding |
+-------+---------+--------------------------------------------------------+
| users (bitmask of process slots making use of the obj) |
+--------------------------------------------------------------------------+
| object data |
| (version dependent) |
| struct be_counters_shared_tg or |
| struct fe_counters_shared_tg |
+--------------------------------------------------------------------------+
| padding (to anticipate evolutions) | 64 bytes
+--------------------------------------------------------------------------+
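To make the layout concrete, here is a hedged C sketch of one object block as
drawn above; the exact types live in the HAProxy sources and may differ (the
counters structure is shown as an opaque trailing area):

    #include <stdint.h>

    struct shm_stats_obj_sketch {
            char     guid[128];    /* zero terminated */
            uint8_t  tgid;
            uint8_t  type;
            uint8_t  pad[6];       /* padding */
            uint64_t users;        /* bitmask of process slots using the obj */
            uint8_t  data[];       /* struct fe_counters_shared_tg or
                                    * struct be_counters_shared_tg (version
                                    * dependent), then 64 bytes of padding */
    };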

View file

@ -1,144 +0,0 @@
2025-02-13 - Details of the watchdog's internals
------------------------------------------------
1. The watchdog timer
---------------------
The watchdog sets up a timer that triggers every 1 to 1000ms. This is pre-
initialized by init_wdt() which positions wdt_handler() as the signal handler
of signal WDTSIG (SIGALRM).
But this is not sufficient, an alarm actually has to be set. This is done for
each thread by init_wdt_per_thread() which calls clock_setup_signal_timer()
which in turn enables a ticking timer for the current thread, that delivers
the WDTSIG signal (SIGALRM) to the process. Since there's no notion of thread
at this point, there are as many timers as there are threads, and each signal
comes with an integer value which in fact contains the thread number as passed
to clock_setup_signal_timer() during initialization.
The timer preferably uses CLOCK_THREAD_CPUTIME_ID if available, otherwise
falls back to CLOCK_REALTIME. The former is more accurate as it really counts
the time spent in the process, while the latter might also account for time
stuck on paging in etc.
Then wdt_ping() is called to arm the timer. It's set to trigger every
<wdt_warn_blocked_traffic_ns> interval. It is also called by wdt_handler()
to reprogram a new wakeup after it has ticked.
When wdt_handler() is called, it reads the thread number in si_value.sival_int,
as positioned during initialization. Most of the time the signal lands on the
wrong thread (typically thread 1 regardless of the reported thread). From this
point, the function retrieves the various info related to that thread's recent
activity (its current time and flags), ignores corner cases such as if that
thread is already dumping another one, being dumped, in the poller, has quit,
etc.
If the thread was not marked as stuck, it's verified that no progress was made
for at least one second, in which case the TH_FL_STUCK flag is set. The lack of
progress is measured by the distance between the thread's current cpu_time and
its prev_cpu_time. If the lack of progress is at least as large as the warning
threshold, then the signal is bounced to the faulty thread if it's not the
current one. Since this bounce is based on the time spent without update, it
already doesn't happen often.
Once on the faulty thread, two checks are performed:
1) if the thread was already marked as stuck, then the thread is considered
as definitely stuck, and ha_panic() is called. It will not return.
2) a check is made to verify if the scheduler is still ticking, by reading
and setting a variable that only the scheduler can clear when leaving a
task. If the scheduler didn't make any progress, ha_stuck_warning() is
called to emit a warning about that thread.
Most of the time there's no panic of course, and a wdt_ping() is performed
before leaving the handler to reprogram a check for that thread.
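The following standalone sketch shows the underlying POSIX mechanism described
above (a CPU-time timer delivering SIGALRM with a "thread number" carried in
sival_int); it is illustrative only and not HAProxy code:

    #include <signal.h>
    #include <time.h>

    static volatile sig_atomic_t ticks;

    static void wdt_handler_sketch(int sig, siginfo_t *si, void *uc)
    {
            int thr = si->si_value.sival_int; /* thread number set at arm time */

            (void)sig; (void)uc; (void)thr;
            ticks++; /* a real handler would check that thread's progress */
    }

    int main(void)
    {
            struct sigaction sa = { .sa_sigaction = wdt_handler_sketch,
                                    .sa_flags = SA_SIGINFO };
            struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
                                    .sigev_signo = SIGALRM };
            struct itimerspec its = { .it_value.tv_sec = 1,
                                      .it_interval.tv_sec = 1 };
            timer_t timer;

            sigaction(SIGALRM, &sa, NULL);
            sev.sigev_value.sival_int = 0; /* "thread 0" */

            /* prefer the per-thread CPU clock, as explained above */
            if (timer_create(CLOCK_THREAD_CPUTIME_ID, &sev, &timer) != 0)
                    timer_create(CLOCK_REALTIME, &sev, &timer);
            timer_settime(timer, 0, &its, NULL);

            while (ticks < 3)
                    ; /* burn CPU so the CPU-time clock advances */
            return 0;
    }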
2. The debug handler
--------------------
Both ha_panic() and ha_stuck_warning() are quite similar. In both cases, they
will first verify that no panic is in progress and just return if so. This is
verified using mark_tainted() which atomically sets a tainted bit and returns
the previous value. ha_panic() sets TAINTED_PANIC while ha_stuck_warning() will
set TAINTED_WARN_BLOCKED_TRAFFIC.
ha_panic() uses the current thread's trash buffer to produce the messages, as
we don't care about its contents since that thread will never return. However
ha_stuck_warning() instead uses a local 8kB buffer in the thread's stack.
ha_panic() will call ha_thread_dump_fill() for each thread, to complete the
buffer being filled with each thread's dump messages. ha_stuck_warning() only
calls ha_thread_dump_one(), which works on the current thread. In both cases
the message is then directly sent to fd #2 (stderr) and ha_thread_dump_done()
is called to release the dumped thread.
Both print a few extra messages, but ha_panic() just ends by looping on abort()
until the process dies.
ha_thread_dump_fill() uses a locking mechanism to make sure that each thread is
only dumped once at a time. For this it atomically sets its thread_dump_buffer
to point to the target buffer. The thread_dump_buffer has 4 possible values
(a sketch follows the list below):
- NULL: no dump in progress
- a valid, even, pointer: this is the pointer to the buffer that's currently
in the process of being filled by the thread
- a valid pointer + 1: this is the pointer of the now filled buffer, that the
caller can consume. The atomic |1 at the end marks the end of the dump.
- 0x2: this indicates to the dumping function that it is responsible for
assigning its own buffer itself (used by the debug_handler to pick one of
its own trash buffers during a panic). The idea here is that each thread
will keep their own copy of their own dump so that it can be later found in
the core file for inspection.
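A hedged sketch of this tagging protocol using C11 atomics (the names and the
signaling step are illustrative, not the real HAProxy code):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic(uintptr_t) dump_buffer; /* plays thread_dump_buffer's role */

    /* requesting side: point the target thread to <buf>, wait for completion */
    static void request_dump(char *buf)
    {
            atomic_store(&dump_buffer, (uintptr_t)buf);
            /* ... send DEBUGSIG to the target thread here ... */
            while (!(atomic_load(&dump_buffer) & 1))
                    ; /* dump still in progress */
    }

    /* dumping side: fill the buffer, then atomically set the "+1" marker */
    static void finish_dump(void)
    {
            uintptr_t ptr = atomic_load(&dump_buffer);

            if (ptr == 0x2)
                    ; /* pick one of our own trash buffers (panic case) */
            /* ... write the dump into the buffer ... */
            atomic_fetch_or(&dump_buffer, 1);
    }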
A copy of the last valid thread_dump_buffer used is kept in last_dump_buffer,
for easier post-mortem analysis. This one may be NULL or even invalid, but
usually during a panic it will be valid, and may reveal useful hints even if it
still contains the dump of the last warning. Usually this will point to a trash
buffer or to stack area.
ha_thread_dump_fill() then either directly calls ha_thread_dump_one() if the
target thread is the current thread, or sends the target thread DEBUGSIG
(SIGURG) if it's a different thread. This signal is initialized at boot time
by init_debug() to call handler debug_handler().
debug_handler() then operates on the target thread and recognizes that it must
allocate its own buffer if the pointer is 0x2, calls ha_thread_dump_one(), then
waits forever (it does not return from the signal handler so as to make sure
the dumped thread will not badly interact with other ones).
ha_thread_dump_one() collects some info, that it prints all along into the
target buffer. Depending on the situation, it will dump current tasks or not,
may mark that Lua is involved and TAINTED_LUA_STUCK, and if running in shared
mode, also taint the process with TAINTED_LUA_STUCK_SHARED. It calls
ha_dump_backtrace() before returning.
ha_dump_backtrace() produces a backtrace into a local buffer (100 entries max),
then dumps the code bytes near the crashing instruction, dumps pointers and
tries to resolve function names, and sends all of that into the target buffer.
On some architectures (x86_64, arm64), it will also try to detect and decode
call instructions and resolve them to called functions.
3. Improvements
---------------
The symbols resolution is extremely expensive, particularly for the warnings
which should be fast. But we need it, it's just unfortunate that it strikes at
the wrong moment. At least ha_dump_backtrace() does disable signals while it's
resolving, in order to avoid unwanted re-entrance. In addition, the called
function resolve_sym_name() uses some locking and refrains from calling the
dladdr family of functions in a re-entrant way (in the worst case only
well-known symbols will be resolved).
In an ideal case, ha_dump_backtrace() would dump the pointers to a local array,
which would then later be resolved asynchronously in a tasklet. This can work
because the code bytes will not change either so the dump can be done at once
there.
However the tasks dumps are not much compatible with this. For example
ha_task_dump() makes a number of tests and itself will call hlua_traceback() if
needed, so it might still need to be dumped in real time synchronously and
buffered. But then it's difficult to reassemble chunks of text between the
backtrace (that needs to be resolved later) and the tasks/lua parts. Or maybe
we can afford to disable Lua trace dumps in warnings and keep them only for
panics (where the asynchronous resolution is not needed) ?
Also differentiating the call paths for warnings and panics is not something
easy either.

View file

@ -1,7 +1,7 @@
-----------------------
HAProxy Starter Guide
-----------------------
version 3.4
version 3.2
This document is an introduction to HAProxy for all those who don't know it, as
@ -1693,7 +1693,7 @@ A small team of trusted developers will receive it and will be able to propose
a fix. We usually don't use embargoes and once a fix is available it gets
merged. In some rare circumstances it can happen that a release is coordinated
with software vendors. Please note that this process usually messes up with
everyone's work, and that rushed up releases can sometimes introduce new bugs,
so it's best avoided unless strictly necessary; as such, there is often little
consideration for reports that needlessly cause such extra burden, and the best
way to see your work credited usually is to provide a working fix, which will

View file

@ -893,9 +893,7 @@ Core class
**context**: init, task, action
This function returns a new object of a *httpclient* class. An *httpclient*
object must be used to process one and only one request. It must never be
reused to process several requests.
:returns: A :ref:`httpclient_class` object.
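A hedged sketch of typical use from a task (the URL and logging are
illustrative):

    core.register_task(function()
        local hc = core.httpclient()
        local res = hc:get{url="http://127.0.0.1:8080/"}
        if res then
            core.Info("httpclient status: " .. tostring(res.status))
        end
    end)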
@ -928,25 +926,12 @@ Core class
its work and wants to give back the control to HAProxy without executing the
remaining code. It can be seen as a multi-level "return".
.. js:function:: core.wait([milliseconds])
**context**: task, action
Gives control back to the HAProxy scheduler. Unlike :js:func:`core.yield`,
the task will not be woken up automatically to resume as fast as possible.
Instead, it will wait for an event to wake the task. If the milliseconds
argument is provided, the Lua execution will be automatically resumed past
this delay even if no event caused the task to wake itself up.

:param integer milliseconds: automatic wakeup past this delay. (optional)
.. js:function:: core.yield()
**context**: task, action
Gives control back to the HAProxy scheduler. It is used when the Lua
processing consumes a lot of processing time. Lua execution will be resumed
automatically (automatic reschedule).
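As a hedged sketch of the difference between the two calls (the task body is
purely illustrative):

    core.register_task(function()
        local n = 0
        while true do
            n = n + 1
            if n % 100000 == 0 then
                core.yield()      -- reschedule; resumed as fast as possible
            end
            if n % 1000000 == 0 then
                core.wait(1000)   -- sleep until an event, or 1s at most
            end
        end
    end)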
.. js:function:: core.parse_addr(address)
@ -1089,13 +1074,18 @@ Core class
perform the heavy job in a dedicated task and allow remaining events to be
processed more quickly.
.. js:function:: core.use_native_mailers_config()
.. js:function:: core.disable_legacy_mailers()
**context**: body
**LEGACY**
Inform haproxy that the script will make use of the native "mailers"
config section (although legacy). In other words, inform haproxy that
:js:func:`Proxy.get_mailers()` will be used later in the program.
**context**: body, init
Disable the sending of email alerts through the legacy email sending
function when mailers are used in the configuration.
Use this when sending email alerts directly from lua.
:see: :js:func:`Proxy.get_mailers()`
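A hedged sketch of the expected call sequence (the proxy name "mypx" is
hypothetical):

    -- announce that Lua will read the legacy "mailers" section
    core.use_native_mailers_config()

    core.register_init(function()
        local mailers = core.proxies["mypx"]:get_mailers()
        if mailers then
            -- send email alerts from Lua using this config
        end
    end)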
.. _proxy_class:
@ -1224,14 +1214,8 @@ Proxy class
**LEGACY**
Returns a table containing legacy mailers config (from haproxy configuration
file) for the current proxy or nil if mailers are not available for the proxy.
.. warning::
When relying on :js:func:`Proxy.get_mailers()` to retrieve mailers
configuration, :js:func:`core.use_native_mailers_config()` must be called
first from body or init context to inform haproxy that Lua makes use of the
legacy mailers config.
Returns a table containing mailers config for the current proxy or nil
if mailers are not available for the proxy.
:param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
proxy.
@ -1248,6 +1232,10 @@ ProxyMailers class
This class provides mailers config for a given proxy.
If sending emails directly from lua, please consider
:js:func:`core.disable_legacy_mailers()` to disable the email sending from
haproxy. (Or email alerts will be sent twice...)
.. js:attribute:: ProxyMailers.track_server_health
Boolean set to true if the option "log-health-checks" is configured on
@ -1881,17 +1869,6 @@ Queue class
Use :js:func:`core.queue` to get a new Queue object.
.. js:function:: Queue.alarm()
**context**: task, action, service
Sets a wakeup alarm on the current Lua context so that when new data
becomes available on the Queue, the current Lua context is woken up
automatically. It can be combined with :js:func:`core.wait` to wait
for Queue events.
:param class_queue queue: A :ref:`queue_class` to the current queue
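For instance, a hedged sketch of a consumer task combining both calls (the
queue usage is illustrative):

    local jobs = core.queue()

    core.register_task(function()
        while true do
            jobs:alarm()          -- wake this task when data arrives
            local item = jobs:pop()
            while item do
                -- process item here ...
                item = jobs:pop()
            end
            core.wait(5000)       -- resume on event, or after 5s anyway
        end
    end)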
.. js:function:: Queue.size(queue)
This function returns the number of items within the Queue.
@ -2580,9 +2557,7 @@ HTTPClient class
.. js:class:: HTTPClient
The httpclient class allows issuing outbound HTTP requests through a simple
API without knowledge of HAProxy internals. Any instance must be used to
process one and only one request. It must never be reused to process several
requests.
.. js:function:: HTTPClient.get(httpclient, request)
.. js:function:: HTTPClient.head(httpclient, request)
@ -3489,7 +3464,7 @@ Patref class
in case of duplicated entries, only the first matching entry is returned.
.. Warning::
Not meant to be shared between multiple contexts. If multiple contexts
need to work on the same pattern reference, each context should have
its own patref object.
@ -3519,7 +3494,7 @@ Patref class
.. js:function:: Patref.commit(ref)
Tries to commit pending Patref object updates, that is updates made to the
local object will be committed to the underlying pattern reference storage
in an atomic manner upon success. Upon failure, local pending updates are
lost. Upon success, all other pending updates on the pattern reference
(e.g.: "prepare" from the cli or from other Patref Lua objects) started
@ -3911,31 +3886,16 @@ AppletTCP class
:param class_AppletTCP applet: An :ref:`applettcp_class`
:returns: a string. The string can be empty if we reach the end of the stream.
.. js:function:: AppletTCP.receive(applet, [size, [timeout]])
.. js:function:: AppletTCP.receive(applet, [size])
Reads data from the TCP stream, according to the specified read *size*. If the
*size* is missing, the function tries to read all the content of the stream
until the end. An optional timeout may be specified in milliseconds. In this
case, the function waits no longer than this delay and returns the data
available at that time, or nil if no data was received. An empty string is
returned if the connection is closed.
:param class_AppletTCP applet: An :ref:`applettcp_class`
:param integer size: the required read size.
:returns: return nil if the timeout has expired and no data was available but
can still be received. Otherwise, a string is returned, possibly an empty
string if the connection is closed.
.. js:function:: AppletTCP.try_receive(applet)
Reads available data from the TCP stream and returns immediately. Returns a
string containing read bytes or nil if no bytes are available at that time. An
empty string is returned if the connection is closed.
:param class_AppletTCP applet: An :ref:`applettcp_class`
:returns: return nil if no data was available but can still be
received. Otherwise, a string is returned, possibly an empty string if the
connection is closed.
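A hedged sketch of how the timeout form may be used by a TCP service (the
service name and logic are illustrative):

    core.register_service("echo_tmo", "tcp", function(applet)
        while true do
            local data = applet:receive(1, 1000)  -- 1 byte, 1s timeout
            if data == nil then
                -- timeout: nothing received yet, try again
            elseif data == "" then
                return                            -- peer closed
            else
                applet:send(data)
            end
        end
    end)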
.. js:function:: AppletTCP.send(appletmsg)
@ -4612,27 +4572,6 @@ HTTPMessage class
data by default.
:returns: an integer containing the amount of bytes copied or -1.
.. js:function:: HTTPMessage.set_body_len(http_msg, length)
This function changes the expected payload length of the HTTP message
**http_msg**. **length** can be an integer value. In that case, a
"Content-Length" header is added with the given value. It is also possible to
pass the **"chunked"** string instead of an integer value to force the HTTP
message to be chunk-encoded. In that case, a "Transfer-Encoding" header is
added with the "chunked" value. In both cases, all existing "Content-Length"
and "Transfer-Encoding" headers are removed.
This function should be used in the filter context to be able to alter the
payload of the HTTP message. The internal state of the HTTP message is updated
accordingly. :js:func:`HTTPMessage.add_header()` or
:js:func:`HTTPMessage.set_header()` functions must be used in that case.
:param class_httpmessage http_msg: The manipulated HTTP message.
:param type length: The new payload length to set. It can be an integer or
the string "chunked".
:returns: true if the payload length was successfully updated, false
otherwise.
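A hedged, simplified sketch of its use from a filter callback (the filter
class plumbing is omitted and the names are illustrative):

    function MyFilter:http_headers(txn, http_msg)
        -- switch the response to chunked encoding so the payload
        -- can be rewritten without tracking its final length
        http_msg:set_body_len("chunked")
    end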
.. js:function:: HTTPMessage.set_eom(http_msg)
This function set the end of message for the HTTP message **http_msg**.

File diff suppressed because it is too large

View file

@ -28,9 +28,7 @@ Revision history
string encoding. With contributions from Andriy Palamarchuk
(Amazon.com).
2020/03/05 - added the unique ID TLV type (Tim Düsterhus)
2025/09/09 - added SSL-related TLVs for key exchange group and signature
scheme (Steven Collison)
2026/01/15 - added SSL client certificate TLV (Simon Ser)
1. Background
@ -537,21 +535,18 @@ the information they choose to publish.
The following types have already been registered for the <type> field :
#define PP2_TYPE_ALPN 0x01
#define PP2_TYPE_AUTHORITY 0x02
#define PP2_TYPE_CRC32C 0x03
#define PP2_TYPE_NOOP 0x04
#define PP2_TYPE_UNIQUE_ID 0x05
#define PP2_TYPE_SSL 0x20
#define PP2_SUBTYPE_SSL_VERSION 0x21
#define PP2_SUBTYPE_SSL_CN 0x22
#define PP2_SUBTYPE_SSL_CIPHER 0x23
#define PP2_SUBTYPE_SSL_SIG_ALG 0x24
#define PP2_SUBTYPE_SSL_KEY_ALG 0x25
#define PP2_SUBTYPE_SSL_GROUP 0x26
#define PP2_SUBTYPE_SSL_SIG_SCHEME 0x27
#define PP2_SUBTYPE_SSL_CLIENT_CERT 0x28
#define PP2_TYPE_NETNS 0x30
#define PP2_TYPE_ALPN 0x01
#define PP2_TYPE_AUTHORITY 0x02
#define PP2_TYPE_CRC32C 0x03
#define PP2_TYPE_NOOP 0x04
#define PP2_TYPE_UNIQUE_ID 0x05
#define PP2_TYPE_SSL 0x20
#define PP2_SUBTYPE_SSL_VERSION 0x21
#define PP2_SUBTYPE_SSL_CN 0x22
#define PP2_SUBTYPE_SSL_CIPHER 0x23
#define PP2_SUBTYPE_SSL_SIG_ALG 0x24
#define PP2_SUBTYPE_SSL_KEY_ALG 0x25
#define PP2_TYPE_NETNS 0x30
2.2.1 PP2_TYPE_ALPN
@ -659,25 +654,13 @@ of the used cipher, for example "ECDHE-RSA-AES128-GCM-SHA256".
The second level TLV PP2_SUBTYPE_SSL_SIG_ALG provides the US-ASCII string name
of the algorithm used to sign the certificate presented by the frontend when
the incoming connection was made over an SSL/TLS transport layer, for example
"RSA-SHA256".
"SHA256".
The second level TLV PP2_SUBTYPE_SSL_KEY_ALG provides the US-ASCII string name
of the algorithm used to generate the key of the certificate presented by the
frontend when the incoming connection was made over an SSL/TLS transport layer,
for example "RSA2048".
The second level TLV PP2_SUBTYPE_SSL_GROUP provides the US-ASCII string name of
the key exchange algorithm used for the frontend TLS connection, for example
"secp256r1".
The second level TLV PP2_SUBTYPE_SSL_SIG_SCHEME provides the US-ASCII string
name of the algorithm the frontend used to sign the ServerKeyExchange or
CertificateVerify message, for example "rsa_pss_rsae_sha256".
The optional second level TLV PP2_SUBTYPE_SSL_CLIENT_CERT provides the raw
X.509 client certificate encoded in ASN.1 DER. The frontend may choose to omit
this TLV depending on configuration.
In all cases, the string representation (in UTF8) of the Common Name field
(OID: 2.5.4.3) of the client certificate's Distinguished Name is appended
using the TLV format and the type PP2_SUBTYPE_SSL_CN. E.g. "example.com".
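To illustrate consumption of these TLVs, here is a hedged C sketch that walks
a TLV area using the layout defined earlier in this document (one type byte, a
2-byte big-endian length, then the value); the function and variable names are
hypothetical:

    #include <stddef.h>
    #include <stdint.h>

    /* find the first TLV of type <type> in <buf>, or return NULL */
    static const uint8_t *pp2_find_tlv(const uint8_t *buf, size_t len,
                                       uint8_t type, size_t *vlen)
    {
            size_t ofs = 0;

            while (ofs + 3 <= len) {
                    size_t l = ((size_t)buf[ofs + 1] << 8) | buf[ofs + 2];

                    if (ofs + 3 + l > len)
                            break; /* truncated TLV: stop */
                    if (buf[ofs] == type) {
                            *vlen = l;
                            return buf + ofs + 3;
                    }
                    ofs += 3 + l;
            }
            return NULL;
    }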

View file

@ -24,7 +24,7 @@ vtest installation
------------------------
To use vtest you will have to download and compile the recent vtest
sources found at https://github.com/vtest/VTest2.
sources found at https://github.com/vtest/VTest.
To compile vtest:

View file

@ -1,14 +0,0 @@
global
default-path config
tune.lua.bool-sample-conversion normal
# load all games here
lua-load lua/trisdemo.lua
defaults
timeout client 1h
# map one TCP port to each game
.notice 'use "socat TCP-CONNECT:0:7001 STDIO,raw,echo=0" to start playing'
frontend trisdemo
bind :7001
tcp-request content use-service lua.trisdemo

View file

@ -3,7 +3,7 @@
-- Provides a pure lua alternative to tcpcheck mailers.
--
-- To be loaded using "lua-load" from haproxy configuration to handle
-- email-alerts directly from lua
-- email-alerts directly from lua and disable legacy tcpcheck implementation.
local SYSLOG_LEVEL = {
["EMERG"] = 0,
@ -364,9 +364,9 @@ local function srv_event_add(event, data)
mailers_track_server_events(data.reference)
end
-- tell haproxy that we do use the legacy native "mailers" config section
-- which allows us to retrieve mailers configuration using Proxy:get_mailers()
core.use_native_mailers_config()
-- disable legacy email-alerts since email-alerts will be sent from lua directly
core.disable_legacy_mailers()
-- event subscriptions are purposely performed in an init function to prevent
-- email alerts from being generated too early (when process is starting up)

View file

@ -1,251 +0,0 @@
-- Example game of falling pieces for HAProxy CLI/Applet
local board_width = 10
local board_height = 20
local game_name = "Lua Tris Demo"
-- Shapes with IDs for color mapping
local pieces = {
{id = 1, shape = {{1,1,1,1}}}, -- I (Cyan)
{id = 2, shape = {{1,1},{1,1}}}, -- O (Yellow)
{id = 3, shape = {{0,1,0},{1,1,1}}}, -- T (Purple)
{id = 4, shape = {{0,1,1},{1,1,0}}}, -- S (Green)
{id = 5, shape = {{1,1,0},{0,1,1}}}, -- Z (Red)
{id = 6, shape = {{1,0,0},{1,1,1}}}, -- J (Blue)
{id = 7, shape = {{0,0,1},{1,1,1}}} -- L (Orange)
}
-- ANSI escape codes
local clear_screen = "\27[2J"
local cursor_home = "\27[H"
local cursor_hide = "\27[?25l"
local cursor_show = "\27[?25h"
local reset_color = "\27[0m"
local color_codes = {
[1] = "\27[1;36m", -- I: Cyan
[2] = "\27[1;37m", -- O: White
[3] = "\27[1;35m", -- T: Purple
[4] = "\27[1;32m", -- S: Green
[5] = "\27[1;31m", -- Z: Red
[6] = "\27[1;34m", -- J: Blue
[7] = "\27[1;33m" -- L: Yellow
}
local function init_board()
local board = {}
for y = 1, board_height do
board[y] = {}
for x = 1, board_width do
board[y][x] = 0 -- 0 for empty, piece ID for placed blocks
end
end
return board
end
local function can_place_piece(board, piece, px, py)
for y = 1, #piece do
for x = 1, #piece[1] do
if piece[y][x] == 1 then
local board_x = px + x - 1
local board_y = py + y - 1
if board_x < 1 or board_x > board_width or board_y > board_height or
(board_y >= 1 and board[board_y][board_x] ~= 0) then
return false
end
end
end
end
return true
end
local function place_piece(board, piece, piece_id, px, py)
for y = 1, #piece do
for x = 1, #piece[1] do
if piece[y][x] == 1 then
local board_x = px + x - 1
local board_y = py + y - 1
if board_y >= 1 and board_y <= board_height then
board[board_y][board_x] = piece_id -- Store piece ID for color
end
end
end
end
end
local function clear_lines(board)
local lines_cleared = 0
local y = board_height
while y >= 1 do
local full = true
for x = 1, board_width do
if board[y][x] == 0 then
full = false
break
end
end
if full then
table.remove(board, y)
table.insert(board, 1, {})
for x = 1, board_width do
board[1][x] = 0
end
lines_cleared = lines_cleared + 1
else
y = y - 1
end
end
return lines_cleared
end
local function rotate_piece(piece, piece_id, px, py, board)
local new_piece = {}
for x = 1, #piece[1] do
new_piece[x] = {}
for y = 1, #piece do
new_piece[x][#piece + 1 - y] = piece[y][x]
end
end
if can_place_piece(board, new_piece, px, py) then
return new_piece
end
return piece
end
function render(applet, board, piece, piece_id, px, py, score)
local output = cursor_home
output = output .. game_name .. " - Lines: " .. score .. "\r\n"
output = output .. "+" .. string.rep("-", board_width * 2) .. "+\r\n"
for y = 1, board_height do
output = output .. "|"
for x = 1, board_width do
local char = " "
-- Current piece
for py_idx = 1, #piece do
for px_idx = 1, #piece[1] do
if piece[py_idx][px_idx] == 1 then
local board_x = px + px_idx - 1
local board_y = py + py_idx - 1
if board_x == x and board_y == y then
char = color_codes[piece_id] .. "[]" .. reset_color
end
end
end
end
-- Placed blocks
if board[y][x] ~= 0 then
char = color_codes[board[y][x]] .. "[]" .. reset_color
end
output = output .. char
end
output = output .. "|\r\n"
end
output = output .. "+" .. string.rep("-", board_width * 2) .. "+\r\n"
output = output .. "Use arrow keys to move, Up to rotate, q to quit"
applet:send(output)
end
function handler(applet)
local board = init_board()
local piece_idx = math.random(#pieces)
local current_piece = pieces[piece_idx].shape
local piece_id = pieces[piece_idx].id
local piece_x = math.floor(board_width / 2) - math.floor(#current_piece[1] / 2)
local piece_y = 1
local score = 0
local game_over = false
local delay = 500
if not can_place_piece(board, current_piece, piece_x, piece_y) then
game_over = true
end
applet:send(cursor_hide)
applet:send(clear_screen)
-- fall the piece by one line every delay
local function fall_piece()
while not game_over do
piece_y = piece_y + 1
if not can_place_piece(board, current_piece, piece_x, piece_y) then
piece_y = piece_y - 1
place_piece(board, current_piece, piece_id, piece_x, piece_y)
score = score + clear_lines(board)
piece_idx = math.random(#pieces)
current_piece = pieces[piece_idx].shape
piece_id = pieces[piece_idx].id
piece_x = math.floor(board_width / 2) - math.floor(#current_piece[1] / 2)
piece_y = 1
if not can_place_piece(board, current_piece, piece_x, piece_y) then
game_over = true
end
end
core.msleep(delay)
end
end
core.register_task(fall_piece)
local function drop_piece()
while can_place_piece(board, current_piece, piece_x, piece_y) do
piece_y = piece_y + 1
end
piece_y = piece_y - 1
place_piece(board, current_piece, piece_id, piece_x, piece_y)
score = score + clear_lines(board)
piece_idx = math.random(#pieces)
current_piece = pieces[piece_idx].shape
piece_id = pieces[piece_idx].id
piece_x = math.floor(board_width / 2) - math.floor(#current_piece[1] / 2)
piece_y = 1
if not can_place_piece(board, current_piece, piece_x, piece_y) then
game_over = true
end
render(applet, board, current_piece, piece_id, piece_x, piece_y, score)
end
while not game_over do
render(applet, board, current_piece, piece_id, piece_x, piece_y, score)
-- update the delay based on the score: 500 for 0 lines to 100ms for 100 lines.
if score >= 100 then
delay = 100
else
delay = 500 - 4*score
end
local input = applet:receive(1, delay)
if input then
if input == "" or input == "q" then
game_over = true
elseif input == "\27" then
local a = applet:receive(1, delay)
if a == "[" then
local b = applet:receive(1, delay)
if b == "A" then -- Up arrow (rotate clockwise)
current_piece = rotate_piece(current_piece, piece_id, piece_x, piece_y, board)
elseif b == "B" then -- Down arrow (full drop)
drop_piece()
elseif b == "C" then -- Right arrow
piece_x = piece_x + 1
if not can_place_piece(board, current_piece, piece_x, piece_y) then
piece_x = piece_x - 1
end
elseif b == "D" then -- Left arrow
piece_x = piece_x - 1
if not can_place_piece(board, current_piece, piece_x, piece_y) then
piece_x = piece_x + 1
end
end
end
end
end
end
applet:send(clear_screen .. cursor_home .. "Game Over! Lines: " .. score .. "\r\n" .. cursor_show)
end
-- works as a TCP applet
core.register_service("trisdemo", "tcp", handler)
-- may also work on the CLI but requires an unbuffered handler
core.register_cli({"trisdemo"}, "Play a simple falling pieces game", handler)

View file

@ -104,26 +104,6 @@ struct acl_cond *build_acl_cond(const char *file, int line, struct list *known_a
*/
enum acl_test_res acl_exec_cond(struct acl_cond *cond, struct proxy *px, struct session *sess, struct stream *strm, unsigned int opt);
/* helper that combines acl_exec_cond() and acl_pass(), and also takes into
* account cond->pol in order to return 1 if the cond should pass, and
* 0 otherwise.
* <cond> may be NULL, in which case 1 is returned as the cond cannot fail
*/
static inline int acl_match_cond(struct acl_cond *cond, struct proxy *px, struct session *sess, struct stream *strm, unsigned int opt)
{
int ret;
if (!cond)
return 1;
ret = acl_pass(acl_exec_cond(cond, px, sess, strm, opt));
if (cond->pol == ACL_COND_UNLESS)
ret = !ret;
return ret;
}
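A hedged usage sketch (the rule pointer and the SMP_OPT_* direction flags are
assumed from the caller's context):

    if (acl_match_cond(rule->cond, px, sess, strm, SMP_OPT_DIR_REQ | SMP_OPT_FINAL)) {
            /* no condition, or condition satisfied (with "unless"
             * polarity already applied): perform the action */
    }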
/* Returns a pointer to the first ACL conflicting with usage at place <where>
* which is one of the SMP_VAL_* bits indicating a check place, or NULL if
* no conflict is found. Only full conflicts are detected (ACL is not usable).

View file

@ -1,104 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#ifndef _ACME_T_H_
#define _ACME_T_H_
#include <haproxy/istbuf.h>
#include <haproxy/openssl-compat.h>
#define ACME_RETRY 5
/* acme section configuration */
struct acme_cfg {
char *filename; /* config filename */
int linenum; /* config linenum */
char *name; /* section name */
int reuse_key; /* do we need to renew the private key */
char *directory; /* directory URL */
char *map; /* storage for tokens + thumbprint */
struct {
char *contact; /* email associated to account */
char *file; /* account key filename */
EVP_PKEY *pkey; /* account PKEY */
char *thumbprint; /* account PKEY JWS thumbprint */
} account;
struct {
int type; /* EVP_PKEY_EC or EVP_PKEY_RSA */
int bits; /* bits for RSA */
int curves; /* NID of curves */
} key;
char *challenge; /* HTTP-01, DNS-01, etc */
char *vars; /* variables put in the dpapi sink */
char *provider; /* DNS provider put in the dpapi sink */
struct acme_cfg *next;
};
enum acme_st {
ACME_RESOURCES = 0,
ACME_NEWNONCE,
ACME_CHKACCOUNT,
ACME_NEWACCOUNT,
ACME_NEWORDER,
ACME_AUTH,
ACME_CHALLENGE,
ACME_CHKCHALLENGE,
ACME_FINALIZE,
ACME_CHKORDER,
ACME_CERTIFICATE,
ACME_END
};
enum http_st {
ACME_HTTP_REQ,
ACME_HTTP_RES,
};
struct acme_auth {
struct ist dns; /* dns entry */
struct ist auth; /* auth URI */
struct ist chall; /* challenge URI */
struct ist token; /* token */
int ready; /* is the challenge ready ? */
void *next;
};
/* acme task context */
struct acme_ctx {
enum acme_st state;
enum http_st http_state;
int retries;
int retryafter;
struct httpclient *hc;
struct acme_cfg *cfg;
struct ckch_store *store;
struct {
struct ist newNonce;
struct ist newAccount;
struct ist newOrder;
} resources;
struct ist nonce;
struct ist kid;
struct ist order;
struct acme_auth *auths;
struct acme_auth *next_auth;
X509_REQ *req;
struct ist finalize;
struct ist certificate;
struct task *task;
struct ebmb_node node;
char name[VAR_ARRAY];
};
#define ACME_EV_SCHED (1ULL << 0) /* scheduling wakeup */
#define ACME_EV_NEW (1ULL << 1) /* new task */
#define ACME_EV_TASK (1ULL << 2) /* Task handler */
#define ACME_EV_REQ (1ULL << 3) /* HTTP Request */
#define ACME_EV_RES (1ULL << 4) /* HTTP Response */
#define ACME_VERB_CLEAN 1
#define ACME_VERB_MINIMAL 2
#define ACME_VERB_SIMPLE 3
#define ACME_VERB_ADVANCED 4
#define ACME_VERB_COMPLETE 5
#endif

View file

@ -1,12 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#ifndef _ACME_H_
#define _ACME_H_
#include <haproxy/ssl_ckch-t.h>
int ckch_conf_acme_init(void *value, char *buf, struct ckch_store *s, int cli, const char *filename, int linenum, char **err);
EVP_PKEY *acme_gen_tmp_pkey();
X509 *acme_gen_tmp_x509();
#endif

View file

@ -66,8 +66,7 @@ enum act_parse_ret {
enum act_opt {
ACT_OPT_NONE = 0x00000000, /* no flag */
ACT_OPT_FINAL = 0x00000001, /* last call, cannot yield */
ACT_OPT_FINAL_EARLY = 0x00000002, /* set in addition to ACT_OPT_FINAL if last call occurs earlier than normal due to unexpected IO/error */
ACT_OPT_FIRST = 0x00000004, /* first call for this action */
ACT_OPT_FIRST = 0x00000002, /* first call for this action */
};
/* Flags used to describe the action. */

View file

@ -76,12 +76,12 @@ struct memprof_stats {
const void *caller;
enum memprof_method method;
/* 4-7 bytes hole here */
unsigned long long locked_calls;
unsigned long long alloc_calls;
unsigned long long free_calls;
unsigned long long alloc_tot;
unsigned long long free_tot;
void *info; // for pools, ptr to the pool
void *pad; // pad to 64
};
#endif
@ -125,8 +125,8 @@ struct activity {
unsigned int ctr2; // general purpose debug counter
#endif
char __pad[0]; // unused except to check remaining room
char __end[0] THREAD_ALIGNED();
} THREAD_ALIGNED();
char __end[0] __attribute__((aligned(64))); // align size to 64.
};
/* 256 entries for callers * callees should be highly sufficient (~45 seen usually) */
#define SCHED_ACT_HASH_BITS 8
@ -143,10 +143,7 @@ struct sched_activity {
uint64_t calls;
uint64_t cpu_time;
uint64_t lat_time;
uint64_t lkw_time; /* lock waiting time */
uint64_t lkd_time; /* locked time */
uint64_t mem_time; /* memory ops wait time */
} THREAD_ALIGNED();
};
#endif /* _HAPROXY_ACTIVITY_T_H */

View file

@ -47,7 +47,7 @@
#define APPCTX_FL_ERROR 0x00000080
#define APPCTX_FL_SHUTDOWN 0x00000100 /* applet was shut down (->release() called if any). No more data exchange with SCs */
#define APPCTX_FL_WANT_DIE 0x00000200 /* applet was running and requested to die */
/* unused: 0x00000400 */
#define APPCTX_FL_INOUT_BUFS 0x00000400 /* applet uses its own buffers */
#define APPCTX_FL_FASTFWD 0x00000800 /* zero-copy forwarding is in-use, don't fill the outbuf */
#define APPCTX_FL_IN_MAYALLOC 0x00001000 /* applet may try again to allocate its inbuf */
#define APPCTX_FL_OUT_MAYALLOC 0x00002000 /* applet may try again to allocate its outbuf */
@ -73,22 +73,17 @@ static forceinline char *appctx_show_flags(char *buf, size_t len, const char *de
_(APPCTX_FL_OUTBLK_ALLOC, _(APPCTX_FL_OUTBLK_FULL,
_(APPCTX_FL_EOI, _(APPCTX_FL_EOS,
_(APPCTX_FL_ERR_PENDING, _(APPCTX_FL_ERROR,
_(APPCTX_FL_SHUTDOWN, _(APPCTX_FL_WANT_DIE,
_(APPCTX_FL_FASTFWD, _(APPCTX_FL_IN_MAYALLOC, _(APPCTX_FL_OUT_MAYALLOC)))))))))))));
_(APPCTX_FL_SHUTDOWN, _(APPCTX_FL_WANT_DIE, _(APPCTX_FL_INOUT_BUFS,
_(APPCTX_FL_FASTFWD, _(APPCTX_FL_IN_MAYALLOC, _(APPCTX_FL_OUT_MAYALLOC))))))))))))));
/* epilogue */
_(~0U);
return buf;
#undef _
}
#define APPLET_FL_NEW_API 0x00000001 /* Set if the applet is based on the new API (using applet's buffers) */
#define APPLET_FL_WARNED 0x00000002 /* Set when warning was already emitted about a legacy applet */
#define APPLET_FL_HTX 0x00000004 /* Set if the applet is using HTX buffers */
/* Applet descriptor */
struct applet {
enum obj_type obj_type; /* object type = OBJ_TYPE_APPLET */
unsigned int flags; /* APPLET_FL_* flags */
/* 3 unused bytes here */
char *name; /* applet's name to report in logs */
int (*init)(struct appctx *); /* callback to init resources, may be NULL.
@ -106,36 +101,29 @@ struct applet {
struct appctx {
enum obj_type obj_type; /* OBJ_TYPE_APPCTX */
/* 3 unused bytes here */
unsigned int st0; /* Main applet state. May be used by any applet */
unsigned int st1; /* Applet substate. May be used by any applet */
unsigned int st0; /* CLI state for stats, session state for peers */
unsigned int st1; /* prompt/payload (bitwise OR of APPCTX_CLI_ST1_*) for stats, session error for peers */
unsigned int flags; /* APPCTX_FL_* */
struct buffer inbuf;
struct buffer outbuf;
size_t to_forward;
struct buffer *chunk; /* used to store unfinished commands */
struct applet *applet; /* applet this context refers to */
struct session *sess; /* session for frontend applets (NULL for backend applets) */
struct sedesc *sedesc; /* stream endpoint descriptor the applet is attached to */
struct {
struct buffer *cmdline; /* used to store unfinished commands */
int severity_output; /* used within the cli_io_handler to format severity output of informational feedback */
int level; /* the level of CLI which can be lowered dynamically */
char payload_pat[8]; /* Payload pattern */
char *payload; /* Pointer on the payload. NULL if no payload */
uint32_t anon_key; /* the key to anonymise with the hash in cli */
/* XXX 4 unused bytes here */
int (*io_handler)(struct appctx *appctx); /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK */
void (*io_release)(struct appctx *appctx); /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK,
if the command is terminated or the session released */
} cli_ctx; /* context dedicated to the CLI applet */
struct act_rule *rule; /* rule associated with the applet. */
int (*io_handler)(struct appctx *appctx); /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK */
void (*io_release)(struct appctx *appctx); /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK,
if the command is terminated or the session released */
int cli_severity_output; /* used within the cli_io_handler to format severity output of informational feedback */
int cli_level; /* the level of CLI which can be lowered dynamically */
char cli_payload_pat[8]; /* Payload pattern */
uint32_t cli_anon_key; /* the key to anonymise with the hash in cli */
struct buffer_wait buffer_wait; /* position in the list of objects waiting for a buffer */
struct task *t; /* task associated to the applet */
struct freq_ctr call_rate; /* appctx call rate */
/* XXX 4 unused bytes here */
struct mt_list wait_entry; /* entry in a list of waiters for an event (e.g. ring events) */
/* The pointer seen by application code is appctx->svcctx. In 2.7 the

View file

@ -58,16 +58,10 @@ size_t appctx_raw_snd_buf(struct appctx *appctx, struct buffer *buf, size_t coun
size_t appctx_snd_buf(struct stconn *sc, struct buffer *buf, size_t count, unsigned int flags);
int appctx_fastfwd(struct stconn *sc, unsigned int count, unsigned int flags);
ssize_t applet_append_line(void *ctx, struct ist v1, struct ist v2, size_t ofs, size_t len, char delim);
ssize_t applet_append_line(void *ctx, struct ist v1, struct ist v2, size_t ofs, size_t len);
static forceinline void applet_fl_set(struct appctx *appctx, uint on);
static forceinline void applet_fl_clr(struct appctx *appctx, uint off);
static forceinline uint appctx_app_test(const struct appctx *appctx, uint test)
{
return (appctx->applet->flags & test);
}
static inline struct appctx *appctx_new_here(struct applet *applet, struct sedesc *sedesc)
{
return appctx_new_on(applet, sedesc, tid);
@ -122,7 +116,7 @@ static inline int appctx_init(struct appctx *appctx)
* the appctx will be fully initialized. The session and the stream will
* eventually be created. The affinity must be set now !
*/
BUG_ON(appctx->t->tid != -1 && appctx->t->tid != tid);
BUG_ON(appctx->t->tid != tid);
task_set_thread(appctx->t, tid);
if (appctx->applet->init)
@ -152,9 +146,6 @@ static inline void __appctx_free(struct appctx *appctx)
#define appctx_wakeup(ctx) \
_task_wakeup((ctx)->t, TASK_WOKEN_OTHER, MK_CALLER(WAKEUP_TYPE_APPCTX_WAKEUP, 0, 0))
#define appctx_schedule(ctx, w) \
_task_schedule((ctx)->t, w, MK_CALLER(WAKEUP_TYPE_TASK_SCHEDULE, 0, 0))
/* returns the stream connector the appctx is attached to, via the sedesc */
static inline struct stconn *appctx_sc(const struct appctx *appctx)
{
@ -288,156 +279,13 @@ static inline void applet_expect_data(struct appctx *appctx)
se_fl_clr(appctx->sedesc, SE_FL_EXP_NO_DATA);
}
/* Returns the buffer containing data pushed to the applet by the stream. For
* applets using their own buffers it is the appctx input buffer. For legacy
* applets, it is the output channel buffer.
*/
static inline struct buffer *applet_get_inbuf(struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (applet_fl_test(appctx, APPCTX_FL_INBLK_ALLOC) || !appctx_get_buf(appctx, &appctx->inbuf))
return NULL;
return &appctx->inbuf;
}
else
return sc_ob(appctx_sc(appctx));
}
/* Returns the buffer containing data pushed by the applet to the stream. For
* applets using their own buffers it is the appctx output buffer. For legacy
* applets, it is the input channel buffer.
*/
static inline struct buffer *applet_get_outbuf(struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (applet_fl_test(appctx, APPCTX_FL_OUTBLK_ALLOC|APPCTX_FL_OUTBLK_FULL) ||
!appctx_get_buf(appctx, &appctx->outbuf))
return NULL;
return &appctx->outbuf;
}
else
return sc_ib(appctx_sc(appctx));
}
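As a hedged illustration, a CLI-style I/O callback built on these helpers
could look like this (following the cli_ctx.io_handler convention of returning
1 when done and 0 to be called again; applet_set_eoi() is assumed available as
in the rest of this file):

    static int mycmd_io_handler(struct appctx *appctx)
    {
            if (!applet_get_outbuf(appctx))
                    return 0; /* no buffer yet, we will be called again */

            if (applet_putstr(appctx, "hello\n") == -1)
                    return 0; /* output full, the FULL flag was set for us */

            applet_set_eoi(appctx); /* no more data to produce */
            return 1;
    }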
/* Returns the amount of HTX data in the input buffer (see applet_get_inbuf) */
static inline size_t applet_htx_input_data(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
return htx_used_space(htxbuf(&appctx->inbuf));
else
return co_data(sc_oc(appctx_sc(appctx)));
}
/* Returns the amount of data in the input buffer (see applet_get_inbuf) */
static inline size_t applet_input_data(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_HTX))
return applet_htx_input_data(appctx);
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
return b_data(&appctx->inbuf);
else
return co_data(sc_oc(appctx_sc(appctx)));
}
/* Returns the amount of HTX data in the output buffer (see applet_get_outbuf) */
static inline size_t applet_htx_output_data(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
return htx_used_space(htxbuf(&appctx->outbuf));
else
return ci_data(sc_ic(appctx_sc(appctx)));
}
/* Returns the amount of data in the output buffer (see applet_get_outbuf) */
static inline size_t applet_output_data(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_HTX))
return applet_htx_output_data(appctx);
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
return b_data(&appctx->outbuf);
else
return ci_data(sc_ic(appctx_sc(appctx)));
}
/* Skips <len> bytes from the input buffer (see applet_get_inbuf).
*
* This is useful when data have been read directly from the buffer. It is
* illegal to call this function with <len> causing a wrapping at the end of the
* buffer. It's the caller's responsibility to ensure that <len> is never larger
* than available output data.
*
* This function is not HTX aware.
*/
static inline void applet_skip_input(struct appctx *appctx, size_t len)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
b_del(&appctx->inbuf, len);
applet_fl_clr(appctx, APPCTX_FL_INBLK_FULL);
}
else
co_skip(sc_oc(appctx_sc(appctx)), len);
}
/* Removes all bytes from the input buffer (see applet_get_inbuf).
*/
static inline void applet_reset_input(struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
b_reset(&appctx->inbuf);
applet_fl_clr(appctx, APPCTX_FL_INBLK_FULL);
}
else
co_skip(sc_oc(appctx_sc(appctx)), co_data(sc_oc(appctx_sc(appctx))));
}
/* Returns the amount of space available at the HTX output buffer (see applet_get_outbuf).
*/
static inline size_t applet_htx_output_room(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
return htx_free_data_space(htxbuf(&appctx->outbuf));
else
return channel_recv_max(sc_ic(appctx_sc(appctx)));
}
/* Returns the amount of space available at the output buffer (see applet_get_outbuf).
*/
static inline size_t applet_output_room(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_HTX))
return applet_htx_output_room(appctx);
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
return b_room(&appctx->outbuf);
else
return channel_recv_max(sc_ic(appctx_sc(appctx)));
}
/* Indicates that the applet has more data to deliver and needs more room in
* the output buffer to do so (see applet_get_outbuf).
*
* For applets using their own buffers, <room_needed> is not used and only
* <appctx> flags are updated. For legacy applets, the amount of free space
* required must be specified. In this last case, it is the caller's
* responsibility to ensure <room_needed> is valid.
*/
static inline void applet_need_room(struct appctx *appctx, size_t room_needed)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
applet_have_more_data(appctx);
else
sc_need_room(appctx_sc(appctx), room_needed);
}
/* Should only be used via wrappers applet_putchk() / applet_putchk_stress(). */
static inline int _applet_putchk(struct appctx *appctx, struct buffer *chunk,
int stress)
{
int ret;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (unlikely(stress) ?
b_data(&appctx->outbuf) :
b_data(chunk) > b_room(&appctx->outbuf)) {
@ -467,10 +315,9 @@ static inline int _applet_putchk(struct appctx *appctx, struct buffer *chunk,
return ret;
}
/* writes chunk <chunk> into the applet output buffer (see applet_get_outbuf).
*
* Returns the number of written bytes on success or -1 on error (lack of space,
* shutdown, invalid call...)
/* writes chunk <chunk> into the input channel of the stream attached to this
* appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full error.
* See ci_putchk() for the list of return codes.
*/
static inline int applet_putchk(struct appctx *appctx, struct buffer *chunk)
{
@ -483,16 +330,15 @@ static inline int applet_putchk_stress(struct appctx *appctx, struct buffer *chu
return _applet_putchk(appctx, chunk, 1);
}
/* writes <len> chars from <blk> into the applet output buffer (see applet_get_outbuf).
*
* Returns the number of written bytes on success or -1 on error (lack of space,
* shutdown, invalid call...)
/* writes <len> chars from <blk> into the input channel of the stream attached
* to this appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full
* error. See ci_putblk() for the list of return codes.
*/
static inline int applet_putblk(struct appctx *appctx, const char *blk, int len)
{
int ret;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (len > b_room(&appctx->outbuf)) {
applet_fl_set(appctx, APPCTX_FL_OUTBLK_FULL);
ret = -1;
@ -518,17 +364,16 @@ static inline int applet_putblk(struct appctx *appctx, const char *blk, int len)
return ret;
}
/* writes chars from <str> up to the trailing zero (excluded) into the applet
* output buffer (see applet_get_outbuf).
*
* Returns the number of written bytes on success or -1 on error (lack of space,
* shutdown, invalid call...)
/* writes chars from <str> up to the trailing zero (excluded) into the input
* channel of the stream attached to this appctx's endpoint, and marks the
* SC_FL_NEED_ROOM on a channel full error. See ci_putstr() for the list of
* return codes.
*/
static inline int applet_putstr(struct appctx *appctx, const char *str)
{
int ret;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
int len = strlen(str);
if (len > b_room(&appctx->outbuf)) {
@ -555,16 +400,15 @@ static inline int applet_putstr(struct appctx *appctx, const char *str)
return ret;
}
/* writes character <chr> into the applet's output buffer (see applet_get_outbuf).
*
* Returns the number of written bytes on success or -1 on error (lack of space,
* shutdown, invalid call...)
/* writes character <chr> into the input channel of the stream attached to this
* appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full error.
* See ci_putchr() for the list of return codes.
*/
static inline int applet_putchr(struct appctx *appctx, char chr)
{
int ret;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (b_full(&appctx->outbuf)) {
applet_fl_set(appctx, APPCTX_FL_OUTBLK_FULL);
ret = -1;
@ -591,283 +435,6 @@ static inline int applet_putchr(struct appctx *appctx, char chr)
return ret;
}
static inline int applet_may_get(const struct appctx *appctx, size_t len)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (len > b_data(&appctx->inbuf)) {
if (se_fl_test(appctx->sedesc, SE_FL_SHW))
return -1;
return 0;
}
}
else {
const struct stconn *sc = appctx_sc(appctx);
if ((sc->flags & SC_FL_SHUT_DONE) || len > co_data(sc_oc(sc))) {
if (sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
return -1;
return 0;
}
}
return 1;
}
/* Gets one char from the applet input buffer (see applet_get_inbuf),
*
* Return values :
* 1 : number of bytes read, equal to requested size.
* =0 : not enough data available. <c> is left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getchar(const struct appctx *appctx, char *c)
{
int ret;
ret = applet_may_get(appctx, 1);
if (ret <= 0)
return ret;
*c = ((appctx_app_test(appctx, APPLET_FL_NEW_API))
? *(b_head(&appctx->inbuf))
: *(co_head(sc_oc(appctx_sc(appctx)))));
return 1;
}
/* Copies one full block of data from the applet input buffer (see
* applet_get_inbuf).
*
* <len> bytes are copied, starting at the offset <offset>.
*
* Return values :
* >0 : number of bytes read, equal to requested size.
* =0 : not enough data available. <blk> is left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getblk(const struct appctx *appctx, char *blk, int len, int offset)
{
const struct buffer *buf;
int ret;
ret = applet_may_get(appctx, len+offset);
if (ret <= 0)
return ret;
buf = ((appctx_app_test(appctx, APPLET_FL_NEW_API))
? &appctx->inbuf
: sc_ob(appctx_sc(appctx)));
return b_getblk(buf, blk, len, offset);
}
/* Gets one text block representing a word from the applet input buffer (see
 * applet_get_inbuf).
 *
 * We wait for the separator as long as more data may still be received and
 * the destination is not full. Otherwise, the string may be returned as is,
 * without the separator.
*
* Return values :
* >0 : number of bytes read. Includes the separator if present before len or end.
 * =0 : no separator found before end. <str> is left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getword(const struct appctx *appctx, char *str, int len, char sep)
{
const struct buffer *buf;
char *p;
size_t input, max = len;
int ret = 0;
ret = applet_may_get(appctx, 1);
if (ret <= 0)
goto out;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
buf = &appctx->inbuf;
input = b_data(buf);
}
else {
struct stconn *sc = appctx_sc(appctx);
buf = sc_ob(sc);
input = co_data(sc_oc(sc));
}
if (max > input) {
max = input;
str[max-1] = 0;
}
p = b_head(buf);
ret = 0;
while (max) {
*str++ = *p;
ret++;
max--;
if (*p == sep)
goto out;
p = b_next(buf, p);
}
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (ret < len && (ret < input || b_room(buf)) &&
!se_fl_test(appctx->sedesc, SE_FL_SHW))
ret = 0;
}
else {
struct stconn *sc = appctx_sc(appctx);
if (ret < len && (ret < input || channel_may_recv(sc_oc(sc))) &&
!(sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)))
ret = 0;
}
out:
if (max)
*str = 0;
return ret;
}
/* Gets one text block representing a line from the applet input buffer (see
 * applet_get_inbuf).
 *
 * We wait for the '\n' as long as more data may still be received and the
 * destination is not full. Otherwise, the string may be returned as is,
 * without the '\n'.
*
* Return values :
* >0 : number of bytes read. Includes the \n if present before len or end.
 * =0 : no '\n' found before end. <str> is left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getline(const struct appctx *appctx, char *str, int len)
{
return applet_getword(appctx, str, len, '\n');
}
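/* Illustrative usage sketch (hypothetical handler): reading input line by
 * line. The name applet_skip_input() comes from the comments above; its
 * exact signature is assumed here.
 */
static void example_line_io_handler(struct appctx *appctx)
{
	char line[128];
	int ret;

	ret = applet_getline(appctx, line, sizeof(line));
	if (ret <= 0)
		return; /* <0: input shut, =0: wait for a complete line */

	/* ... process <ret> bytes, including the trailing '\n' if any ... */
	applet_skip_input(appctx, ret); /* assumed signature: consume <ret> bytes */
}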
/* Gets one or two blocks of data at once from the applet input buffer (see applet_get_inbuf).
*
* Data are not copied.
*
* Return values :
* >0 : number of blocks filled (1 or 2). blk1 is always filled before blk2.
* =0 : not enough data available. <blk*> are left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getblk_nc(const struct appctx *appctx, const char **blk1, size_t *len1, const char **blk2, size_t *len2)
{
const struct buffer *buf;
size_t max;
int ret;
ret = applet_may_get(appctx, 1);
if (ret <= 0)
return ret;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
buf = &appctx->inbuf;
max = b_data(buf);
}
else {
struct stconn *sc = appctx_sc(appctx);
buf = sc_ob(sc);
max = co_data(sc_oc(sc));
}
return b_getblk_nc(buf, blk1, len1, blk2, len2, 0, max);
}
/* Gets one or two blocks of text representing a word from the applet input
 * buffer (see applet_get_inbuf).
 *
 * Data are not copied. We wait for the separator as long as more data may
 * still be received and the input buffer is not full. Otherwise, the blocks
 * may be returned as is, without the separator.
*
* Return values :
 * >0 : number of blocks filled (1 or 2). The separator, when found, ends the last filled block.
 * =0 : no separator found before end. <blk*> are left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getword_nc(const struct appctx *appctx, const char **blk1, size_t *len1, const char **blk2, size_t *len2, char sep)
{
int ret;
size_t l;
ret = applet_getblk_nc(appctx, blk1, len1, blk2, len2);
if (unlikely(ret <= 0))
return ret;
for (l = 0; l < *len1 && (*blk1)[l] != sep; l++);
if (l < *len1 && (*blk1)[l] == sep) {
*len1 = l + 1;
return 1;
}
if (ret >= 2) {
for (l = 0; l < *len2 && (*blk2)[l] != sep; l++);
if (l < *len2 && (*blk2)[l] == sep) {
*len2 = l + 1;
return 2;
}
}
	/* If we have found no separator and the buffer is full or the SC is shut, then
* the resulting string is made of the concatenation of the pending
* blocks (1 or 2).
*/
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (b_full(&appctx->inbuf) || se_fl_test(appctx->sedesc, SE_FL_SHW))
return ret;
}
else {
struct stconn *sc = appctx_sc(appctx);
if (!channel_may_recv(sc_oc(sc)) || sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
return ret;
}
	/* No separator yet and not shut yet */
return 0;
}
/* Gets one or two blocks of text representing a line from the applet input
 * buffer (see applet_get_inbuf).
 *
 * Data are not copied. We wait for the '\n' as long as more data may still
 * be received and the input buffer is not full. Otherwise, the blocks may be
 * returned as is, without the '\n'.
*
* Return values :
 * >0 : number of blocks filled (1 or 2). The '\n', when found, ends the last filled block.
 * =0 : no '\n' found before end. <blk*> are left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getline_nc(const struct appctx *appctx, const char **blk1, size_t *len1, const char **blk2, size_t *len2)
{
return applet_getword_nc(appctx, blk1, len1, blk2, len2, '\n');
}
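/* Illustrative usage sketch (hypothetical handler): zero-copy line parsing
 * with the _nc variants. The line may span two blocks when it wraps inside
 * the buffer; applet_skip_input() is assumed as above.
 */
static void example_line_nc_io_handler(struct appctx *appctx)
{
	const char *blk1, *blk2;
	size_t len1, len2;
	int ret;

	ret = applet_getline_nc(appctx, &blk1, &len1, &blk2, &len2);
	if (ret <= 0)
		return; /* <0: input shut, =0: wait for a complete line */

	/* ... parse blk1[0..len1-1], then blk2[0..len2-1] when ret == 2 ... */
	applet_skip_input(appctx, len1 + (ret == 2 ? len2 : 0)); /* assumed */
}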
#endif /* _HAPROXY_APPLET_H */
/*

View file

@ -26,9 +26,9 @@
#include <haproxy/compiler.h>
/* A few notes for the macros and functions here:
* - this file is painful to edit, most operations exist in 2 variants,
* no-thread, and threads (with gcc>=4.7). Be careful when modifying
* it not to break any of them.
* - this file is painful to edit, most operations exist in 3 variants,
* no-thread, threads with gcc<4.7, threads with gcc>=4.7. Be careful when
* modifying it not to break any of them.
*
 * - macros named HA_ATOMIC_* are for use in the general case; they contain the
* required memory barriers to guarantee sequential consistency
@ -191,10 +191,122 @@
/* Threads are ENABLED, all atomic ops are made thread-safe. By extension they
* can also be used for inter-process synchronization but one must verify that
* the code still builds with threads disabled. Code below requires C11 atomics
* as present in gcc >= 4.7 or clang.
* the code still builds with threads disabled.
*/
#if defined(__GNUC__) && (__GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ < 7) && !defined(__clang__)
/* gcc < 4.7 */
#define HA_ATOMIC_LOAD(val) \
({ \
typeof((val)) __val_load = (val); \
typeof(*(val)) __ret_val = \
({ __sync_synchronize(); *(volatile typeof(__val_load))__val_load; }); \
__sync_synchronize(); \
__ret_val; \
})
#define HA_ATOMIC_STORE(val, new) \
({ \
typeof((val)) __val_store = (val); \
typeof(*(val)) __old_store; \
typeof((new)) __new_store = (new); \
do { __old_store = *__val_store; \
} while (!__sync_bool_compare_and_swap(__val_store, __old_store, __new_store) && __ha_cpu_relax()); \
})
#define HA_ATOMIC_XCHG(val, new) \
({ \
typeof((val)) __val_xchg = (val); \
typeof(*(val)) __old_xchg; \
typeof((new)) __new_xchg = (new); \
do { __old_xchg = *__val_xchg; \
} while (!__sync_bool_compare_and_swap(__val_xchg, __old_xchg, __new_xchg) && __ha_cpu_relax()); \
__old_xchg; \
})
#define HA_ATOMIC_AND(val, flags) do { __sync_and_and_fetch(val, flags); } while (0)
#define HA_ATOMIC_OR(val, flags) do { __sync_or_and_fetch(val, flags); } while (0)
#define HA_ATOMIC_ADD(val, i) do { __sync_add_and_fetch(val, i); } while (0)
#define HA_ATOMIC_SUB(val, i) do { __sync_sub_and_fetch(val, i); } while (0)
#define HA_ATOMIC_INC(val) do { __sync_add_and_fetch(val, 1); } while (0)
#define HA_ATOMIC_DEC(val) do { __sync_sub_and_fetch(val, 1); } while (0)
#define HA_ATOMIC_AND_FETCH(val, flags) __sync_and_and_fetch(val, flags)
#define HA_ATOMIC_OR_FETCH(val, flags) __sync_or_and_fetch(val, flags)
#define HA_ATOMIC_ADD_FETCH(val, i) __sync_add_and_fetch(val, i)
#define HA_ATOMIC_SUB_FETCH(val, i) __sync_sub_and_fetch(val, i)
#define HA_ATOMIC_FETCH_AND(val, flags) __sync_fetch_and_and(val, flags)
#define HA_ATOMIC_FETCH_OR(val, flags) __sync_fetch_and_or(val, flags)
#define HA_ATOMIC_FETCH_ADD(val, i) __sync_fetch_and_add(val, i)
#define HA_ATOMIC_FETCH_SUB(val, i) __sync_fetch_and_sub(val, i)
#define HA_ATOMIC_BTS(val, bit) \
({ \
typeof(*(val)) __b_bts = (1UL << (bit)); \
__sync_fetch_and_or((val), __b_bts) & __b_bts; \
})
#define HA_ATOMIC_BTR(val, bit) \
({ \
typeof(*(val)) __b_btr = (1UL << (bit)); \
__sync_fetch_and_and((val), ~__b_btr) & __b_btr; \
})
/* the CAS is a bit complicated. The older API doesn't support returning the
* value and the swap's result at the same time. So here we take what looks
* like the safest route, consisting in using the boolean version guaranteeing
* that the operation was performed or not, and we snoop a previous value. If
* the compare succeeds, we return. If it fails, we return the previous value,
* but only if it differs from the expected one. If it's the same it's a race
* thus we try again to avoid confusing a possibly sensitive caller.
*/
#define HA_ATOMIC_CAS(val, old, new) \
({ \
typeof((val)) __val_cas = (val); \
typeof((old)) __oldp_cas = (old); \
typeof(*(old)) __oldv_cas; \
typeof((new)) __new_cas = (new); \
int __ret_cas; \
do { \
__oldv_cas = *__val_cas; \
__ret_cas = __sync_bool_compare_and_swap(__val_cas, *__oldp_cas, __new_cas); \
} while (!__ret_cas && *__oldp_cas == __oldv_cas && __ha_cpu_relax()); \
if (!__ret_cas) \
*__oldp_cas = __oldv_cas; \
__ret_cas; \
})
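/* Illustrative usage sketch: the canonical retry loop built on top of
 * HA_ATOMIC_CAS(), here adding <v> to a counter only while it stays below
 * <limit>. On failure the CAS refreshes <curr> with the observed value.
 * (Hypothetical function; __ha_cpu_relax() is defined later in this file.)
 */
static inline int example_add_below(unsigned int *ctr, unsigned int v, unsigned int limit)
{
	unsigned int curr = HA_ATOMIC_LOAD(ctr);

	do {
		if (curr + v > limit)
			return 0; /* would exceed the limit, give up */
	} while (!HA_ATOMIC_CAS(ctr, &curr, curr + v) && __ha_cpu_relax());
	return 1;
}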
/* warning, n is a pointer to the double value for dwcas */
#define HA_ATOMIC_DWCAS(val, o, n) __ha_cas_dw(val, o, n)
#define HA_ATOMIC_UPDATE_MAX(val, new) \
({ \
typeof(val) __val = (val); \
typeof(*(val)) __old_max = *__val; \
typeof(*(val)) __new_max = (new); \
\
while (__old_max < __new_max && \
!HA_ATOMIC_CAS(__val, &__old_max, __new_max) && __ha_cpu_relax()); \
*__val; \
})
#define HA_ATOMIC_UPDATE_MIN(val, new) \
({ \
typeof(val) __val = (val); \
typeof(*(val)) __old_min = *__val; \
typeof(*(val)) __new_min = (new); \
\
while (__old_min > __new_min && \
!HA_ATOMIC_CAS(__val, &__old_min, __new_min) && __ha_cpu_relax()); \
*__val; \
})
#else /* gcc */
/* gcc >= 4.7 or clang */
#define HA_ATOMIC_STORE(val, new) __atomic_store_n(val, new, __ATOMIC_RELEASE)
#define HA_ATOMIC_LOAD(val) __atomic_load_n(val, __ATOMIC_ACQUIRE)
#define HA_ATOMIC_XCHG(val, new) __atomic_exchange_n(val, new, __ATOMIC_ACQ_REL)
@ -408,6 +520,8 @@
/* warning, n is a pointer to the double value for dwcas */
#define _HA_ATOMIC_DWCAS(val, o, n) __ha_cas_dw(val, o, n)
#endif /* gcc >= 4.7 */
/* Here come a few architecture-specific double-word CAS and barrier
* implementations.
*/
@ -685,13 +799,12 @@ static __inline int __ha_cas_dw(void *target, void *compare, void *set)
#else /* unknown / unhandled architecture, fall back to generic barriers */
#define __ha_barrier_atomic_load() __atomic_thread_fence(__ATOMIC_ACQUIRE)
#define __ha_barrier_atomic_store() __atomic_thread_fence(__ATOMIC_RELEASE)
#define __ha_barrier_atomic_full() __atomic_thread_fence(__ATOMIC_SEQ_CST)
#define __ha_barrier_load() __atomic_thread_fence(__ATOMIC_ACQUIRE)
#define __ha_barrier_store() __atomic_thread_fence(__ATOMIC_RELEASE)
#define __ha_barrier_full() __atomic_thread_fence(__ATOMIC_SEQ_CST)
#define __ha_barrier_atomic_load __sync_synchronize
#define __ha_barrier_atomic_store __sync_synchronize
#define __ha_barrier_atomic_full __sync_synchronize
#define __ha_barrier_load __sync_synchronize
#define __ha_barrier_store __sync_synchronize
#define __ha_barrier_full __sync_synchronize
/* Note: there is no generic DWCAS */
/* short-lived CPU relaxation */

View file

@ -144,12 +144,6 @@
*/
#define BE_WEIGHT_SCALE 16
/* LB parameters for all algorithms, with one instance per thread-group */
struct lbprm_per_tgrp {
union {
struct lb_fwrr_per_tgrp fwrr;
};
};
/* LB parameters for all algorithms */
struct lbprm {
union { /* LB parameters depending on the algo type */
@ -168,15 +162,12 @@ struct lbprm {
int wmult; /* ratio between user weight and effective weight */
int wdiv; /* ratio between effective weight and user weight */
int hash_balance_factor; /* load balancing factor * 100, 0 if disabled */
unsigned int lb_free_list_nb; /* Number of elements in the free list */
struct sample_expr *expr; /* sample expression for "balance (log-)hash" */
char *arg_str; /* name of the URL parameter/header/cookie used for hashing */
int arg_len; /* strlen(arg_str), computed only once */
int arg_opt1; /* extra option 1 for the LB algo (algo-specific) */
int arg_opt2; /* extra option 2 for the LB algo (algo-specific) */
int arg_opt3; /* extra option 3 for the LB algo (algo-specific) */
uint64_t lb_seq; /* sequence number for algos who need it */
struct mt_list lb_free_list; /* LB tree elements available */
__decl_thread(HA_RWLOCK_T lock);
struct server *fbck; /* first backup server when !PR_O_USE_ALL_BK, or NULL */
@ -189,9 +180,6 @@ struct lbprm {
void (*set_server_status_down)(struct server *); /* to be called after status changes to DOWN // srvlock */
void (*server_take_conn)(struct server *); /* to be called when connection is assigned */
void (*server_drop_conn)(struct server *); /* to be called when connection is dropped */
void (*server_requeue)(struct server *); /* function used to place the server where it must be */
void (*proxy_deinit)(struct proxy *); /* to be called when we're destroying the proxy */
void (*server_deinit)(struct server *); /* to be called when we're destroying the server */
};
#endif /* _HAPROXY_BACKEND_T_H */

View file

@ -45,18 +45,6 @@ int assign_server_and_queue(struct stream *s);
int alloc_bind_address(struct sockaddr_storage **ss,
struct server *srv, struct proxy *be,
struct stream *s);
int be_reuse_mode(const struct proxy *be, const struct server *srv);
int64_t be_calculate_conn_hash(struct server *srv, struct stream *strm,
struct session *sess,
struct sockaddr_storage *src,
struct sockaddr_storage *dst,
struct ist name);
int be_reuse_connection(int64_t hash, struct session *sess,
struct proxy *be, struct server *srv,
struct stconn *sc, enum obj_type *target, int not_first_req);
int srv_redispatch_connect(struct stream *t);
void back_try_conn_req(struct stream *s);
void back_handle_st_req(struct stream *s);
@ -85,21 +73,10 @@ static inline int be_usable_srv(struct proxy *be)
return be->srv_bck;
}
/* Returns true if <be> backend can be used as target to a switching rules. */
static inline int be_is_eligible(const struct proxy *be)
{
/* A disabled or unpublished backend cannot be selected for traffic.
* Note that STOPPED state is ignored as there is a risk of breaking
* requests during soft-stop.
*/
return !(be->flags & (PR_FL_DISABLED|PR_FL_BE_UNPUBLISHED));
}
/* set the time of last session on the backend */
static inline void be_set_sess_last(struct proxy *be)
{
if (be->be_counters.shared.tg)
HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
be->be_counters.last_sess = ns_to_sec(now_ns);
}
/* This function returns non-zero if the designated server will be
@ -179,12 +156,6 @@ void set_backend_down(struct proxy *be);
unsigned int gen_hash(const struct proxy* px, const char* key, unsigned long len);
/* Returns true if connection reuse is supported by <be> backend. */
static inline int be_supports_conn_reuse(const struct proxy *be)
{
return be->mode == PR_MODE_HTTP || be->mode == PR_MODE_SPOP;
}
#endif /* _HAPROXY_BACKEND_H */
/*

View file

@ -40,23 +40,6 @@
#define DPRINTF(x...)
#endif
/* Let's make DEBUG_STRESS equal to zero if not set or not valid, or to
* 1 if set. This way it is always set and should be easy to use in "if ()"
* statements without requiring ifdefs, while remaining compatible with
* "#if DEBUG_STRESS > 0". We also force DEBUG_STRICT and DEBUG_STRICT_ACTION
* when stressed.
*/
#if !defined(DEBUG_STRESS)
# define DEBUG_STRESS 0
#elif DEBUG_STRESS != 0
# undef DEBUG_STRESS
# define DEBUG_STRESS 1 // make sure comparison >0 always works
# undef DEBUG_STRICT
# define DEBUG_STRICT 2 // enable BUG_ON
# undef DEBUG_STRICT_ACTION
# define DEBUG_STRICT_ACTION 3 // enable crash on match
#endif
#define DUMP_TRACE() do { extern void ha_backtrace_to_stderr(void); ha_backtrace_to_stderr(); } while (0)
/* First, let's try to handle some arch-specific crashing methods. We prefer
@ -85,7 +68,7 @@
#else // not x86
/* generic implementation, causes a segfault */
static inline __attribute((always_inline,noreturn,unused)) void ha_crash_now(void)
static inline __attribute((always_inline)) void ha_crash_now(void)
{
#if __GNUC_PREREQ__(5, 0)
#pragma GCC diagnostic push
@ -178,8 +161,6 @@ static __attribute__((noinline,noreturn,unused)) void abort_with_line(uint line)
* COUNT_IF() invocation requires a special section ("dbg_cnt") hence a modern
* linker.
*/
extern unsigned int debug_enable_counters;
#if !defined(USE_OBSOLETE_LINKER)
/* type of checks that can be verified. We cannot really distinguish between
@ -241,44 +222,30 @@ extern __attribute__((__weak__)) struct debug_count __stop_dbg_cnt HA_SECTION_S
_HA_ATOMIC_INC(&__dbg_cnt_##_line.count); \
} while (0)
/* Matrix for DEBUG_COUNTERS:
* 0 : only BUG_ON() and CHECK_IF() are reported (super rare)
 * 1 : COUNT_GLITCH() are also reported (rare);
 *     COUNT_IF() are also reported only if debug_enable_counters was set
* 2 : COUNT_IF() are also reported unless debug_enable_counters was reset
*/
/* Core of the COUNT_IF() macro, checks the condition and counts one hit if
* true. It's only enabled at DEBUG_COUNTERS >= 1, and enabled by default if
* DEBUG_COUNTERS >= 2.
* true.
*/
# if defined(DEBUG_COUNTERS) && (DEBUG_COUNTERS >= 1)
# define _COUNT_IF(cond, file, line, ...) \
(unlikely(cond) ? ({ \
if (debug_enable_counters) \
__DBG_COUNT(cond, file, line, DBG_COUNT_IF, __VA_ARGS__); \
1; /* let's return the true condition */ \
#define _COUNT_IF(cond, file, line, ...) \
(unlikely(cond) ? ({ \
__DBG_COUNT(cond, file, line, DBG_COUNT_IF, __VA_ARGS__); \
1; /* let's return the true condition */ \
}) : 0)
# else
# define _COUNT_IF(cond, file, line, ...) DISGUISE(unlikely(cond) ? 1 : 0)
# endif
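/* Illustrative usage sketch (hypothetical condition): count rare but
 * tolerated anomalies without crashing, e.g.:
 *     COUNT_IF(conn_retries > max_retries, "excess connection retries");
 * Each hit increments a per-line counter emitted into the "dbg_cnt"
 * section mentioned above.
 */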
/* DEBUG_COUNTERS enables counting the number of glitches per line of code. The
/* DEBUG_GLITCHES enables counting the number of glitches per line of code. The
* condition is empty (nothing to write there), except maybe __VA_ARGS at the
* end.
*/
# if !defined(DEBUG_COUNTERS) || (DEBUG_COUNTERS == 0)
# if !defined(DEBUG_GLITCHES)
# define _COUNT_GLITCH(file, line, ...) do { } while (0)
# else
# define _COUNT_GLITCH(file, line, ...) do { \
__DBG_COUNT(, file, line, DBG_GLITCH, __VA_ARGS__); \
} while (0)
# endif
# endif
#else /* USE_OBSOLETE_LINKER not defined below */
# define __DBG_COUNT(cond, file, line, type, ...) do { } while (0)
# define _COUNT_IF(cond, file, line, ...) DISGUISE(unlikely(cond) ? 1 : 0)
# define _COUNT_IF(cond, file, line, ...) DISGUISE(cond)
# define _COUNT_GLITCH(file, line, ...) do { } while (0)
#endif
@ -424,20 +391,6 @@ extern __attribute__((__weak__)) struct debug_count __stop_dbg_cnt HA_SECTION_S
# define COUNT_IF_HOT(cond, ...) DISGUISE(cond)
#endif
/* turn BUG_ON_STRESS() into a real statement when DEBUG_STRESS is set,
* otherwise simply ignore it, at the risk of failing to notice if the
* condition would build at all. We don't really care if BUG_ON_STRESS
* doesn't always build, because it's meant to be used only in certain
* scenarios, possibly requiring certain combinations of options. We
* just want to be certain that the condition is not implemented at all
* when not used, so as to encourage developers to put a lot of them at
* zero cost.
*/
#if DEBUG_STRESS > 0
# define BUG_ON_STRESS(cond, ...) BUG_ON(cond, __VA_ARGS__)
#else
# define BUG_ON_STRESS(cond, ...) do { } while (0)
#endif
/* When not optimizing, clang won't remove that code, so only compile it in when optimizing */
#if defined(__GNUC__) && defined(__OPTIMIZE__)
@ -537,7 +490,7 @@ struct mem_stats {
size_t size;
struct ha_caller caller;
const void *extra; // extra info specific to this call (e.g. pool ptr)
} ALIGNED(sizeof(void*));
} __attribute__((aligned(sizeof(void*))));
#undef calloc
#define calloc(x,y) ({ \
@ -651,172 +604,9 @@ struct mem_stats {
_HA_ATOMIC_ADD(&_.size, __y); \
strdup(__x); \
})
#undef ha_aligned_alloc
#define ha_aligned_alloc(a,s) ({ \
size_t __a = (a); \
size_t __s = (s); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_MALLOC, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
_ha_aligned_alloc(__a, __s); \
})
#undef ha_aligned_zalloc
#define ha_aligned_zalloc(a,s) ({ \
size_t __a = (a); \
size_t __s = (s); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_CALLOC, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
_ha_aligned_zalloc(__a, __s); \
})
#undef ha_aligned_alloc_safe
#define ha_aligned_alloc_safe(a,s) ({ \
size_t __a = (a); \
size_t __s = (s); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_MALLOC, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
_ha_aligned_alloc_safe(__a, __s); \
})
#undef ha_aligned_zalloc_safe
#define ha_aligned_zalloc_safe(a,s) ({ \
size_t __a = (a); \
size_t __s = (s); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_CALLOC, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
_ha_aligned_zalloc_safe(__a, __s); \
})
// Since the type is known, the .extra field will contain its name
#undef ha_aligned_alloc_typed
#define ha_aligned_alloc_typed(cnt,type) ({ \
size_t __a = __alignof__(type); \
size_t __s = ((size_t)cnt) * sizeof(type); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_MALLOC, \
.func = __func__, \
}, \
.extra = #type, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
(type*)_ha_aligned_alloc(__a, __s); \
})
// Since the type is known, the .extra field will contain its name
#undef ha_aligned_zalloc_typed
#define ha_aligned_zalloc_typed(cnt,type) ({ \
size_t __a = __alignof__(type); \
size_t __s = ((size_t)cnt) * sizeof(type); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_CALLOC, \
.func = __func__, \
}, \
.extra = #type, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
(type*)_ha_aligned_zalloc_safe(__a, __s); \
})
#undef ha_aligned_free
#define ha_aligned_free(x) ({ \
typeof(x) __x = (x); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_FREE, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
if (__builtin_constant_p((x))) { \
HA_LINK_ERROR(call_to_ha_aligned_free_attempts_to_free_a_constant); \
} \
if (__x) \
_HA_ATOMIC_INC(&_.calls); \
_ha_aligned_free(__x); \
})
#undef ha_aligned_free_size
#define ha_aligned_free_size(p,s) ({ \
void *__p = (p); size_t __s = (s); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_FREE, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
if (__builtin_constant_p((p))) { \
HA_LINK_ERROR(call_to_ha_aligned_free_attempts_to_free_a_constant); \
} \
if (__p) { \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
} \
_ha_aligned_free(__p); \
})
#else // DEBUG_MEM_STATS
#define will_free(x, y) do { } while (0)
#define ha_aligned_alloc(a,s) _ha_aligned_alloc(a, s)
#define ha_aligned_zalloc(a,s) _ha_aligned_zalloc(a, s)
#define ha_aligned_alloc_safe(a,s) _ha_aligned_alloc_safe(a, s)
#define ha_aligned_zalloc_safe(a,s) _ha_aligned_zalloc_safe(a, s)
#define ha_aligned_alloc_typed(cnt,type) ((type*)_ha_aligned_alloc(__alignof__(type), ((size_t)cnt) * sizeof(type)))
#define ha_aligned_zalloc_typed(cnt,type) ((type*)_ha_aligned_zalloc(__alignof__(type), ((size_t)cnt) * sizeof(type)))
#define ha_aligned_free(p) _ha_aligned_free(p)
#define ha_aligned_free_size(p,s) _ha_aligned_free(p)
#endif /* DEBUG_MEM_STATS*/

46
include/haproxy/cbuf-t.h Normal file
View file

@ -0,0 +1,46 @@
/*
 * include/haproxy/cbuf-t.h
* This file contains definition for circular buffers.
*
* Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation, version 2.1
* exclusively.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _HAPROXY_CBUF_T_H
#define _HAPROXY_CBUF_T_H
#ifdef USE_QUIC
#ifndef USE_OPENSSL
#error "Must define USE_OPENSSL"
#endif
#endif
#include <stddef.h>
#include <haproxy/list-t.h>
extern struct pool_head *pool_head_cbuf;
struct cbuf {
/* buffer */
unsigned char *buf;
/* buffer size */
size_t sz;
/* Writer index */
size_t wr;
/* Reader index */
size_t rd;
};
#endif /* _HAPROXY_CBUF_T_H */

136
include/haproxy/cbuf.h Normal file
View file

@ -0,0 +1,136 @@
/*
 * include/haproxy/cbuf.h
* This file contains definitions and prototypes for circular buffers.
* Inspired from Linux circular buffers (include/linux/circ_buf.h).
*
* Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation, version 2.1
* exclusively.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _HAPROXY_CBUF_H
#define _HAPROXY_CBUF_H
#ifdef USE_QUIC
#ifndef USE_OPENSSL
#error "Must define USE_OPENSSL"
#endif
#endif
#include <haproxy/atomic.h>
#include <haproxy/list.h>
#include <haproxy/cbuf-t.h>
struct cbuf *cbuf_new(unsigned char *buf, size_t sz);
void cbuf_free(struct cbuf *cbuf);
/* Amount of data between <rd> and <wr> (<size> must be a power of two) */
#define CBUF_DATA(wr, rd, size) (((wr) - (rd)) & ((size) - 1))
/* Return the writer position in <cbuf>.
* To be used only by the writer!
*/
static inline unsigned char *cb_wr(struct cbuf *cbuf)
{
return cbuf->buf + cbuf->wr;
}
/* Reset the reader index.
* To be used by a reader!
*/
static inline void cb_rd_reset(struct cbuf *cbuf)
{
cbuf->rd = 0;
}
/* Reset the writer index.
* To be used by a writer!
*/
static inline void cb_wr_reset(struct cbuf *cbuf)
{
cbuf->wr = 0;
}
/* Increase <cbuf> circular buffer data by <count>.
* To be used by a writer!
*/
static inline void cb_add(struct cbuf *cbuf, size_t count)
{
cbuf->wr = (cbuf->wr + count) & (cbuf->sz - 1);
}
/* Return the reader position in <cbuf>.
* To be used only by the reader!
*/
static inline unsigned char *cb_rd(struct cbuf *cbuf)
{
return cbuf->buf + cbuf->rd;
}
/* Skip <count> byte in <cbuf> circular buffer.
* To be used by a reader!
*/
static inline void cb_del(struct cbuf *cbuf, size_t count)
{
cbuf->rd = (cbuf->rd + count) & (cbuf->sz - 1);
}
/* Return the amount of data left in <cbuf>.
* To be used only by the writer!
*/
static inline size_t cb_data(struct cbuf *cbuf)
{
size_t rd;
rd = HA_ATOMIC_LOAD(&cbuf->rd);
return CBUF_DATA(cbuf->wr, rd, cbuf->sz);
}
/* Return the amount of room left in <cbuf> minus 1 to distinguish
 * the case where the buffer is full from the case where it is empty.
 * To be used only by the writer!
 */
static inline size_t cb_room(struct cbuf *cbuf)
{
size_t rd;
rd = HA_ATOMIC_LOAD(&cbuf->rd);
return CBUF_DATA(rd, cbuf->wr + 1, cbuf->sz);
}
/* Return the amount of contiguous data left in <cbuf>.
* To be used only by the reader!
*/
static inline size_t cb_contig_data(struct cbuf *cbuf)
{
size_t end, n;
end = cbuf->sz - cbuf->rd;
n = (HA_ATOMIC_LOAD(&cbuf->wr) + end) & (cbuf->sz - 1);
return n < end ? n : end;
}
/* Return the amount of contiguous space left in <cbuf>.
* To be used only by the writer!
*/
static inline size_t cb_contig_space(struct cbuf *cbuf)
{
size_t end, n;
end = cbuf->sz - 1 - cbuf->wr;
n = (HA_ATOMIC_LOAD(&cbuf->rd) + end) & (cbuf->sz - 1);
return n <= end ? n : end + 1;
}
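/* Illustrative usage sketch (hypothetical helper): a writer copying <len>
 * bytes into <cbuf>, handling the wrap with cb_contig_space(). Assumes
 * <string.h> for memcpy() and a power-of-two buffer size, as required by
 * the masks above.
 */
static inline size_t cb_put_sketch(struct cbuf *cbuf, const unsigned char *data, size_t len)
{
	size_t total = 0;

	if (len > cb_room(cbuf))
		len = cb_room(cbuf); /* keep one byte free, see cb_room() */

	while (len) {
		size_t chunk = cb_contig_space(cbuf);

		if (chunk > len)
			chunk = len;
		memcpy(cb_wr(cbuf), data + total, chunk);
		cb_add(cbuf, chunk);
		total += chunk;
		len   -= chunk;
	}
	return total;
}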
#endif /* _HAPROXY_CBUF_H */

View file

@ -54,8 +54,6 @@ enum cond_predicate {
CFG_PRED_OSSL_VERSION_ATLEAST, // "openssl_version_atleast"
CFG_PRED_OSSL_VERSION_BEFORE, // "openssl_version_before"
CFG_PRED_SSLLIB_NAME_STARTSWITH, // "ssllib_name_startswith"
CFG_PRED_AWSLC_API_ATLEAST, // "awslc_api_atleast"
CFG_PRED_AWSLC_API_BEFORE, // "awslc_api_before"
CFG_PRED_ENABLED, // "enabled"
};

View file

@ -23,7 +23,6 @@
#define _HAPROXY_CFGPARSE_H
#include <haproxy/api.h>
#include <haproxy/proxy-t.h>
struct hap_cpuset;
struct proxy;
@ -39,7 +38,6 @@ struct acl_cond;
#define CFG_CRTLIST 5
#define CFG_CRTSTORE 6
#define CFG_TRACES 7
#define CFG_ACME 8
/* various keyword modifiers */
enum kw_mod {
@ -111,15 +109,9 @@ extern char *cursection;
extern int non_global_section_parsed;
extern struct proxy *curproxy;
extern struct proxy *last_defproxy;
extern char initial_cwd[PATH_MAX];
int cfg_parse_global(const char *file, int linenum, char **args, int inv);
int cfg_parse_listen(const char *file, int linenum, char **args, int inv);
int cfg_parse_listen_match_option(const char *file, int linenum, int kwm,
const struct cfg_opt config_opts[], int *err_code,
char **args, int mode, int cap,
int *options, int *no_options);
int cfg_parse_traces(const char *file, int linenum, char **args, int inv);
int cfg_parse_track_sc_num(unsigned int *track_sc_num,
const char *arg, const char *end, char **err);
@ -141,7 +133,7 @@ int warnif_misplaced_tcp_req_sess(struct proxy *proxy, const char *file, int lin
int warnif_misplaced_tcp_req_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
int warnif_misplaced_tcp_res_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
int warnif_misplaced_quic_init(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
int warnif_cond_conflicts(const struct acl_cond *cond, unsigned int where, char **err);
int warnif_cond_conflicts(const struct acl_cond *cond, unsigned int where, const char *file, int line);
int warnif_tcp_http_cond(const struct proxy *px, const struct acl_cond *cond);
int too_many_args_idx(int maxarg, int index, char **args, char **msg, int *err_code);
int too_many_args(int maxarg, char **args, char **msg, int *err_code);
@ -159,10 +151,6 @@ ssize_t load_cfg_in_mem(char* filename, char** cfg_content);
#define REGISTER_CONFIG_SECTION(name, parse, post) \
INITCALL3(STG_REGISTER, cfg_register_section, (name), (parse), (post))
/* simplified way to define a post section parser */
#define REGISTER_CONFIG_POST_SECTION(name, post) \
INITCALL3(STG_REGISTER, cfg_register_section, (name), NULL, (post))
#define REGISTER_CONFIG_POSTPARSER(name, parser) \
INITCALL2(STG_REGISTER, cfg_register_postparser, (name), (parser))

View file

@ -204,6 +204,7 @@ struct channel {
unsigned short last_read; /* 16 lower bits of last read date (max pause=65s) */
unsigned char xfer_large; /* number of consecutive large xfers */
unsigned char xfer_small; /* number of consecutive small xfers */
unsigned long long total; /* total data read */
int analyse_exp; /* expiration date for current analysers (if set) */
};

View file

@ -323,6 +323,7 @@ static inline void channel_init(struct channel *chn)
chn->to_forward = 0;
chn->last_read = now_ms;
chn->xfer_small = chn->xfer_large = 0;
chn->total = 0;
chn->analysers = 0;
chn->flags = 0;
chn->output = 0;
@ -376,6 +377,7 @@ static inline void channel_add_input(struct channel *chn, unsigned int len)
c_adv(chn, fwd);
}
/* notify that some data was read */
chn->total += len;
chn->flags |= CF_READ_EVENT;
}

View file

@ -172,7 +172,6 @@ struct check {
char desc[HCHK_DESC_LEN]; /* health check description */
signed char use_ssl; /* use SSL for health checks (1: on, 0: server mode, -1: off) */
int send_proxy; /* send a PROXY protocol header with checks */
int reuse_pool; /* try to reuse idle connections */
struct tcpcheck_rules *tcpcheck_rules; /* tcp-check send / expect rules */
struct tcpcheck_rule *current_step; /* current step when using tcpcheck */
int inter, fastinter, downinter; /* checks: time in milliseconds */
@ -188,7 +187,6 @@ struct check {
char **envp; /* the environment to use if running a process-based check */
struct pid_list *curpid; /* entry in pid_list used for current process-based test, or -1 if not in test */
struct sockaddr_storage addr; /* the address to check */
char *pool_conn_name; /* conn name used on reuse */
char *sni; /* Server name */
char *alpn_str; /* ALPN to use for checks */
int alpn_len; /* ALPN string length */

View file

@ -24,7 +24,7 @@
#include <haproxy/applet-t.h>
/* Access level for a stats socket (appctx->cli_ctx.level) */
/* Access level for a stats socket (appctx->cli_level) */
#define ACCESS_LVL_NONE 0x0000
#define ACCESS_LVL_USER 0x0001
#define ACCESS_LVL_OPER 0x0002
@ -41,13 +41,11 @@
#define ACCESS_MCLI_SEVERITY_STR 0x0200 /* 'set severity-output string' on master CLI */
/* flags for appctx->st1 */
#define APPCTX_CLI_ST1_PAYLOAD (1 << 0)
#define APPCTX_CLI_ST1_NOLF (1 << 1)
#define APPCTX_CLI_ST1_LASTCMD (1 << 2)
#define APPCTX_CLI_ST1_INTER (1 << 3) /* interactive mode (i.e. don't close after 1st cmd) */
#define APPCTX_CLI_ST1_PROMPT (1 << 4) /* display prompt */
#define APPCTX_CLI_ST1_TIMED (1 << 5) /* display timer in prompt */
#define APPCTX_CLI_ST1_YIELD (1 << 6) /* forced yield between commands */
#define APPCTX_CLI_ST1_PROMPT (1 << 0)
#define APPCTX_CLI_ST1_PAYLOAD (1 << 1)
#define APPCTX_CLI_ST1_NOLF (1 << 2)
#define APPCTX_CLI_ST1_TIMED (1 << 3)
#define APPCTX_CLI_ST1_LASTCMD (1 << 4)
#define CLI_PREFIX_KW_NB 5
#define CLI_MAX_MATCHES 5
@ -57,8 +55,8 @@
enum {
CLI_ST_INIT = 0, /* initial state, must leave to zero ! */
CLI_ST_END, /* final state, let's close */
CLI_ST_PARSE_CMDLINE, /* wait for a full command line */
CLI_ST_PROCESS_CMDLINE, /* process all commands on the command line */
CLI_ST_GETREQ, /* wait for a request */
CLI_ST_PARSEREQ, /* parse a request */
CLI_ST_OUTPUT, /* all states after this one are responses */
CLI_ST_PROMPT, /* display the prompt (first output, same code) */
CLI_ST_PRINT, /* display const message in cli->msg */

View file

@ -28,7 +28,7 @@
extern struct timeval start_date; /* the process's start date in wall-clock time */
extern struct timeval ready_date; /* date when the process was considered ready */
extern ullong start_time_ns; /* the process's start date in internal monotonic time (ns) */
extern volatile ullong *global_now_ns;/* common monotonic date between all threads, in ns (wraps every 585 yr) */
extern volatile ullong global_now_ns; /* common monotonic date between all threads, in ns (wraps every 585 yr) */
extern THREAD_LOCAL ullong now_ns; /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
extern THREAD_LOCAL struct timeval date; /* the real current date (wall-clock time) */
@ -49,8 +49,6 @@ uint clock_report_idle(void);
void clock_leaving_poll(int timeout, int interrupted);
void clock_entering_poll(void);
void clock_adjust_now_offset(void);
void clock_set_now_offset(llong ofs);
llong clock_get_now_offset(void);
static inline void clock_update_date(int max_wait, int interrupted)
{

View file

@ -94,21 +94,11 @@ typedef struct { } empty_t;
# endif
#endif
/* unsafe ones for use with constant macros needed in initializers */
#ifndef _MIN
#define _MIN(a, b) ((a < b) ? a : b)
#endif
#ifndef _MAX
#define _MAX(a, b) ((a > b) ? a : b)
#endif
/* safe versions for use anywhere except in initializers */
#ifndef MIN
#define MIN(a, b) ({ \
typeof(a) _a = (a); \
typeof(a) _b = (b); \
_MIN(_a, _b); \
((_a < _b) ? _a : _b); \
})
#endif
@ -116,15 +106,10 @@ typedef struct { } empty_t;
#define MAX(a, b) ({ \
typeof(a) _a = (a); \
typeof(a) _b = (b); \
_MAX(_a, _b); \
((_a > _b) ? _a : _b); \
})
#endif
/* always set a _POSIX_VERSION if there isn't any, in order to ease compares */
#ifndef _POSIX_VERSION
# define _POSIX_VERSION 0
#endif
/* this is for libc5 for example */
#ifndef TCP_NODELAY
#define TCP_NODELAY 1

View file

@ -31,23 +31,6 @@
#include <stdlib.h>
#endif
/* DEFVAL() returns either the second argument as-is, or <def> if absent. This
 * is for use in macro arguments.
*/
#define DEFVAL(_def,...) _FIRST_ARG(NULL, ##__VA_ARGS__, (_def))
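/* e.g. (expansion sketch): DEFVAL(64) evaluates to (64) when the optional
 * argument is absent, while DEFVAL(64, 128) evaluates to 128.
 */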
/* DEFNULL() returns either the argument as-is, or NULL if absent. This is for
 * use in macro arguments.
*/
#define DEFNULL(...) DEFVAL(NULL, ##__VA_ARGS__)
/* DEFZERO() returns either the argument as-is, or 0 if absent. This is for
 * use in macro arguments.
*/
#define DEFZERO(...) DEFVAL(0, ##__VA_ARGS__)
#define _FIRST_ARG(a, b, ...) b
/*
* Gcc before 3.0 needs [0] to declare a variable-size array
*/
@ -72,24 +55,6 @@
#define ___equals_1(x) ____equals_1(comma_for_one ## x 1)
#define __equals_1(x) ___equals_1(x)
/* same but checks if defined as zero, useful to distinguish between -DFOO and
* -DFOO=0.
*/
#define comma_for_zero0 ,
#define _____equals_0(x, y, ...) (y)
#define ____equals_0(x, ...) _____equals_0(x, 0)
#define ___equals_0(x) ____equals_0(comma_for_zero ## x 1)
#define __equals_0(x) ___equals_0(x)
/* same but checks if defined as empty, useful to distinguish between -DFOO= and
* -DFOO=anything.
*/
#define comma_for_empty ,
#define _____def_as_empty(x, y, ...) (y)
#define ____def_as_empty(x, ...) _____def_as_empty(x, 0)
#define ___def_as_empty(x) ____def_as_empty(comma_for_empty ## x 1)
#define __def_as_empty(x) ___def_as_empty(x)
/* gcc 5 and clang 3 brought __has_attribute(), which is not well documented in
* the case of gcc, but is convenient since handled at the preprocessor level.
* In both cases it's possible to test for __has_attribute() using ifdef. When
@ -318,11 +283,6 @@
#define _TOSTR(x) #x
#define TOSTR(x) _TOSTR(x)
/* concatenates the two strings after resolving possible macros */
#undef CONCAT // Turns out NetBSD defines it to the same in exec_elf.h
#define _CONCAT(a,b) a ## b
#define CONCAT(a,b) _CONCAT(a,b)
/*
* Gcc >= 3 provides the ability for the program to give hints to the
* compiler about what branch of an if is most likely to be taken. This
@ -367,7 +327,7 @@
* <type> which has its member <name> stored at address <ptr>.
*/
#ifndef container_of
#define container_of(ptr, type, name) ((type *)(((char *)(ptr)) - offsetof(type, name)))
#define container_of(ptr, type, name) ((type *)(((void *)(ptr)) - ((long)&((type *)0)->name)))
#endif
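/* e.g. (usage sketch, hypothetical types): given struct task { ...; struct
 * list list; }; and a pointer <el> to the <list> member, then
 * container_of(el, struct task, list) returns the enclosing struct task.
 */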
/* returns a pointer to the structure of type <type> which has its member <name>
@ -376,7 +336,7 @@
#ifndef container_of_safe
#define container_of_safe(ptr, type, name) \
({ void *__p = (ptr); \
__p ? (type *)((char *)__p - offsetof(type, name)) : (type *)0; \
__p ? (type *)(__p - ((long)&((type *)0)->name)) : (type *)0; \
})
#endif
@ -432,13 +392,6 @@
* for multi_threading, see THREAD_PAD() below. *
\*****************************************************************************/
/* Cache line size for alignment purposes. This value is incorrect for some
* Apple CPUs which have 128 bytes cache lines.
*/
#ifndef CACHELINE_SIZE
#define CACHELINE_SIZE 64
#endif
/* sets alignment for current field or variable */
#ifndef ALIGNED
#define ALIGNED(x) __attribute__((aligned(x)))
@ -462,12 +415,12 @@
#endif
#endif
/* Sets alignment for current field or variable only when threads are enabled.
* When no parameters are provided, we align to the cache line size.
/* sets alignment for current field or variable only when threads are enabled.
* Typically used to respect cache line alignment to avoid false sharing.
*/
#ifndef THREAD_ALIGNED
#ifdef USE_THREAD
#define THREAD_ALIGNED(...) ALIGNED(DEFVAL(CACHELINE_SIZE, ##__VA_ARGS__))
#define THREAD_ALIGNED(x) __attribute__((aligned(x)))
#else
#define THREAD_ALIGNED(x)
#endif
@ -500,44 +453,32 @@
#endif
#endif
/* Add an optional alignment for next fields in a structure, only when threads
* are enabled. When no parameters are provided, we align to the cache line size.
/* add an optional alignment for next fields in a structure, only when threads
* are enabled. Typically used to respect cache line alignment to avoid false
* sharing.
*/
#ifndef THREAD_ALIGN
#ifdef USE_THREAD
#define THREAD_ALIGN(...) union { } ALIGNED(DEFVAL(CACHELINE_SIZE, ##__VA_ARGS__))
#define THREAD_ALIGN(x) union { } ALIGNED(x)
#else
#define THREAD_ALIGN(x)
#endif
#endif
/* add padding of the specified size */
#define _PAD(x,l) char __pad_##l[x]
/* add optional padding of the specified size between fields in a structure,
* only when threads are enabled. This is used to avoid false sharing of cache
* lines for dynamically allocated structures which cannot guarantee alignment.
*/
#ifndef THREAD_PAD
# ifdef USE_THREAD
# define _THREAD_PAD(x,l) _PAD(x, l)
# define __THREAD_PAD(x,l) char __pad_##l[x]
# define _THREAD_PAD(x,l) __THREAD_PAD(x, l)
# define THREAD_PAD(x) _THREAD_PAD(x, __LINE__)
# else
# define THREAD_PAD(x)
# endif
#endif
/* add mandatory padding of the specified size between fields in a structure,
* This is used to avoid false sharing of cache lines for dynamically allocated
* structures which cannot guarantee alignment, or to ensure that the size of
* the struct remains consistent on architectures with different alignment
* constraints
*/
#ifndef ALWAYS_PAD
# define _ALWAYS_PAD(x,l) _PAD(x, l)
# define ALWAYS_PAD(x) _ALWAYS_PAD(x, __LINE__)
#endif
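/* Illustrative usage sketch (hypothetical struct): keep a hot writer field
 * and a hot reader field on distinct cache lines in a dynamically allocated
 * structure, where alignment cannot be guaranteed, hence padding.
 */
struct example_ring_idx {
	unsigned long long produced;	/* written by the producer only */
	THREAD_PAD(64);			/* avoid false sharing with the consumer */
	unsigned long long consumed;	/* written by the consumer only */
};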
/* The THREAD_LOCAL type attribute defines thread-local storage and is defined
* to __thread when threads are enabled or empty when disabled.
*/
@ -558,18 +499,6 @@
#define __decl_thread(decl)
#endif
/* The __decl_thread_var() statement declares a variable when threads are enabled
* or replaces it with an dummy statement to avoid placing a lone semi-colon. The
* purpose is to condition the presence of some variables or to the fact that
* threads are enabled, without having to enclose them inside an ugly
* #ifdef USE_THREAD/#endif clause.
*/
#ifdef USE_THREAD
#define __decl_thread_var(decl) decl
#else
#define __decl_thread_var(decl) enum { CONCAT(_dummy_var_decl_,__LINE__), }
#endif
/* clang has a __has_feature() macro which reports true/false on a number of
* internally supported features. Let's make sure this macro is always defined
* and returns zero when not supported.
@ -578,15 +507,4 @@
#define __has_feature(x) 0
#endif
/* gcc 15 throws a warning if a fixed-size char array does not contain a
 * terminating NUL. gcc has an attribute 'nonstring', which allows suppressing
 * this warning for such array declarations. But this is not the case for clang
 * and other compilers.
*/
#if __has_attribute(nonstring)
#define __nonstring __attribute__ ((nonstring))
#else
#define __nonstring
#endif
#endif /* _HAPROXY_COMPILER_H */

Some files were not shown because too many files have changed in this diff.