Compare commits


No commits in common. "master" and "v3.2-dev7" have entirely different histories.

825 changed files with 22347 additions and 62373 deletions


@@ -1,7 +1,7 @@
 FreeBSD_task:
   freebsd_instance:
     matrix:
-      image_family: freebsd-14-3
+      image_family: freebsd-14-2
   only_if: $CIRRUS_BRANCH =~ 'master|next'
   install_script:
     - pkg update -f && pkg upgrade -y && pkg install -y openssl git gmake lua54 socat pcre2


@@ -1,34 +0,0 @@
-name: 'setup VTest'
-description: 'ssss'
-
-runs:
-  using: "composite"
-  steps:
-    - name: Setup coredumps
-      if: ${{ startsWith(matrix.os, 'ubuntu-') }}
-      shell: bash
-      run: |
-        sudo sysctl -w fs.suid_dumpable=1
-        sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
-    - name: Setup ulimit for core dumps
-      shell: bash
-      run: |
-        # This is required for macOS which does not actually allow to increase
-        # the '-n' soft limit to the hard limit, thus failing to run.
-        ulimit -n 65536
-        ulimit -c unlimited
-    - name: Install VTest
-      shell: bash
-      run: |
-        scripts/build-vtest.sh
-    - name: Install problem matcher for VTest
-      shell: bash
-      # This allows one to more easily see which tests fail.
-      run: echo "::add-matcher::.github/vtest.json"


@@ -19,7 +19,7 @@ defaults
 frontend h2
     mode http
-    bind 127.0.0.1:8443 ssl crt reg-tests/ssl/certs/common.pem alpn h2,http/1.1
+    bind 127.0.0.1:8443 ssl crt reg-tests/ssl/common.pem alpn h2,http/1.1
     default_backend h2b

 backend h2b

.github/matrix.py vendored

@@ -125,11 +125,9 @@ def main(ref_name):
     # Ubuntu

     if "haproxy-" in ref_name:
-        os = "ubuntu-24.04"  # stable branch
-        os_arm = "ubuntu-24.04-arm"  # stable branch
+        os = "ubuntu-22.04"  # stable branch
     else:
         os = "ubuntu-24.04"  # development branch
-        os_arm = "ubuntu-24.04-arm"  # development branch

     TARGET = "linux-glibc"
     for CC in ["gcc", "clang"]:

@@ -174,37 +172,36 @@ def main(ref_name):
     # ASAN

-    for os_asan in [os, os_arm]:
-        matrix.append(
-            {
-                "name": "{}, {}, ASAN, all features".format(os_asan, CC),
-                "os": os_asan,
-                "TARGET": TARGET,
-                "CC": CC,
-                "FLAGS": [
-                    "USE_OBSOLETE_LINKER=1",
-                    'ARCH_FLAGS="-g -fsanitize=address"',
-                    'OPT_CFLAGS="-O1"',
-                    "USE_ZLIB=1",
-                    "USE_OT=1",
-                    "OT_INC=${HOME}/opt-ot/include",
-                    "OT_LIB=${HOME}/opt-ot/lib",
-                    "OT_RUNPATH=1",
-                    "USE_PCRE2=1",
-                    "USE_PCRE2_JIT=1",
-                    "USE_LUA=1",
-                    "USE_OPENSSL=1",
-                    "USE_WURFL=1",
-                    "WURFL_INC=addons/wurfl/dummy",
-                    "WURFL_LIB=addons/wurfl/dummy",
-                    "USE_DEVICEATLAS=1",
-                    "DEVICEATLAS_SRC=addons/deviceatlas/dummy",
-                    "USE_PROMEX=1",
-                    "USE_51DEGREES=1",
-                    "51DEGREES_SRC=addons/51degrees/dummy/pattern",
-                ],
-            }
-        )
+    matrix.append(
+        {
+            "name": "{}, {}, ASAN, all features".format(os, CC),
+            "os": os,
+            "TARGET": TARGET,
+            "CC": CC,
+            "FLAGS": [
+                "USE_OBSOLETE_LINKER=1",
+                'ARCH_FLAGS="-g -fsanitize=address"',
+                'OPT_CFLAGS="-O1"',
+                "USE_ZLIB=1",
+                "USE_OT=1",
+                "OT_INC=${HOME}/opt-ot/include",
+                "OT_LIB=${HOME}/opt-ot/lib",
+                "OT_RUNPATH=1",
+                "USE_PCRE2=1",
+                "USE_PCRE2_JIT=1",
+                "USE_LUA=1",
+                "USE_OPENSSL=1",
+                "USE_WURFL=1",
+                "WURFL_INC=addons/wurfl/dummy",
+                "WURFL_LIB=addons/wurfl/dummy",
+                "USE_DEVICEATLAS=1",
+                "DEVICEATLAS_SRC=addons/deviceatlas/dummy",
+                "USE_PROMEX=1",
+                "USE_51DEGREES=1",
+                "51DEGREES_SRC=addons/51degrees/dummy/pattern",
+            ],
+        }
+    )

     for compression in ["USE_ZLIB=1"]:
         matrix.append(

@@ -221,8 +218,7 @@ def main(ref_name):
         "stock",
         "OPENSSL_VERSION=1.0.2u",
         "OPENSSL_VERSION=1.1.1s",
-        "OPENSSL_VERSION=3.5.1",
-        "QUICTLS_VERSION=OpenSSL_1_1_1w-quic1",
+        "QUICTLS=yes",
         "WOLFSSL_VERSION=5.7.0",
         "AWS_LC_VERSION=1.39.0",
         # "BORINGSSL=yes",

@@ -236,7 +232,8 @@ def main(ref_name):
     for ssl in ssl_versions:
         flags = ["USE_OPENSSL=1"]
-        skipdup=0
+        if ssl == "BORINGSSL=yes" or ssl == "QUICTLS=yes" or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl:
+            flags.append("USE_QUIC=1")
         if "WOLFSSL" in ssl:
             flags.append("USE_OPENSSL_WOLFSSL=1")
         if "AWS_LC" in ssl:

@@ -246,23 +243,8 @@ def main(ref_name):
            flags.append("SSL_INC=${HOME}/opt/include")
        if "LIBRESSL" in ssl and "latest" in ssl:
            ssl = determine_latest_libressl(ssl)
-           skipdup=1
        if "OPENSSL" in ssl and "latest" in ssl:
            ssl = determine_latest_openssl(ssl)
-           skipdup=1
-       # if "latest" equals a version already in the list
-       if ssl in ssl_versions and skipdup == 1:
-           continue
-       openssl_supports_quic = False
-       try:
-           openssl_supports_quic = version.Version(ssl.split("OPENSSL_VERSION=",1)[1]) >= version.Version("3.5.0")
-       except:
-           pass
-       if ssl == "BORINGSSL=yes" or "QUICTLS" in ssl or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl or openssl_supports_quic:
-           flags.append("USE_QUIC=1")
        matrix.append(
            {

@@ -280,7 +262,7 @@ def main(ref_name):
     if "haproxy-" in ref_name:
         os = "macos-13"  # stable branch
     else:
-        os = "macos-26"  # development branch
+        os = "macos-15"  # development branch

     TARGET = "osx"
     for CC in ["clang"]:
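Note on the matrix.py hunks above: on the master side, the QUIC flag is no longer tied to a fixed list of SSL libraries; for plain OpenSSL it is derived from the version string itself. A minimal standalone sketch of that gating logic, assuming the packaging module is available; the sample values at the bottom are illustrative only, not the full ssl_versions list:

    from packaging import version

    def wants_quic(ssl):
        # QUIC is enabled for the alternative stacks unconditionally, and
        # for plain OpenSSL only from version 3.5.0 onwards.
        openssl_supports_quic = False
        try:
            openssl_supports_quic = version.Version(
                ssl.split("OPENSSL_VERSION=", 1)[1]) >= version.Version("3.5.0")
        except (IndexError, version.InvalidVersion):
            # not an OPENSSL_VERSION=x.y.z entry, or not a parseable version
            pass
        return (ssl == "BORINGSSL=yes" or "QUICTLS" in ssl or "LIBRESSL" in ssl
                or "WOLFSSL" in ssl or "AWS_LC" in ssl or openssl_supports_quic)

    for ssl in ["OPENSSL_VERSION=1.1.1s", "OPENSSL_VERSION=3.5.1",
                "AWS_LC_VERSION=1.39.0"]:
        print(ssl, wants_quic(ssl))  # False, True, True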


@@ -5,8 +5,82 @@ on:
     - cron: "0 0 * * 4"
   workflow_dispatch:

-permissions:
-  contents: read
-
 jobs:
   test:
-    uses: ./.github/workflows/aws-lc-template.yml
-    with:
-      command: "from matrix import determine_latest_aws_lc_fips; print(determine_latest_aws_lc_fips(''))"
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install VTest
+        run: |
+          scripts/build-vtest.sh
+      - name: Determine latest AWS-LC release
+        id: get_aws_lc_release
+        run: |
+          result=$(cd .github && python3 -c "from matrix import determine_latest_aws_lc_fips; print(determine_latest_aws_lc_fips(''))")
+          echo $result
+          echo "result=$result" >> $GITHUB_OUTPUT
+      - name: Cache AWS-LC
+        id: cache_aws_lc
+        uses: actions/cache@v4
+        with:
+          path: '~/opt/'
+          key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
+      - name: Install apt dependencies
+        run: |
+          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
+          sudo apt-get --no-install-recommends -y install socat gdb
+      - name: Install AWS-LC
+        if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
+        run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
+      - name: Compile HAProxy
+        run: |
+          make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
+            USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
+            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
+            DEBUG="-DDEBUG_POOL_INTEGRITY" \
+            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
+          sudo make install
+      - name: Show HAProxy version
+        id: show-version
+        run: |
+          ldd $(which haproxy)
+          haproxy -vv
+          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
+      - name: Install problem matcher for VTest
+        run: echo "::add-matcher::.github/vtest.json"
+      - name: Run VTest for HAProxy
+        id: vtest
+        run: |
+          # This is required for macOS which does not actually allow to increase
+          # the '-n' soft limit to the hard limit, thus failing to run.
+          ulimit -n 65536
+          # allow to catch coredumps
+          ulimit -c unlimited
+          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
+      - name: Show VTest results
+        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
+        run: |
+          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
+            printf "::group::"
+            cat $folder/INFO
+            cat $folder/LOG
+            echo "::endgroup::"
+          done
+          exit 1
+      - name: Show coredumps
+        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
+        run: |
+          failed=false
+          shopt -s nullglob
+          for file in /tmp/core.*; do
+            failed=true
+            printf "::group::"
+            gdb -ex 'thread apply all bt full' ./haproxy $file
+            echo "::endgroup::"
+          done
+          if [ "$failed" = true ]; then
+            exit 1;
+          fi


@@ -1,94 +0,0 @@
-name: AWS-LC template
-
-on:
-  workflow_call:
-    inputs:
-      command:
-        required: true
-        type: string
-
-permissions:
-  contents: read
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    steps:
-      - uses: actions/checkout@v5
-      - name: Determine latest AWS-LC release
-        id: get_aws_lc_release
-        run: |
-          result=$(cd .github && python3 -c "${{ inputs.command }}")
-          echo $result
-          echo "result=$result" >> $GITHUB_OUTPUT
-      - name: Cache AWS-LC
-        id: cache_aws_lc
-        uses: actions/cache@v4
-        with:
-          path: '~/opt/'
-          key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
-      - name: Install apt dependencies
-        run: |
-          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
-          sudo apt-get --no-install-recommends -y install socat gdb jose
-      - name: Install AWS-LC
-        if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
-        run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
-      - name: Compile HAProxy
-        run: |
-          make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
-            USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
-            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
-            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
-          sudo make install
-      - name: Show HAProxy version
-        id: show-version
-        run: |
-          ldd $(which haproxy)
-          haproxy -vv
-          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-      - uses: ./.github/actions/setup-vtest
-      - name: Run VTest for HAProxy
-        id: vtest
-        run: |
-          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
-      - name: Show VTest results
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
-            printf "::group::"
-            cat $folder/INFO
-            cat $folder/LOG
-            echo "::endgroup::"
-          done
-          exit 1
-      - name: Show coredumps
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          failed=false
-          shopt -s nullglob
-          for file in /tmp/core.*; do
-            failed=true
-            printf "::group::"
-            gdb -ex 'thread apply all bt full' ./haproxy $file
-            echo "::endgroup::"
-          done
-          if [ "$failed" = true ]; then
-            exit 1;
-          fi
-      - name: Show Unit-Tests results
-        if: ${{ failure() && steps.unittests.outcome == 'failure' }}
-        run: |
-          for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
-            printf "::group::"
-            cat $result
-            echo "::endgroup::"
-          done
-          exit 1


@@ -5,8 +5,82 @@ on:
     - cron: "0 0 * * 4"
   workflow_dispatch:

-permissions:
-  contents: read
-
 jobs:
   test:
-    uses: ./.github/workflows/aws-lc-template.yml
-    with:
-      command: "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))"
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install VTest
+        run: |
+          scripts/build-vtest.sh
+      - name: Determine latest AWS-LC release
+        id: get_aws_lc_release
+        run: |
+          result=$(cd .github && python3 -c "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))")
+          echo $result
+          echo "result=$result" >> $GITHUB_OUTPUT
+      - name: Cache AWS-LC
+        id: cache_aws_lc
+        uses: actions/cache@v4
+        with:
+          path: '~/opt/'
+          key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
+      - name: Install apt dependencies
+        run: |
+          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
+          sudo apt-get --no-install-recommends -y install socat gdb
+      - name: Install AWS-LC
+        if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
+        run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
+      - name: Compile HAProxy
+        run: |
+          make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
+            USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
+            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
+            DEBUG="-DDEBUG_POOL_INTEGRITY" \
+            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
+          sudo make install
+      - name: Show HAProxy version
+        id: show-version
+        run: |
+          ldd $(which haproxy)
+          haproxy -vv
+          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
+      - name: Install problem matcher for VTest
+        run: echo "::add-matcher::.github/vtest.json"
+      - name: Run VTest for HAProxy
+        id: vtest
+        run: |
+          # This is required for macOS which does not actually allow to increase
+          # the '-n' soft limit to the hard limit, thus failing to run.
+          ulimit -n 65536
+          # allow to catch coredumps
+          ulimit -c unlimited
+          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
+      - name: Show VTest results
+        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
+        run: |
+          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
+            printf "::group::"
+            cat $folder/INFO
+            cat $folder/LOG
+            echo "::endgroup::"
+          done
+          exit 1
+      - name: Show coredumps
+        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
+        run: |
+          failed=false
+          shopt -s nullglob
+          for file in /tmp/core.*; do
+            failed=true
+            printf "::group::"
+            gdb -ex 'thread apply all bt full' ./haproxy $file
+            echo "::endgroup::"
+          done
+          if [ "$failed" = true ]; then
+            exit 1;
+          fi


@@ -3,7 +3,6 @@ name: Spelling Check
 on:
   schedule:
     - cron: "0 0 * * 2"
-  workflow_dispatch:

 permissions:
   contents: read

@@ -11,12 +10,12 @@ permissions:
 jobs:
   codespell:
     runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - uses: codespell-project/codespell-problem-matcher@v1.2.0
       - uses: codespell-project/actions-codespell@master
         with:
          skip: CHANGELOG,Makefile,*.fig,*.pem,./doc/design-thoughts,./doc/internals
-          ignore_words_list: pres,ist,ists,hist,wan,ca,cas,que,ans,te,nd,referer,ot,uint,iif,fo,keep-alives,dosen,ifset,thrid,strack,ba,chck,hel,unx,mor,clen,collet,bu,htmp,siz,experim
+          ignore_words_list: ist,ists,hist,wan,ca,cas,que,ans,te,nd,referer,ot,uint,iif,fo,keep-alives,dosen,ifset,thrid,strack,ba,chck,hel,unx,mor,clen,collet,bu,htmp,siz,experim
          uri_ignore_words_list: trafic,ressources


@@ -11,10 +11,15 @@ permissions:
 jobs:
   h2spec:
     name: h2spec
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        include:
+          - TARGET: linux-glibc
+            CC: gcc
+            os: ubuntu-latest
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Install h2spec
         id: install-h2spec
         run: |

@@ -23,12 +28,12 @@ jobs:
           tar xvf h2spec.tar.gz
           sudo install -m755 h2spec /usr/local/bin/h2spec
           echo "version=${H2SPEC_VERSION}" >> $GITHUB_OUTPUT
-      - name: Compile HAProxy with gcc
+      - name: Compile HAProxy with ${{ matrix.CC }}
         run: |
           make -j$(nproc) all \
             ERR=1 \
-            TARGET=linux-glibc \
-            CC=gcc \
+            TARGET=${{ matrix.TARGET }} \
+            CC=${{ matrix.CC }} \
             DEBUG="-DDEBUG_POOL_INTEGRITY" \
             USE_OPENSSL=1
           sudo make install


@@ -10,7 +10,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
      - name: Compile admin/halog/halog
        run: |
          make admin/halog/halog


@@ -15,9 +15,9 @@ permissions:
 jobs:
   scan:
     runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Install apt dependencies
         run: |
           sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none

@@ -27,7 +27,7 @@ jobs:
             libsystemd-dev
       - name: Install QUICTLS
         run: |
-          QUICTLS_VERSION=OpenSSL_1_1_1w-quic1 scripts/build-ssl.sh
+          QUICTLS=yes scripts/build-ssl.sh
       - name: Download Coverity build tool
         run: |
           wget -c -N https://scan.coverity.com/download/linux64 --post-data "token=${{ secrets.COVERITY_SCAN_TOKEN }}&project=Haproxy" -O coverity_tool.tar.gz

@@ -38,7 +38,7 @@ jobs:
       - name: Build with Coverity build tool
         run: |
           export PATH=`pwd`/coverity_tool/bin:$PATH
-          cov-build --dir cov-int make CC=clang TARGET=linux-glibc USE_ZLIB=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=2 DEBUG+=-DDEBUG_USE_ABORT=1
+          cov-build --dir cov-int make CC=clang TARGET=linux-glibc USE_ZLIB=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=1 DEBUG+=-DDEBUG_USE_ABORT=1
       - name: Submit build result to Coverity Scan
         run: |
           tar czvf cov.tar.gz cov-int


@@ -91,7 +91,7 @@ jobs:
           }
         ]
     runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     steps:
       - name: install packages
         run: |

@@ -99,12 +99,12 @@ jobs:
           sudo apt-get -yq --force-yes install \
             gcc-${{ matrix.platform.arch }} \
             ${{ matrix.platform.libs }}
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: install quictls
         run: |
-          QUICTLS_EXTRA_ARGS="--cross-compile-prefix=${{ matrix.platform.arch }}- ${{ matrix.platform.target }}" QUICTLS_VERSION=OpenSSL_1_1_1w-quic1 scripts/build-ssl.sh
+          QUICTLS_EXTRA_ARGS="--cross-compile-prefix=${{ matrix.platform.arch }}- ${{ matrix.platform.target }}" QUICTLS=yes scripts/build-ssl.sh
       - name: Build
         run: |


@@ -1,4 +1,4 @@
-name: Fedora/Rawhide/OpenSSL
+name: Fedora/Rawhide/QuicTLS

 on:
   schedule:

@@ -13,24 +13,26 @@ jobs:
     strategy:
       matrix:
         platform: [
-          { name: x64, cc: gcc, ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
-          { name: x64, cc: clang, ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
-          { name: x86, cc: gcc, ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" },
-          { name: x86, cc: clang, ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }
+          { name: x64, cc: gcc, QUICTLS_EXTRA_ARGS: "", ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
+          { name: x64, cc: clang, QUICTLS_EXTRA_ARGS: "", ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
+          { name: x86, cc: gcc, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" },
+          { name: x86, cc: clang, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }
         ]
-      fail-fast: false
     name: ${{ matrix.platform.cc }}.${{ matrix.platform.name }}
     runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     container:
       image: fedora:rawhide
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Install dependencies
         run: |
-          dnf -y install awk diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang openssl-devel.x86_64
-          dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686 openssl-devel.i686
-      - uses: ./.github/actions/setup-vtest
+          dnf -y install diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang
+          dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686
+      - name: Install VTest
+        run: scripts/build-vtest.sh
+      - name: Install QuicTLS
+        run: QUICTLS=yes QUICTLS_EXTRA_ARGS="${{ matrix.platform.QUICTLS_EXTRA_ARGS }}" scripts/build-ssl.sh
       - name: Build contrib tools
         run: |
           make admin/halog/halog

@@ -39,7 +41,7 @@ jobs:
           make dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
       - name: Compile HAProxy with ${{ matrix.platform.cc }}
         run: |
-          make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" USE_PROMEX=1 USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }}" ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
+          make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }} -Wl,-rpath,${HOME}/opt/lib" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
           make install
       - name: Show HAProxy version
         id: show-version

@@ -49,13 +51,6 @@ jobs:
           echo "::endgroup::"
           haproxy -vv
           echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-      #
-      # TODO: review this workaround later
-      - name: relax crypto policies
-        run: |
-          dnf -y install crypto-policies-scripts
-          echo LEGACY > /etc/crypto-policies/config
-          update-crypto-policies
       - name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
         id: vtest
         run: |

@@ -69,7 +64,3 @@ jobs:
             cat $folder/LOG
             echo "::endgroup::"
           done
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests


@@ -8,12 +8,12 @@ on:
 jobs:
   gcc:
     runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     permissions:
       contents: read
     steps:
       - name: "Checkout repository"
-        uses: actions/checkout@v5
+        uses: actions/checkout@v4
       - name: "Build on VM"
         uses: vmactions/solaris-vm@v1


@@ -20,13 +20,13 @@ jobs:
         run: |
           ulimit -c unlimited
           echo '/tmp/core/core.%h.%e.%t' > /proc/sys/kernel/core_pattern
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Install dependencies
-        run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg jose
+        run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg
       - name: Install VTest
         run: scripts/build-vtest.sh
       - name: Build
-        run: make -j$(nproc) TARGET=linux-musl DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
+        run: make -j$(nproc) TARGET=linux-musl ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
       - name: Show version
         run: ./haproxy -vv
       - name: Show linked libraries

@@ -37,10 +37,6 @@ jobs:
       - name: Run VTest
         id: vtest
         run: make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
       - name: Show coredumps
         if: ${{ failure() && steps.vtest.outcome == 'failure' }}
         run: |

@@ -64,13 +60,3 @@ jobs:
             cat $folder/LOG
             echo "::endgroup::"
           done
-      - name: Show Unit-Tests results
-        if: ${{ failure() && steps.unittests.outcome == 'failure' }}
-        run: |
-          for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
-            printf "::group::"
-            cat $result
-            echo "::endgroup::"
-          done
-          exit 1


@@ -8,12 +8,12 @@ on:
 jobs:
   gcc:
     runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     permissions:
       contents: read
     steps:
       - name: "Checkout repository"
-        uses: actions/checkout@v5
+        uses: actions/checkout@v4
       - name: "Build on VM"
         uses: vmactions/netbsd-vm@v1


@@ -1,82 +0,0 @@
-name: openssl ECH
-
-on:
-  schedule:
-    - cron: "0 3 * * *"
-  workflow_dispatch:
-
-permissions:
-  contents: read
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    steps:
-      - uses: actions/checkout@v5
-      - name: Install VTest
-        run: |
-          scripts/build-vtest.sh
-      - name: Install apt dependencies
-        run: |
-          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
-          sudo apt-get --no-install-recommends -y install socat gdb
-          sudo apt-get --no-install-recommends -y install libpsl-dev
-      - name: Install OpenSSL+ECH
-        run: env OPENSSL_VERSION="git-feature/ech" GIT_TYPE="branch" scripts/build-ssl.sh
-      - name: Install curl+ECH
-        run: env SSL_LIB=${HOME}/opt/ scripts/build-curl.sh
-      - name: Compile HAProxy
-        run: |
-          make -j$(nproc) CC=gcc TARGET=linux-glibc \
-            USE_QUIC=1 USE_OPENSSL=1 USE_ECH=1 \
-            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
-            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
-            ARCH_FLAGS="-ggdb3 -fsanitize=address"
-          sudo make install
-      - name: Show HAProxy version
-        id: show-version
-        run: |
-          ldd $(which haproxy)
-          haproxy -vv
-          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-      - name: Install problem matcher for VTest
-        run: echo "::add-matcher::.github/vtest.json"
-      - name: Run VTest for HAProxy
-        id: vtest
-        run: |
-          # This is required for macOS which does not actually allow to increase
-          # the '-n' soft limit to the hard limit, thus failing to run.
-          ulimit -n 65536
-          # allow to catch coredumps
-          ulimit -c unlimited
-          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Show VTest results
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
-            printf "::group::"
-            cat $folder/INFO
-            cat $folder/LOG
-            echo "::endgroup::"
-          done
-          exit 1
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
-      - name: Show coredumps
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          failed=false
-          shopt -s nullglob
-          for file in /tmp/core.*; do
-            failed=true
-            printf "::group::"
-            gdb -ex 'thread apply all bt full' ./haproxy $file
-            echo "::endgroup::"
-          done
-          if [ "$failed" = true ]; then
-            exit 1;
-          fi


@@ -1,77 +0,0 @@
-name: openssl master
-
-on:
-  schedule:
-    - cron: "0 3 * * *"
-  workflow_dispatch:
-
-permissions:
-  contents: read
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    steps:
-      - uses: actions/checkout@v5
-      - name: Install apt dependencies
-        run: |
-          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
-          sudo apt-get --no-install-recommends -y install socat gdb
-          sudo apt-get --no-install-recommends -y install libpsl-dev
-      - uses: ./.github/actions/setup-vtest
-      - name: Install OpenSSL master
-        run: env OPENSSL_VERSION="git-master" GIT_TYPE="branch" scripts/build-ssl.sh
-      - name: Compile HAProxy
-        run: |
-          make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
-            USE_QUIC=1 USE_OPENSSL=1 \
-            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
-            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
-          sudo make install
-      - name: Show HAProxy version
-        id: show-version
-        run: |
-          ldd $(which haproxy)
-          haproxy -vv
-          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-      - name: Install problem matcher for VTest
-        run: echo "::add-matcher::.github/vtest.json"
-      - name: Run VTest for HAProxy
-        id: vtest
-        run: |
-          # This is required for macOS which does not actually allow to increase
-          # the '-n' soft limit to the hard limit, thus failing to run.
-          ulimit -n 65536
-          # allow to catch coredumps
-          ulimit -c unlimited
-          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Show VTest results
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
-            printf "::group::"
-            cat $folder/INFO
-            cat $folder/LOG
-            echo "::endgroup::"
-          done
-          exit 1
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
-      - name: Show coredumps
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          failed=false
-          shopt -s nullglob
-          for file in /tmp/core.*; do
-            failed=true
-            printf "::group::"
-            gdb -ex 'thread apply all bt full' ./haproxy $file
-            echo "::endgroup::"
-          done
-          if [ "$failed" = true ]; then
-            exit 1;
-          fi


@@ -0,0 +1,34 @@
+#
+# special purpose CI: test against OpenSSL built in "no-deprecated" mode
+# let us run those builds weekly
+#
+# for example, OpenWRT uses such OpenSSL builds (those builds are smaller)
+#
+#
+# some details might be found at NL: https://www.mail-archive.com/haproxy@formilux.org/msg35759.html
+# GH: https://github.com/haproxy/haproxy/issues/367
+
+name: openssl no-deprecated
+
+on:
+  schedule:
+    - cron: "0 0 * * 4"
+  workflow_dispatch:
+
+permissions:
+  contents: read
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install VTest
+        run: |
+          scripts/build-vtest.sh
+      - name: Compile HAProxy
+        run: |
+          make DEFINE="-DOPENSSL_API_COMPAT=0x10100000L -DOPENSSL_NO_DEPRECATED" -j3 CC=gcc ERR=1 TARGET=linux-glibc USE_OPENSSL=1
+      - name: Run VTest
+        run: |
+          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel


@@ -13,13 +13,13 @@ on:
 jobs:
   build:
     runs-on: ubuntu-24.04
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     permissions:
       contents: read
       packages: write
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Log in to the Container registry
         uses: docker/login-action@v3

@@ -35,7 +35,7 @@ jobs:
           context: https://github.com/haproxytech/haproxy-qns.git
           push: true
           build-args: |
-            SSLLIB=AWS-LC
+            SSLLIB: AWS-LC
           tags: ghcr.io/${{ github.repository }}:aws-lc
       - name: Cleanup registry

@@ -61,10 +61,10 @@
     name: ${{ matrix.suite.client }}
     runs-on: ubuntu-24.04
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Log in to the Container registry
         uses: docker/login-action@v3


@@ -13,13 +13,13 @@ on:
 jobs:
   build:
     runs-on: ubuntu-24.04
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     permissions:
       contents: read
       packages: write
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Log in to the Container registry
         uses: docker/login-action@v3

@@ -35,7 +35,7 @@ jobs:
           context: https://github.com/haproxytech/haproxy-qns.git
           push: true
           build-args: |
-            SSLLIB=LibreSSL
+            SSLLIB: LibreSSL
           tags: ghcr.io/${{ github.repository }}:libressl
       - name: Cleanup registry

@@ -59,10 +59,10 @@
     name: ${{ matrix.suite.client }}
     runs-on: ubuntu-24.04
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Log in to the Container registry
         uses: docker/login-action@v3


@@ -1,74 +0,0 @@
-#
-# weekly run against modern QuicTLS branch, i.e. https://github.com/quictls/quictls
-#
-name: QuicTLS
-
-on:
-  schedule:
-    - cron: "0 0 * * 4"
-  workflow_dispatch:
-
-permissions:
-  contents: read
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    steps:
-      - uses: actions/checkout@v5
-      - name: Install apt dependencies
-        run: |
-          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
-          sudo apt-get --no-install-recommends -y install socat gdb
-      - name: Install QuicTLS
-        run: env QUICTLS_VERSION=main QUICTLS_URL=https://github.com/quictls/quictls scripts/build-ssl.sh
-      - name: Compile HAProxy
-        run: |
-          make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
-            USE_QUIC=1 USE_OPENSSL=1 \
-            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
-            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
-            ARCH_FLAGS="-ggdb3 -fsanitize=address"
-          sudo make install
-      - name: Show HAProxy version
-        id: show-version
-        run: |
-          ldd $(which haproxy)
-          haproxy -vv
-          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-      - uses: ./.github/actions/setup-vtest
-      - name: Run VTest for HAProxy
-        id: vtest
-        run: |
-          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Show VTest results
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
-            printf "::group::"
-            cat $folder/INFO
-            cat $folder/LOG
-            echo "::endgroup::"
-          done
-          exit 1
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
-      - name: Show coredumps
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          failed=false
-          shopt -s nullglob
-          for file in /tmp/core.*; do
-            failed=true
-            printf "::group::"
-            gdb -ex 'thread apply all bt full' ./haproxy $file
-            echo "::endgroup::"
-          done
-          if [ "$failed" = true ]; then
-            exit 1;
-          fi


@@ -23,7 +23,7 @@ jobs:
     outputs:
       matrix: ${{ steps.set-matrix.outputs.matrix }}
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Generate Build Matrix
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

@@ -44,10 +44,16 @@ jobs:
       TMPDIR: /tmp
       OT_CPP_VERSION: 1.6.0
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
         with:
          fetch-depth: 100
+      - name: Setup coredumps
+        if: ${{ startsWith(matrix.os, 'ubuntu-') }}
+        run: |
+          sudo sysctl -w fs.suid_dumpable=1
+          sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
       #
       # Github Action cache key cannot contain comma, so we calculate it based on job name
       #

@@ -57,7 +63,7 @@ jobs:
           echo "key=$(echo ${{ matrix.name }} | sha256sum | awk '{print $1}')" >> $GITHUB_OUTPUT
       - name: Cache SSL libs
-        if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 'BORINGSSL=yes' && !contains(matrix.ssl, 'QUICTLS') }}
+        if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 'BORINGSSL=yes' && matrix.ssl != 'QUICTLS=yes' }}
         id: cache_ssl
         uses: actions/cache@v4
         with:

@@ -70,7 +76,7 @@ jobs:
         uses: actions/cache@v4
         with:
           path: '~/opt-ot/'
-          key: ${{ matrix.os }}-ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
+          key: ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
       - name: Install apt dependencies
         if: ${{ startsWith(matrix.os, 'ubuntu-') }}
         run: |

@@ -80,14 +86,15 @@ jobs:
           ${{ contains(matrix.FLAGS, 'USE_PCRE2=1') && 'libpcre2-dev' || '' }} \
           ${{ contains(matrix.ssl, 'BORINGSSL=yes') && 'ninja-build' || '' }} \
           socat \
-          gdb \
-          jose
+          gdb
       - name: Install brew dependencies
         if: ${{ startsWith(matrix.os, 'macos-') }}
         run: |
           brew install socat
           brew install lua
-      - uses: ./.github/actions/setup-vtest
+      - name: Install VTest
+        run: |
+          scripts/build-vtest.sh
       - name: Install SSL ${{ matrix.ssl }}
         if: ${{ matrix.ssl && matrix.ssl != 'stock' && steps.cache_ssl.outputs.cache-hit != 'true' }}
         run: env ${{ matrix.ssl }} scripts/build-ssl.sh

@@ -113,16 +120,7 @@ jobs:
           DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
           ${{ join(matrix.FLAGS, ' ') }} \
           ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
-          sudo make install-bin
-      - name: Compile admin/halog/halog
-        run: |
-          make -j$(nproc) admin/halog/halog \
-            ERR=1 \
-            TARGET=${{ matrix.TARGET }} \
-            CC=${{ matrix.CC }} \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
-            ${{ join(matrix.FLAGS, ' ') }} \
-            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
+          sudo make install
       - name: Show HAProxy version
         id: show-version
         run: |

@@ -137,9 +135,16 @@ jobs:
           echo "::endgroup::"
           haproxy -vv
           echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
+      - name: Install problem matcher for VTest
+        # This allows one to more easily see which tests fail.
+        run: echo "::add-matcher::.github/vtest.json"
       - name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
         id: vtest
         run: |
+          # This is required for macOS which does not actually allow to increase
+          # the '-n' soft limit to the hard limit, thus failing to run.
+          ulimit -n 65536
+          ulimit -c unlimited
           make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
       - name: Show VTest results
         if: ${{ failure() && steps.vtest.outcome == 'failure' }}
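Side note on the cache-key step in the hunk above: a GitHub Actions cache key cannot contain a comma, so the workflow hashes the job name with sha256sum. A quick Python sketch to reproduce such a key locally, under the assumption that the name is piped through echo (which appends a trailing newline) before hashing; the sample job name is illustrative only:

    import hashlib

    def cache_key(job_name: str) -> str:
        # Rough equivalent of: echo "$job_name" | sha256sum | awk '{print $1}'
        return hashlib.sha256((job_name + "\n").encode()).hexdigest()

    print(cache_key("ubuntu-latest, gcc, ASAN, all features"))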


@@ -35,7 +35,7 @@ jobs:
       - USE_THREAD=1
       - USE_ZLIB=1
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
      - uses: msys2/setup-msys2@v2
        with:
          install: >-


@@ -11,13 +11,15 @@ permissions:
 jobs:
   test:
     runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
+      - name: Install VTest
+        run: |
+          scripts/build-vtest.sh
       - name: Install apt dependencies
         run: |
           sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
-          sudo apt-get --no-install-recommends -y install socat gdb jose
+          sudo apt-get --no-install-recommends -y install socat gdb
       - name: Install WolfSSL
         run: env WOLFSSL_VERSION=git-master WOLFSSL_DEBUG=1 scripts/build-ssl.sh
       - name: Compile HAProxy

@@ -25,7 +27,7 @@ jobs:
           make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
             USE_OPENSSL_WOLFSSL=1 USE_QUIC=1 \
             SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
+            DEBUG="-DDEBUG_POOL_INTEGRITY" \
             ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
             ARCH_FLAGS="-ggdb3 -fsanitize=address"
           sudo make install

@@ -35,15 +37,17 @@ jobs:
           ldd $(which haproxy)
           haproxy -vv
           echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-      - uses: ./.github/actions/setup-vtest
+      - name: Install problem matcher for VTest
+        run: echo "::add-matcher::.github/vtest.json"
       - name: Run VTest for HAProxy
         id: vtest
         run: |
+          # This is required for macOS which does not actually allow to increase
+          # the '-n' soft limit to the hard limit, thus failing to run.
+          ulimit -n 65536
+          # allow to catch coredumps
+          ulimit -c unlimited
           make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
       - name: Show VTest results
         if: ${{ failure() && steps.vtest.outcome == 'failure' }}
         run: |

@@ -68,13 +72,3 @@ jobs:
           if [ "$failed" = true ]; then
             exit 1;
           fi
-      - name: Show Unit-Tests results
-        if: ${{ failure() && steps.unittests.outcome == 'failure' }}
-        run: |
-          for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
-            printf "::group::"
-            cat $result
-            echo "::endgroup::"
-          done
-          exit 1


@@ -171,17 +171,7 @@ feedback for developers:
   as the previous releases that had 6 months to stabilize. In terms of
   stability it really means that the point zero version already accumulated
   6 months of fixes and that it is much safer to use even just after it is
-  released. There is one exception though, features marked as "experimental"
-  are not guaranteed to be maintained beyond the release of the next LTS
-  branch. The rationale here is that the experimental status is made to
-  expose an early preview of a feature, that is often incomplete, not always
-  in its definitive form regarding configuration, and for which developers
-  are seeking feedback from the users. It is even possible that changes will
-  be brought within the stable branch and it may happen that the feature
-  breaks. It is not imaginable to always be able to backport bug fixes too
-  far in this context since the code and configuration may change quite a
-  bit. Users who want to try experimental features are expected to upgrade
-  quickly to benefit from the improvements made to that feature.
+  released.

 - for developers, given that the odd versions are solely used by highly
   skilled users, it's easier to get advanced traces and captures, and there

CHANGELOG

File diff suppressed because it is too large


@@ -1010,7 +1010,7 @@ you notice you're already practising some of them:
   - continue to send pull requests after having been explained why they are not
     welcome.

-  - give wrong advice to people asking for help, or sending them patches to
+  - give wrong advices to people asking for help, or sending them patches to
     try which make no sense, waste their time, and give them a bad impression
     of the people working on the project.

INSTALL

@@ -111,22 +111,20 @@ HAProxy requires a working GCC or Clang toolchain and GNU make :
    may want to retry with "gmake" which is the name commonly used for GNU make
    on BSD systems.

-  - GCC >= 4.7 (up to 15 tested). Older versions are no longer supported due to
-    the latest mt_list update which only uses c11-like atomics. Newer versions
-    may sometimes break due to compiler regressions or behaviour changes. The
-    version shipped with your operating system is very likely to work with no
-    trouble. Clang >= 3.0 is also known to work as an alternative solution, and
-    versions up to 19 were successfully tested. Recent versions may emit a bit
-    more warnings that are worth reporting as they may reveal real bugs. TCC
-    (https://repo.or.cz/tinycc.git) is also usable for developers but will not
-    support threading and was found at least once to produce bad code in some
-    rare corner cases (since fixed). But it builds extremely quickly (typically
-    half a second for the whole project) and is very convenient to run quick
-    tests during API changes or code refactoring.
+  - GCC >= 4.2 (up to 14 tested). Older versions can be made to work with a
+    few minor adaptations if really needed. Newer versions may sometimes break
+    due to compiler regressions or behaviour changes. The version shipped with
+    your operating system is very likely to work with no trouble. Clang >= 3.0
+    is also known to work as an alternative solution. Recent versions may emit
+    a bit more warnings that are worth reporting as they may reveal real bugs.
+    TCC (https://repo.or.cz/tinycc.git) is also usable for developers but will
+    not support threading and was found at least once to produce bad code in
+    some rare corner cases (since fixed). But it builds extremely quickly
+    (typically half a second for the whole project) and is very convenient to
+    run quick tests during API changes or code refactoring.

   - GNU ld (binutils package), with no particular version. Other linkers might
-    work but were not tested. The default one from your operating system will
-    normally work.
+    work but were not tested.

 On debian or Ubuntu systems and their derivatives, you may get all these tools
 at once by issuing the two following commands :

@@ -237,7 +235,7 @@ to forcefully enable it using "USE_LIBCRYPT=1".
 -----------------
 For SSL/TLS, it is necessary to use a cryptography library. HAProxy currently
 supports the OpenSSL library, and is known to build and work with branches
-1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.6. It is recommended to use
+1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.4. It is recommended to use
 at least OpenSSL 1.1.1 to have support for all SSL keywords and configuration
 in HAProxy. OpenSSL follows a long-term support cycle similar to HAProxy's,
 and each of the branches above receives its own fixes, without forcing you to

@@ -259,15 +257,11 @@ reported to work as well. While there are some efforts from the community to
 ensure they work well, OpenSSL remains the primary target and this means that
 in case of conflicting choices, OpenSSL support will be favored over other
 options. Note that QUIC is not fully supported when haproxy is built with
-OpenSSL < 3.5.2 version. In this case, QUICTLS or AWS-LC are the preferred
-alternatives. As of writing this, the QuicTLS project follows OpenSSL very
-closely and provides update simultaneously, but being a volunteer-driven
-project, its long-term future does not look certain enough to convince
-operating systems to package it, so it needs to be build locally. Recent
-versions of AWS-LC (>= 1.22 and the FIPS branches) are pretty complete and
-generally more performant than other OpenSSL derivatives, but may behave
-slightly differently, particularly when dealing with outdated setups. See
-the section about QUIC in this document.
+OpenSSL. In this case, QUICTLS is the preferred alternative. As of writing
+this, the QuicTLS project follows OpenSSL very closely and provides update
+simultaneously, but being a volunteer-driven project, its long-term future does
+not look certain enough to convince operating systems to package it, so it
+needs to be build locally. See the section about QUIC in this document.

 A fifth option is wolfSSL (https://github.com/wolfSSL/wolfssl). It is the only
 supported alternative stack not based on OpenSSL, yet which implements almost

@@ -494,8 +488,8 @@ target. Common issues may include:
     other supported compatible library.

   - many "dereferencing pointer 'sa.985' does break strict-aliasing rules"
-    => these warnings happen on old compilers (typically gcc before 7.x),
-    and may safely be ignored; newer ones are better on these.
+    => these warnings happen on old compilers (typically gcc-4.4), and may
+    safely be ignored; newer ones are better on these.

@@ -504,11 +498,10 @@ QUIC is the new transport layer protocol and is required for HTTP/3. This
 protocol stack is currently supported as an experimental feature in haproxy on
 the frontend side. In order to enable it, use "USE_QUIC=1 USE_OPENSSL=1".

-Note that QUIC is not always fully supported by the OpenSSL library depending on
-its version. Indeed QUIC 0-RTT cannot be supported by OpenSSL for versions before
-3.5 contrary to others libraries with full QUIC support. The preferred option is
-to use QUICTLS. This is a fork of OpenSSL with a QUIC-compatible API. Its
-repository is available at this location:
+Note that QUIC is not fully supported by the OpenSSL library. Indeed QUIC 0-RTT
+cannot be supported by OpenSSL contrary to others libraries with full QUIC
+support. The preferred option is to use QUICTLS. This is a fork of OpenSSL with
+a QUIC-compatible API. Its repository is available at this location:

 https://github.com/quictls/openssl

@@ -536,18 +529,14 @@ way assuming that wolfSSL was installed in /opt/wolfssl-5.6.0 as shown in 4.5:
     SSL_INC=/opt/wolfssl-5.6.0/include SSL_LIB=/opt/wolfssl-5.6.0/lib
     LDFLAGS="-Wl,-rpath,/opt/wolfssl-5.6.0/lib"

-As last resort, haproxy may be compiled against OpenSSL as follows from 3.5
-version with 0-RTT support:
-
-    $ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1
-
-or as follows for all OpenSSL versions but without O-RTT support:
+As last resort, haproxy may be compiled against OpenSSL as follows:

     $ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1 USE_QUIC_OPENSSL_COMPAT=1

-In addition to this requirements, the QUIC listener bindings must be explicitly
-enabled with a specific QUIC tuning parameter. (see "limited-quic" global
-parameter of haproxy Configuration Manual).
+Note that QUIC 0-RTT is not supported by haproxy QUIC stack when built against
+OpenSSL. In addition to this compilation requirements, the QUIC listener
+bindings must be explicitly enabled with a specific QUIC tuning parameter.
+(see "limited-quic" global parameter of haproxy Configuration Manual).

@@ -563,9 +552,9 @@ It goes into more details with the main options.
 To build haproxy, you have to choose your target OS amongst the following ones
 and assign it to the TARGET variable :

-  - linux-glibc        for Linux kernel 4.17 and above
+  - linux-glibc        for Linux kernel 2.6.28 and above
   - linux-glibc-legacy for Linux kernel 2.6.28 and above without new features
-  - linux-musl         for Linux kernel 4.17 and above with musl libc
+  - linux-musl         for Linux kernel 2.6.28 and above with musl libc
   - solaris            for Solaris 10 and above
   - freebsd            for FreeBSD 10 and above
   - dragonfly          for DragonFlyBSD 4.3 and above

@@ -765,8 +754,8 @@ forced to produce final binaries, and must not be used during bisect sessions,
 as it will often lead to the wrong commit.

 Examples:
-  # silence strict-aliasing warnings with old gcc-5.5:
-  $ make -j$(nproc) TARGET=linux-glibc CC=gcc-55 CFLAGS=-fno-strict-aliasing
+  # silence strict-aliasing warnings with old gcc-4.4:
+  $ make -j$(nproc) TARGET=linux-glibc CC=gcc-44 CFLAGS=-fno-strict-aliasing

   # disable all warning options:
   $ make -j$(nproc) TARGET=linux-glibc CC=mycc WARN_CFLAGS= NOWARN_CFLAGS=


@@ -35,7 +35,6 @@
 # USE_OPENSSL : enable use of OpenSSL. Recommended, but see below.
 # USE_OPENSSL_AWSLC : enable use of AWS-LC
 # USE_OPENSSL_WOLFSSL : enable use of wolfSSL with the OpenSSL API
-# USE_ECH : enable use of ECH with the OpenSSL API
 # USE_QUIC : enable use of QUIC with the quictls API (quictls, libressl, boringssl)
 # USE_QUIC_OPENSSL_COMPAT : enable use of QUIC with the standard openssl API (limited features)
 # USE_ENGINE : enable use of OpenSSL Engine.

@@ -63,8 +62,6 @@
 # USE_MEMORY_PROFILING : enable the memory profiler. Linux-glibc only.
 # USE_LIBATOMIC : force to link with/without libatomic. Automatic.
 # USE_PTHREAD_EMULATION : replace pthread's rwlocks with ours
-# USE_SHM_OPEN : use shm_open() for features that can make use of shared memory
-# USE_KTLS : use kTLS.(requires at least Linux 4.17).
 #
 # Options can be forced by specifying "USE_xxx=1" or can be disabled by using
 # "USE_xxx=" (empty string). The list of enabled and disabled options for a

@@ -214,8 +211,7 @@ UNIT_TEST_SCRIPT=./scripts/run-unittests.sh
 # undefined behavior to silently produce invalid code. For this reason we have
 # to use -fwrapv or -fno-strict-overflow to guarantee the intended behavior.
 # It is preferable not to change this option in order to avoid breakage.
-STD_CFLAGS := $(call cc-opt-alt,-fwrapv,-fno-strict-overflow) \
-              $(call cc-opt,-fvect-cost-model=very-cheap)
+STD_CFLAGS := $(call cc-opt-alt,-fwrapv,-fno-strict-overflow)

 #### Compiler-specific flags to enable certain classes of warnings.
 # Some are hard-coded, others are enabled only if supported.

@@ -265,9 +261,9 @@ endif
 # without appearing here. Currently defined DEBUG macros include DEBUG_FULL,
 # DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_FD, DEBUG_POOL_INTEGRITY,
 # DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_ACTION=[0-3], DEBUG_HPACK,
-# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD=0-2, DEBUG_STRICT, DEBUG_DEV,
+# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD, DEBUG_STRICT, DEBUG_DEV,
 # DEBUG_TASK, DEBUG_MEMORY_POOLS, DEBUG_POOL_TRACING, DEBUG_QPACK, DEBUG_LIST,
-# DEBUG_COUNTERS=[0-2], DEBUG_STRESS, DEBUG_UNIT.
+# DEBUG_GLITCHES, DEBUG_STRESS, DEBUG_UNIT.
 DEBUG =

 #### Trace options

@@ -342,16 +338,14 @@ use_opts = USE_EPOLL USE_KQUEUE USE_NETFILTER USE_POLL \
            USE_TPROXY USE_LINUX_TPROXY USE_LINUX_CAP \
            USE_LINUX_SPLICE USE_LIBCRYPT USE_CRYPT_H USE_ENGINE \
            USE_GETADDRINFO USE_OPENSSL USE_OPENSSL_WOLFSSL USE_OPENSSL_AWSLC \
USE_ECH \
USE_SSL USE_LUA USE_ACCEPT4 USE_CLOSEFROM USE_ZLIB USE_SLZ \ USE_SSL USE_LUA USE_ACCEPT4 USE_CLOSEFROM USE_ZLIB USE_SLZ \
USE_CPU_AFFINITY USE_TFO USE_NS USE_DL USE_RT USE_LIBATOMIC \ USE_CPU_AFFINITY USE_TFO USE_NS USE_DL USE_RT USE_LIBATOMIC \
USE_MATH USE_DEVICEATLAS USE_51DEGREES \ USE_MATH USE_DEVICEATLAS USE_51DEGREES \
USE_WURFL USE_OBSOLETE_LINKER USE_PRCTL USE_PROCCTL \ USE_WURFL USE_OBSOLETE_LINKER USE_PRCTL USE_PROCCTL \
USE_THREAD_DUMP USE_EVPORTS USE_OT USE_QUIC USE_PROMEX \ USE_THREAD_DUMP USE_EVPORTS USE_OT USE_QUIC USE_PROMEX \
USE_MEMORY_PROFILING USE_SHM_OPEN \ USE_MEMORY_PROFILING \
USE_STATIC_PCRE USE_STATIC_PCRE2 \ USE_STATIC_PCRE USE_STATIC_PCRE2 \
USE_PCRE USE_PCRE_JIT USE_PCRE2 USE_PCRE2_JIT \ USE_PCRE USE_PCRE_JIT USE_PCRE2 USE_PCRE2_JIT USE_QUIC_OPENSSL_COMPAT
USE_QUIC_OPENSSL_COMPAT USE_KTLS
# preset all variables for all supported build options among use_opts # preset all variables for all supported build options among use_opts
$(reset_opts_vars) $(reset_opts_vars)
@ -382,13 +376,13 @@ ifeq ($(TARGET),haiku)
set_target_defaults = $(call default_opts,USE_POLL USE_TPROXY USE_OBSOLETE_LINKER) set_target_defaults = $(call default_opts,USE_POLL USE_TPROXY USE_OBSOLETE_LINKER)
endif endif
# For linux >= 4.17 and glibc # For linux >= 2.6.28 and glibc
ifeq ($(TARGET),linux-glibc) ifeq ($(TARGET),linux-glibc)
set_target_defaults = $(call default_opts, \ set_target_defaults = $(call default_opts, \
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \ USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \ USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \ USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_KTLS) USE_GETADDRINFO USE_BACKTRACE)
INSTALL = install -v INSTALL = install -v
endif endif
@ -401,13 +395,13 @@ ifeq ($(TARGET),linux-glibc-legacy)
INSTALL = install -v INSTALL = install -v
endif endif
# For linux >= 4.17 and musl # For linux >= 2.6.28 and musl
ifeq ($(TARGET),linux-musl) ifeq ($(TARGET),linux-musl)
set_target_defaults = $(call default_opts, \ set_target_defaults = $(call default_opts, \
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \ USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \ USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \ USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_KTLS) USE_GETADDRINFO)
INSTALL = install -v INSTALL = install -v
endif endif
@ -598,16 +592,10 @@ endif
ifneq ($(USE_BACKTRACE:0=),) ifneq ($(USE_BACKTRACE:0=),)
BACKTRACE_LDFLAGS = -Wl,$(if $(EXPORT_SYMBOL),$(EXPORT_SYMBOL),--export-dynamic) BACKTRACE_LDFLAGS = -Wl,$(if $(EXPORT_SYMBOL),$(EXPORT_SYMBOL),--export-dynamic)
BACKTRACE_CFLAGS = -fno-omit-frame-pointer
endif
ifneq ($(USE_MEMORY_PROFILING:0=),)
MEMORY_PROFILING_CFLAGS = -fno-optimize-sibling-calls
endif endif
ifneq ($(USE_CPU_AFFINITY:0=),) ifneq ($(USE_CPU_AFFINITY:0=),)
OPTIONS_OBJS += src/cpuset.o OPTIONS_OBJS += src/cpuset.o
OPTIONS_OBJS += src/cpu_topo.o
endif endif
# OpenSSL is packaged in various forms and with various dependencies. # OpenSSL is packaged in various forms and with various dependencies.
@ -640,10 +628,9 @@ ifneq ($(USE_OPENSSL:0=),)
SSL_LDFLAGS := $(if $(SSL_LIB),-L$(SSL_LIB)) -lssl -lcrypto SSL_LDFLAGS := $(if $(SSL_LIB),-L$(SSL_LIB)) -lssl -lcrypto
endif endif
USE_SSL := $(if $(USE_SSL:0=),$(USE_SSL:0=),implicit) USE_SSL := $(if $(USE_SSL:0=),$(USE_SSL:0=),implicit)
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o \ OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o \
src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o \ src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o \
src/ssl_utils.o src/jwt.o src/ssl_clienthello.o src/jws.o src/acme.o \ src/ssl_utils.o src/jwt.o src/ssl_clienthello.o src/jws.o
src/ssl_trace.o src/jwe.o
endif endif
ifneq ($(USE_ENGINE:0=),) ifneq ($(USE_ENGINE:0=),)
@ -670,7 +657,7 @@ OPTIONS_OBJS += src/mux_quic.o src/h3.o src/quic_rx.o src/quic_tx.o \
src/quic_cc_nocc.o src/quic_cc.o src/quic_pacing.o \ src/quic_cc_nocc.o src/quic_cc.o src/quic_pacing.o \
src/h3_stats.o src/quic_stats.o src/qpack-enc.o \ src/h3_stats.o src/quic_stats.o src/qpack-enc.o \
src/qpack-tbl.o src/quic_cc_drs.o src/quic_fctl.o \ src/qpack-tbl.o src/quic_cc_drs.o src/quic_fctl.o \
src/quic_enc.o src/cbuf.o src/quic_enc.o
endif endif
ifneq ($(USE_QUIC_OPENSSL_COMPAT:0=),) ifneq ($(USE_QUIC_OPENSSL_COMPAT:0=),)
@ -969,15 +956,15 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
src/cache.o src/stconn.o src/http_htx.o src/debug.o \ src/cache.o src/stconn.o src/http_htx.o src/debug.o \
src/check.o src/stats-html.o src/haproxy.o src/listener.o \ src/check.o src/stats-html.o src/haproxy.o src/listener.o \
src/applet.o src/pattern.o src/cfgparse-listen.o \ src/applet.o src/pattern.o src/cfgparse-listen.o \
src/flt_spoe.o src/cebis_tree.o src/http_ext.o \ src/flt_spoe.o src/cebuis_tree.o src/http_ext.o \
src/http_act.o src/http_fetch.o src/cebs_tree.o \ src/http_act.o src/http_fetch.o src/cebus_tree.o \
src/cebib_tree.o src/http_client.o src/dns.o \ src/cebuib_tree.o src/http_client.o src/dns.o \
src/cebb_tree.o src/vars.o src/event_hdl.o src/tcp_rules.o \ src/cebub_tree.o src/vars.o src/event_hdl.o src/tcp_rules.o \
src/trace.o src/stats-proxy.o src/pool.o src/stats.o \ src/trace.o src/stats-proxy.o src/pool.o src/stats.o \
src/cfgparse-global.o src/filters.o src/mux_pt.o \ src/cfgparse-global.o src/filters.o src/mux_pt.o \
src/flt_http_comp.o src/sock.o src/h1.o src/sink.o \ src/flt_http_comp.o src/sock.o src/h1.o src/sink.o \
src/ceba_tree.o src/session.o src/payload.o src/htx.o \ src/cebua_tree.o src/session.o src/payload.o src/htx.o \
src/cebl_tree.o src/ceb32_tree.o src/ceb64_tree.o \ src/cebul_tree.o src/cebu32_tree.o src/cebu64_tree.o \
src/server_state.o src/proto_rhttp.o src/flt_trace.o src/fd.o \ src/server_state.o src/proto_rhttp.o src/flt_trace.o src/fd.o \
src/task.o src/map.o src/fcgi-app.o src/h2.o src/mworker.o \ src/task.o src/map.o src/fcgi-app.o src/h2.o src/mworker.o \
src/tcp_sample.o src/mjson.o src/h1_htx.o src/tcp_act.o \ src/tcp_sample.o src/mjson.o src/h1_htx.o src/tcp_act.o \
@ -992,9 +979,9 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
src/cfgcond.o src/proto_udp.o src/lb_fwlc.o src/ebmbtree.o \ src/cfgcond.o src/proto_udp.o src/lb_fwlc.o src/ebmbtree.o \
src/proto_uxdg.o src/cfgdiag.o src/sock_unix.o src/sha1.o \ src/proto_uxdg.o src/cfgdiag.o src/sock_unix.o src/sha1.o \
src/lb_fas.o src/clock.o src/sock_inet.o src/ev_select.o \ src/lb_fas.o src/clock.o src/sock_inet.o src/ev_select.o \
src/lb_map.o src/shctx.o src/hpack-dec.o src/net_helper.o \ src/lb_map.o src/shctx.o src/mworker-prog.o src/hpack-dec.o \
src/arg.o src/signal.o src/fix.o src/dynbuf.o src/guid.o \ src/arg.o src/signal.o src/fix.o src/dynbuf.o src/guid.o \
src/cfgparse-tcp.o src/lb_ss.o src/chunk.o src/counters.o \ src/cfgparse-tcp.o src/lb_ss.o src/chunk.o \
src/cfgparse-unix.o src/regex.o src/fcgi.o src/uri_auth.o \ src/cfgparse-unix.o src/regex.o src/fcgi.o src/uri_auth.o \
src/eb64tree.o src/eb32tree.o src/eb32sctree.o src/lru.o \ src/eb64tree.o src/eb32tree.o src/eb32sctree.o src/lru.o \
src/limits.o src/ebimtree.o src/wdt.o src/hpack-tbl.o \ src/limits.o src/ebimtree.o src/wdt.o src/hpack-tbl.o \
@ -1002,8 +989,7 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
src/ebsttree.o src/freq_ctr.o src/systemd.o src/init.o \ src/ebsttree.o src/freq_ctr.o src/systemd.o src/init.o \
src/http_acl.o src/dict.o src/dgram.o src/pipe.o \ src/http_acl.o src/dict.o src/dgram.o src/pipe.o \
src/hpack-huff.o src/hpack-enc.o src/ebtree.o src/hash.o \ src/hpack-huff.o src/hpack-enc.o src/ebtree.o src/hash.o \
src/httpclient_cli.o src/version.o src/ncbmbuf.o src/ech.o \ src/version.o
src/cfgparse-peers.o
ifneq ($(TRACE),) ifneq ($(TRACE),)
OBJS += src/calltrace.o OBJS += src/calltrace.o
@ -1124,11 +1110,6 @@ install-doc:
$(INSTALL) -m 644 doc/$$x.txt "$(DESTDIR)$(DOCDIR)" ; \ $(INSTALL) -m 644 doc/$$x.txt "$(DESTDIR)$(DOCDIR)" ; \
done done
install-admin:
$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
$(Q)$(INSTALL) admin/cli/haproxy-dump-certs "$(DESTDIR)$(SBINDIR)"
$(Q)$(INSTALL) admin/cli/haproxy-reload "$(DESTDIR)$(SBINDIR)"
install-bin: install-bin:
$(Q)for i in haproxy $(EXTRA); do \ $(Q)for i in haproxy $(EXTRA); do \
if ! [ -e "$$i" ]; then \ if ! [ -e "$$i" ]; then \
@ -1139,7 +1120,7 @@ install-bin:
$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)" $(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
$(Q)$(INSTALL) haproxy $(EXTRA) "$(DESTDIR)$(SBINDIR)" $(Q)$(INSTALL) haproxy $(EXTRA) "$(DESTDIR)$(SBINDIR)"
install: install-bin install-admin install-man install-doc install: install-bin install-man install-doc
uninstall: uninstall:
$(Q)rm -f "$(DESTDIR)$(MANDIR)"/man1/haproxy.1 $(Q)rm -f "$(DESTDIR)$(MANDIR)"/man1/haproxy.1
@ -1296,8 +1277,6 @@ unit-tests:
# options for all commits within RANGE. RANGE may be either a git range # options for all commits within RANGE. RANGE may be either a git range
# such as ref1..ref2 or a single commit, in which case all commits from # such as ref1..ref2 or a single commit, in which case all commits from
# the master branch to this one will be tested. # the master branch to this one will be tested.
# Will execute TEST_CMD for each commit if defined, and will stop in case of
# failure.
range: range:
$(Q)[ -d .git/. ] || { echo "## Fatal: \"make $@\" may only be used inside a Git repository."; exit 1; } $(Q)[ -d .git/. ] || { echo "## Fatal: \"make $@\" may only be used inside a Git repository."; exit 1; }
@ -1323,7 +1302,6 @@ range:
echo "[ $$index/$$count ] $$commit #############################"; \ echo "[ $$index/$$count ] $$commit #############################"; \
git checkout -q $$commit || die 1; \ git checkout -q $$commit || die 1; \
$(MAKE) all || die 1; \ $(MAKE) all || die 1; \
[ -z "$(TEST_CMD)" ] || $(TEST_CMD) || die 1; \
index=$$((index + 1)); \ index=$$((index + 1)); \
done; \ done; \
echo;echo "Done! $${count} commit(s) built successfully for RANGE $${RANGE}" ; \ echo;echo "Done! $${count} commit(s) built successfully for RANGE $${RANGE}" ; \
View file
@ -1,2 +1,2 @@
$Format:%ci$ $Format:%ci$
2026/01/22 2025/03/07
View file
@ -1 +1 @@
3.4-dev3 3.2-dev7
View file
@ -5,8 +5,7 @@ CXX := c++
CXXLIB := -lstdc++ CXXLIB := -lstdc++
ifeq ($(DEVICEATLAS_SRC),) ifeq ($(DEVICEATLAS_SRC),)
OPTIONS_CFLAGS += -I$(DEVICEATLAS_INC) OPTIONS_LDFLAGS += -lda
OPTIONS_LDFLAGS += -Wl,-rpath,$(DEVICEATLAS_LIB) -L$(DEVICEATLAS_LIB) -lda
else else
DEVICEATLAS_INC = $(DEVICEATLAS_SRC) DEVICEATLAS_INC = $(DEVICEATLAS_SRC)
DEVICEATLAS_LIB = $(DEVICEATLAS_SRC) DEVICEATLAS_LIB = $(DEVICEATLAS_SRC)
View file
@ -212,7 +212,7 @@ da_status_t da_atlas_compile(void *ctx, da_read_fn readfn, da_setpos_fn setposfn
* da_getpropid on the atlas, and if generated by the search, the ID will be consistent across * da_getpropid on the atlas, and if generated by the search, the ID will be consistent across
* different calls to search. * different calls to search.
* Properties added by a search that are neither in the compiled atlas, nor in the extra_props list * Properties added by a search that are neither in the compiled atlas, nor in the extra_props list
* are assigned an ID within the context that is not transferable through different search results * are assigned an ID within the context that is not transferable through different search results
* within the same atlas. * within the same atlas.
* @param atlas Atlas instance * @param atlas Atlas instance
* @param extra_props properties * @param extra_props properties
View file
@ -94,7 +94,7 @@ name must be preceded by a minus character ('-'). Here are examples:
* Add section description as label for all metrics * Add section description as label for all metrics
It is possible to set a description in global and proxy sections, via the It is possible to set a description in global and proxy sections, via the
"description" directive. The global description is exposed if it is define via "description" directive. The global descrption is exposed if it is define via
the "haproxy_process_description" metric. But the descriptions provided in proxy the "haproxy_process_description" metric. But the descriptions provided in proxy
sections are not dumped. However, it is possible to add it as a label for all sections are not dumped. However, it is possible to add it as a label for all
metrics of the corresponding section, including the global one. To do so, metrics of the corresponding section, including the global one. To do so,
@ -389,9 +389,6 @@ listed below. Metrics from extra counters are not listed.
| haproxy_server_max_connect_time_seconds | | haproxy_server_max_connect_time_seconds |
| haproxy_server_max_response_time_seconds | | haproxy_server_max_response_time_seconds |
| haproxy_server_max_total_time_seconds | | haproxy_server_max_total_time_seconds |
| haproxy_server_agent_status |
| haproxy_server_agent_code |
| haproxy_server_agent_duration_seconds |
| haproxy_server_internal_errors_total | | haproxy_server_internal_errors_total |
| haproxy_server_unsafe_idle_connections_current | | haproxy_server_unsafe_idle_connections_current |
| haproxy_server_safe_idle_connections_current | | haproxy_server_safe_idle_connections_current |
View file
@ -32,11 +32,11 @@
/* Prometheus exporter flags (ctx->flags) */ /* Prometheus exporter flags (ctx->flags) */
#define PROMEX_FL_METRIC_HDR 0x00000001 #define PROMEX_FL_METRIC_HDR 0x00000001
#define PROMEX_FL_BODYLESS_RESP 0x00000002 #define PROMEX_FL_INFO_METRIC 0x00000002
/* unused: 0x00000004 */ #define PROMEX_FL_FRONT_METRIC 0x00000004
/* unused: 0x00000008 */ #define PROMEX_FL_BACK_METRIC 0x00000008
/* unused: 0x00000010 */ #define PROMEX_FL_SRV_METRIC 0x00000010
/* unused: 0x00000020 */ #define PROMEX_FL_LI_METRIC 0x00000020
#define PROMEX_FL_MODULE_METRIC 0x00000040 #define PROMEX_FL_MODULE_METRIC 0x00000040
#define PROMEX_FL_SCOPE_GLOBAL 0x00000080 #define PROMEX_FL_SCOPE_GLOBAL 0x00000080
#define PROMEX_FL_SCOPE_FRONT 0x00000100 #define PROMEX_FL_SCOPE_FRONT 0x00000100
File diff suppressed because it is too large
View file
@ -1,235 +0,0 @@
#!/bin/bash
#
# Dump certificates from the HAProxy stats or master socket to the filesystem
# Experimental script
#
set -e
export BASEPATH=${BASEPATH:-/etc/haproxy}/
export SOCKET=${SOCKET:-/var/run/haproxy-master.sock}
export DRY_RUN=0
export DEBUG=
export VERBOSE=
export M="@1 "
export TMP
vecho() {
[ -n "$VERBOSE" ] && echo "$@"
return 0
}
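# read_certificate <name>: query "show ssl cert <name>" on the socket and
# export NAME, CRT_FILENAME and KEY_FILENAME (made absolute with BASEPATH)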
read_certificate() {
name=$1
crt_filename=
key_filename=
OFS=$IFS
IFS=":"
while read -r key value; do
case "$key" in
"Crt filename")
crt_filename="${value# }"
key_filename="${value# }"
;;
"Key filename")
key_filename="${value# }"
;;
esac
done < <(echo "${M}show ssl cert ${name}" | socat "${SOCKET}" -)
IFS=$OFS
if [ -z "$crt_filename" ] || [ -z "$key_filename" ]; then
return 1
fi
# handle fields without a crt-base/key-base
[ "${crt_filename:0:1}" != "/" ] && crt_filename="${BASEPATH}${crt_filename}"
[ "${key_filename:0:1}" != "/" ] && key_filename="${BASEPATH}${key_filename}"
vecho "name:$name"
vecho "crt:$crt_filename"
vecho "key:$key_filename"
export NAME="$name"
export CRT_FILENAME="$crt_filename"
export KEY_FILENAME="$key_filename"
return 0
}
cmp_certkey() {
prev=$1
new=$2
if [ ! -f "$prev" ]; then
return 1;
fi
if ! cmp -s <(openssl x509 -in "$prev" -noout -fingerprint -sha256) <(openssl x509 -in "$new" -noout -fingerprint -sha256); then
return 1
fi
return 0
}
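# dump_certificate <name> <prev_crt> <prev_key>: dump the cert and key from
# the socket to temporary files, verify that they match, then rotate the
# previous files to *.old.<timestamp> and move the new ones into place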
dump_certificate() {
name=$1
prev_crt=$2
prev_key=$3
r="tmp.${RANDOM}"
d="old.$(date +%s)"
new_crt="$TMP/$(basename "$prev_crt").${r}"
new_key="$TMP/$(basename "$prev_key").${r}"
if ! touch "${new_crt}" || ! touch "${new_key}"; then
echo "[ALERT] ($$) : can't dump \"$name\", can't create tmp files" >&2
return 1
fi
echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl pkey >> "${new_key}"
# use crl2pkcs7 as a way to dump multiple x509, storeutl could be used in modern versions of openssl
echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl crl2pkcs7 -nocrl -certfile /dev/stdin | openssl pkcs7 -print_certs >> "${new_crt}"
if ! cmp -s <(openssl x509 -in "${new_crt}" -pubkey -noout) <(openssl pkey -in "${new_key}" -pubout); then
echo "[ALERT] ($$) : Private key \"${new_key}\" and public key \"${new_crt}\" don't match" >&2
return 1
fi
if cmp_certkey "${prev_crt}" "${new_crt}"; then
echo "[NOTICE] ($$) : ${crt_filename} is already up to date" >&2
return 0
fi
# dry run will just return before trying to move the files
if [ "${DRY_RUN}" != "0" ]; then
return 0
fi
# move the current certificates to ".old.timestamp"
if [ -f "${prev_crt}" ] && [ -f "${prev_key}" ]; then
mv "${prev_crt}" "${prev_crt}.${d}"
[ "${prev_crt}" != "${prev_key}" ] && mv "${prev_key}" "${prev_key}.${d}"
fi
# move the new certificates to old place
mv "${new_crt}" "${prev_crt}"
[ "${prev_crt}" != "${prev_key}" ] && mv "${new_key}" "${prev_key}"
return 0
}
dump_all_certificates() {
echo "${M}show ssl cert" | socat "${SOCKET}" - | grep -v '^#' | grep -v '^$' | while read -r line; do
export NAME
export CRT_FILENAME
export KEY_FILENAME
if read_certificate "$line"; then
dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
else
echo "[WARNING] ($$) : can't dump \"$name\", crt/key filename details not found in \"show ssl cert\"" >&2
fi
done
}
usage() {
echo "Usage:"
echo " $0 [options]* [cert]*"
echo ""
echo " Dump certificates from the HAProxy stats or master socket to the filesystem"
echo " Require socat and openssl"
echo " EXPERIMENTAL script, backup your files!"
echo " The script will move your previous files to FILE.old.unixtimestamp (ex: foo.com.pem.old.1759044998)"
echo ""
echo "Options:"
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${SOCKET})"
echo " -s, --socket <path> Use the stats socket at <path>"
echo " -p, --path <path> Specifiy a base path for relative files (default: ${BASEPATH})"
echo " -n, --dry-run Read certificates on the socket but don't dump them"
echo " -d, --debug Debug mode, set -x"
echo " -v, --verbose Verbose mode"
echo " -h, --help This help"
echo " -- End of options"
echo ""
echo "Examples:"
echo " $0 -v -p ${BASEPATH} -S ${SOCKET}"
echo " $0 -v -p ${BASEPATH} -S ${SOCKET} bar.com.rsa.pem"
echo " $0 -v -p ${BASEPATH} -S ${SOCKET} -- foo.com.ecdsa.pem bar.com.rsa.pem"
}
main() {
while [ -n "$1" ]; do
case "$1" in
-S|--master-socket)
SOCKET="$2"
M="@1 "
shift 2
;;
-s|--socket)
SOCKET="$2"
M=
shift 2
;;
-p|--path)
BASEPATH="$2/"
shift 2
;;
-n|--dry-run)
DRY_RUN=1
shift
;;
-d|--debug)
DEBUG=1
shift
;;
-v|--verbose)
VERBOSE=1
shift
;;
-h|--help)
usage "$@"
exit 0
;;
--)
shift
break
;;
-*)
echo "[ALERT] ($$) : Unknown option '$1'" >&2
usage "$@"
exit 1
;;
*)
break
;;
esac
done
if [ -n "$DEBUG" ]; then
set -x
fi
TMP=${TMP:-$(mktemp -d)}
if [ -z "$1" ]; then
dump_all_certificates
else
# compute the certificates names at the end of the command
while [ -n "$1" ]; do
if ! read_certificate "$1"; then
echo "[ALERT] ($$) : can't dump \"$1\", crt/key filename details not found in \"show ssl cert\"" >&2
exit 1
fi
[ "${DRY_RUN}" = "0" ] && dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
shift
done
fi
}
trap 'rm -rf -- "$TMP"' EXIT
main "$@"
View file
@ -1,113 +0,0 @@
#!/bin/bash
set -e
export VERBOSE=1
export TIMEOUT=90
export MASTER_SOCKET=${MASTER_SOCKET:-/var/run/haproxy-master.sock}
export RET=
alert() {
if [ "$VERBOSE" -ge "1" ]; then
echo "[ALERT] $*" >&2
fi
}
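# reload: issue "reload" on the master socket and derive the return status
# from the Success= line found in the reply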
reload() {
while read -r line; do
if [ "$line" = "Success=0" ]; then
RET=1
elif [ "$line" = "Success=1" ]; then
RET=0
elif [ "$line" = "Another reload is still in progress." ]; then
alert "$line"
elif [ "$line" = "--" ]; then
continue;
else
if [ "$RET" = 1 ] && [ "$VERBOSE" = "2" ]; then
echo "$line" >&2
elif [ "$VERBOSE" = "3" ]; then
echo "$line" >&2
fi
fi
done < <(echo "reload" | socat -t"${TIMEOUT}" "${MASTER_SOCKET}" -)
if [ -z "$RET" ]; then
alert "Couldn't finish the reload before the timeout (${TIMEOUT})."
return 1
fi
return "$RET"
}
usage() {
echo "Usage:"
echo " $0 [options]*"
echo ""
echo " Trigger a reload from the master socket"
echo " Require socat"
echo " EXPERIMENTAL script!"
echo ""
echo "Options:"
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${MASTER_SOCKET})"
echo " -d, --debug Debug mode, set -x"
echo " -t, --timeout Timeout (socat -t) (default: ${TIMEOUT})"
echo " -s, --silent Silent mode (no output)"
echo " -v, --verbose Verbose output (output from haproxy on failure)"
echo " -vv Even more verbose output (output from haproxy on success and failure)"
echo " -h, --help This help"
echo ""
echo "Examples:"
echo " $0 -S ${MASTER_SOCKET} -d ${TIMEOUT}"
}
main() {
while [ -n "$1" ]; do
case "$1" in
-S|--master-socket)
MASTER_SOCKET="$2"
shift 2
;;
-t|--timeout)
TIMEOUT="$2"
shift 2
;;
-s|--silent)
VERBOSE=0
shift
;;
-v|--verbose)
VERBOSE=2
shift
;;
-vv)
VERBOSE=3
shift
;;
-d|--debug)
DEBUG=1
shift
;;
-h|--help)
usage "$@"
exit 0
;;
*)
echo "[ALERT] ($$) : Unknown option '$1'" >&2
usage "$@"
exit 1
;;
esac
done
if [ -n "$DEBUG" ]; then
set -x
fi
}
main "$@"
reload
View file
@ -123,22 +123,6 @@ struct url_stat {
#define FILT2_PRESERVE_QUERY 0x02 #define FILT2_PRESERVE_QUERY 0x02
#define FILT2_EXTRACT_CAPTURE 0x04 #define FILT2_EXTRACT_CAPTURE 0x04
#define FILT_OUTPUT_FMT (FILT_COUNT_ONLY| \
FILT_COUNT_STATUS| \
FILT_COUNT_SRV_STATUS| \
FILT_COUNT_COOK_CODES| \
FILT_COUNT_TERM_CODES| \
FILT_COUNT_URL_ONLY| \
FILT_COUNT_URL_COUNT| \
FILT_COUNT_URL_ERR| \
FILT_COUNT_URL_TAVG| \
FILT_COUNT_URL_TTOT| \
FILT_COUNT_URL_TAVGO| \
FILT_COUNT_URL_TTOTO| \
FILT_COUNT_URL_BAVG| \
FILT_COUNT_URL_BTOT| \
FILT_COUNT_IP_COUNT)
unsigned int filter = 0; unsigned int filter = 0;
unsigned int filter2 = 0; unsigned int filter2 = 0;
unsigned int filter_invert = 0; unsigned int filter_invert = 0;
@ -208,7 +192,7 @@ void help()
" you can also use -n to start from earlier then field %d\n" " you can also use -n to start from earlier then field %d\n"
" -query preserve the query string for per-URL (-u*) statistics\n" " -query preserve the query string for per-URL (-u*) statistics\n"
"\n" "\n"
"Output format - **only one** may be used at a time\n" "Output format - only one may be used at a time\n"
" -c only report the number of lines that would have been printed\n" " -c only report the number of lines that would have been printed\n"
" -pct output connect and response times percentiles\n" " -pct output connect and response times percentiles\n"
" -st output number of requests per HTTP status code\n" " -st output number of requests per HTTP status code\n"
@ -914,9 +898,6 @@ int main(int argc, char **argv)
if (!filter && !filter2) if (!filter && !filter2)
die("No action specified.\n"); die("No action specified.\n");
if ((filter & FILT_OUTPUT_FMT) & ((filter & FILT_OUTPUT_FMT) - 1))
die("Please, set only one output filter.\n");
if (filter & FILT_ACC_COUNT && !filter_acc_count) if (filter & FILT_ACC_COUNT && !filter_acc_count)
filter_acc_count=1; filter_acc_count=1;
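The single-output check removed above relies on the classic x & (x - 1) bit
trick: the expression is non-zero exactly when more than one bit of x is set,
i.e. when several exclusive output formats were requested at once. A
standalone sketch of the same test, with illustrative flag values rather than
halog's real ones:

    #include <stdio.h>

    #define FMT_A    0x01            /* illustrative flag values */
    #define FMT_B    0x02
    #define FMT_MASK (FMT_A | FMT_B) /* all exclusive output formats */

    int main(void)
    {
        unsigned int filter = FMT_A | FMT_B;
        unsigned int fmt = filter & FMT_MASK;

        /* non-zero iff at least two bits of fmt are set */
        if (fmt & (fmt - 1))
            printf("more than one output format selected\n");
        return 0;
    }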
@ -1571,10 +1552,6 @@ void filter_count_srv_status(const char *accept_field, const char *time_field, s
if (!srv_node) { if (!srv_node) {
/* server not yet in the tree, let's create it */ /* server not yet in the tree, let's create it */
srv = (void *)calloc(1, sizeof(struct srv_st) + e - b + 1); srv = (void *)calloc(1, sizeof(struct srv_st) + e - b + 1);
if (unlikely(!srv)) {
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
exit(1);
}
srv_node = &srv->node; srv_node = &srv->node;
memcpy(&srv_node->key, b, e - b); memcpy(&srv_node->key, b, e - b);
srv_node->key[e - b] = '\0'; srv_node->key[e - b] = '\0';
@ -1684,10 +1661,6 @@ void filter_count_url(const char *accept_field, const char *time_field, struct t
*/ */
if (unlikely(!ustat)) if (unlikely(!ustat))
ustat = calloc(1, sizeof(*ustat)); ustat = calloc(1, sizeof(*ustat));
if (unlikely(!ustat)) {
fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
exit(1);
}
ustat->nb_err = err; ustat->nb_err = err;
ustat->nb_req = 1; ustat->nb_req = 1;
View file
@ -6,9 +6,9 @@ Wants=network-online.target
[Service] [Service]
EnvironmentFile=-/etc/default/haproxy EnvironmentFile=-/etc/default/haproxy
EnvironmentFile=-/etc/sysconfig/haproxy EnvironmentFile=-/etc/sysconfig/haproxy
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "CFGDIR=/etc/haproxy/conf.d" "EXTRAOPTS=-S /run/haproxy-master.sock" Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "EXTRAOPTS=-S /run/haproxy-master.sock"
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -p $PIDFILE $EXTRAOPTS ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -p $PIDFILE $EXTRAOPTS
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -c $EXTRAOPTS ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -c $EXTRAOPTS
ExecReload=/bin/kill -USR2 $MAINPID ExecReload=/bin/kill -USR2 $MAINPID
KillMode=mixed KillMode=mixed
Restart=always Restart=always
View file
@ -25,7 +25,7 @@ end
# returns $node filled with the first node of ebroot $arg0 # returns $node filled with the first node of ebroot $arg0
define ebtree_first define ebtree_first
# browse ebtree left until encountering leaf # browse ebtree left until encountering leaf
set $node = (struct eb_node *)$arg0->b[0] set $node = (struct eb_node *)$arg0->b[0]
while 1 while 1
_ebtree_set_tag_node $node _ebtree_set_tag_node $node
@ -76,7 +76,7 @@ end
# returns $node filled with the first node of ebroot $arg0 # returns $node filled with the first node of ebroot $arg0
define ebsctree_first define ebsctree_first
# browse ebsctree left until encountering leaf # browse ebsctree left until encountering leaf
set $node = (struct eb32sc_node *)$arg0->b[0] set $node = (struct eb32sc_node *)$arg0->b[0]
while 1 while 1
_ebsctree_set_tag_node $node _ebsctree_set_tag_node $node
View file
@ -1,19 +0,0 @@
# show non-null memprofile entries with method, alloc/free counts/tot and caller
define memprof_dump
set $i = 0
set $meth={ "UNKN", "MALL", "CALL", "REAL", "STRD", "FREE", "P_AL", "P_FR", "STND", "VALL", "ALAL", "PALG", "MALG", "PVAL" }
while $i < sizeof(memprof_stats) / sizeof(memprof_stats[0])
if memprof_stats[$i].alloc_calls || memprof_stats[$i].free_calls
set $m = memprof_stats[$i].method
printf "m:%s ac:%u fc:%u at:%u ft:%u ", $meth[$m], \
memprof_stats[$i].alloc_calls, memprof_stats[$i].free_calls, \
memprof_stats[$i].alloc_tot, memprof_stats[$i].free_tot
output/a memprof_stats[$i].caller
printf "\n"
end
set $i = $i + 1
end
end
View file
@ -224,7 +224,7 @@ function Dec:tcp_payload(txn, chn)
ff = "" ff = ""
for i = 7, 0, -1 do for i = 7, 0, -1 do
if (((self.st[dir].fflg >> i) & 1) ~= 0) then if (((self.st[dir].fflg >> i) & 1) ~= 0) then
if self.st[dir].ftyp <= 9 and h2ff[self.st[dir].ftyp][i] ~= nil then if h2ff[self.st[dir].ftyp][i] ~= nil then
ff = ff .. ((ff == "") and "" or "+") ff = ff .. ((ff == "") and "" or "+")
ff = ff .. h2ff[self.st[dir].ftyp][i] ff = ff .. h2ff[self.st[dir].ftyp][i]
else else
View file
@ -59,9 +59,9 @@ struct ring_v2 {
struct ring_v2a { struct ring_v2a {
size_t size; // storage size size_t size; // storage size
size_t rsvd; // header length (used for file-backed maps) size_t rsvd; // header length (used for file-backed maps)
size_t tail ALIGNED(64); // storage tail size_t tail __attribute__((aligned(64))); // storage tail
size_t head ALIGNED(64); // storage head size_t head __attribute__((aligned(64))); // storage head
char area[0] ALIGNED(64); // storage area begins immediately here char area[0] __attribute__((aligned(64))); // storage area begins immediately here
}; };
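The ALIGNED() helper on the left-hand side is presumably shorthand for the
compiler attribute spelled out on the right. A minimal sketch of such a
helper, assuming a GCC-compatible compiler (the real definition lives in
haproxy's own headers):

    #ifndef ALIGNED
    #define ALIGNED(x) __attribute__((aligned(x)))
    #endif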
/* display the message and exit with the code */ /* display the message and exit with the code */
View file
@ -1,5 +1,4 @@
#define _GNU_SOURCE #define _GNU_SOURCE
#include <errno.h>
#include <limits.h> #include <limits.h>
#include <sched.h> #include <sched.h>
#include <stdio.h> #include <stdio.h>
@ -12,22 +11,6 @@
static char prog_full_path[PATH_MAX]; static char prog_full_path[PATH_MAX];
long sysconf(int name)
{
if (name == _SC_NPROCESSORS_ONLN ||
name == _SC_NPROCESSORS_CONF) {
const char *ncpu = getenv("NCPU");
int n;
n = ncpu ? atoi(ncpu) : CPU_SETSIZE;
if (n < 0 || n > CPU_SETSIZE)
n = CPU_SETSIZE;
return n;
}
errno = EINVAL;
return -1;
}
/* return a cpu_set having the first $NCPU set */ /* return a cpu_set having the first $NCPU set */
int sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask) int sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask)
{ {
View file
@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT
HAProxy's development cycle consists of one development branch and multiple
maintenance branches.
All the development happens exclusively in the development branch. This
includes mostly new features, doc updates, cleanups and of course, fixes.
The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.
Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development branch
is created with a new, higher version. The current development branch is
3.3-dev, and maintenance branches are 3.2 and below.
Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version down to
the branch that introduced the issue: 3.2 first, then 3.1, then 3.0, then 2.9
and so on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regression in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.
Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
talk interchangeably about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.
It seldom happens that some fixes depend on changes that were brought by other
patches that were not in some branches and that will need to be backported as
well for the fix to work. In this case, such information is explicitly provided
in the commit message by the patch's author in natural language.
Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch is
"needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.
ENDCONTEXT
A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already an expert
on HAProxy and everything related to Git, patch management, and the risks
associated with backports, so he doesn't want to be told how to proceed nor to
review the contents of the patch.
The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job, he
needs an accurate summary of the information and instructions found in each
commit message. Specifically he needs to figure out if the patch fixes a problem
affecting an older branch or not, if it needs to be backported, if so to which
branches, and if other patches need to be backported along with it.
The indented text block below after an "id" line and starting with a Subject line
is a commit message from the HAProxy development branch that describes a patch
applied to that branch, starting with its subject line; please read it carefully.
View file
@ -1,29 +0,0 @@
ENDINPUT
BEGININSTRUCTION
You are an AI assistant that follows instructions extremely well. Help as much
as you can, responding to a single question using a single response.
The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies might
be mentioned in the commit message. Carefully study the commit message and its
backporting instructions if any (otherwise it should probably not be backported),
then provide a very concise and short summary that will help the developer decide
to backport it, or simply to skip it.
Start by explaining in one or two sentences what you recommend for this one and why.
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
where X is a single word among:
- "yes", if you recommend to backport the patch right now either because
it explicitly states this or because it's a fix for a bug that affects
a maintenance branch (3.2 or lower);
- "wait", if this patch explicitly mentions that it must be backported, but
only after waiting some time.
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
lack of explicit backport instructions, or it's just an improvement);
- "uncertain" otherwise for cases not covered above
ENDINSTRUCTION
Explanation:
View file
@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT
HAProxy's development cycle consists of one development branch and multiple
maintenance branches.
All the development happens exclusively in the development branch. This
includes mostly new features, doc updates, cleanups and of course, fixes.
The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.
Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development branch
is created with a new, higher version. The current development branch is
3.4-dev, and maintenance branches are 3.3 and below.
Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version down to
the branch that introduced the issue: 3.3 first, then 3.2, then 3.1, then 3.0
and so on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regression in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.
Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
talk interchangeably about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.
It seldom happens that some fixes depend on changes that were brought by other
patches that were not in some branches and that will need to be backported as
well for the fix to work. In this case, such information is explicitly provided
in the commit message by the patch's author in natural language.
Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch is
"needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.
ENDCONTEXT
A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already an expert
on HAProxy and everything related to Git, patch management, and the risks
associated with backports, so he doesn't want to be told how to proceed nor to
review the contents of the patch.
The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job, he
needs an accurate summary of the information and instructions found in each
commit message. Specifically he needs to figure out if the patch fixes a problem
affecting an older branch or not, if it needs to be backported, if so to which
branches, and if other patches need to be backported along with it.
The indented text block below after an "id" line and starting with a Subject line
is a commit message from the HAProxy development branch that describes a patch
applied to that branch, starting with its subject line; please read it carefully.
View file
@ -1,29 +0,0 @@
ENDINPUT
BEGININSTRUCTION
You are an AI assistant that follows instructions extremely well. Help as much
as you can, responding to a single question using a single response.
The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies might
be mentioned in the commit message. Carefully study the commit message and its
backporting instructions if any (otherwise it should probably not be backported),
then provide a very concise and short summary that will help the developer decide
to backport it, or simply to skip it.
Start by explaining in one or two sentences what you recommend for this one and why.
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
where X is a single word among:
- "yes", if you recommend to backport the patch right now either because
it explicitly states this or because it's a fix for a bug that affects
a maintenance branch (3.3 or lower);
- "wait", if this patch explicitly mentions that it must be backported, but
only after waiting some time.
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
lack of explicit backport instructions, or it's just an improvement);
- "uncertain" otherwise for cases not covered above
ENDINSTRUCTION
Explanation:
View file
@ -22,8 +22,7 @@ STABLE=$(cd "$HAPROXY_DIR" && git describe --tags "v${BRANCH}-dev0^" |cut -f1,2
PATCHES_DIR="$PATCHES_PFX"-"$BRANCH" PATCHES_DIR="$PATCHES_PFX"-"$BRANCH"
(cd "$HAPROXY_DIR" (cd "$HAPROXY_DIR"
# avoid git pull, it chokes on forced push git pull
git remote update origin; git reset origin/master; git checkout -f
last_file=$(ls -1 "$PATCHES_DIR"/*.patch 2>/dev/null | tail -n1) last_file=$(ls -1 "$PATCHES_DIR"/*.patch 2>/dev/null | tail -n1)
if [ -n "$last_file" ]; then if [ -n "$last_file" ]; then
restart=$(head -n1 "$last_file" | cut -f2 -d' ') restart=$(head -n1 "$last_file" | cut -f2 -d' ')
BIN dev/phash/a.out Executable file
Binary file not shown.
View file
@ -3,9 +3,7 @@ DeviceAtlas Device Detection
In order to add DeviceAtlas Device Detection support, you would need to download In order to add DeviceAtlas Device Detection support, you would need to download
the API source code from https://deviceatlas.com/deviceatlas-haproxy-module. the API source code from https://deviceatlas.com/deviceatlas-haproxy-module.
Once extracted, two modes are supported : Once extracted :
1/ Build HAProxy and DeviceAtlas in one command
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder> $ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder>
@ -16,6 +14,10 @@ directory. Also, in the case the api cache support is not needed and/or a C++ to
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder> DEVICEATLAS_NOCACHE=1 $ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder> DEVICEATLAS_NOCACHE=1
However, if the API has been installed beforehand, DEVICEATLAS_SRC
can be omitted. Note that the DeviceAtlas C API version supported is from the 3.x
release series (3.2.1 minimum recommended).
For HAProxy developers who need to verify that their changes didn't accidentally For HAProxy developers who need to verify that their changes didn't accidentally
break the DeviceAtlas code, it is possible to build a dummy library provided in break the DeviceAtlas code, it is possible to build a dummy library provided in
the addons/deviceatlas/dummy directory and to use it as an alternative for the the addons/deviceatlas/dummy directory and to use it as an alternative for the
@ -25,29 +27,6 @@ validate API changes :
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=$PWD/addons/deviceatlas/dummy $ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=$PWD/addons/deviceatlas/dummy
2/ Build and install DeviceAtlas according to https://docs.deviceatlas.com/apis/enterprise/c/<release version>/README.html
For example :
In the deviceatlas library folder :
$ cmake .
$ make
$ sudo make install
In the HAProxy folder :
$ make TARGET=<target> USE_DEVICEATLAS=1
Note that if the -DCMAKE_INSTALL_PREFIX cmake option has been used, it is also necessary to set DEVICEATLAS_LIB and
DEVICEATLAS_INC as follows :
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_INC=<CMAKE_INSTALL_PREFIX value>/include DEVICEATLAS_LIB=<CMAKE_INSTALL_PREFIX value>/lib
For example :
$ cmake -DCMAKE_INSTALL_PREFIX=/opt/local
$ make
$ sudo make install
$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_INC=/opt/local/include DEVICEATLAS_LIB=/opt/local/lib
Note that DEVICEATLAS_SRC is omitted in this case.
These are supported DeviceAtlas directives (see doc/configuration.txt) : These are supported DeviceAtlas directives (see doc/configuration.txt) :
- deviceatlas-json-file <path to the DeviceAtlas JSON data file>. - deviceatlas-json-file <path to the DeviceAtlas JSON data file>.
- deviceatlas-log-level <number> (0 to 3, level of information returned by - deviceatlas-log-level <number> (0 to 3, level of information returned by
View file
@ -362,7 +362,7 @@ option set-process-time <var name>
latency added by the SPOE processing for the last handled event or group. latency added by the SPOE processing for the last handled event or group.
If several events or groups are processed for the same stream, this value If several events or groups are processed for the same stream, this value
will be overridden. will be overridden.
See also: "option set-total-time". See also: "option set-total-time".
View file
@ -3,7 +3,7 @@
A number of contributors are often embarrassed with coding style issues, they A number of contributors are often embarrassed with coding style issues, they
don't always know if they're doing it right, especially since the coding style don't always know if they're doing it right, especially since the coding style
has evolved over the years. What is explained here is not necessarily what is has evolved over the years. What is explained here is not necessarily what is
applied in the code, but new code should as much as possible conform to this applied in the code, but new code should as much as possible conform to this
style. Coding style fixes happen when code is replaced. It is useless to send style. Coding style fixes happen when code is replaced. It is useless to send
patches to fix coding style only, they will be rejected, unless they belong to patches to fix coding style only, they will be rejected, unless they belong to
File diff suppressed because it is too large
File diff suppressed because it is too large
View file
@ -5,7 +5,7 @@
The buffer list API allows one to share a certain amount of buffers between The buffer list API allows one to share a certain amount of buffers between
multiple entities, which will each see their own list of buffers, while multiple entities, which will each see their own list of buffers, while
keeping a shared free list. The immediate use case is for muxes, which may keeping a shared free list. The immediate use case is for muxes, which may
want to allocate up to a certain number of buffers per connection, shared want to allocate up to a certain number of buffers per connection, shared
among all streams. In this case, each stream will first request a new list among all streams. In this case, each stream will first request a new list
for its own use, then may request extra entries from the free list. At any for its own use, then may request extra entries from the free list. At any
View file
@ -1,86 +0,0 @@
2025-08-13 - Memory allocation in HAProxy 3.3
The vast majority of dynamic memory allocations are performed from pools. Pools
are optimized to store pre-calibrated objects of the right size for a given
usage, try to favor locality and hot objects as much as possible, and are
heavily instrumented to detect and help debug a wide class of bugs including
buffer overflows, use-after-free, etc.
For objects of random sizes, or those used only at configuration time, pools
are not suited, and the regular malloc/free family is available, in addition of
a few others.
The standard allocation calls are intercepted at the code level (#define) when
the code is compiled with -DDEBUG_MEM_STATS. For this reason, these calls are
redefined as macros in "bug.h", and one must not try to use the pointers to
such functions, as this may break DEBUG_MEM_STATS. This provides fine-grained
stats about allocation/free per line of source code using locally implemented
counters that can be consulted by "debug dev memstats". The calls are
categorized into one of "calloc", "free", "malloc", "realloc", "strdup",
"p_alloc", "p_free", the latter two designating pools. Extra calls such as
memalign() and similar are also intercepted and counted as malloc.
Due to the nature of this replacement, DEBUG_MEM_STATS cannot see operations
performed in libraries or dependencies.
In addition to DEBUG_MEM_STATS, when haproxy is built with USE_MEMORY_PROFILING
the standard functions are wrapped by new ones defined in "activity.c", which
also hold counters by call place. These ones are able to trace activity in
libraries because the functions check the return pointer to figure where the
call was made. The approach is different and relies on a large hash table. The
files, function names and line numbers are not known, but by passing the pointer
to dladdr(), we can often resolve most of these symbols. These operations are
consulted via "show profiling memory". It must first be enabled either in the
global config "profiling.memory on" or via the CLI using "set profiling memory on".
Memory profiling can also track pool allocations and frees thanks to knowing
the size of the element and knowing a place where to store it. Some future
evolutions might consider making this possible for pure malloc/free as well
by leveraging malloc_usable_size() a bit more.
Finally, 3.3 brought aligned allocations. These are made available via a new
family of functions around ha_aligned_alloc() that simply map to either
posix_memalign(), memalign() or _aligned_malloc() for CYGWIN, depending on
which one is available. The latter requires passing the pointer to
_aligned_free() instead of free(); for this reason, all aligned allocations
have to be released using ha_aligned_free(). Since this mostly happens on
configuration elements, in practice it's not as inconvenient as it can sound.
These functions are in reality macros handled in "bug.h" like the previous
ones in order to deal with DEBUG_MEM_STATS. All "alloc" variants are reported
in memstats as "malloc". All "zalloc" variants are reported in memstats as
"calloc".
The currently available allocators are the following:
- void *ha_aligned_alloc(size_t align, size_t size)
- void *ha_aligned_zalloc(size_t align, size_t size)
Equivalent of malloc() but aligned to <align> bytes. The alignment MUST be
at least as large as one word and MUST be a power of two. The "zalloc"
variant also zeroes the area on success. Both return NULL on failure.
- void *ha_aligned_alloc_safe(size_t align, size_t size)
- void *ha_aligned_zalloc_safe(size_t align, size_t size)
Equivalent of malloc() but aligned to <align> bytes. The alignment is
automatically adjusted to the nearest larger power of two that is at least
as large as a word. The "zalloc" variant also zeroes the area on
success. Both return NULL on failure.
- (type *)ha_aligned_alloc_typed(size_t count, type)
(type *)ha_aligned_zalloc_typed(size_t count, type)
This macro returns an area aligned to the required alignment for type
<type>, large enough for <count> objects of this type, and the result is a
pointer of this type. The goal is to ease allocation of known structures
whose alignment is not necessarily known to the developer (and to avoid
encouraging to hard-code alignment). The cast in return also provides a
last-minute control in case a wrong type is mistakenly used due to a poor
copy-paste or an extra "*" after the type. When DEBUG_MEM_STATS is in use,
the type is stored as a string in the ".extra" field so that it can be
displayed in "debug dev memstats". The "zalloc" variant also zeroes the
area on success. Both return NULL on failure.
- void ha_aligned_free(void *ptr)
Frees the area pointed to by ptr. It is the equivalent of free() but for
objects allocated using one of the functions above.
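As a quick sketch of how these calls combine (the structure, its 64-byte
alignment and the haproxy/bug.h include are illustrative assumptions; the
allocator names and semantics are the ones documented above):

    #include <haproxy/bug.h>   /* assumed location of the allocation macros */

    struct hot_counters {
        unsigned long vals[8];
    } __attribute__((aligned(64)));   /* hypothetical cache-line aligned type */

    static struct hot_counters *alloc_counters(void)
    {
        /* one zeroed object, aligned as required by its type; reported
         * as "calloc" in "debug dev memstats" when DEBUG_MEM_STATS is set */
        return ha_aligned_zalloc_typed(1, struct hot_counters);
    }

    static void release_counters(struct hot_counters *p)
    {
        /* aligned allocations must not be passed to free() */
        ha_aligned_free(p);
    }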
View file
@ -245,30 +245,6 @@ mt_list_pop(l)
#=========# #=========#
mt_list_pop_locked(l)
Removes the list's first element, returns it locked. If the list was empty,
NULL is returned. A macro MT_LIST_POP_LOCKED() is provided for a
more convenient use; instead of returning the list element, it will return
the structure holding the element, taking care of preserving the NULL.
before:
+---+ +---+ +---+ +---+ +---+ +---+ +---+
#=>| L |<===>| A |<===>| B |<===>| C |<===>| D |<===>| E |<===>| F |<=#
# +---+ +---+ +---+ +---+ +---+ +---+ +---+ #
#=====================================================================#
after:
+---+ +---+ +---+ +---+ +---+ +---+
#=>| L |<===>| B |<===>| C |<===>| D |<===>| E |<===>| F |<=#
# +---+ +---+ +---+ +---+ +---+ +---+ #
#===========================================================#
+---+
# x| A |x #
# +---+ #
#=========#
_mt_list_lock_next(elt) _mt_list_lock_next(elt)
Locks the link that starts at the next pointer of the designated element. Locks the link that starts at the next pointer of the designated element.
The link is replaced by two locked pointers, and a pointer to the next The link is replaced by two locked pointers, and a pointer to the next
@ -400,9 +376,6 @@ mt_list_lock_prev(elt)
Return A elt Return A elt
value: <===> value: <===>
mt_list_try_lock_prev(elt)
Does the same thing as mt_list_lock_prev(), except if the list is
locked already, it returns { NULL, NULL } instead of waiting.
mt_list_lock_elem(elt) mt_list_lock_elem(elt)
Locks the element only. Both of its pointers are replaced by two locked Locks the element only. Both of its pointers are replaced by two locked
View file
@ -1,4 +1,4 @@
2025-08-11 - Pools structure and API 2022-02-24 - Pools structure and API
1. Background 1. Background
------------- -------------
@ -204,14 +204,6 @@ the cache, when this option is set, objects are picked from the cache from the
oldest one instead of the freshest one. This way even late memory corruptions oldest one instead of the freshest one. This way even late memory corruptions
have a chance to be detected. have a chance to be detected.
Another non-destructive approach is to use "-dMbackup". A full copy of the
object is made after its end, which eases inspection (e.g. of the parts
scratched by the pool_item elements), and a comparison is made upon allocation
of that object, just like with "-dMintegrity", causing a crash on mismatch. The
initial 4 words corresponding to the list are ignored as well. Note that when
both "-dMbackup" and "-dMintegrity" are used, the copy is performed before
being scratched, and the comparison is done by "-dMintegrity" only.
When build option DEBUG_MEMORY_POOLS is set, or the boot-time option "-dMtag"
is passed on the executable's command line, pool objects are allocated with
one extra pointer compared to the requested size, so that the bytes that follow
@ -239,6 +231,10 @@ currently in use:
+------------+ +------------+ / is set at build time
or -dMtag at boot time
Right now no provisions are made to return objects aligned on larger boundaries
than those currently covered by malloc() (i.e. two pointers). This need appears
from time to time and the layout above might evolve a little bit if needed.
4. Storage in the process-wide shared pool
------------------------------------------
@ -346,25 +342,7 @@ struct pool_head *create_pool(char *name, uint size, uint flags)
"-dMno-merge" is passed on the executable's command line, the pools
also need to have the exact same name to be merged. In addition, unless
MEM_F_EXACT is set in <flags>, the object size will usually be rounded
up to the size of pointers (16 or 32 bytes). MEM_F_UAF may be set on a
per-pool basis to enable the UAF detection only for this specific pool,
saving the massive overhead of global usage. The name that will appear
in the pool upon merging is the name of the first created pool. The
returned pointer is the new (or reused) pool head, or NULL upon error.
Pools created this way must be destroyed using pool_destroy().
struct pool_head *create_aligned_pool(char *name, uint size, uint align, uint flags)
Create a new pool named <name> for objects of size <size> bytes and
aligned to <align> bytes (0 meaning use the platform's default). Pool
names are truncated to their first 11 characters. Pools of very similar
size will usually be merged if both have set the flag MEM_F_SHARED in
<flags>. When DEBUG_DONT_SHARE_POOLS was set at build time, or
"-dMno-merge" is passed on the executable's command line, the pools
also need to have the exact same name to be merged. In addition, unless
MEM_F_EXACT is set in <flags>, the object size will usually be rounded
up to the size of pointers (16 or 32 bytes). MEM_F_UAF may be set on a
per-pool basis to enable the UAF detection only for this specific pool,
saving the massive overhead of global usage. The name that will appear
in the pool upon merging is the name of the first created pool. The
returned pointer is the new (or reused) pool head, or NULL upon error.
Pools created this way must be destroyed using pool_destroy().
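For example, a minimal sketch of the above (the pool name and object type are
illustrative, as is the 64-byte alignment; create_aligned_pool() and its
arguments are as described):

static struct pool_head *pool_head_chash;

int init_chash_pool(void)
{
	/* objects of sizeof(struct conn_hash) bytes, aligned to 64 bytes,
	 * mergeable with pools of a similar size thanks to MEM_F_SHARED */
	pool_head_chash = create_aligned_pool("chash", sizeof(struct conn_hash),
	                                      64, MEM_F_SHARED);
	return pool_head_chash != NULL;
}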
@ -482,20 +460,6 @@ complicate maintenance.
A few macros exist to ease the declaration of pools:
DECLARE_ALIGNED_POOL(ptr, name, size, align)
Placed at the top level of a file, this declares a global memory pool
as variable <ptr>, name <name> and size <size> bytes per element, all
of which will be aligned to <align> bytes. The alignment will be
rounded up to the next power of two and will be at least as large as a
word on the platform. This is made via a call to REGISTER_ALIGNED_POOL()
and by assigning the resulting pointer to variable <ptr>. <ptr> will be
created of type "struct pool_head *". If the pool needs to be visible
outside of the function (which is likely), it will also need to be
declared somewhere as "extern struct pool_head *<ptr>;". It is
recommended to place such declarations very early in the source file so
that the variable is already known to all subsequent functions which
may use it.
DECLARE_POOL(ptr, name, size)
Placed at the top level of a file, this declares a global memory pool
as variable <ptr>, name <name> and size <size> bytes per element. This
@ -507,17 +471,6 @@ DECLARE_POOL(ptr, name, size)
declarations very early in the source file so that the variable is
already known to all subsequent functions which may use it.
DECLARE_STATIC_ALIGNED_POOL(ptr, name, size, align)
Placed at the top level of a file, this declares a global memory pool
as variable <ptr>, name <name> and size <size> bytes per element, all
of which will be aligned to <align> bytes. The alignment will be
rounded up to the next power of two and will be at least as large as a
word on the platform. This is made via a call to REGISTER_ALIGNED_POOL()
and by assigning the resulting pointer to local variable <ptr>. <ptr>
will be created of type "static struct pool_head *". It is recommended
to place such declarations very early in the source file so that the
variable is already known to all subsequent functions which may use it.
DECLARE_STATIC_POOL(ptr, name, size)
Placed at the top level of a file, this declares a static memory pool
as variable <ptr>, name <name> and size <size> bytes per element. This
@ -527,42 +480,6 @@ DECLARE_STATIC_POOL(ptr, name, size)
early in the source file so that the variable is already known to all
subsequent functions which may use it.
DECLARE_STATIC_TYPED_POOL(ptr, name, type[, extra[, align]])
Placed at the top level of a file, this declares a static memory pool
as variable <ptr>, name <name>, and configured to allocate objects of
type <type>. It is optionally possible to grow these objects by <extra>
bytes (e.g. if they contain some variable length data at the end), and
to force them to be aligned to <align> bytes. If only alignment is
desired without extra data, pass 0 as <extra>. Alignment must be at
least as large as the type's, and a control is enforced at declaration
time so that objects cannot be less aligned than what is promised to
the compiler. The default alignment of zero indicates that the default
one (from the type) should be used. This is made via a call to
REGISTER_ALIGNED_POOL() and by assigning the resulting pointer to local
variable <ptr>. <ptr> will be created of type "static struct pool_head
*". It is recommended to place such declarations very early in the
source file so that the variable is already known to all subsequent
functions which may use it.
DECLARE_TYPED_POOL(ptr, name, type[, extra[, align]])
Placed at the top level of a file, this declares a global memory pool
as variable <ptr>, name <name>, and configured to allocate objects of
type <type>. It is optionally possible to grow these objects by <extra>
bytes (e.g. if they contain some variable length data at the end), and
to force them to be aligned to <align> bytes. If only alignment is
desired without extra data, pass 0 as <extra>. Alignment must be at
least as large as the type's, and a control is enforced at declaration
time so that objects cannot be less aligned than what is promised to
the compiler. The default alignment of zero indicates that the default
one (from the type) should be used. This is made via a call to
REGISTER_ALIGNED_POOL() and by assigning the resulting pointer to
variable <ptr>. <ptr> will be created of type "struct pool_head *". If
the pool needs to be visible outside of the function (which is likely),
it will also need to be declared somewhere as "extern struct pool_head
*<ptr>;". It is recommended to place such declarations very early in
the source file so that the variable is already known to all subsequent
functions which may use it.
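As an example, here is a minimal sketch of a typed pool declaration (struct
blob, its 16-byte tail and the 64-byte alignment are illustrative; the macro
signature and pool_alloc() are part of the pool API described in this
document):

struct blob {
	size_t len;
	char data[0]; /* variable length data at the end */
} __attribute__((aligned(64)));

/* grow each object by 16 bytes for the tail, keep the type's alignment */
DECLARE_TYPED_POOL(pool_head_blob, "blob", struct blob, 16, 64);

struct blob *new_blob(void)
{
	return pool_alloc(pool_head_blob); /* NULL on allocation failure */
}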
6. Build options
----------------
@ -11,7 +11,7 @@ default init, this was controversial but fedora and archlinux already uses it.
At this time HAProxy still had a multi-process model, and the way haproxy is
working was incompatible with the daemon mode.
Systemd is compatible with traditional forking services, but somehow HAProxy
is different. To work correctly, systemd needs a main PID: this is the PID of
the process that systemd will supervise.
@ -45,7 +45,7 @@ However the wrapper suffered from several problems:
### mworker V1
HAProxy 1.8 got rid of the wrapper, which was replaced by the master worker
mode. This first version was basically a reintegration of the wrapper features
within HAProxy. HAProxy is launched with the -W flag, reads the configuration
and then forks. In mworker mode, the master is usually launched as a root process,
@ -86,7 +86,7 @@ retrieved automatically.
The master is supervising the workers: when a current worker (not a previous one
from before the reload) exits without having been asked to reload, the master
will emit an "exit-on-failure" error, kill every worker with a SIGTERM, and exit
with the same error code as the failed worker. This behavior can be
changed by using the "no exit-on-failure" option in the global section.
While the master is supervising the workers using the wait() function, the
@ -186,8 +186,8 @@ number that can be found in HAPROXY_PROCESSES. With this change the stats socket
in the configuration is less useful and everything can be done from the master
CLI.
With 2.7, the reload mechanism of the master CLI evolved. With previous versions,
this mechanism was asynchronous, so once the `reload` command was received, the
master would reload, the active master CLI connection was closed, and there was
no way to return a status as a response to the `reload` command. To achieve a
synchronous reload, a dedicated sockpair is used, one side uses a master CLI
@ -1,53 +0,0 @@
2025/09/16 - SHM stats file storage description and hints
The shm stats file (used to share thread-groupable statistics over multiple
processes through the "shm-stats-file" directive) is made of:
- a main header which describes the file version, the processes making
use of it, the common clock source and hints about the number of
objects that are currently stored or provisioned in the file.
- an indefinite number of "objects" blocks coming right after the
main header; all blocks have the same size, which is the size of the
largest underlying object that may be stored. The main header tells
how many objects are stored in the file.
File header looks like this (32/64 bits systems):
0 8 16 32 48 64
+-------+---------+----------------+-------------------+-------------------+
| VERSION | 2 bytes | global_now_ms (global mono date in ms)|
|MAJOR | MINOR | hole | |
+----------------------------------+---------------------------------------+
| global_now_ns (global mono date in ns) |
+--------------------------------------------------------------------------+
| now_offset (offset applied to global monotonic date |
| on startup) |
+--------------------------------------------------------------------------+
| Process slot : | 1byte x 64
| pid | heartbeat (ticks) |
+----------------------------------+---------------------------------------+
| objects | objects slots |
| (used objects) | (available for use) |
+----------------------------------+---------------------------------------+
| padding (for future use) | 128 bytes
+--------------------------------------------------------------------------+
Object block looks like this:
0 8 16 32 48 64
+-------+---------+----------------+-------------------+-------------------+
| GUID | 128 bytes
+ (zero terminated) +
| |
+-------+---------+--------------------------------------------------------+
| tgid | type | padding |
+-------+---------+--------------------------------------------------------+
| users (bitmask of process slots making use of the obj) |
+--------------------------------------------------------------------------+
| object data |
| (version dependent) |
| struct be_counters_shared_tg or |
| struct fe_counters_shared_tg |
+--------------------------------------------------------------------------+
| padding (to anticipate evolutions) | 64 bytes
+--------------------------------------------------------------------------+
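Rendered as C, an object block could look like this (illustrative only: the
field names and exact types, here <stdint.h> ones, are assumptions derived
from the diagram, and the object data part is version dependent):

struct shm_stats_file_object {
	char guid[128];       /* zero terminated */
	uint8_t tgid;
	uint8_t type;
	uint8_t pad[6];
	uint64_t users;       /* bitmask of process slots making use of the obj */
	union {               /* version dependent object data */
		struct fe_counters_shared_tg fe;
		struct be_counters_shared_tg be;
	} data;
	uint8_t reserved[64]; /* padding to anticipate evolutions */
};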
@ -21,7 +21,7 @@ falls back to CLOCK_REALTIME. The former is more accurate as it really counts
the time spent in the process, while the latter might also account for time
stuck on paging in etc.
Then wdt_ping() is called to arm the timer. It's set to trigger every
<wdt_warn_blocked_traffic_ns> interval. It is also called by wdt_handler()
to reprogram a new wakeup after it has ticked.
@ -37,18 +37,15 @@ If the thread was not marked as stuck, it's verified that no progress was made
for at least one second, in which case the TH_FL_STUCK flag is set. The lack of
progress is measured by the distance between the thread's current cpu_time and
its prev_cpu_time. If the lack of progress is at least as large as the warning
threshold, then the signal is bounced to the faulty thread if it's not the
current one. Since this bounce is based on the time spent without update, it
already doesn't happen often.
Once on the faulty thread, two checks are performed:
1) if the thread was already marked as stuck, then the thread is considered
as definitely stuck, and ha_panic() is called. It will not return.
2) a check is made to verify if the scheduler is still ticking, by reading
and setting a variable that only the scheduler can clear when leaving a
task. If the scheduler didn't make any progress, ha_stuck_warning() is
called to emit a warning about that thread.
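In rough pseudo-code, the two checks look like this (an illustrative sketch
only: TH_FL_STUCK, ha_panic() and ha_stuck_warning() come from the text above,
while the structure and the wdt_marker field are assumptions made for the
example):

/* sketch: run on the faulty thread each time the watchdog signal lands */
void wdt_two_checks(struct wdt_thread_view *th)
{
	/* check 1: second strike means definitely stuck; never returns */
	if (th->flags & TH_FL_STUCK)
		ha_panic();

	/* check 2: this marker is set here and only cleared by the
	 * scheduler when leaving a task; finding it still set means the
	 * scheduler made no progress since the previous tick.
	 */
	if (th->wdt_marker)
		ha_stuck_warning();
	th->wdt_marker = 1;
}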
Most of the time there's no panic of course, and a wdt_ping() is performed
before leaving the handler to reprogram a check for that thread.
@ -64,12 +61,12 @@ set TAINTED_WARN_BLOCKED_TRAFFIC.
ha_panic() uses the current thread's trash buffer to produce the messages, as
we don't care about its contents since that thread will never return. However
ha_stuck_warning() instead uses a local 8kB buffer in the thread's stack.
ha_panic() will call ha_thread_dump_fill() for each thread, to complete the
buffer being filled with each thread's dump messages. ha_stuck_warning() only
calls ha_thread_dump_one(), which works on the current thread. In both cases
the message is then directly sent to fd #2 (stderr) and ha_thread_dump_done()
is called to release the dumped thread.
Both print a few extra messages, but ha_panic() just ends by looping on abort()
until the process dies.
@ -88,12 +85,6 @@ to point to the target buffer. The thread_dump_buffer has 4 possible values:
will keep their own copy of their own dump so that it can be later found in
the core file for inspection.
A copy of the last valid thread_dump_buffer used is kept in last_dump_buffer,
for easier post-mortem analysis. This one may be NULL or even invalid, but
usually during a panic it will be valid, and may reveal useful hints even if it
still contains the dump of the last warning. Usually this will point to a trash
buffer or to a stack area.
ha_thread_dump_fill() then either directly calls ha_thread_dump_one() if the
target thread is the current thread, or sends the target thread DEBUGSIG
(SIGURG) if it's a different thread. This signal is initialized at boot time
@ -113,19 +104,13 @@ ha_dump_backtrace() before returning.
ha_dump_backtrace() produces a backtrace into a local buffer (100 entries max),
then dumps the code bytes nearby the crashing instruction, dumps pointers and
tries to resolve function names, and sends all of that into the target buffer.
On some architectures (x86_64, arm64), it will also try to detect and decode
call instructions and resolve them to called functions.
3. Improvements
---------------
Symbol resolution is extremely expensive, particularly for the warnings
which should be fast. But we need it, it's just unfortunate that it strikes at
the wrong moment. At least ha_dump_backtrace() does disable signals while it's
resolving, in order to avoid unwanted re-entrance. In addition, the called
function resolve_sym_name() uses some locking and refrains from calling the
dladdr family of functions in a re-entrant way (in the worst case only well
known symbols will be resolved).
In an ideal case, ha_dump_backtrace() would dump the pointers to a local array,
which would then later be resolved asynchronously in a tasklet. This can work
@ -1,7 +1,7 @@
-----------------------
HAProxy Starter Guide
-----------------------
version 3.4
This document is an introduction to HAProxy for all those who don't know it, as This document is an introduction to HAProxy for all those who don't know it, as
@ -1693,7 +1693,7 @@ A small team of trusted developers will receive it and will be able to propose
a fix. We usually don't use embargoes and once a fix is available it gets
merged. In some rare circumstances it can happen that a release is coordinated
with software vendors. Please note that this process usually messes up
everyone's work, and that rushed-up releases can sometimes introduce new bugs,
so it's best avoided unless strictly necessary; as such, there is often little
consideration for reports that needlessly cause such extra burden, and the best
way to see your work credited usually is to provide a working fix, which will
@ -893,9 +893,7 @@ Core class
**context**: init, task, action
This function returns a new object of a *httpclient* class. An *httpclient*
object must be used to process one and only one request. It must never be
reused to process several requests.
:returns: A :ref:`httpclient_class` object.
@ -928,25 +926,12 @@ Core class
its work and wants to give back the control to HAProxy without executing the
remaining code. It can be seen as a multi-level "return".
.. js:function:: core.wait([milliseconds])
**context**: task, action
Gives back the hand to the HAProxy scheduler. Unlike :js:func:`core.yield`,
the task will not be woken up automatically to resume as fast as possible.
Instead, it will wait for an event to wake the task. If the milliseconds
argument is provided, then the Lua execution will be automatically resumed
past this delay even if no event caused the task to wake itself up.
:param integer milliseconds: automatic wakeup past this delay. (optional)
.. js:function:: core.yield()
**context**: task, action
Gives back the hand to the HAProxy scheduler. It is used when the LUA
processing consumes a lot of processing time. Lua execution will be resumed
automatically (automatic reschedule).
.. js:function:: core.parse_addr(address)
@ -1089,13 +1074,18 @@ Core class
perform the heavy job in a dedicated task and allow remaining events to be
processed more quickly.
.. js:function:: core.use_native_mailers_config()
**context**: body
Inform haproxy that the script will make use of the native "mailers"
config section (although legacy). In other words, inform haproxy that
:js:func:`Proxy.get_mailers()` will be used later in the program.
.. _proxy_class:
@ -1224,14 +1214,8 @@ Proxy class
**LEGACY**
Returns a table containing legacy mailers config (from haproxy configuration
file) for the current proxy or nil if mailers are not available for the proxy.
.. warning::
When relying on :js:func:`Proxy.get_mailers()` to retrieve mailers
configuration, :js:func:`core.use_native_mailers_config()` must be called
first from body or init context to inform haproxy that Lua makes use of the
legacy mailers config.
:param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
proxy.
@ -1248,6 +1232,10 @@ ProxyMailers class
This class provides mailers config for a given proxy.
If sending emails directly from lua, please consider
:js:func:`core.disable_legacy_mailers()` to disable the email sending from
haproxy. (Or email alerts will be sent twice...)
.. js:attribute:: ProxyMailers.track_server_health
Boolean set to true if the option "log-health-checks" is configured on
@ -1881,17 +1869,6 @@ Queue class
Use :js:func:`core.queue` to get a new Queue object.
.. js:function:: Queue.alarm()
**context**: task, action, service
Sets a wakeup alarm on the current Lua context so that when new data
becomes available on the Queue, the current Lua context is woken up
automatically. It can be combined with :js:func:`core.wait` to wait
for Queue events.
:param class_queue queue: A :ref:`queue_class` to the current queue
.. js:function:: Queue.size(queue)
This function returns the number of items within the Queue.
@ -2580,9 +2557,7 @@ HTTPClient class
.. js:class:: HTTPClient
The httpclient class allows issuing outbound HTTP requests through a simple
API without the knowledge of HAProxy internals. Any instance must be used to
process one and only one request. It must never be reused to process several
requests.
.. js:function:: HTTPClient.get(httpclient, request)
.. js:function:: HTTPClient.head(httpclient, request)
@ -3489,7 +3464,7 @@ Patref class
in case of duplicated entries, only the first matching entry is returned.
.. Warning::
Not meant to be shared between multiple contexts. If multiple contexts
need to work on the same pattern reference, each context should have
its own patref object.
@ -3519,7 +3494,7 @@ Patref class
.. js:function:: Patref.commit(ref)
Tries to commit pending Patref object updates, that is updates made to the
local object will be committed to the underlying pattern reference storage
in an atomic manner upon success. Upon failure, local pending updates are
lost. Upon success, all other pending updates on the pattern reference
(e.g.: "prepare" from the cli or from other Patref Lua objects) started
@ -3911,31 +3886,16 @@ AppletTCP class
:param class_AppletTCP applet: An :ref:`applettcp_class`
:returns: a string. The string can be empty if we reach the end of the stream.
.. js:function:: AppletTCP.receive(applet, [size, [timeout]])
Reads data from the TCP stream, according to the specified read *size*. If the
*size* is missing, the function tries to read all the content of the stream
until the end. An optional timeout may be specified in milliseconds. In this
case the function will return no longer than this delay, with the amount of
available data, or nil if there is no data. An empty string is returned if the
connection is closed.
:param class_AppletTCP applet: An :ref:`applettcp_class`
:param integer size: the required read size.
:returns: return nil if the timeout has expired and no data was available but
can still be received. Otherwise, a string is returned, possibly an empty
string if the connection is closed.
.. js:function:: AppletTCP.try_receive(applet)
Reads available data from the TCP stream and returns immediately. Returns a
string containing read bytes or nil if no bytes are available at that time. An
empty string is returned if the connection is closed.
:param class_AppletTCP applet: An :ref:`applettcp_class`
:returns: return nil if no data was available but can still be
received. Otherwise, a string is returned, possibly an empty string if the
connection is closed.
.. js:function:: AppletTCP.send(appletmsg)
@ -4612,27 +4572,6 @@ HTTPMessage class
data by default.
:returns: an integer containing the amount of bytes copied or -1.
.. js:function:: HTTPMessage.set_body_len(http_msg, length)
This function changes the expected payload length of the HTTP message
**http_msg**. **length** can be an integer value. In that case, a
"Content-Length" header is added with the given value. It is also possible to
pass the **"chunked"** string instead of an integer value to force the HTTP
message to be chunk-encoded. In that case, a "Transfer-Encoding" header is
added with the "chunked" value. In both cases, all existing "Content-Length"
and "Transfer-Encoding" headers are removed.
This function should be used in the filter context to be able to alter the
payload of the HTTP message. The internal state of the HTTP message is updated
accordingly. :js:func:`HTTPMessage.add_header()` or
:js:func:`HTTPMessage.set_header()` functions must be used in that case.
:param class_httpmessage http_msg: The manipulated HTTP message.
:param type length: The new payload length to set. It can be an integer or
the string "chunked".
:returns: true if the payload length was successfully updated, false
otherwise.
.. js:function:: HTTPMessage.set_eom(http_msg)
This function sets the end of message for the HTTP message **http_msg**.
@ -28,9 +28,7 @@ Revision history
string encoding. With contributions from Andriy Palamarchuk
(Amazon.com).
2020/03/05 - added the unique ID TLV type (Tim Düsterhus)
2025/09/09 - added SSL-related TLVs for key exchange group and signature
scheme (Steven Collison)
2026/01/15 - added SSL client certificate TLV (Simon Ser)
1. Background
@ -537,21 +535,18 @@ the information they choose to publish.
The following types have already been registered for the <type> field :
#define PP2_TYPE_ALPN 0x01
#define PP2_TYPE_AUTHORITY 0x02
#define PP2_TYPE_CRC32C 0x03
#define PP2_TYPE_NOOP 0x04
#define PP2_TYPE_UNIQUE_ID 0x05
#define PP2_TYPE_SSL 0x20
#define PP2_SUBTYPE_SSL_VERSION 0x21
#define PP2_SUBTYPE_SSL_CN 0x22
#define PP2_SUBTYPE_SSL_CIPHER 0x23
#define PP2_SUBTYPE_SSL_SIG_ALG 0x24
#define PP2_SUBTYPE_SSL_KEY_ALG 0x25
#define PP2_SUBTYPE_SSL_GROUP 0x26
#define PP2_SUBTYPE_SSL_SIG_SCHEME 0x27
#define PP2_SUBTYPE_SSL_CLIENT_CERT 0x28
#define PP2_TYPE_NETNS 0x30
2.2.1 PP2_TYPE_ALPN
@ -659,25 +654,13 @@ of the used cipher, for example "ECDHE-RSA-AES128-GCM-SHA256".
The second level TLV PP2_SUBTYPE_SSL_SIG_ALG provides the US-ASCII string name
of the algorithm used to sign the certificate presented by the frontend when
the incoming connection was made over an SSL/TLS transport layer, for example
"RSA-SHA256".
The second level TLV PP2_SUBTYPE_SSL_KEY_ALG provides the US-ASCII string name
of the algorithm used to generate the key of the certificate presented by the
frontend when the incoming connection was made over an SSL/TLS transport layer,
for example "RSA2048".
The second level TLV PP2_SUBTYPE_SSL_GROUP provides the US-ASCII string name of
the key exchange algorithm used for the frontend TLS connection, for example
"secp256r1".
The second level TLV PP2_SUBTYPE_SSL_SIG_SCHEME provides the US-ASCII string
name of the algorithm the frontend used to sign the ServerKeyExchange or
CertificateVerify message, for example "rsa_pss_rsae_sha256".
The optional second level TLV PP2_SUBTYPE_SSL_CLIENT_CERT provides the raw
X.509 client certificate encoded in ASN.1 DER. The frontend may choose to omit
this TLV depending on configuration.
In all cases, the string representation (in UTF8) of the Common Name field
(OID: 2.5.4.3) of the client certificate's Distinguished Name, is appended
using the TLV format and the type PP2_SUBTYPE_SSL_CN. E.g. "example.com".
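As a hint for implementers, here is a minimal sketch of walking these TLVs
(struct pp2_tlv follows the layout given earlier in this specification;
handle_tlv() is a placeholder for the receiver's own logic):

#include <stddef.h>
#include <stdint.h>

struct pp2_tlv {
	uint8_t type;
	uint8_t length_hi;
	uint8_t length_lo;
	uint8_t value[0];
};

void handle_tlv(uint8_t type, const uint8_t *value, size_t len);

void walk_tlvs(const uint8_t *buf, size_t len)
{
	while (len >= sizeof(struct pp2_tlv)) {
		const struct pp2_tlv *tlv = (const struct pp2_tlv *)buf;
		size_t vlen = ((size_t)tlv->length_hi << 8) | tlv->length_lo;

		if (sizeof(struct pp2_tlv) + vlen > len)
			break; /* truncated TLV: stop parsing */
		handle_tlv(tlv->type, tlv->value, vlen);
		buf += sizeof(struct pp2_tlv) + vlen;
		len -= sizeof(struct pp2_tlv) + vlen;
	}
}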
@ -24,7 +24,7 @@ vtest installation
------------------------
To use vtest you will have to download and compile the recent vtest
sources found at https://github.com/vtest/VTest2.
To compile vtest:
@ -1,14 +0,0 @@
global
default-path config
tune.lua.bool-sample-conversion normal
# load all games here
lua-load lua/trisdemo.lua
defaults
timeout client 1h
# map one TCP port to each game
.notice 'use "socat TCP-CONNECT:0:7001 STDIO,raw,echo=0" to start playing'
frontend trisdemo
bind :7001
tcp-request content use-service lua.trisdemo
@ -3,7 +3,7 @@
-- Provides a pure lua alternative to tcpcheck mailers.
--
-- To be loaded using "lua-load" from haproxy configuration to handle
-- email-alerts directly from lua
local SYSLOG_LEVEL = {
["EMERG"] = 0,
@ -364,9 +364,9 @@ local function srv_event_add(event, data)
mailers_track_server_events(data.reference)
end
-- tell haproxy that we do use the legacy native "mailers" config section
-- which allows us to retrieve mailers configuration using Proxy:get_mailers()
core.use_native_mailers_config()
-- event subscriptions are purposely performed in an init function to prevent
-- email alerts from being generated too early (when process is starting up)
@ -1,251 +0,0 @@
-- Example game of falling pieces for HAProxy CLI/Applet
local board_width = 10
local board_height = 20
local game_name = "Lua Tris Demo"
-- Shapes with IDs for color mapping
local pieces = {
{id = 1, shape = {{1,1,1,1}}}, -- I (Cyan)
{id = 2, shape = {{1,1},{1,1}}}, -- O (Yellow)
{id = 3, shape = {{0,1,0},{1,1,1}}}, -- T (Purple)
{id = 4, shape = {{0,1,1},{1,1,0}}}, -- S (Green)
{id = 5, shape = {{1,1,0},{0,1,1}}}, -- Z (Red)
{id = 6, shape = {{1,0,0},{1,1,1}}}, -- J (Blue)
{id = 7, shape = {{0,0,1},{1,1,1}}} -- L (Orange)
}
-- ANSI escape codes
local clear_screen = "\27[2J"
local cursor_home = "\27[H"
local cursor_hide = "\27[?25l"
local cursor_show = "\27[?25h"
local reset_color = "\27[0m"
local color_codes = {
[1] = "\27[1;36m", -- I: Cyan
[2] = "\27[1;37m", -- O: White
[3] = "\27[1;35m", -- T: Purple
[4] = "\27[1;32m", -- S: Green
[5] = "\27[1;31m", -- Z: Red
[6] = "\27[1;34m", -- J: Blue
[7] = "\27[1;33m" -- L: Yellow
}
local function init_board()
local board = {}
for y = 1, board_height do
board[y] = {}
for x = 1, board_width do
board[y][x] = 0 -- 0 for empty, piece ID for placed blocks
end
end
return board
end
local function can_place_piece(board, piece, px, py)
for y = 1, #piece do
for x = 1, #piece[1] do
if piece[y][x] == 1 then
local board_x = px + x - 1
local board_y = py + y - 1
if board_x < 1 or board_x > board_width or board_y > board_height or
(board_y >= 1 and board[board_y][board_x] ~= 0) then
return false
end
end
end
end
return true
end
local function place_piece(board, piece, piece_id, px, py)
for y = 1, #piece do
for x = 1, #piece[1] do
if piece[y][x] == 1 then
local board_x = px + x - 1
local board_y = py + y - 1
if board_y >= 1 and board_y <= board_height then
board[board_y][board_x] = piece_id -- Store piece ID for color
end
end
end
end
end
local function clear_lines(board)
local lines_cleared = 0
local y = board_height
while y >= 1 do
local full = true
for x = 1, board_width do
if board[y][x] == 0 then
full = false
break
end
end
if full then
table.remove(board, y)
table.insert(board, 1, {})
for x = 1, board_width do
board[1][x] = 0
end
lines_cleared = lines_cleared + 1
else
y = y - 1
end
end
return lines_cleared
end
local function rotate_piece(piece, piece_id, px, py, board)
local new_piece = {}
for x = 1, #piece[1] do
new_piece[x] = {}
for y = 1, #piece do
new_piece[x][#piece + 1 - y] = piece[y][x]
end
end
if can_place_piece(board, new_piece, px, py) then
return new_piece
end
return piece
end
function render(applet, board, piece, piece_id, px, py, score)
local output = cursor_home
output = output .. game_name .. " - Lines: " .. score .. "\r\n"
output = output .. "+" .. string.rep("-", board_width * 2) .. "+\r\n"
for y = 1, board_height do
output = output .. "|"
for x = 1, board_width do
local char = " "
-- Current piece
for py_idx = 1, #piece do
for px_idx = 1, #piece[1] do
if piece[py_idx][px_idx] == 1 then
local board_x = px + px_idx - 1
local board_y = py + py_idx - 1
if board_x == x and board_y == y then
char = color_codes[piece_id] .. "[]" .. reset_color
end
end
end
end
-- Placed blocks
if board[y][x] ~= 0 then
char = color_codes[board[y][x]] .. "[]" .. reset_color
end
output = output .. char
end
output = output .. "|\r\n"
end
output = output .. "+" .. string.rep("-", board_width * 2) .. "+\r\n"
output = output .. "Use arrow keys to move, Up to rotate, q to quit"
applet:send(output)
end
function handler(applet)
local board = init_board()
local piece_idx = math.random(#pieces)
local current_piece = pieces[piece_idx].shape
local piece_id = pieces[piece_idx].id
local piece_x = math.floor(board_width / 2) - math.floor(#current_piece[1] / 2)
local piece_y = 1
local score = 0
local game_over = false
local delay = 500
if not can_place_piece(board, current_piece, piece_x, piece_y) then
game_over = true
end
applet:send(cursor_hide)
applet:send(clear_screen)
-- fall the piece by one line every delay
local function fall_piece()
while not game_over do
piece_y = piece_y + 1
if not can_place_piece(board, current_piece, piece_x, piece_y) then
piece_y = piece_y - 1
place_piece(board, current_piece, piece_id, piece_x, piece_y)
score = score + clear_lines(board)
piece_idx = math.random(#pieces)
current_piece = pieces[piece_idx].shape
piece_id = pieces[piece_idx].id
piece_x = math.floor(board_width / 2) - math.floor(#current_piece[1] / 2)
piece_y = 1
if not can_place_piece(board, current_piece, piece_x, piece_y) then
game_over = true
end
end
core.msleep(delay)
end
end
core.register_task(fall_piece)
local function drop_piece()
while can_place_piece(board, current_piece, piece_x, piece_y) do
piece_y = piece_y + 1
end
piece_y = piece_y - 1
place_piece(board, current_piece, piece_id, piece_x, piece_y)
score = score + clear_lines(board)
piece_idx = math.random(#pieces)
current_piece = pieces[piece_idx].shape
piece_id = pieces[piece_idx].id
piece_x = math.floor(board_width / 2) - math.floor(#current_piece[1] / 2)
piece_y = 1
if not can_place_piece(board, current_piece, piece_x, piece_y) then
game_over = true
end
render(applet, board, current_piece, piece_id, piece_x, piece_y, score)
end
while not game_over do
render(applet, board, current_piece, piece_id, piece_x, piece_y, score)
-- update the delay based on the score: 500 for 0 lines to 100ms for 100 lines.
if score >= 100 then
delay = 100
else
delay = 500 - 4*score
end
local input = applet:receive(1, delay)
if input then
if input == "" or input == "q" then
game_over = true
elseif input == "\27" then
local a = applet:receive(1, delay)
if a == "[" then
local b = applet:receive(1, delay)
if b == "A" then -- Up arrow (rotate clockwise)
current_piece = rotate_piece(current_piece, piece_id, piece_x, piece_y, board)
elseif b == "B" then -- Down arrow (full drop)
drop_piece()
elseif b == "C" then -- Right arrow
piece_x = piece_x + 1
if not can_place_piece(board, current_piece, piece_x, piece_y) then
piece_x = piece_x - 1
end
elseif b == "D" then -- Left arrow
piece_x = piece_x - 1
if not can_place_piece(board, current_piece, piece_x, piece_y) then
piece_x = piece_x + 1
end
end
end
end
end
end
applet:send(clear_screen .. cursor_home .. "Game Over! Lines: " .. score .. "\r\n" .. cursor_show)
end
-- works as a TCP applet
core.register_service("trisdemo", "tcp", handler)
-- may also work on the CLI but requires an unbuffered handler
core.register_cli({"trisdemo"}, "Play a simple falling pieces game", handler)
@ -1,104 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#ifndef _ACME_T_H_
#define _ACME_T_H_
#include <haproxy/istbuf.h>
#include <haproxy/openssl-compat.h>
#define ACME_RETRY 5
/* acme section configuration */
struct acme_cfg {
char *filename; /* config filename */
int linenum; /* config linenum */
char *name; /* section name */
int reuse_key; /* do we need to renew the private key */
char *directory; /* directory URL */
char *map; /* storage for tokens + thumbprint */
struct {
char *contact; /* email associated to account */
char *file; /* account key filename */
EVP_PKEY *pkey; /* account PKEY */
char *thumbprint; /* account PKEY JWS thumbprint */
} account;
struct {
int type; /* EVP_PKEY_EC or EVP_PKEY_RSA */
int bits; /* bits for RSA */
int curves; /* NID of curves */
} key;
char *challenge; /* HTTP-01, DNS-01, etc */
char *vars; /* variables put in the dpapi sink */
char *provider; /* DNS provider put in the dpapi sink */
struct acme_cfg *next;
};
enum acme_st {
ACME_RESOURCES = 0,
ACME_NEWNONCE,
ACME_CHKACCOUNT,
ACME_NEWACCOUNT,
ACME_NEWORDER,
ACME_AUTH,
ACME_CHALLENGE,
ACME_CHKCHALLENGE,
ACME_FINALIZE,
ACME_CHKORDER,
ACME_CERTIFICATE,
ACME_END
};
enum http_st {
ACME_HTTP_REQ,
ACME_HTTP_RES,
};
struct acme_auth {
struct ist dns; /* dns entry */
struct ist auth; /* auth URI */
struct ist chall; /* challenge URI */
struct ist token; /* token */
int ready; /* is the challenge ready ? */
void *next;
};
/* acme task context */
struct acme_ctx {
enum acme_st state;
enum http_st http_state;
int retries;
int retryafter;
struct httpclient *hc;
struct acme_cfg *cfg;
struct ckch_store *store;
struct {
struct ist newNonce;
struct ist newAccount;
struct ist newOrder;
} resources;
struct ist nonce;
struct ist kid;
struct ist order;
struct acme_auth *auths;
struct acme_auth *next_auth;
X509_REQ *req;
struct ist finalize;
struct ist certificate;
struct task *task;
struct ebmb_node node;
char name[VAR_ARRAY];
};
#define ACME_EV_SCHED (1ULL << 0) /* scheduling wakeup */
#define ACME_EV_NEW (1ULL << 1) /* new task */
#define ACME_EV_TASK (1ULL << 2) /* Task handler */
#define ACME_EV_REQ (1ULL << 3) /* HTTP Request */
#define ACME_EV_RES (1ULL << 4) /* HTTP Response */
#define ACME_VERB_CLEAN 1
#define ACME_VERB_MINIMAL 2
#define ACME_VERB_SIMPLE 3
#define ACME_VERB_ADVANCED 4
#define ACME_VERB_COMPLETE 5
#endif
@ -1,12 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#ifndef _ACME_H_
#define _ACME_H_
#include <haproxy/ssl_ckch-t.h>
int ckch_conf_acme_init(void *value, char *buf, struct ckch_store *s, int cli, const char *filename, int linenum, char **err);
EVP_PKEY *acme_gen_tmp_pkey();
X509 *acme_gen_tmp_x509();
#endif
@ -66,8 +66,7 @@ enum act_parse_ret {
enum act_opt {
ACT_OPT_NONE = 0x00000000, /* no flag */
ACT_OPT_FINAL = 0x00000001, /* last call, cannot yield */
ACT_OPT_FINAL_EARLY = 0x00000002, /* set in addition to ACT_OPT_FINAL if last call occurs earlier than normal due to unexpected IO/error */
ACT_OPT_FIRST = 0x00000004, /* first call for this action */
};
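As an illustration, an action callback might use these flags as follows (a
sketch only: the callback shape matches the action prototype used by this
file, but the yield policy itself is made up for the example):

static enum act_return my_action(struct act_rule *rule, struct proxy *px,
                                 struct session *sess, struct stream *s,
                                 int flags)
{
	if (!(flags & ACT_OPT_FINAL))
		return ACT_RET_YIELD; /* more data may arrive, try again later */

	if (flags & ACT_OPT_FINAL_EARLY) {
		/* last call came earlier than expected (unexpected IO/error):
		 * finish with whatever is available */
	}
	return ACT_RET_CONT;
}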
/* Flags used to describe the action. */
@ -76,12 +76,12 @@ struct memprof_stats {
const void *caller;
enum memprof_method method;
/* 4-7 bytes hole here */
unsigned long long locked_calls;
unsigned long long alloc_calls;
unsigned long long free_calls;
unsigned long long alloc_tot;
unsigned long long free_tot;
void *info; // for pools, ptr to the pool
};
#endif
@ -125,8 +125,8 @@ struct activity {
unsigned int ctr2; // general purpose debug counter
#endif
char __pad[0]; // unused except to check remaining room
char __end[0] THREAD_ALIGNED();
} THREAD_ALIGNED();
/* 256 entries for callers * callees should be highly sufficient (~45 seen usually) */
#define SCHED_ACT_HASH_BITS 8
@ -143,10 +143,7 @@ struct sched_activity {
uint64_t calls;
uint64_t cpu_time;
uint64_t lat_time;
uint64_t lkw_time; /* lock waiting time */
uint64_t lkd_time; /* locked time */
uint64_t mem_time; /* memory ops wait time */
} THREAD_ALIGNED();
#endif /* _HAPROXY_ACTIVITY_T_H */
@ -47,7 +47,7 @@
#define APPCTX_FL_ERROR 0x00000080
#define APPCTX_FL_SHUTDOWN 0x00000100 /* applet was shut down (->release() called if any). No more data exchange with SCs */
#define APPCTX_FL_WANT_DIE 0x00000200 /* applet was running and requested to die */
/* unused: 0x00000400 */
#define APPCTX_FL_FASTFWD 0x00000800 /* zero-copy forwarding is in-use, don't fill the outbuf */
#define APPCTX_FL_IN_MAYALLOC 0x00001000 /* applet may try again to allocate its inbuf */
#define APPCTX_FL_OUT_MAYALLOC 0x00002000 /* applet may try again to allocate its outbuf */
@ -73,22 +73,17 @@ static forceinline char *appctx_show_flags(char *buf, size_t len, const char *de
_(APPCTX_FL_OUTBLK_ALLOC, _(APPCTX_FL_OUTBLK_FULL,
_(APPCTX_FL_EOI, _(APPCTX_FL_EOS,
_(APPCTX_FL_ERR_PENDING, _(APPCTX_FL_ERROR,
_(APPCTX_FL_SHUTDOWN, _(APPCTX_FL_WANT_DIE,
_(APPCTX_FL_FASTFWD, _(APPCTX_FL_IN_MAYALLOC, _(APPCTX_FL_OUT_MAYALLOC)))))))))))));
/* epilogue */
_(~0U);
return buf;
#undef _
}
#define APPLET_FL_NEW_API 0x00000001 /* Set if the applet is based on the new API (using applet's buffers) */
#define APPLET_FL_WARNED 0x00000002 /* Set when warning was already emitted about a legacy applet */
#define APPLET_FL_HTX 0x00000004 /* Set if the applet is using HTX buffers */
/* Applet descriptor */
struct applet {
enum obj_type obj_type; /* object type = OBJ_TYPE_APPLET */
unsigned int flags; /* APPLET_FL_* flags */
/* 3 unused bytes here */
char *name; /* applet's name to report in logs */
int (*init)(struct appctx *); /* callback to init resources, may be NULL.
@ -106,36 +101,29 @@ struct applet {
struct appctx {
enum obj_type obj_type; /* OBJ_TYPE_APPCTX */
/* 3 unused bytes here */
unsigned int st0; /* Main applet state. May be used by any applet */
unsigned int st1; /* Applet substate. May be used by any applet */
unsigned int flags; /* APPCTX_FL_* */
struct buffer inbuf;
struct buffer outbuf;
size_t to_forward;
struct applet *applet; /* applet this context refers to */
struct session *sess; /* session for frontend applets (NULL for backend applets) */
struct sedesc *sedesc; /* stream endpoint descriptor the applet is attached to */
struct act_rule *rule; /* rule associated with the applet. */
struct {
struct buffer *cmdline; /* used to store unfinished commands */
int severity_output; /* used within the cli_io_handler to format severity output of informational feedback */
int level; /* the level of CLI which can be lowered dynamically */
char payload_pat[8]; /* Payload pattern */
char *payload; /* Pointer on the payload. NULL if no payload */
uint32_t anon_key; /* the key to anonymise with the hash in cli */
/* XXX 4 unused bytes here */
int (*io_handler)(struct appctx *appctx); /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK */
void (*io_release)(struct appctx *appctx); /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK,
if the command is terminated or the session released */
} cli_ctx; /* context dedicated to the CLI applet */
struct buffer_wait buffer_wait; /* position in the list of objects waiting for a buffer */
struct task *t; /* task associated to the applet */
struct freq_ctr call_rate; /* appctx call rate */
/* XXX 4 unused bytes here */
struct mt_list wait_entry; /* entry in a list of waiters for an event (e.g. ring events) */
/* The pointer seen by application code is appctx->svcctx. In 2.7 the
@ -58,16 +58,10 @@ size_t appctx_raw_snd_buf(struct appctx *appctx, struct buffer *buf, size_t coun
size_t appctx_snd_buf(struct stconn *sc, struct buffer *buf, size_t count, unsigned int flags);
int appctx_fastfwd(struct stconn *sc, unsigned int count, unsigned int flags);
ssize_t applet_append_line(void *ctx, struct ist v1, struct ist v2, size_t ofs, size_t len, char delim);
static forceinline void applet_fl_set(struct appctx *appctx, uint on);
static forceinline void applet_fl_clr(struct appctx *appctx, uint off);
static forceinline uint appctx_app_test(const struct appctx *appctx, uint test)
{
return (appctx->applet->flags & test);
}
static inline struct appctx *appctx_new_here(struct applet *applet, struct sedesc *sedesc) static inline struct appctx *appctx_new_here(struct applet *applet, struct sedesc *sedesc)
{ {
return appctx_new_on(applet, sedesc, tid); return appctx_new_on(applet, sedesc, tid);
@ -122,7 +116,7 @@ static inline int appctx_init(struct appctx *appctx)
* the appctx will be fully initialized. The session and the stream will * the appctx will be fully initialized. The session and the stream will
* eventually be created. The affinity must be set now ! * eventually be created. The affinity must be set now !
*/ */
BUG_ON(appctx->t->tid != -1 && appctx->t->tid != tid); BUG_ON(appctx->t->tid != tid);
task_set_thread(appctx->t, tid); task_set_thread(appctx->t, tid);
if (appctx->applet->init) if (appctx->applet->init)
@ -152,9 +146,6 @@ static inline void __appctx_free(struct appctx *appctx)
#define appctx_wakeup(ctx) \ #define appctx_wakeup(ctx) \
_task_wakeup((ctx)->t, TASK_WOKEN_OTHER, MK_CALLER(WAKEUP_TYPE_APPCTX_WAKEUP, 0, 0)) _task_wakeup((ctx)->t, TASK_WOKEN_OTHER, MK_CALLER(WAKEUP_TYPE_APPCTX_WAKEUP, 0, 0))
#define appctx_schedule(ctx, w) \
_task_schedule((ctx)->t, w, MK_CALLER(WAKEUP_TYPE_TASK_SCHEDULE, 0, 0))
/* returns the stream connector the appctx is attached to, via the sedesc */ /* returns the stream connector the appctx is attached to, via the sedesc */
static inline struct stconn *appctx_sc(const struct appctx *appctx) static inline struct stconn *appctx_sc(const struct appctx *appctx)
{ {
@ -288,156 +279,13 @@ static inline void applet_expect_data(struct appctx *appctx)
se_fl_clr(appctx->sedesc, SE_FL_EXP_NO_DATA); se_fl_clr(appctx->sedesc, SE_FL_EXP_NO_DATA);
} }
/* Returns the buffer containing data pushed to the applet by the stream. For
* applets using their own buffers it is the appctx input buffer. For legacy
* applets, it is the output channel buffer.
*/
static inline struct buffer *applet_get_inbuf(struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (applet_fl_test(appctx, APPCTX_FL_INBLK_ALLOC) || !appctx_get_buf(appctx, &appctx->inbuf))
return NULL;
return &appctx->inbuf;
}
else
return sc_ob(appctx_sc(appctx));
}
/* Returns the buffer containing data pushed by the applets to the stream. For
* applets using their own buffers it is the appctx output buffer. For legacy
* applets, it is the input channel buffer.
*/
static inline struct buffer *applet_get_outbuf(struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (applet_fl_test(appctx, APPCTX_FL_OUTBLK_ALLOC|APPCTX_FL_OUTBLK_FULL) ||
!appctx_get_buf(appctx, &appctx->outbuf))
return NULL;
return &appctx->outbuf;
}
else
return sc_ib(appctx_sc(appctx));
}
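A minimal usage sketch (not part of the diff): a new-API applet draining its input into its output with the two accessors above plus applet_skip_input() defined further down. The handler name and the copy policy are hypothetical; a NULL return only means a buffer could not be allocated yet and the applet must wait for the next wakeup.

static void echo_io_handler(struct appctx *appctx)
{
	struct buffer *in = applet_get_inbuf(appctx);
	struct buffer *out = applet_get_outbuf(appctx);
	size_t blk;

	if (!in || !out)
		return; /* allocation pending, retry on next wakeup */

	/* copy as much contiguous input as the output can hold */
	blk = MIN(b_contig_data(in, 0), b_room(out));
	if (blk) {
		b_putblk(out, b_head(in), blk);
		applet_skip_input(appctx, blk);
	}
}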
/* Returns the amount of HTX data in the input buffer (see applet_get_inbuf) */
static inline size_t applet_htx_input_data(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
return htx_used_space(htxbuf(&appctx->inbuf));
else
return co_data(sc_oc(appctx_sc(appctx)));
}
/* Returns the amount of data in the input buffer (see applet_get_inbuf) */
static inline size_t applet_input_data(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_HTX))
return applet_htx_input_data(appctx);
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
return b_data(&appctx->inbuf);
else
return co_data(sc_oc(appctx_sc(appctx)));
}
/* Returns the amount of HTX data in the output buffer (see applet_get_outbuf) */
static inline size_t applet_htx_output_data(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
return htx_used_space(htxbuf(&appctx->outbuf));
else
return ci_data(sc_ic(appctx_sc(appctx)));
}
/* Returns the amount of data in the output buffer (see applet_get_outbuf) */
static inline size_t applet_output_data(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_HTX))
return applet_htx_output_data(appctx);
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
return b_data(&appctx->outbuf);
else
return ci_data(sc_ic(appctx_sc(appctx)));
}
/* Skips <len> bytes from the input buffer (see applet_get_inbuf).
*
* This is useful when data have been read directly from the buffer. It is
* illegal to call this function with <len> causing a wrapping at the end of the
* buffer. It's the caller's responsibility to ensure that <len> is never larger
* than the available input data.
*
* This function is not HTX aware.
*/
static inline void applet_skip_input(struct appctx *appctx, size_t len)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
b_del(&appctx->inbuf, len);
applet_fl_clr(appctx, APPCTX_FL_INBLK_FULL);
}
else
co_skip(sc_oc(appctx_sc(appctx)), len);
}
/* Removes all bytes from the input buffer (see applet_get_inbuf).
*/
static inline void applet_reset_input(struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
b_reset(&appctx->inbuf);
applet_fl_clr(appctx, APPCTX_FL_INBLK_FULL);
}
else
co_skip(sc_oc(appctx_sc(appctx)), co_data(sc_oc(appctx_sc(appctx))));
}
/* Returns the amount of space available in the HTX output buffer (see applet_get_outbuf).
*/
static inline size_t applet_htx_output_room(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
return htx_free_data_space(htxbuf(&appctx->outbuf));
else
return channel_recv_max(sc_ic(appctx_sc(appctx)));
}
/* Returns the amount of space available in the output buffer (see applet_get_outbuf).
*/
static inline size_t applet_output_room(const struct appctx *appctx)
{
if (appctx_app_test(appctx, APPLET_FL_HTX))
return applet_htx_output_room(appctx);
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
return b_room(&appctx->outbuf);
else
return channel_recv_max(sc_ic(appctx_sc(appctx)));
}
/* Indicates that the applet has more data to deliver and needs more room in
* the output buffer to do so (see applet_get_outbuf).
*
* For applets using their own buffers, <room_needed> is not used and only
* <appctx> flags are updated. For legacy applets, the amount of free space
* required must be specified. In this last case, it is the caller's
* responsibility to ensure <room_needed> is valid.
*/
static inline void applet_need_room(struct appctx *appctx, size_t room_needed)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API))
applet_have_more_data(appctx);
else
sc_need_room(appctx_sc(appctx), room_needed);
}
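As a sketch of the intended calling pattern (the handler name and the 128-byte record size are hypothetical), an applet checks the space it needs with applet_output_room() and, when short, registers its requirement so it gets woken up again once room is available:

static void record_dumper(struct appctx *appctx)
{
	if (applet_output_room(appctx) < 128) {
		/* not enough space for one full record: ask for it */
		applet_need_room(appctx, 128);
		return;
	}
	/* ... emit the 128-byte record, e.g. with applet_putblk() ... */
}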
/* Should only be used via wrappers applet_putchk() / applet_putchk_stress(). */ /* Should only be used via wrappers applet_putchk() / applet_putchk_stress(). */
static inline int _applet_putchk(struct appctx *appctx, struct buffer *chunk, static inline int _applet_putchk(struct appctx *appctx, struct buffer *chunk,
int stress) int stress)
{ {
int ret; int ret;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) { if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (unlikely(stress) ? if (unlikely(stress) ?
b_data(&appctx->outbuf) : b_data(&appctx->outbuf) :
b_data(chunk) > b_room(&appctx->outbuf)) { b_data(chunk) > b_room(&appctx->outbuf)) {
@ -467,10 +315,9 @@ static inline int _applet_putchk(struct appctx *appctx, struct buffer *chunk,
return ret; return ret;
} }
/* writes chunk <chunk> into the applet output buffer (see applet_get_outbuf). /* writes chunk <chunk> into the input channel of the stream attached to this
* * appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full error.
* Returns the number of written bytes on success or -1 on error (lack of space, * See ci_putchk() for the list of return codes.
* shutdown, invalid call...)
*/ */
static inline int applet_putchk(struct appctx *appctx, struct buffer *chunk) static inline int applet_putchk(struct appctx *appctx, struct buffer *chunk)
{ {
@ -483,16 +330,15 @@ static inline int applet_putchk_stress(struct appctx *appctx, struct buffer *chu
return _applet_putchk(appctx, chunk, 1); return _applet_putchk(appctx, chunk, 1);
} }
/* writes <len> chars from <blk> into the applet output buffer (see applet_get_outbuf). /* writes <len> chars from <blk> into the input channel of the stream attached
* * to this appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full
* Returns the number of written bytes on success or -1 on error (lack of space, * error. See ci_putblk() for the list of return codes.
* shutdown, invalid call...)
*/ */
static inline int applet_putblk(struct appctx *appctx, const char *blk, int len) static inline int applet_putblk(struct appctx *appctx, const char *blk, int len)
{ {
int ret; int ret;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) { if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (len > b_room(&appctx->outbuf)) { if (len > b_room(&appctx->outbuf)) {
applet_fl_set(appctx, APPCTX_FL_OUTBLK_FULL); applet_fl_set(appctx, APPCTX_FL_OUTBLK_FULL);
ret = -1; ret = -1;
@ -518,17 +364,16 @@ static inline int applet_putblk(struct appctx *appctx, const char *blk, int len)
return ret; return ret;
} }
/* writes chars from <str> up to the trailing zero (excluded) into the applet /* writes chars from <str> up to the trailing zero (excluded) into the input
* output buffer (see applet_get_outbuf). * channel of the stream attached to this appctx's endpoint, and marks the
* * SC_FL_NEED_ROOM on a channel full error. See ci_putstr() for the list of
* Returns the number of written bytes on success or -1 on error (lack of space, * return codes.
* shutdown, invalid call...)
*/ */
static inline int applet_putstr(struct appctx *appctx, const char *str) static inline int applet_putstr(struct appctx *appctx, const char *str)
{ {
int ret; int ret;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) { if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
int len = strlen(str); int len = strlen(str);
if (len > b_room(&appctx->outbuf)) { if (len > b_room(&appctx->outbuf)) {
@ -555,16 +400,15 @@ static inline int applet_putstr(struct appctx *appctx, const char *str)
return ret; return ret;
} }
/* writes character <chr> into the applet's output buffer (see applet_get_outbuf). /* writes character <chr> into the input channel of the stream attached to this
* * appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full error.
* Returns the number of written bytes on success or -1 on error (lack of space, * See ci_putchr() for the list of return codes.
* shutdown, invalid call...)
*/ */
static inline int applet_putchr(struct appctx *appctx, char chr) static inline int applet_putchr(struct appctx *appctx, char chr)
{ {
int ret; int ret;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) { if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
if (b_full(&appctx->outbuf)) { if (b_full(&appctx->outbuf)) {
applet_fl_set(appctx, APPCTX_FL_OUTBLK_FULL); applet_fl_set(appctx, APPCTX_FL_OUTBLK_FULL);
ret = -1; ret = -1;
@ -591,283 +435,6 @@ static inline int applet_putchr(struct appctx *appctx, char chr)
return ret; return ret;
} }
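Hedged usage sketch for the applet_put*() family: on -1 the buffer-full flag (or SC_FL_NEED_ROOM for legacy applets) has already been set by the helper, so the handler can simply return and retry on the next wakeup. The handler name below is hypothetical.

static void banner_io_handler(struct appctx *appctx)
{
	if (applet_putstr(appctx, "200 ready\r\n") == -1)
		return; /* output full, flags already set, retry later */
	/* ... move on to the applet's next state ... */
}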
static inline int applet_may_get(const struct appctx *appctx, size_t len)
{
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (len > b_data(&appctx->inbuf)) {
if (se_fl_test(appctx->sedesc, SE_FL_SHW))
return -1;
return 0;
}
}
else {
const struct stconn *sc = appctx_sc(appctx);
if ((sc->flags & SC_FL_SHUT_DONE) || len > co_data(sc_oc(sc))) {
if (sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
return -1;
return 0;
}
}
return 1;
}
/* Gets one char from the applet input buffer (see applet_get_inbuf).
*
* Return values :
* 1 : number of bytes read, equal to requested size.
* =0 : not enough data available. <c> is left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getchar(const struct appctx *appctx, char *c)
{
int ret;
ret = applet_may_get(appctx, 1);
if (ret <= 0)
return ret;
*c = ((appctx_app_test(appctx, APPLET_FL_NEW_API))
? *(b_head(&appctx->inbuf))
: *(co_head(sc_oc(appctx_sc(appctx)))));
return 1;
}
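Sketch of the peek-then-consume contract this implies: applet_getchar() never advances the buffer, so each accepted character must be released with applet_skip_input(). The handler name is hypothetical.

static void char_reader(struct appctx *appctx)
{
	char c;
	int ret;

	while ((ret = applet_getchar(appctx, &c)) > 0) {
		applet_skip_input(appctx, 1); /* consume the peeked char */
		if (c == 'q')
			break;
	}
	/* ret == 0: wait for more data; ret < 0: peer shut, stop */
}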
/* Copies one full block of data from the applet input buffer (see
* applet_get_inbuf).
*
* <len> bytes are copied, starting at the offset <offset>.
*
* Return values :
* >0 : number of bytes read, equal to requested size.
* =0 : not enough data available. <blk> is left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getblk(const struct appctx *appctx, char *blk, int len, int offset)
{
const struct buffer *buf;
int ret;
ret = applet_may_get(appctx, len+offset);
if (ret <= 0)
return ret;
buf = ((appctx_app_test(appctx, APPLET_FL_NEW_API))
? &appctx->inbuf
: sc_ob(appctx_sc(appctx)));
return b_getblk(buf, blk, len, offset);
}
/* Gets one text block representing a word from the applet input buffer (see
* applet_get_inbuf).
*
* The separator is waited for as long as some data can still be received and the
* destination is not full. Otherwise, the string may be returned as is, without
* the separator.
*
* Return values :
* >0 : number of bytes read. Includes the separator if present before len or end.
* =0 : no separator before end found. <str> is left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getword(const struct appctx *appctx, char *str, int len, char sep)
{
const struct buffer *buf;
char *p;
size_t input, max = len;
int ret = 0;
ret = applet_may_get(appctx, 1);
if (ret <= 0)
goto out;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
buf = &appctx->inbuf;
input = b_data(buf);
}
else {
struct stconn *sc = appctx_sc(appctx);
buf = sc_ob(sc);
input = co_data(sc_oc(sc));
}
if (max > input) {
max = input;
str[max-1] = 0;
}
p = b_head(buf);
ret = 0;
while (max) {
*str++ = *p;
ret++;
max--;
if (*p == sep)
goto out;
p = b_next(buf, p);
}
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (ret < len && (ret < input || b_room(buf)) &&
!se_fl_test(appctx->sedesc, SE_FL_SHW))
ret = 0;
}
else {
struct stconn *sc = appctx_sc(appctx);
if (ret < len && (ret < input || channel_may_recv(sc_oc(sc))) &&
!(sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)))
ret = 0;
}
out:
if (max)
*str = 0;
return ret;
}
/* Gets one text block representing a line from the applet input buffer (see
* applet_get_inbuf).
*
* The '\n' is waited for as long as some data can still be received and the
* destination is not full. Otherwise, the string may be returned as is, without
* the '\n'.
*
* Return values :
* >0 : number of bytes read. Includes the \n if present before len or end.
* =0 : no '\n' before end found. <str> is left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getline(const struct appctx *appctx, char *str, int len)
{
return applet_getword(appctx, str, len, '\n');
}
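A line-based reader might thus look like the sketch below (hypothetical name): each positive return is a complete line which must then be consumed explicitly.

static void line_reader(struct appctx *appctx)
{
	char line[256];
	int ret;

	while ((ret = applet_getline(appctx, line, sizeof(line))) > 0) {
		applet_skip_input(appctx, ret); /* line consumed */
		/* ... parse <ret> bytes of <line>, '\n' included if seen ... */
	}
	/* ret == 0: incomplete line so far; ret < 0: output shut */
}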
/* Gets one or two blocks of data at once from the applet input buffer (see applet_get_inbuf).
*
* Data are not copied.
*
* Return values :
* >0 : number of blocks filled (1 or 2). blk1 is always filled before blk2.
* =0 : not enough data available. <blk*> are left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getblk_nc(const struct appctx *appctx, const char **blk1, size_t *len1, const char **blk2, size_t *len2)
{
const struct buffer *buf;
size_t max;
int ret;
ret = applet_may_get(appctx, 1);
if (ret <= 0)
return ret;
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
buf = &appctx->inbuf;
max = b_data(buf);
}
else {
struct stconn *sc = appctx_sc(appctx);
buf = sc_ob(sc);
max = co_data(sc_oc(sc));
}
return b_getblk_nc(buf, blk1, len1, blk2, len2, 0, max);
}
/* Gets one or two blocks of text representing a word from the applet input
* buffer (see applet_get_inbuf).
*
* Data are not copied. The separator is waited for as long as some data can
* still be received and the destination is not full. Otherwise, the string may
* be returned as is, without the separator.
*
* Return values :
* >0 : number of blocks filled (1 or 2). The separator, when found, ends the last block.
* =0 : no separator found yet while more data may still be received. <blk*> are left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getword_nc(const struct appctx *appctx, const char **blk1, size_t *len1, const char **blk2, size_t *len2, char sep)
{
int ret;
size_t l;
ret = applet_getblk_nc(appctx, blk1, len1, blk2, len2);
if (unlikely(ret <= 0))
return ret;
for (l = 0; l < *len1 && (*blk1)[l] != sep; l++);
if (l < *len1 && (*blk1)[l] == sep) {
*len1 = l + 1;
return 1;
}
if (ret >= 2) {
for (l = 0; l < *len2 && (*blk2)[l] != sep; l++);
if (l < *len2 && (*blk2)[l] == sep) {
*len2 = l + 1;
return 2;
}
}
/* If we have found no separator and the buffer is full or the SC is shut, then
* the resulting string is made of the concatenation of the pending
* blocks (1 or 2).
*/
if (appctx_app_test(appctx, APPLET_FL_NEW_API)) {
if (b_full(&appctx->inbuf) || se_fl_test(appctx->sedesc, SE_FL_SHW))
return ret;
}
else {
struct stconn *sc = appctx_sc(appctx);
if (!channel_may_recv(sc_oc(sc)) || sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
return ret;
}
/* No separator yet and not shut yet */
return 0;
}
/* Gets one or two blocks of text representing a line from the applet input
* buffer (see applet_get_inbuf).
*
* Data are not copied. The '\n' is waited for as long as some data can still be
* received and the destination is not full. Otherwise, the string may be
* returned as is, without the '\n'.
*
* Return values :
* >0 : number of blocks filled (1 or 2). The '\n', when found, ends the last block.
* =0 : no '\n' found yet while more data may still be received. <blk*> are left undefined.
* <0 : no more bytes readable because output is shut.
*
* The status of the corresponding buffer is not changed. The caller must call
* applet_skip_input() to update it.
*/
static inline int applet_getline_nc(const struct appctx *appctx, const char **blk1, size_t *len1, const char **blk2, size_t *len2)
{
return applet_getword_nc(appctx, blk1, len1, blk2, len2, '\n');
}
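Zero-copy counterpart, again as a sketch: the line is exposed in place as one or two blocks, and consumption still goes through applet_skip_input(). The handler name is hypothetical.

static void line_reader_nc(struct appctx *appctx)
{
	const char *b1, *b2;
	size_t l1, l2;
	int ret;

	ret = applet_getline_nc(appctx, &b1, &l1, &b2, &l2);
	if (ret <= 0)
		return; /* 0: no full line yet; <0: output shut */

	/* process b1[0..l1-1], then b2[0..l2-1] when ret == 2 */
	applet_skip_input(appctx, l1 + (ret == 2 ? l2 : 0));
}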
#endif /* _HAPROXY_APPLET_H */ #endif /* _HAPROXY_APPLET_H */
/* /*

View file

@ -26,9 +26,9 @@
#include <haproxy/compiler.h> #include <haproxy/compiler.h>
/* A few notes for the macros and functions here: /* A few notes for the macros and functions here:
* - this file is painful to edit, most operations exist in 2 variants, * - this file is painful to edit, most operations exist in 3 variants,
* no-thread, and threads (with gcc>=4.7). Be careful when modifying * no-thread, threads with gcc<4.7, threads with gcc>=4.7. Be careful when
* it not to break any of them. * modifying it not to break any of them.
* *
* - macros named HA_ATOMIC_* are or use in the general case, they contain the * - macros named HA_ATOMIC_* are or use in the general case, they contain the
* required memory barriers to guarantee sequential consistency * required memory barriers to guarantee sequential consistency
@ -191,10 +191,122 @@
/* Threads are ENABLED, all atomic ops are made thread-safe. By extension they /* Threads are ENABLED, all atomic ops are made thread-safe. By extension they
* can also be used for inter-process synchronization but one must verify that * can also be used for inter-process synchronization but one must verify that
* the code still builds with threads disabled. Code below requires C11 atomics * the code still builds with threads disabled.
* as present in gcc >= 4.7 or clang.
*/ */
#if defined(__GNUC__) && (__GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ < 7) && !defined(__clang__)
/* gcc < 4.7 */
#define HA_ATOMIC_LOAD(val) \
({ \
typeof((val)) __val_load = (val); \
typeof(*(val)) __ret_val = \
({ __sync_synchronize(); *(volatile typeof(__val_load))__val_load; }); \
__sync_synchronize(); \
__ret_val; \
})
#define HA_ATOMIC_STORE(val, new) \
({ \
typeof((val)) __val_store = (val); \
typeof(*(val)) __old_store; \
typeof((new)) __new_store = (new); \
do { __old_store = *__val_store; \
} while (!__sync_bool_compare_and_swap(__val_store, __old_store, __new_store) && __ha_cpu_relax()); \
})
#define HA_ATOMIC_XCHG(val, new) \
({ \
typeof((val)) __val_xchg = (val); \
typeof(*(val)) __old_xchg; \
typeof((new)) __new_xchg = (new); \
do { __old_xchg = *__val_xchg; \
} while (!__sync_bool_compare_and_swap(__val_xchg, __old_xchg, __new_xchg) && __ha_cpu_relax()); \
__old_xchg; \
})
#define HA_ATOMIC_AND(val, flags) do { __sync_and_and_fetch(val, flags); } while (0)
#define HA_ATOMIC_OR(val, flags) do { __sync_or_and_fetch(val, flags); } while (0)
#define HA_ATOMIC_ADD(val, i) do { __sync_add_and_fetch(val, i); } while (0)
#define HA_ATOMIC_SUB(val, i) do { __sync_sub_and_fetch(val, i); } while (0)
#define HA_ATOMIC_INC(val) do { __sync_add_and_fetch(val, 1); } while (0)
#define HA_ATOMIC_DEC(val) do { __sync_sub_and_fetch(val, 1); } while (0)
#define HA_ATOMIC_AND_FETCH(val, flags) __sync_and_and_fetch(val, flags)
#define HA_ATOMIC_OR_FETCH(val, flags) __sync_or_and_fetch(val, flags)
#define HA_ATOMIC_ADD_FETCH(val, i) __sync_add_and_fetch(val, i)
#define HA_ATOMIC_SUB_FETCH(val, i) __sync_sub_and_fetch(val, i)
#define HA_ATOMIC_FETCH_AND(val, flags) __sync_fetch_and_and(val, flags)
#define HA_ATOMIC_FETCH_OR(val, flags) __sync_fetch_and_or(val, flags)
#define HA_ATOMIC_FETCH_ADD(val, i) __sync_fetch_and_add(val, i)
#define HA_ATOMIC_FETCH_SUB(val, i) __sync_fetch_and_sub(val, i)
#define HA_ATOMIC_BTS(val, bit) \
({ \
typeof(*(val)) __b_bts = (1UL << (bit)); \
__sync_fetch_and_or((val), __b_bts) & __b_bts; \
})
#define HA_ATOMIC_BTR(val, bit) \
({ \
typeof(*(val)) __b_btr = (1UL << (bit)); \
__sync_fetch_and_and((val), ~__b_btr) & __b_btr; \
})
/* the CAS is a bit complicated. The older API doesn't support returning the
* value and the swap's result at the same time. So here we take what looks
* like the safest route, consisting in using the boolean version guaranteeing
* that the operation was performed or not, and we snoop a previous value. If
* the compare succeeds, we return. If it fails, we return the previous value,
* but only if it differs from the expected one. If it's the same it's a race
* thus we try again to avoid confusing a possibly sensitive caller.
*/
#define HA_ATOMIC_CAS(val, old, new) \
({ \
typeof((val)) __val_cas = (val); \
typeof((old)) __oldp_cas = (old); \
typeof(*(old)) __oldv_cas; \
typeof((new)) __new_cas = (new); \
int __ret_cas; \
do { \
__oldv_cas = *__val_cas; \
__ret_cas = __sync_bool_compare_and_swap(__val_cas, *__oldp_cas, __new_cas); \
} while (!__ret_cas && *__oldp_cas == __oldv_cas && __ha_cpu_relax()); \
if (!__ret_cas) \
*__oldp_cas = __oldv_cas; \
__ret_cas; \
})
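The calling convention this fallback preserves is the usual compare-and-swap retry loop: on failure <old> is refreshed, so the caller just recomputes the new value and tries again. A small sketch (the function and its clamping policy are hypothetical):

static inline void counter_add_clamped(unsigned int *ctr, unsigned int inc, unsigned int max)
{
	unsigned int old = HA_ATOMIC_LOAD(ctr);
	unsigned int new;

	do {
		new = (old + inc < max) ? old + inc : max;
	} while (!HA_ATOMIC_CAS(ctr, &old, new) && __ha_cpu_relax());
}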
/* warning, n is a pointer to the double value for dwcas */
#define HA_ATOMIC_DWCAS(val, o, n) __ha_cas_dw(val, o, n)
#define HA_ATOMIC_UPDATE_MAX(val, new) \
({ \
typeof(val) __val = (val); \
typeof(*(val)) __old_max = *__val; \
typeof(*(val)) __new_max = (new); \
\
while (__old_max < __new_max && \
!HA_ATOMIC_CAS(__val, &__old_max, __new_max) && __ha_cpu_relax()); \
*__val; \
})
#define HA_ATOMIC_UPDATE_MIN(val, new) \
({ \
typeof(val) __val = (val); \
typeof(*(val)) __old_min = *__val; \
typeof(*(val)) __new_min = (new); \
\
while (__old_min > __new_min && \
!HA_ATOMIC_CAS(__val, &__old_min, __new_min) && __ha_cpu_relax()); \
*__val; \
})
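For instance (sketch, with a hypothetical global), tracking a high-water mark from any thread reduces to a single call, the retry loop being hidden in the macro:

static unsigned int peak_conns; /* updated concurrently */

static inline void note_conn_count(unsigned int cur)
{
	HA_ATOMIC_UPDATE_MAX(&peak_conns, cur);
}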
#else /* gcc */
/* gcc >= 4.7 or clang */
#define HA_ATOMIC_STORE(val, new) __atomic_store_n(val, new, __ATOMIC_RELEASE) #define HA_ATOMIC_STORE(val, new) __atomic_store_n(val, new, __ATOMIC_RELEASE)
#define HA_ATOMIC_LOAD(val) __atomic_load_n(val, __ATOMIC_ACQUIRE) #define HA_ATOMIC_LOAD(val) __atomic_load_n(val, __ATOMIC_ACQUIRE)
#define HA_ATOMIC_XCHG(val, new) __atomic_exchange_n(val, new, __ATOMIC_ACQ_REL) #define HA_ATOMIC_XCHG(val, new) __atomic_exchange_n(val, new, __ATOMIC_ACQ_REL)
@ -408,6 +520,8 @@
/* warning, n is a pointer to the double value for dwcas */ /* warning, n is a pointer to the double value for dwcas */
#define _HA_ATOMIC_DWCAS(val, o, n) __ha_cas_dw(val, o, n) #define _HA_ATOMIC_DWCAS(val, o, n) __ha_cas_dw(val, o, n)
#endif /* gcc >= 4.7 */
/* Here come a few architecture-specific double-word CAS and barrier /* Here come a few architecture-specific double-word CAS and barrier
* implementations. * implementations.
*/ */
@ -685,13 +799,12 @@ static __inline int __ha_cas_dw(void *target, void *compare, void *set)
#else /* unknown / unhandled architecture, fall back to generic barriers */ #else /* unknown / unhandled architecture, fall back to generic barriers */
#define __ha_barrier_atomic_load() __atomic_thread_fence(__ATOMIC_ACQUIRE) #define __ha_barrier_atomic_load __sync_synchronize
#define __ha_barrier_atomic_store() __atomic_thread_fence(__ATOMIC_RELEASE) #define __ha_barrier_atomic_store __sync_synchronize
#define __ha_barrier_atomic_full() __atomic_thread_fence(__ATOMIC_SEQ_CST) #define __ha_barrier_atomic_full __sync_synchronize
#define __ha_barrier_load() __atomic_thread_fence(__ATOMIC_ACQUIRE) #define __ha_barrier_load __sync_synchronize
#define __ha_barrier_store() __atomic_thread_fence(__ATOMIC_RELEASE) #define __ha_barrier_store __sync_synchronize
#define __ha_barrier_full() __atomic_thread_fence(__ATOMIC_SEQ_CST) #define __ha_barrier_full __sync_synchronize
/* Note: there is no generic DWCAS */ /* Note: there is no generic DWCAS */
/* short-lived CPU relaxation */ /* short-lived CPU relaxation */

View file

@ -144,12 +144,6 @@
*/ */
#define BE_WEIGHT_SCALE 16 #define BE_WEIGHT_SCALE 16
/* LB parameters for all algorithms, with one instance per thread-group */
struct lbprm_per_tgrp {
union {
struct lb_fwrr_per_tgrp fwrr;
};
};
/* LB parameters for all algorithms */ /* LB parameters for all algorithms */
struct lbprm { struct lbprm {
union { /* LB parameters depending on the algo type */ union { /* LB parameters depending on the algo type */
@ -168,15 +162,12 @@ struct lbprm {
int wmult; /* ratio between user weight and effective weight */ int wmult; /* ratio between user weight and effective weight */
int wdiv; /* ratio between effective weight and user weight */ int wdiv; /* ratio between effective weight and user weight */
int hash_balance_factor; /* load balancing factor * 100, 0 if disabled */ int hash_balance_factor; /* load balancing factor * 100, 0 if disabled */
unsigned int lb_free_list_nb; /* Number of elements in the free list */
struct sample_expr *expr; /* sample expression for "balance (log-)hash" */ struct sample_expr *expr; /* sample expression for "balance (log-)hash" */
char *arg_str; /* name of the URL parameter/header/cookie used for hashing */ char *arg_str; /* name of the URL parameter/header/cookie used for hashing */
int arg_len; /* strlen(arg_str), computed only once */ int arg_len; /* strlen(arg_str), computed only once */
int arg_opt1; /* extra option 1 for the LB algo (algo-specific) */ int arg_opt1; /* extra option 1 for the LB algo (algo-specific) */
int arg_opt2; /* extra option 2 for the LB algo (algo-specific) */ int arg_opt2; /* extra option 2 for the LB algo (algo-specific) */
int arg_opt3; /* extra option 3 for the LB algo (algo-specific) */ int arg_opt3; /* extra option 3 for the LB algo (algo-specific) */
uint64_t lb_seq; /* sequence number for algos who need it */
struct mt_list lb_free_list; /* LB tree elements available */
__decl_thread(HA_RWLOCK_T lock); __decl_thread(HA_RWLOCK_T lock);
struct server *fbck; /* first backup server when !PR_O_USE_ALL_BK, or NULL */ struct server *fbck; /* first backup server when !PR_O_USE_ALL_BK, or NULL */
@ -190,8 +181,6 @@ struct lbprm {
void (*server_take_conn)(struct server *); /* to be called when connection is assigned */ void (*server_take_conn)(struct server *); /* to be called when connection is assigned */
void (*server_drop_conn)(struct server *); /* to be called when connection is dropped */ void (*server_drop_conn)(struct server *); /* to be called when connection is dropped */
void (*server_requeue)(struct server *); /* function used to place the server where it must be */ void (*server_requeue)(struct server *); /* function used to place the server where it must be */
void (*proxy_deinit)(struct proxy *); /* to be called when we're destroying the proxy */
void (*server_deinit)(struct server *); /* to be called when we're destroying the server */
}; };
#endif /* _HAPROXY_BACKEND_T_H */ #endif /* _HAPROXY_BACKEND_T_H */

View file

@ -45,18 +45,6 @@ int assign_server_and_queue(struct stream *s);
int alloc_bind_address(struct sockaddr_storage **ss, int alloc_bind_address(struct sockaddr_storage **ss,
struct server *srv, struct proxy *be, struct server *srv, struct proxy *be,
struct stream *s); struct stream *s);
int be_reuse_mode(const struct proxy *be, const struct server *srv);
int64_t be_calculate_conn_hash(struct server *srv, struct stream *strm,
struct session *sess,
struct sockaddr_storage *src,
struct sockaddr_storage *dst,
struct ist name);
int be_reuse_connection(int64_t hash, struct session *sess,
struct proxy *be, struct server *srv,
struct stconn *sc, enum obj_type *target, int not_first_req);
int srv_redispatch_connect(struct stream *t); int srv_redispatch_connect(struct stream *t);
void back_try_conn_req(struct stream *s); void back_try_conn_req(struct stream *s);
void back_handle_st_req(struct stream *s); void back_handle_st_req(struct stream *s);
@ -85,21 +73,10 @@ static inline int be_usable_srv(struct proxy *be)
return be->srv_bck; return be->srv_bck;
} }
/* Returns true if <be> backend can be used as target to a switching rules. */
static inline int be_is_eligible(const struct proxy *be)
{
/* A disabled or unpublished backend cannot be selected for traffic.
* Note that STOPPED state is ignored as there is a risk of breaking
* requests during soft-stop.
*/
return !(be->flags & (PR_FL_DISABLED|PR_FL_BE_UNPUBLISHED));
}
/* set the time of last session on the backend */ /* set the time of last session on the backend */
static inline void be_set_sess_last(struct proxy *be) static inline void be_set_sess_last(struct proxy *be)
{ {
if (be->be_counters.shared.tg) be->be_counters.last_sess = ns_to_sec(now_ns);
HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
} }
/* This function returns non-zero if the designated server will be /* This function returns non-zero if the designated server will be
@ -179,12 +156,6 @@ void set_backend_down(struct proxy *be);
unsigned int gen_hash(const struct proxy* px, const char* key, unsigned long len); unsigned int gen_hash(const struct proxy* px, const char* key, unsigned long len);
/* Returns true if connection reuse is supported by <be> backend. */
static inline int be_supports_conn_reuse(const struct proxy *be)
{
return be->mode == PR_MODE_HTTP || be->mode == PR_MODE_SPOP;
}
#endif /* _HAPROXY_BACKEND_H */ #endif /* _HAPROXY_BACKEND_H */
/* /*

View file

@ -40,23 +40,6 @@
#define DPRINTF(x...) #define DPRINTF(x...)
#endif #endif
/* Let's make DEBUG_STRESS equal to zero if not set or not valid, or to
* 1 if set. This way it is always set and should be easy to use in "if ()"
* statements without requiring ifdefs, while remaining compatible with
* "#if DEBUG_STRESS > 0". We also force DEBUG_STRICT and DEBUG_STRICT_ACTION
* when stressed.
*/
#if !defined(DEBUG_STRESS)
# define DEBUG_STRESS 0
#elif DEBUG_STRESS != 0
# undef DEBUG_STRESS
# define DEBUG_STRESS 1 // make sure comparison >0 always works
# undef DEBUG_STRICT
# define DEBUG_STRICT 2 // enable BUG_ON
# undef DEBUG_STRICT_ACTION
# define DEBUG_STRICT_ACTION 3 // enable crash on match
#endif
#define DUMP_TRACE() do { extern void ha_backtrace_to_stderr(void); ha_backtrace_to_stderr(); } while (0) #define DUMP_TRACE() do { extern void ha_backtrace_to_stderr(void); ha_backtrace_to_stderr(); } while (0)
/* First, let's try to handle some arch-specific crashing methods. We prefer /* First, let's try to handle some arch-specific crashing methods. We prefer
@ -85,7 +68,7 @@
#else // not x86 #else // not x86
/* generic implementation, causes a segfault */ /* generic implementation, causes a segfault */
static inline __attribute((always_inline,noreturn,unused)) void ha_crash_now(void) static inline __attribute((always_inline)) void ha_crash_now(void)
{ {
#if __GNUC_PREREQ__(5, 0) #if __GNUC_PREREQ__(5, 0)
#pragma GCC diagnostic push #pragma GCC diagnostic push
@ -178,8 +161,6 @@ static __attribute__((noinline,noreturn,unused)) void abort_with_line(uint line)
* COUNT_IF() invocation requires a special section ("dbg_cnt") hence a modern * COUNT_IF() invocation requires a special section ("dbg_cnt") hence a modern
* linker. * linker.
*/ */
extern unsigned int debug_enable_counters;
#if !defined(USE_OBSOLETE_LINKER) #if !defined(USE_OBSOLETE_LINKER)
/* type of checks that can be verified. We cannot really distinguish between /* type of checks that can be verified. We cannot really distinguish between
@ -241,44 +222,30 @@ extern __attribute__((__weak__)) struct debug_count __stop_dbg_cnt HA_SECTION_S
_HA_ATOMIC_INC(&__dbg_cnt_##_line.count); \ _HA_ATOMIC_INC(&__dbg_cnt_##_line.count); \
} while (0) } while (0)
/* Matrix for DEBUG_COUNTERS:
* 0 : only BUG_ON() and CHECK_IF() are reported (super rare)
* 1 : COUNT_GLITCH() are also reported (rare)
* COUNT_IF() are also reported only if debug_enable_counters was set
* 2 : COUNT_IF() are also reported unless debug_enable_counters was reset
*/
/* Core of the COUNT_IF() macro, checks the condition and counts one hit if /* Core of the COUNT_IF() macro, checks the condition and counts one hit if
* true. It's only enabled at DEBUG_COUNTERS >= 1, and enabled by default if * true.
* DEBUG_COUNTERS >= 2.
*/ */
# if defined(DEBUG_COUNTERS) && (DEBUG_COUNTERS >= 1) #define _COUNT_IF(cond, file, line, ...) \
# define _COUNT_IF(cond, file, line, ...) \ (unlikely(cond) ? ({ \
(unlikely(cond) ? ({ \ __DBG_COUNT(cond, file, line, DBG_COUNT_IF, __VA_ARGS__); \
if (debug_enable_counters) \ 1; /* let's return the true condition */ \
__DBG_COUNT(cond, file, line, DBG_COUNT_IF, __VA_ARGS__); \
1; /* let's return the true condition */ \
}) : 0) }) : 0)
# else
# define _COUNT_IF(cond, file, line, ...) DISGUISE(unlikely(cond) ? 1 : 0)
# endif
/* DEBUG_COUNTERS enables counting the number of glitches per line of code. The /* DEBUG_GLITCHES enables counting the number of glitches per line of code. The
* condition is empty (nothing to write there), except maybe __VA_ARGS at the * condition is empty (nothing to write there), except maybe __VA_ARGS at the
* end. * end.
*/ */
# if !defined(DEBUG_COUNTERS) || (DEBUG_COUNTERS == 0) # if !defined(DEBUG_GLITCHES)
# define _COUNT_GLITCH(file, line, ...) do { } while (0) # define _COUNT_GLITCH(file, line, ...) do { } while (0)
# else # else
# define _COUNT_GLITCH(file, line, ...) do { \ # define _COUNT_GLITCH(file, line, ...) do { \
__DBG_COUNT(, file, line, DBG_GLITCH, __VA_ARGS__); \ __DBG_COUNT(, file, line, DBG_GLITCH, __VA_ARGS__); \
} while (0) } while (0)
# endif # endif
#else /* USE_OBSOLETE_LINKER not defined below */ #else /* USE_OBSOLETE_LINKER not defined below */
# define __DBG_COUNT(cond, file, line, type, ...) do { } while (0) # define __DBG_COUNT(cond, file, line, type, ...) do { } while (0)
# define _COUNT_IF(cond, file, line, ...) DISGUISE(unlikely(cond) ? 1 : 0) # define _COUNT_IF(cond, file, line, ...) DISGUISE(cond)
# define _COUNT_GLITCH(file, line, ...) do { } while (0) # define _COUNT_GLITCH(file, line, ...) do { } while (0)
#endif #endif
@ -424,20 +391,6 @@ extern __attribute__((__weak__)) struct debug_count __stop_dbg_cnt HA_SECTION_S
# define COUNT_IF_HOT(cond, ...) DISGUISE(cond) # define COUNT_IF_HOT(cond, ...) DISGUISE(cond)
#endif #endif
/* turn BUG_ON_STRESS() into a real statement when DEBUG_STRESS is set,
* otherwise simply ignore it, at the risk of failing to notice if the
* condition would build at all. We don't really care if BUG_ON_STRESS
* doesn't always build, because it's meant to be used only in certain
* scenarios, possibly requiring certain combinations of options. We
* just want to be certain that the condition is not implemented at all
* when not used, so as to encourage developers to put a lot of them at
* zero cost.
*/
#if DEBUG_STRESS > 0
# define BUG_ON_STRESS(cond, ...) BUG_ON(cond, __VA_ARGS__)
#else
# define BUG_ON_STRESS(cond, ...) do { } while (0)
#endif
/* When not optimizing, clang won't remove that code, so only compile it in when optimizing */ /* When not optimizing, clang won't remove that code, so only compile it in when optimizing */
#if defined(__GNUC__) && defined(__OPTIMIZE__) #if defined(__GNUC__) && defined(__OPTIMIZE__)
@ -537,7 +490,7 @@ struct mem_stats {
size_t size; size_t size;
struct ha_caller caller; struct ha_caller caller;
const void *extra; // extra info specific to this call (e.g. pool ptr) const void *extra; // extra info specific to this call (e.g. pool ptr)
} ALIGNED(sizeof(void*)); } __attribute__((aligned(sizeof(void*))));
#undef calloc #undef calloc
#define calloc(x,y) ({ \ #define calloc(x,y) ({ \
@ -651,172 +604,9 @@ struct mem_stats {
_HA_ATOMIC_ADD(&_.size, __y); \ _HA_ATOMIC_ADD(&_.size, __y); \
strdup(__x); \ strdup(__x); \
}) })
#undef ha_aligned_alloc
#define ha_aligned_alloc(a,s) ({ \
size_t __a = (a); \
size_t __s = (s); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_MALLOC, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
_ha_aligned_alloc(__a, __s); \
})
#undef ha_aligned_zalloc
#define ha_aligned_zalloc(a,s) ({ \
size_t __a = (a); \
size_t __s = (s); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_CALLOC, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
_ha_aligned_zalloc(__a, __s); \
})
#undef ha_aligned_alloc_safe
#define ha_aligned_alloc_safe(a,s) ({ \
size_t __a = (a); \
size_t __s = (s); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_MALLOC, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
_ha_aligned_alloc_safe(__a, __s); \
})
#undef ha_aligned_zalloc_safe
#define ha_aligned_zalloc_safe(a,s) ({ \
size_t __a = (a); \
size_t __s = (s); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_CALLOC, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
_ha_aligned_zalloc_safe(__a, __s); \
})
// Since the type is known, the .extra field will contain its name
#undef ha_aligned_alloc_typed
#define ha_aligned_alloc_typed(cnt,type) ({ \
size_t __a = __alignof__(type); \
size_t __s = ((size_t)cnt) * sizeof(type); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_MALLOC, \
.func = __func__, \
}, \
.extra = #type, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
(type*)_ha_aligned_alloc(__a, __s); \
})
// Since the type is known, the .extra field will contain its name
#undef ha_aligned_zalloc_typed
#define ha_aligned_zalloc_typed(cnt,type) ({ \
size_t __a = __alignof__(type); \
size_t __s = ((size_t)cnt) * sizeof(type); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_CALLOC, \
.func = __func__, \
}, \
.extra = #type, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
(type*)_ha_aligned_zalloc_safe(__a, __s); \
})
#undef ha_aligned_free
#define ha_aligned_free(x) ({ \
typeof(x) __x = (x); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_FREE, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
if (__builtin_constant_p((x))) { \
HA_LINK_ERROR(call_to_ha_aligned_free_attempts_to_free_a_constant); \
} \
if (__x) \
_HA_ATOMIC_INC(&_.calls); \
_ha_aligned_free(__x); \
})
#undef ha_aligned_free_size
#define ha_aligned_free_size(p,s) ({ \
void *__p = (p); size_t __s = (s); \
static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
.caller = { \
.file = __FILE__, .line = __LINE__, \
.what = MEM_STATS_TYPE_FREE, \
.func = __func__, \
}, \
}; \
HA_WEAK(__start_mem_stats); \
HA_WEAK(__stop_mem_stats); \
if (__builtin_constant_p((p))) { \
HA_LINK_ERROR(call_to_ha_aligned_free_attempts_to_free_a_constant); \
} \
if (__p) { \
_HA_ATOMIC_INC(&_.calls); \
_HA_ATOMIC_ADD(&_.size, __s); \
} \
_ha_aligned_free(__p); \
})
#else // DEBUG_MEM_STATS #else // DEBUG_MEM_STATS
#define will_free(x, y) do { } while (0) #define will_free(x, y) do { } while (0)
#define ha_aligned_alloc(a,s) _ha_aligned_alloc(a, s)
#define ha_aligned_zalloc(a,s) _ha_aligned_zalloc(a, s)
#define ha_aligned_alloc_safe(a,s) _ha_aligned_alloc_safe(a, s)
#define ha_aligned_zalloc_safe(a,s) _ha_aligned_zalloc_safe(a, s)
#define ha_aligned_alloc_typed(cnt,type) ((type*)_ha_aligned_alloc(__alignof__(type), ((size_t)cnt) * sizeof(type)))
#define ha_aligned_zalloc_typed(cnt,type) ((type*)_ha_aligned_zalloc(__alignof__(type), ((size_t)cnt) * sizeof(type)))
#define ha_aligned_free(p) _ha_aligned_free(p)
#define ha_aligned_free_size(p,s) _ha_aligned_free(p)
#endif /* DEBUG_MEM_STATS*/ #endif /* DEBUG_MEM_STATS*/

46
include/haproxy/cbuf-t.h Normal file
View file

@ -0,0 +1,46 @@
/*
* include/haproxy/cbuf-t.h
* This file contains definitions for circular buffers.
*
* Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation, version 2.1
* exclusively.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _HAPROXY_CBUF_T_H
#define _HAPROXY_CBUF_T_H
#ifdef USE_QUIC
#ifndef USE_OPENSSL
#error "Must define USE_OPENSSL"
#endif
#endif
#include <stddef.h>
#include <haproxy/list-t.h>
extern struct pool_head *pool_head_cbuf;
struct cbuf {
/* buffer */
unsigned char *buf;
/* buffer size */
size_t sz;
/* Writer index */
size_t wr;
/* Reader index */
size_t rd;
};
#endif /* _HAPROXY_CBUF_T_H */

136
include/haproxy/cbuf.h Normal file
View file

@ -0,0 +1,136 @@
/*
* include/haproxy/cbuf.h
* This file contains definitions and prototypes for circular buffers.
* Inspired from Linux circular buffers (include/linux/circ_buf.h).
*
* Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation, version 2.1
* exclusively.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _HAPROXY_CBUF_H
#define _HAPROXY_CBUF_H
#ifdef USE_QUIC
#ifndef USE_OPENSSL
#error "Must define USE_OPENSSL"
#endif
#endif
#include <haproxy/atomic.h>
#include <haproxy/list.h>
#include <haproxy/cbuf-t.h>
struct cbuf *cbuf_new(unsigned char *buf, size_t sz);
void cbuf_free(struct cbuf *cbuf);
/* Amount of data between <rd> and <wr> */
#define CBUF_DATA(wr, rd, size) (((wr) - (rd)) & ((size) - 1))
/* Return the writer position in <cbuf>.
* To be used only by the writer!
*/
static inline unsigned char *cb_wr(struct cbuf *cbuf)
{
return cbuf->buf + cbuf->wr;
}
/* Reset the reader index.
* To be used by a reader!
*/
static inline void cb_rd_reset(struct cbuf *cbuf)
{
cbuf->rd = 0;
}
/* Reset the writer index.
* To be used by a writer!
*/
static inline void cb_wr_reset(struct cbuf *cbuf)
{
cbuf->wr = 0;
}
/* Increase <cbuf> circular buffer data by <count>.
* To be used by a writer!
*/
static inline void cb_add(struct cbuf *cbuf, size_t count)
{
cbuf->wr = (cbuf->wr + count) & (cbuf->sz - 1);
}
/* Return the reader position in <cbuf>.
* To be used only by the reader!
*/
static inline unsigned char *cb_rd(struct cbuf *cbuf)
{
return cbuf->buf + cbuf->rd;
}
/* Skip <count> bytes in <cbuf> circular buffer.
* To be used by a reader!
*/
static inline void cb_del(struct cbuf *cbuf, size_t count)
{
cbuf->rd = (cbuf->rd + count) & (cbuf->sz - 1);
}
/* Return the amount of data left in <cbuf>.
* To be used only by the writer!
*/
static inline size_t cb_data(struct cbuf *cbuf)
{
size_t rd;
rd = HA_ATOMIC_LOAD(&cbuf->rd);
return CBUF_DATA(cbuf->wr, rd, cbuf->sz);
}
/* Return the amount of room left in <cbuf> minus 1 to distinguish
* the case where the buffer is full from the case where it is empty.
* To be used only by the writer!
*/
static inline size_t cb_room(struct cbuf *cbuf)
{
size_t rd;
rd = HA_ATOMIC_LOAD(&cbuf->rd);
return CBUF_DATA(rd, cbuf->wr + 1, cbuf->sz);
}
/* Return the amount of contiguous data left in <cbuf>.
* To be used only by the reader!
*/
static inline size_t cb_contig_data(struct cbuf *cbuf)
{
size_t end, n;
end = cbuf->sz - cbuf->rd;
n = (HA_ATOMIC_LOAD(&cbuf->wr) + end) & (cbuf->sz - 1);
return n < end ? n : end;
}
/* Return the amount of contiguous space left in <cbuf>.
* To be used only by the writer!
*/
static inline size_t cb_contig_space(struct cbuf *cbuf)
{
size_t end, n;
end = cbuf->sz - 1 - cbuf->wr;
n = (HA_ATOMIC_LOAD(&cbuf->rd) + end) & (cbuf->sz - 1);
return n <= end ? n : end + 1;
}
#endif /* _HAPROXY_CBUF_H */
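Taken together, these helpers implement a single-writer/single-reader protocol over a power-of-two sized buffer (the masks above rely on it): the writer fills contiguous space then publishes with cb_add(), the reader drains contiguous data then releases with cb_del(). A hedged sketch, assuming <string.h> for memcpy(); both function names are hypothetical:

#include <string.h>

static size_t cbuf_write(struct cbuf *cbuf, const unsigned char *data, size_t len)
{
	size_t room = cb_contig_space(cbuf);

	if (len > room)
		len = room;
	memcpy(cb_wr(cbuf), data, len);
	cb_add(cbuf, len); /* publish to the reader */
	return len;
}

static size_t cbuf_read(struct cbuf *cbuf, unsigned char *data, size_t len)
{
	size_t avail = cb_contig_data(cbuf);

	if (len > avail)
		len = avail;
	memcpy(data, cb_rd(cbuf), len);
	cb_del(cbuf, len); /* release room to the writer */
	return len;
}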

View file

@ -54,8 +54,6 @@ enum cond_predicate {
CFG_PRED_OSSL_VERSION_ATLEAST, // "openssl_version_atleast" CFG_PRED_OSSL_VERSION_ATLEAST, // "openssl_version_atleast"
CFG_PRED_OSSL_VERSION_BEFORE, // "openssl_version_before" CFG_PRED_OSSL_VERSION_BEFORE, // "openssl_version_before"
CFG_PRED_SSLLIB_NAME_STARTSWITH, // "ssllib_name_startswith" CFG_PRED_SSLLIB_NAME_STARTSWITH, // "ssllib_name_startswith"
CFG_PRED_AWSLC_API_ATLEAST, // "awslc_api_atleast"
CFG_PRED_AWSLC_API_BEFORE, // "awslc_api_before"
CFG_PRED_ENABLED, // "enabled" CFG_PRED_ENABLED, // "enabled"
}; };

View file

@ -39,7 +39,6 @@ struct acl_cond;
#define CFG_CRTLIST 5 #define CFG_CRTLIST 5
#define CFG_CRTSTORE 6 #define CFG_CRTSTORE 6
#define CFG_TRACES 7 #define CFG_TRACES 7
#define CFG_ACME 8
/* various keyword modifiers */ /* various keyword modifiers */
enum kw_mod { enum kw_mod {
@ -111,7 +110,6 @@ extern char *cursection;
extern int non_global_section_parsed; extern int non_global_section_parsed;
extern struct proxy *curproxy; extern struct proxy *curproxy;
extern struct proxy *last_defproxy;
extern char initial_cwd[PATH_MAX]; extern char initial_cwd[PATH_MAX];
int cfg_parse_global(const char *file, int linenum, char **args, int inv); int cfg_parse_global(const char *file, int linenum, char **args, int inv);
@ -141,7 +139,7 @@ int warnif_misplaced_tcp_req_sess(struct proxy *proxy, const char *file, int lin
int warnif_misplaced_tcp_req_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2); int warnif_misplaced_tcp_req_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
int warnif_misplaced_tcp_res_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2); int warnif_misplaced_tcp_res_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
int warnif_misplaced_quic_init(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2); int warnif_misplaced_quic_init(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
int warnif_cond_conflicts(const struct acl_cond *cond, unsigned int where, char **err); int warnif_cond_conflicts(const struct acl_cond *cond, unsigned int where, const char *file, int line);
int warnif_tcp_http_cond(const struct proxy *px, const struct acl_cond *cond); int warnif_tcp_http_cond(const struct proxy *px, const struct acl_cond *cond);
int too_many_args_idx(int maxarg, int index, char **args, char **msg, int *err_code); int too_many_args_idx(int maxarg, int index, char **args, char **msg, int *err_code);
int too_many_args(int maxarg, char **args, char **msg, int *err_code); int too_many_args(int maxarg, char **args, char **msg, int *err_code);

View file

@ -204,6 +204,7 @@ struct channel {
unsigned short last_read; /* 16 lower bits of last read date (max pause=65s) */ unsigned short last_read; /* 16 lower bits of last read date (max pause=65s) */
unsigned char xfer_large; /* number of consecutive large xfers */ unsigned char xfer_large; /* number of consecutive large xfers */
unsigned char xfer_small; /* number of consecutive small xfers */ unsigned char xfer_small; /* number of consecutive small xfers */
unsigned long long total; /* total data read */
int analyse_exp; /* expiration date for current analysers (if set) */ int analyse_exp; /* expiration date for current analysers (if set) */
}; };

View file

@ -323,6 +323,7 @@ static inline void channel_init(struct channel *chn)
chn->to_forward = 0; chn->to_forward = 0;
chn->last_read = now_ms; chn->last_read = now_ms;
chn->xfer_small = chn->xfer_large = 0; chn->xfer_small = chn->xfer_large = 0;
chn->total = 0;
chn->analysers = 0; chn->analysers = 0;
chn->flags = 0; chn->flags = 0;
chn->output = 0; chn->output = 0;
@ -376,6 +377,7 @@ static inline void channel_add_input(struct channel *chn, unsigned int len)
c_adv(chn, fwd); c_adv(chn, fwd);
} }
/* notify that some data was read */ /* notify that some data was read */
chn->total += len;
chn->flags |= CF_READ_EVENT; chn->flags |= CF_READ_EVENT;
} }

View file

@ -172,7 +172,6 @@ struct check {
char desc[HCHK_DESC_LEN]; /* health check description */ char desc[HCHK_DESC_LEN]; /* health check description */
signed char use_ssl; /* use SSL for health checks (1: on, 0: server mode, -1: off) */ signed char use_ssl; /* use SSL for health checks (1: on, 0: server mode, -1: off) */
int send_proxy; /* send a PROXY protocol header with checks */ int send_proxy; /* send a PROXY protocol header with checks */
int reuse_pool; /* try to reuse idle connections */
struct tcpcheck_rules *tcpcheck_rules; /* tcp-check send / expect rules */ struct tcpcheck_rules *tcpcheck_rules; /* tcp-check send / expect rules */
struct tcpcheck_rule *current_step; /* current step when using tcpcheck */ struct tcpcheck_rule *current_step; /* current step when using tcpcheck */
int inter, fastinter, downinter; /* checks: time in milliseconds */ int inter, fastinter, downinter; /* checks: time in milliseconds */
@ -188,7 +187,6 @@ struct check {
char **envp; /* the environment to use if running a process-based check */ char **envp; /* the environment to use if running a process-based check */
struct pid_list *curpid; /* entry in pid_list used for current process-based test, or -1 if not in test */ struct pid_list *curpid; /* entry in pid_list used for current process-based test, or -1 if not in test */
struct sockaddr_storage addr; /* the address to check */ struct sockaddr_storage addr; /* the address to check */
char *pool_conn_name; /* conn name used on reuse */
char *sni; /* Server name */ char *sni; /* Server name */
char *alpn_str; /* ALPN to use for checks */ char *alpn_str; /* ALPN to use for checks */
int alpn_len; /* ALPN string length */ int alpn_len; /* ALPN string length */

View file

@ -24,7 +24,7 @@
#include <haproxy/applet-t.h> #include <haproxy/applet-t.h>
/* Access level for a stats socket (appctx->cli_ctx.level) */ /* Access level for a stats socket (appctx->cli_level) */
#define ACCESS_LVL_NONE 0x0000 #define ACCESS_LVL_NONE 0x0000
#define ACCESS_LVL_USER 0x0001 #define ACCESS_LVL_USER 0x0001
#define ACCESS_LVL_OPER 0x0002 #define ACCESS_LVL_OPER 0x0002
@ -41,13 +41,11 @@
#define ACCESS_MCLI_SEVERITY_STR 0x0200 /* 'set severity-output string' on master CLI */ #define ACCESS_MCLI_SEVERITY_STR 0x0200 /* 'set severity-output string' on master CLI */
/* flags for appctx->st1 */ /* flags for appctx->st1 */
#define APPCTX_CLI_ST1_PAYLOAD (1 << 0) #define APPCTX_CLI_ST1_PROMPT (1 << 0)
#define APPCTX_CLI_ST1_NOLF (1 << 1) #define APPCTX_CLI_ST1_PAYLOAD (1 << 1)
#define APPCTX_CLI_ST1_LASTCMD (1 << 2) #define APPCTX_CLI_ST1_NOLF (1 << 2)
#define APPCTX_CLI_ST1_INTER (1 << 3) /* interactive mode (i.e. don't close after 1st cmd) */ #define APPCTX_CLI_ST1_TIMED (1 << 3)
#define APPCTX_CLI_ST1_PROMPT (1 << 4) /* display prompt */ #define APPCTX_CLI_ST1_LASTCMD (1 << 4)
#define APPCTX_CLI_ST1_TIMED (1 << 5) /* display timer in prompt */
#define APPCTX_CLI_ST1_YIELD (1 << 6) /* forced yield between commands */
#define CLI_PREFIX_KW_NB 5 #define CLI_PREFIX_KW_NB 5
#define CLI_MAX_MATCHES 5 #define CLI_MAX_MATCHES 5
@ -57,8 +55,8 @@
enum { enum {
CLI_ST_INIT = 0, /* initial state, must leave to zero ! */ CLI_ST_INIT = 0, /* initial state, must leave to zero ! */
CLI_ST_END, /* final state, let's close */ CLI_ST_END, /* final state, let's close */
CLI_ST_PARSE_CMDLINE, /* wait for a full command line */ CLI_ST_GETREQ, /* wait for a request */
CLI_ST_PROCESS_CMDLINE, /* process all commands on the command line */ CLI_ST_PARSEREQ, /* parse a request */
CLI_ST_OUTPUT, /* all states after this one are responses */ CLI_ST_OUTPUT, /* all states after this one are responses */
CLI_ST_PROMPT, /* display the prompt (first output, same code) */ CLI_ST_PROMPT, /* display the prompt (first output, same code) */
CLI_ST_PRINT, /* display const message in cli->msg */ CLI_ST_PRINT, /* display const message in cli->msg */

View file

@ -28,7 +28,7 @@
extern struct timeval start_date; /* the process's start date in wall-clock time */ extern struct timeval start_date; /* the process's start date in wall-clock time */
extern struct timeval ready_date; /* date when the process was considered ready */ extern struct timeval ready_date; /* date when the process was considered ready */
extern ullong start_time_ns; /* the process's start date in internal monotonic time (ns) */ extern ullong start_time_ns; /* the process's start date in internal monotonic time (ns) */
extern volatile ullong *global_now_ns;/* common monotonic date between all threads, in ns (wraps every 585 yr) */ extern volatile ullong global_now_ns; /* common monotonic date between all threads, in ns (wraps every 585 yr) */
extern THREAD_LOCAL ullong now_ns; /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */ extern THREAD_LOCAL ullong now_ns; /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
extern THREAD_LOCAL struct timeval date; /* the real current date (wall-clock time) */ extern THREAD_LOCAL struct timeval date; /* the real current date (wall-clock time) */
@ -49,8 +49,6 @@ uint clock_report_idle(void);
void clock_leaving_poll(int timeout, int interrupted); void clock_leaving_poll(int timeout, int interrupted);
void clock_entering_poll(void); void clock_entering_poll(void);
void clock_adjust_now_offset(void); void clock_adjust_now_offset(void);
void clock_set_now_offset(llong ofs);
llong clock_get_now_offset(void);
static inline void clock_update_date(int max_wait, int interrupted) static inline void clock_update_date(int max_wait, int interrupted)
{ {


@@ -94,21 +94,11 @@ typedef struct { } empty_t;
# endif # endif
#endif #endif
/* unsafe ones for use with constant macros needed in initializers */
#ifndef _MIN
#define _MIN(a, b) ((a < b) ? a : b)
#endif
#ifndef _MAX
#define _MAX(a, b) ((a > b) ? a : b)
#endif
/* safe versions for use anywhere except in initializers */
#ifndef MIN #ifndef MIN
#define MIN(a, b) ({ \ #define MIN(a, b) ({ \
typeof(a) _a = (a); \ typeof(a) _a = (a); \
typeof(a) _b = (b); \ typeof(a) _b = (b); \
_MIN(_a, _b); \ ((_a < _b) ? _a : _b); \
}) })
#endif #endif
@@ -116,15 +106,10 @@ typedef struct { } empty_t;
#define MAX(a, b) ({ \ #define MAX(a, b) ({ \
typeof(a) _a = (a); \ typeof(a) _a = (a); \
typeof(a) _b = (b); \ typeof(a) _b = (b); \
_MAX(_a, _b); \ ((_a > _b) ? _a : _b); \
}) })
#endif #endif
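
The split between _MIN/_MAX and MIN/MAX matters because the unsafe forms
evaluate each argument twice. A minimal sketch of the hazard the
statement-expression versions avoid (illustrative variables only):

	int x = 1, y = 5;
	int a = _MIN(x++, y); /* expands to ((x++ < y) ? x++ : y): x ends at 3 */

	int x2 = 1;
	int b = MIN(x2++, y); /* each argument evaluated once: x2 ends at 2, b == 1 */
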
/* always set a _POSIX_VERSION if there isn't any, in order to ease compares */
#ifndef _POSIX_VERSION
# define _POSIX_VERSION 0
#endif
/* this is for libc5 for example */ /* this is for libc5 for example */
#ifndef TCP_NODELAY #ifndef TCP_NODELAY
#define TCP_NODELAY 1 #define TCP_NODELAY 1


@@ -31,23 +31,6 @@
#include <stdlib.h> #include <stdlib.h>
#endif #endif
/* DEFVAL() returns either the second argument as-is, or <def> if absent. This
* is for use in macro arguments.
*/
#define DEFVAL(_def,...) _FIRST_ARG(NULL, ##__VA_ARGS__, (_def))
/* DEFNULL() returns either the argument as-is, or NULL if absent. This is for
* use in macro arguments.
*/
#define DEFNULL(...) DEFVAL(NULL, ##__VA_ARGS__)
/* DEFZERO() returns either the argument as-is, or 0 if absent. This is for
* use in macro arguments.
*/
#define DEFZERO(...) DEFVAL(0, ##__VA_ARGS__)
#define _FIRST_ARG(a, b, ...) b
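
A usage sketch for these helpers (SET_TIMEOUT and set_timeout are
hypothetical): they let a variadic macro supply a default when the caller
omits the argument.

	#define SET_TIMEOUT(...) set_timeout(DEFVAL(1000, ##__VA_ARGS__))

	SET_TIMEOUT();   /* expands to set_timeout((1000)), the default */
	SET_TIMEOUT(50); /* expands to set_timeout(50) */
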
/* /*
* Gcc before 3.0 needs [0] to declare a variable-size array * Gcc before 3.0 needs [0] to declare a variable-size array
*/ */
@@ -72,24 +55,6 @@
#define ___equals_1(x) ____equals_1(comma_for_one ## x 1) #define ___equals_1(x) ____equals_1(comma_for_one ## x 1)
#define __equals_1(x) ___equals_1(x) #define __equals_1(x) ___equals_1(x)
/* same but checks if defined as zero, useful to distinguish between -DFOO and
* -DFOO=0.
*/
#define comma_for_zero0 ,
#define _____equals_0(x, y, ...) (y)
#define ____equals_0(x, ...) _____equals_0(x, 0)
#define ___equals_0(x) ____equals_0(comma_for_zero ## x 1)
#define __equals_0(x) ___equals_0(x)
/* same but checks if defined as empty, useful to distinguish between -DFOO= and
* -DFOO=anything.
*/
#define comma_for_empty ,
#define _____def_as_empty(x, y, ...) (y)
#define ____def_as_empty(x, ...) _____def_as_empty(x, 0)
#define ___def_as_empty(x) ____def_as_empty(comma_for_empty ## x 1)
#define __def_as_empty(x) ___def_as_empty(x)
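
All three tests expand to (1) or (0) at preprocessing time, so they can drive
#if blocks directly. A sketch for a hypothetical FOO macro:

	/*  -DFOO   (i.e. FOO defined as 1)  -> __equals_1(FOO) is (1)
	 *  -DFOO=0                          -> __equals_0(FOO) is (1)
	 *  -DFOO=  (defined but empty)      -> __def_as_empty(FOO) is (1)
	 * any other case yields (0) for the corresponding test.
	 */
	#if defined(FOO) && __equals_0(FOO)
	#warning "FOO was explicitly set to zero"
	#endif
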
/* gcc 5 and clang 3 brought __has_attribute(), which is not well documented in /* gcc 5 and clang 3 brought __has_attribute(), which is not well documented in
* the case of gcc, but is convenient since handled at the preprocessor level. * the case of gcc, but is convenient since handled at the preprocessor level.
* In both cases it's possible to test for __has_attribute() using ifdef. When * In both cases it's possible to test for __has_attribute() using ifdef. When
@@ -318,11 +283,6 @@
#define _TOSTR(x) #x #define _TOSTR(x) #x
#define TOSTR(x) _TOSTR(x) #define TOSTR(x) _TOSTR(x)
/* concatenates the two strings after resolving possible macros */
#undef CONCAT // Turns out NetBSD defines it to the same in exec_elf.h
#define _CONCAT(a,b) a ## b
#define CONCAT(a,b) _CONCAT(a,b)
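
The two-level expansion lets macro arguments such as __LINE__ be resolved
before pasting; a direct a ## b would paste the literal token __LINE__
instead. A sketch (UNIQUE_ID is hypothetical):

	#define UNIQUE_ID(prefix) CONCAT(prefix, __LINE__)

	static int UNIQUE_ID(_guard_); /* on line 42, declares _guard_42 */
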
/* /*
* Gcc >= 3 provides the ability for the program to give hints to the * Gcc >= 3 provides the ability for the program to give hints to the
* compiler about what branch of an if is most likely to be taken. This * compiler about what branch of an if is most likely to be taken. This
@@ -367,7 +327,7 @@
* <type> which has its member <name> stored at address <ptr>. * <type> which has its member <name> stored at address <ptr>.
*/ */
#ifndef container_of #ifndef container_of
#define container_of(ptr, type, name) ((type *)(((char *)(ptr)) - offsetof(type, name))) #define container_of(ptr, type, name) ((type *)(((void *)(ptr)) - ((long)&((type *)0)->name)))
#endif #endif
/* returns a pointer to the structure of type <type> which has its member <name> /* returns a pointer to the structure of type <type> which has its member <name>
@@ -376,7 +336,7 @@
#ifndef container_of_safe #ifndef container_of_safe
#define container_of_safe(ptr, type, name) \ #define container_of_safe(ptr, type, name) \
({ void *__p = (ptr); \ ({ void *__p = (ptr); \
__p ? (type *)((char *)__p - offsetof(type, name)) : (type *)0; \ __p ? (type *)(__p - ((long)&((type *)0)->name)) : (type *)0; \
}) })
#endif #endif
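
A usage sketch with a hypothetical structure: given the address of an embedded
member, both forms recover the enclosing object, and the _safe variant
additionally maps a NULL pointer to NULL.

	struct item {
		int value;
		struct list list; /* linked into some list of items */
	};

	static int item_value(struct list *lh)
	{
		struct item *it = container_of(lh, struct item, list);
		return it->value;
	}
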
@@ -432,13 +392,6 @@
* for multi_threading, see THREAD_PAD() below. * * for multi_threading, see THREAD_PAD() below. *
\*****************************************************************************/ \*****************************************************************************/
/* Cache line size for alignment purposes. This value is incorrect for some
* Apple CPUs which have 128 bytes cache lines.
*/
#ifndef CACHELINE_SIZE
#define CACHELINE_SIZE 64
#endif
/* sets alignment for current field or variable */ /* sets alignment for current field or variable */
#ifndef ALIGNED #ifndef ALIGNED
#define ALIGNED(x) __attribute__((aligned(x))) #define ALIGNED(x) __attribute__((aligned(x)))
@@ -462,12 +415,12 @@
#endif #endif
#endif #endif
/* Sets alignment for current field or variable only when threads are enabled. /* sets alignment for current field or variable only when threads are enabled.
* When no parameters are provided, we align to the cache line size. * Typically used to respect cache line alignment to avoid false sharing.
*/ */
#ifndef THREAD_ALIGNED #ifndef THREAD_ALIGNED
#ifdef USE_THREAD #ifdef USE_THREAD
#define THREAD_ALIGNED(...) ALIGNED(DEFVAL(CACHELINE_SIZE, ##__VA_ARGS__)) #define THREAD_ALIGNED(x) __attribute__((aligned(x)))
#else #else
#define THREAD_ALIGNED(x) #define THREAD_ALIGNED(x)
#endif #endif
@@ -500,44 +453,32 @@
#endif #endif
#endif #endif
/* Add an optional alignment for next fields in a structure, only when threads /* add an optional alignment for next fields in a structure, only when threads
* are enabled. When no parameters are provided, we align to the cache line size. * are enabled. Typically used to respect cache line alignment to avoid false
* sharing.
*/ */
#ifndef THREAD_ALIGN #ifndef THREAD_ALIGN
#ifdef USE_THREAD #ifdef USE_THREAD
#define THREAD_ALIGN(...) union { } ALIGNED(DEFVAL(CACHELINE_SIZE, ##__VA_ARGS__)) #define THREAD_ALIGN(x) union { } ALIGNED(x)
#else #else
#define THREAD_ALIGN(x) #define THREAD_ALIGN(x)
#endif #endif
#endif #endif
/* add padding of the specified size */
#define _PAD(x,l) char __pad_##l[x]
/* add optional padding of the specified size between fields in a structure, /* add optional padding of the specified size between fields in a structure,
* only when threads are enabled. This is used to avoid false sharing of cache * only when threads are enabled. This is used to avoid false sharing of cache
* lines for dynamically allocated structures which cannot guarantee alignment. * lines for dynamically allocated structures which cannot guarantee alignment.
*/ */
#ifndef THREAD_PAD #ifndef THREAD_PAD
# ifdef USE_THREAD # ifdef USE_THREAD
# define _THREAD_PAD(x,l) _PAD(x, l) # define __THREAD_PAD(x,l) char __pad_##l[x]
# define _THREAD_PAD(x,l) __THREAD_PAD(x, l)
# define THREAD_PAD(x) _THREAD_PAD(x, __LINE__) # define THREAD_PAD(x) _THREAD_PAD(x, __LINE__)
# else # else
# define THREAD_PAD(x) # define THREAD_PAD(x)
# endif # endif
#endif #endif
/* add mandatory padding of the specified size between fields in a structure,
* This is used to avoid false sharing of cache lines for dynamically allocated
* structures which cannot guarantee alignment, or to ensure that the size of
* the struct remains consistent on architectures with different alignment
* constraints
*/
#ifndef ALWAYS_PAD
# define _ALWAYS_PAD(x,l) _PAD(x, l)
# define ALWAYS_PAD(x) _ALWAYS_PAD(x, __LINE__)
#endif
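
A sketch of the intended use (struct and field names hypothetical): keeping
two write-hot fields of a dynamically allocated struct on distinct cache
lines, where alignment attributes alone cannot help.

	struct two_ctrs {
		ullong ctr_a;                    /* written by one group of threads */
		THREAD_PAD(64 - sizeof(ullong)); /* padding only present with USE_THREAD */
		ullong ctr_b;                    /* written by another group */
	};
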
/* The THREAD_LOCAL type attribute defines thread-local storage and is defined /* The THREAD_LOCAL type attribute defines thread-local storage and is defined
* to __thread when threads are enabled or empty when disabled. * to __thread when threads are enabled or empty when disabled.
*/ */
@@ -558,18 +499,6 @@
#define __decl_thread(decl) #define __decl_thread(decl)
#endif #endif
/* The __decl_thread_var() statement declares a variable when threads are enabled
* or replaces it with a dummy statement to avoid placing a lone semi-colon. The
* purpose is to condition the presence of some variables on the fact that
* threads are enabled, without having to enclose them inside an ugly
* #ifdef USE_THREAD/#endif clause.
*/
#ifdef USE_THREAD
#define __decl_thread_var(decl) decl
#else
#define __decl_thread_var(decl) enum { CONCAT(_dummy_var_decl_,__LINE__), }
#endif
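
A sketch showing the point of the dummy-enum trick: both declarations below
compile cleanly with and without USE_THREAD, with no #ifdef at the use site
(variable names hypothetical).

	__decl_thread(static HA_SPINLOCK_T counters_lock);
	__decl_thread_var(static uint counters_users);
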
/* clang has a __has_feature() macro which reports true/false on a number of /* clang has a __has_feature() macro which reports true/false on a number of
* internally supported features. Let's make sure this macro is always defined * internally supported features. Let's make sure this macro is always defined
* and returns zero when not supported. * and returns zero when not supported.
@@ -578,15 +507,4 @@
#define __has_feature(x) 0 #define __has_feature(x) 0
#endif #endif
/* gcc 15 throws a warning if a fixed-size char array does not contain a
* terminating NUL. gcc has an attribute 'nonstring', which allows suppressing
* this warning for such array declarations. But it's not the case for clang
* and other compilers.
*/
#if __has_attribute(nonstring)
#define __nonstring __attribute__ ((nonstring))
#else
#define __nonstring
#endif
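
A sketch of the kind of declaration this protects (contents hypothetical): a
fixed-size tag that deliberately drops the terminating NUL.

	static const char magic[4] __nonstring = "HTX1"; /* 4 chars, no NUL stored */
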
#endif /* _HAPROXY_COMPILER_H */ #endif /* _HAPROXY_COMPILER_H */


@@ -28,7 +28,7 @@
#include <netinet/ip.h> #include <netinet/ip.h>
#include <netinet/ip6.h> #include <netinet/ip6.h>
#include <import/cebtree.h> #include <import/ebtree-t.h>
#include <import/ist.h> #include <import/ist.h>
#include <haproxy/api-t.h> #include <haproxy/api-t.h>
@@ -68,50 +68,6 @@ struct ssl_sock_ctx;
* conn_cond_update_polling(). * conn_cond_update_polling().
*/ */
/* A bit of explanation is required for backend connection reuse. A connection
* may be shared between multiple streams of the same thread (e.g. h2, fcgi,
* quic) and may be reused by subsequent streams of a different thread if it
* is totally idle (i.e. not used at all). In order to permit other streams
* to find a connection, it has to appear in lists and/or trees that reflect
* its current state. If the connection is full and cannot be shared anymore,
* it is not in any of these places. The various states are the following:
*
* - private: a private connection is not visible to other threads. It is
* attached via its <idle_list> member to the <conn_list> head of a
* sess_priv_conns struct specific to the server, itself attached to the
* session. Only other streams of the same session may find this connection.
* Such connections include totally idle connections as well as connections
* with available slots left. The <hash_node> part is still used to store
* the hash key but the tree node part is otherwise left unused.
*
* - avail: an available connection is a connection that has at least one
* stream in use and at least one slot available for a new stream. Such a
* connection is indexed in the server's <avail_conns> member based on the
* key of the hash_node. It cannot be used by other threads, and is not
* present in the server's <idle_conn_list>, so its <idle_list> member is
* always empty. Since this connection is in use by a single thread and
* cannot be taken over, it doesn't require any locking to enter/leave the
* tree.
*
* - safe: a safe connection is an idle connection that has proven that it
* could reliably be reused. Such a connection may be taken over at any
* instant by other threads, and must only be manipulated under the server's
* <idle_lock>. It is indexed in the server's <safe_conns> member based on
* the key of the hash_node. It is attached to the server's <idle_conn_list>
* via its <idle_list> member. It may be purged after too long inactivity,
* though the thread responsible for doing this will first take it over. Such
* a connection has (conn->flags & CO_FL_LIST_MASK) = CO_FL_SAFE_LIST.
*
* - idle: a purely idle connection has not yet proven that it could reliably
* be reused. Such a connection may be taken over at any instant by other
* threads, and must only be manipulated under the server's <idle_lock>. It
* is indexed in the server's <idle_conns> member based on the key of the
* hash_node. It is attached to the server's <idle_conn_list> via its
* <idle_list> member. It may be purged after too long inactivity, though the
* thread responsible for doing this will first take it over. Such a
* connection has (conn->flags & CO_FL_LIST_MASK) = CO_FL_IDLE_LIST.
*/
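
As a reading aid only (not an API introduced here), the two takeover-able
states can be told apart from the flags exactly as described above:

	/* sketch: is this connection in one of the shared idle trees? */
	static inline int conn_is_takeover_candidate(const struct connection *conn)
	{
		switch (conn->flags & CO_FL_LIST_MASK) {
		case CO_FL_SAFE_LIST: return 1; /* proven reusable, in <safe_conns> */
		case CO_FL_IDLE_LIST: return 1; /* unproven idle, in <idle_conns> */
		default:              return 0; /* private or avail: not in a shared tree */
		}
	}
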
/* flags for use in connection->flags. Please also update the conn_show_flags() /* flags for use in connection->flags. Please also update the conn_show_flags()
* function below in case of changes. * function below in case of changes.
*/ */
@@ -144,8 +100,9 @@ enum {
*/ */
CO_FL_WAIT_ROOM = 0x00000800, /* data sink is full */ CO_FL_WAIT_ROOM = 0x00000800, /* data sink is full */
CO_FL_WANT_SPLICING = 0x00001000, /* we wish to use splicing on the connection when possible */ /* These flags are used to report whether the from/to addresses are set or not */
CO_FL_SSL_NO_CACHED_INFO = 0x00002000, /* Don't use any cached information when creating a new SSL connection */ /* unused: 0x00001000 */
/* unused: 0x00002000 */
CO_FL_EARLY_SSL_HS = 0x00004000, /* We have early data pending, don't start SSL handshake yet */ CO_FL_EARLY_SSL_HS = 0x00004000, /* We have early data pending, don't start SSL handshake yet */
CO_FL_EARLY_DATA = 0x00008000, /* At least some of the data are early data */ CO_FL_EARLY_DATA = 0x00008000, /* At least some of the data are early data */
@@ -212,13 +169,13 @@ static forceinline char *conn_show_flags(char *buf, size_t len, const char *deli
/* flags */ /* flags */
_(CO_FL_SAFE_LIST, _(CO_FL_IDLE_LIST, _(CO_FL_CTRL_READY, _(CO_FL_SAFE_LIST, _(CO_FL_IDLE_LIST, _(CO_FL_CTRL_READY,
_(CO_FL_REVERSED, _(CO_FL_ACT_REVERSING, _(CO_FL_OPT_MARK, _(CO_FL_OPT_TOS, _(CO_FL_REVERSED, _(CO_FL_ACT_REVERSING, _(CO_FL_OPT_MARK, _(CO_FL_OPT_TOS,
_(CO_FL_XPRT_READY, _(CO_FL_WANT_DRAIN, _(CO_FL_WAIT_ROOM, _(CO_FL_SSL_NO_CACHED_INFO, _(CO_FL_EARLY_SSL_HS, _(CO_FL_XPRT_READY, _(CO_FL_WANT_DRAIN, _(CO_FL_WAIT_ROOM, _(CO_FL_EARLY_SSL_HS,
_(CO_FL_EARLY_DATA, _(CO_FL_SOCKS4_SEND, _(CO_FL_SOCKS4_RECV, _(CO_FL_SOCK_RD_SH, _(CO_FL_EARLY_DATA, _(CO_FL_SOCKS4_SEND, _(CO_FL_SOCKS4_RECV, _(CO_FL_SOCK_RD_SH,
_(CO_FL_SOCK_WR_SH, _(CO_FL_ERROR, _(CO_FL_FDLESS, _(CO_FL_WAIT_L4_CONN, _(CO_FL_SOCK_WR_SH, _(CO_FL_ERROR, _(CO_FL_FDLESS, _(CO_FL_WAIT_L4_CONN,
_(CO_FL_WAIT_L6_CONN, _(CO_FL_SEND_PROXY, _(CO_FL_ACCEPT_PROXY, _(CO_FL_ACCEPT_CIP, _(CO_FL_WAIT_L6_CONN, _(CO_FL_SEND_PROXY, _(CO_FL_ACCEPT_PROXY, _(CO_FL_ACCEPT_CIP,
_(CO_FL_SSL_WAIT_HS, _(CO_FL_PRIVATE, _(CO_FL_RCVD_PROXY, _(CO_FL_SESS_IDLE, _(CO_FL_SSL_WAIT_HS, _(CO_FL_PRIVATE, _(CO_FL_RCVD_PROXY, _(CO_FL_SESS_IDLE,
_(CO_FL_XPRT_TRACKED _(CO_FL_XPRT_TRACKED
))))))))))))))))))))))))))))); ))))))))))))))))))))))))))));
/* epilogue */ /* epilogue */
_(~0U); _(~0U);
return buf; return buf;
@@ -329,7 +286,6 @@ enum {
CO_RFL_KEEP_RECV = 0x0008, /* Instruct the mux to still wait for read events */ CO_RFL_KEEP_RECV = 0x0008, /* Instruct the mux to still wait for read events */
CO_RFL_BUF_NOT_STUCK = 0x0010, /* Buffer is not stuck. Optims are possible during data copy */ CO_RFL_BUF_NOT_STUCK = 0x0010, /* Buffer is not stuck. Optims are possible during data copy */
CO_RFL_MAY_SPLICE = 0x0020, /* The producer can use the kernel splicing */ CO_RFL_MAY_SPLICE = 0x0020, /* The producer can use the kernel splicing */
CO_RFL_TRY_HARDER = 0x0040, /* Try to read till READ0 even on short reads */
}; };
/* flags that can be passed to xprt->snd_buf() and mux->snd_buf() */ /* flags that can be passed to xprt->snd_buf() and mux->snd_buf() */
@@ -376,7 +332,7 @@ enum proto_proxy_side {
/* ctl command used by mux->ctl() */ /* ctl command used by mux->ctl() */
enum mux_ctl_type { enum mux_ctl_type {
MUX_CTL_STATUS, /* Expects an int as output, sets it to a combination of MUX_CTL_STATUS flags */ MUX_CTL_STATUS, /* Expects an int as output, sets it to a combinaison of MUX_CTL_STATUS flags */
MUX_CTL_EXIT_STATUS, /* Expects an int as output, sets the mux exist/error/http status, if known or 0 */ MUX_CTL_EXIT_STATUS, /* Expects an int as output, sets the mux exist/error/http status, if known or 0 */
MUX_CTL_REVERSE_CONN, /* Notify about an active reverse connection accepted. */ MUX_CTL_REVERSE_CONN, /* Notify about an active reverse connection accepted. */
MUX_CTL_SUBS_RECV, /* Notify the mux it must wait for read events again */ MUX_CTL_SUBS_RECV, /* Notify the mux it must wait for read events again */
@@ -433,24 +389,14 @@ union conn_handle {
int fd; /* file descriptor, for regular sockets (CO_FL_FDLESS=0) */ int fd; /* file descriptor, for regular sockets (CO_FL_FDLESS=0) */
}; };
enum xprt_capabilities {
XPRT_CAN_SPLICE,
};
enum xprt_splice_cap {
XPRT_CONN_CAN_NOT_SPLICE, /* This connection can't, and won't ever be able to splice */
XPRT_CONN_COULD_SPLICE, /* This connection can't splice, but may later */
XPRT_CONN_CAN_SPLICE /* This connection can splice */
};
/* xprt_ops describes transport-layer operations for a connection. They /* xprt_ops describes transport-layer operations for a connection. They
* generally run over a socket-based control layer, but not always. Some * generally run over a socket-based control layer, but not always. Some
* of them are used for data transfer with the upper layer (rcv_*, snd_*) * of them are used for data transfer with the upper layer (rcv_*, snd_*)
* and the other ones are used to setup and release the transport layer. * and the other ones are used to setup and release the transport layer.
*/ */
struct xprt_ops { struct xprt_ops {
size_t (*rcv_buf)(struct connection *conn, void *xprt_ctx, struct buffer *buf, size_t count, void *msg_control, size_t *msg_controllen, int flags); /* recv callback */ size_t (*rcv_buf)(struct connection *conn, void *xprt_ctx, struct buffer *buf, size_t count, int flags); /* recv callback */
size_t (*snd_buf)(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, void *msg_control, size_t msg_controllen, int flags); /* send callback */ size_t (*snd_buf)(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, int flags); /* send callback */
int (*rcv_pipe)(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count); /* recv-to-pipe callback */ int (*rcv_pipe)(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count); /* recv-to-pipe callback */
int (*snd_pipe)(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count); /* send-to-pipe callback */ int (*snd_pipe)(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count); /* send-to-pipe callback */
void (*shutr)(struct connection *conn, void *xprt_ctx, int); /* shutr function */ void (*shutr)(struct connection *conn, void *xprt_ctx, int); /* shutr function */
@@ -474,12 +420,6 @@ struct xprt_ops {
struct ssl_sock_ctx *(*get_ssl_sock_ctx)(struct connection *); /* retrieve the ssl_sock_ctx in use, or NULL if none */ struct ssl_sock_ctx *(*get_ssl_sock_ctx)(struct connection *); /* retrieve the ssl_sock_ctx in use, or NULL if none */
int (*show_fd)(struct buffer *, const struct connection *, const void *ctx); /* append some data about xprt for "show fd"; returns non-zero if suspicious */ int (*show_fd)(struct buffer *, const struct connection *, const void *ctx); /* append some data about xprt for "show fd"; returns non-zero if suspicious */
void (*dump_info)(struct buffer *, const struct connection *); void (*dump_info)(struct buffer *, const struct connection *);
/*
* Returns the value for various capabilities.
* Returns 0 if the capability is known, with the actual value in arg,
* or -1 otherwise
*/
int (*get_capability)(struct connection *connection, void *xprt_ctx, enum xprt_capabilities, void *arg);
}; };
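
A caller-side sketch of the contract described above (assuming XPRT_CAN_SPLICE
reports one of the xprt_splice_cap values; the surrounding code is
hypothetical):

	int v = 0;

	if (conn->xprt->get_capability &&
	    conn->xprt->get_capability(conn, conn->xprt_ctx, XPRT_CAN_SPLICE, &v) == 0) {
		/* v now holds an xprt_splice_cap value */
	}
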
/* mux_ops describes the mux operations, which are to be performed at the /* mux_ops describes the mux operations, which are to be performed at the
@@ -509,6 +449,8 @@ struct mux_ops {
int (*unsubscribe)(struct stconn *sc, int event_type, struct wait_event *es); /* Unsubscribe <es> from events */ int (*unsubscribe)(struct stconn *sc, int event_type, struct wait_event *es); /* Unsubscribe <es> from events */
int (*sctl)(struct stconn *sc, enum mux_sctl_type mux_sctl, void *arg); /* Provides information about the mux stream */ int (*sctl)(struct stconn *sc, enum mux_sctl_type mux_sctl, void *arg); /* Provides information about the mux stream */
int (*avail_streams)(struct connection *conn); /* Returns the number of streams still available for a connection */ int (*avail_streams)(struct connection *conn); /* Returns the number of streams still available for a connection */
int (*avail_streams_bidi)(struct connection *conn); /* Returns the number of bidirectional streams still available for a connection */
int (*avail_streams_uni)(struct connection *conn); /* Returns the number of unidirectional streams still available for a connection */
int (*used_streams)(struct connection *conn); /* Returns the number of streams in use on a connection. */ int (*used_streams)(struct connection *conn); /* Returns the number of streams in use on a connection. */
void (*destroy)(void *ctx); /* Let the mux know one of its users left, so it may have to disappear */ void (*destroy)(void *ctx); /* Let the mux know one of its users left, so it may have to disappear */
int (*ctl)(struct connection *conn, enum mux_ctl_type mux_ctl, void *arg); /* Provides information about the mux connection */ int (*ctl)(struct connection *conn, enum mux_ctl_type mux_ctl, void *arg); /* Provides information about the mux connection */
@@ -567,7 +509,7 @@ enum conn_hash_params_t {
#define CONN_HASH_PARAMS_TYPE_COUNT 7 #define CONN_HASH_PARAMS_TYPE_COUNT 7
#define CONN_HASH_PAYLOAD_LEN \ #define CONN_HASH_PAYLOAD_LEN \
(((sizeof(((struct conn_hash_node *)0)->key)) * 8) - CONN_HASH_PARAMS_TYPE_COUNT) (((sizeof(((struct conn_hash_node *)0)->node.key)) * 8) - CONN_HASH_PARAMS_TYPE_COUNT)
#define CONN_HASH_GET_PAYLOAD(hash) \ #define CONN_HASH_GET_PAYLOAD(hash) \
(((hash) << CONN_HASH_PARAMS_TYPE_COUNT) >> CONN_HASH_PARAMS_TYPE_COUNT) (((hash) << CONN_HASH_PARAMS_TYPE_COUNT) >> CONN_HASH_PARAMS_TYPE_COUNT)
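
Concretely, with the 64-bit key above and 7 parameter-type bits,
CONN_HASH_PAYLOAD_LEN is 57, and the shift pair in CONN_HASH_GET_PAYLOAD
simply clears the 7 top bits:

	/* equivalent formulation for a 64-bit key:
	 *   CONN_HASH_PAYLOAD_LEN       == 64 - 7 == 57
	 *   CONN_HASH_GET_PAYLOAD(hash) == hash & ((1ULL << 57) - 1)
	 */
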
@@ -599,14 +541,6 @@ struct conn_tlv_list {
} __attribute__((packed)); } __attribute__((packed));
/* node for backend connection in the idle trees for http-reuse
* A connection is identified by a hash generated from its specific parameters
*/
struct conn_hash_node {
struct ceb_node node; /* indexes the hashing key for safe/idle/avail */
uint64_t key; /* the hashing key, also used by session-owned */
};
/* This structure describes a connection with its methods and data. /* This structure describes a connection with its methods and data.
* A connection may be performed to proxy or server via a local or remote * A connection may be performed to proxy or server via a local or remote
* socket, and can also be made to an internal applet. It can support * socket, and can also be made to an internal applet. It can support
@@ -631,14 +565,12 @@ struct connection {
/* second cache line */ /* second cache line */
struct wait_event *subs; /* Task to wake when awaited events are ready */ struct wait_event *subs; /* Task to wake when awaited events are ready */
union { union {
/* Backend connections only */ struct list idle_list; /* list element for idle connection in server idle list */
struct { struct mt_list toremove_list; /* list element when idle connection is ready to be purged */
struct mt_list toremove_list; /* list element when idle connection is ready to be purged */ };
struct list idle_list; /* list element for idle connection in server idle list */ union {
struct list sess_el; /* used by private connections, list elem into session */ struct list sess_el; /* used by private backend conns, list elem into session */
}; struct list stopping_list; /* used by frontend conns, attach point in mux stopping list */
/* Frontend connections only */
struct list stopping_list; /* attach point in mux stopping list */
}; };
union conn_handle handle; /* connection handle at the socket layer */ union conn_handle handle; /* connection handle at the socket layer */
const struct netns_entry *proxy_netns; const struct netns_entry *proxy_netns;
@@ -652,7 +584,7 @@ struct connection {
/* used to identify a backend connection for http-reuse, /* used to identify a backend connection for http-reuse,
* thus only present if conn.target is of type OBJ_TYPE_SERVER * thus only present if conn.target is of type OBJ_TYPE_SERVER
*/ */
struct conn_hash_node hash_node; struct conn_hash_node *hash_node;
/* Members used if connection must be reversed. */ /* Members used if connection must be reversed. */
struct { struct {
@@ -660,18 +592,24 @@
struct buffer name; /* Only used for passive reverse. Used as SNI when connection added to server idle pool. */ struct buffer name; /* Only used for passive reverse. Used as SNI when connection added to server idle pool. */
} reverse; } reverse;
uint64_t sni_hash; /* Hash of the SNI. Used to cache the TLS session and try to reuse it. Set to 0 if there is no SNI */
uint32_t term_evts_log; /* Termination events log: first 4 events reported from fd, handshake or xprt */ uint32_t term_evts_log; /* Termination events log: first 4 events reported from fd, handshake or xprt */
uint32_t mark; /* set network mark, if CO_FL_OPT_MARK is set */ uint32_t mark; /* set network mark, if CO_FL_OPT_MARK is set */
uint8_t tos; /* set ip tos, if CO_FL_OPT_TOS is set */ uint8_t tos; /* set ip tos, if CO_FL_OPT_TOS is set */
}; };
/* node for backend connection in the idle trees for http-reuse
* A connection is identified by a hash generated from its specific parameters
*/
struct conn_hash_node {
struct eb64_node node; /* contains the hashing key */
struct connection *conn; /* connection owner of the node */
};
struct mux_proto_list { struct mux_proto_list {
const struct ist token; /* token name and length. Empty is catch-all */ const struct ist token; /* token name and length. Empty is catch-all */
enum proto_proxy_mode mode; enum proto_proxy_mode mode;
enum proto_proxy_side side; enum proto_proxy_side side;
const struct mux_ops *mux; const struct mux_ops *mux;
const char *alpn; /* Default alpn to set by default when the mux protocol is forced (optional, in binary form) */
struct list list; struct list list;
}; };
@@ -795,7 +733,7 @@ struct idle_conns {
struct mt_list toremove_conns; struct mt_list toremove_conns;
struct task *cleanup_task; struct task *cleanup_task;
__decl_thread(HA_SPINLOCK_T idle_conns_lock); __decl_thread(HA_SPINLOCK_T idle_conns_lock);
} THREAD_ALIGNED(); } THREAD_ALIGNED(64);
/* Termination events logs: /* Termination events logs:


@@ -39,6 +39,7 @@
#include <haproxy/task-t.h> #include <haproxy/task-t.h>
extern struct pool_head *pool_head_connection; extern struct pool_head *pool_head_connection;
extern struct pool_head *pool_head_conn_hash_node;
extern struct pool_head *pool_head_sockaddr; extern struct pool_head *pool_head_sockaddr;
extern struct pool_head *pool_head_pp_tlv_128; extern struct pool_head *pool_head_pp_tlv_128;
extern struct pool_head *pool_head_pp_tlv_256; extern struct pool_head *pool_head_pp_tlv_256;
@@ -74,7 +75,7 @@ int conn_send_socks4_proxy_request(struct connection *conn);
int conn_recv_socks4_proxy_response(struct connection *conn); int conn_recv_socks4_proxy_response(struct connection *conn);
/* If we delayed the mux creation because we were waiting for the handshake, do it now */ /* If we delayed the mux creation because we were waiting for the handshake, do it now */
int conn_create_mux(struct connection *conn, int *closed_connection); int conn_create_mux(struct connection *conn);
int conn_notify_mux(struct connection *conn, int old_flags, int forced_wake); int conn_notify_mux(struct connection *conn, int old_flags, int forced_wake);
int conn_upgrade_mux_fe(struct connection *conn, void *ctx, struct buffer *buf, int conn_upgrade_mux_fe(struct connection *conn, void *ctx, struct buffer *buf,
struct ist mux_proto, int mode); struct ist mux_proto, int mode);
@@ -83,13 +84,14 @@ int conn_install_mux_be(struct connection *conn, void *ctx, struct session *sess
const struct mux_ops *force_mux_ops); const struct mux_ops *force_mux_ops);
int conn_install_mux_chk(struct connection *conn, void *ctx, struct session *sess); int conn_install_mux_chk(struct connection *conn, void *ctx, struct session *sess);
void conn_delete_from_tree(struct connection *conn, int thr); void conn_delete_from_tree(struct connection *conn);
void conn_init(struct connection *conn, void *target); void conn_init(struct connection *conn, void *target);
struct connection *conn_new(void *target); struct connection *conn_new(void *target);
void conn_free(struct connection *conn); void conn_free(struct connection *conn);
void conn_release(struct connection *conn); void conn_release(struct connection *conn);
void conn_set_errno(struct connection *conn, int err); void conn_set_errno(struct connection *conn, int err);
struct conn_hash_node *conn_alloc_hash_node(struct connection *conn);
struct sockaddr_storage *sockaddr_alloc(struct sockaddr_storage **sap, const struct sockaddr_storage *orig, socklen_t len); struct sockaddr_storage *sockaddr_alloc(struct sockaddr_storage **sap, const struct sockaddr_storage *orig, socklen_t len);
void sockaddr_free(struct sockaddr_storage **sap); void sockaddr_free(struct sockaddr_storage **sap);
@@ -706,19 +708,6 @@ static inline void conn_set_reverse(struct connection *conn, enum obj_type *targ
conn->reverse.target = target; conn->reverse.target = target;
} }
/* Returns idle-ping value for <conn> depending on its proxy side. */
static inline int conn_idle_ping(const struct connection *conn)
{
if (conn_is_back(conn)) {
struct server *srv = objt_server(conn->target);
return srv ? srv->idle_ping : TICK_ETERNITY;
}
else {
struct session *sess = conn->owner;
return sess->listener->bind_conf->idle_ping;
}
}
/* Returns the listener instance for connection used for active reverse. */ /* Returns the listener instance for connection used for active reverse. */
static inline struct listener *conn_active_reverse_listener(const struct connection *conn) static inline struct listener *conn_active_reverse_listener(const struct connection *conn)
{ {
@@ -795,7 +784,7 @@ static inline const char *tevt_evts2str(uint32_t evts)
if (!evt) if (!evt)
continue; continue;
/* Backend location are displayed in capital letter */ /* Backend location are displayed in captial letter */
is_back = !!((evt >> 4) & 0x8); is_back = !!((evt >> 4) & 0x8);
switch ((enum term_event_loc)((evt >> 4) & ~0x8)) { switch ((enum term_event_loc)((evt >> 4) & ~0x8)) {
case tevt_loc_fd: tevt_evts_str[idx++] = (is_back ? 'F' : 'f'); break; case tevt_loc_fd: tevt_evts_str[idx++] = (is_back ? 'F' : 'f'); break;


@@ -25,164 +25,108 @@
#include <haproxy/freq_ctr-t.h> #include <haproxy/freq_ctr-t.h>
#define COUNTERS_SHARED_F_NONE 0x0000
#define COUNTERS_SHARED_F_LOCAL 0x0001 // shared counter struct is actually process-local
// common to fe_counters_shared and be_counters_shared
#define COUNTERS_SHARED \
struct { \
uint16_t flags; /* COUNTERS_SHARED_F flags */\
};
/* /!\ any change performed here will impact shm-stats-file mapping because the
* struct is embedded in shm_stats_file_object struct, so proceed with caution
* and change shm stats file version if needed. Also please always keep this
* struct 64b-aligned.
*/
#define COUNTERS_SHARED_TG \
struct { \
long long srv_aborts; /* aborted responses during DATA phase caused by the server */\
long long cli_aborts; /* aborted responses during DATA phase caused by the client */\
long long internal_errors; /* internal processing errors */\
long long failed_rewrites; /* failed rewrites (warning) */\
long long req_in; /* number of bytes received from the client */\
long long req_out; /* number of bytes sent to the server */\
long long res_in; /* number of bytes received from the server */\
long long res_out; /* number of bytes sent to the client */\
long long denied_resp; /* blocked responses because of security concerns */\
long long denied_req; /* blocked requests because of security concerns */\
long long cum_sess; /* cumulated number of accepted connections */\
/* compression counters, index 0 for requests, 1 for responses */\
long long comp_in[2]; /* input bytes fed to the compressor */\
long long comp_out[2]; /* output bytes emitted by the compressor */\
long long comp_byp[2]; /* input bytes that bypassed the compressor (cpu/ram/bw limitation) */\
struct freq_ctr sess_per_sec; /* sessions per second on this server (3x32b) */\
unsigned int last_state_change; /* last time, when the state was changed (32b) */\
/* we're still 64b-aligned here */ \
}
// for convenience (generic pointer)
struct counters_shared {
COUNTERS_SHARED;
struct {
COUNTERS_SHARED_TG;
} **tg;
};
/*
* /!\ any change performed here will impact shm-stats-file mapping because the
* struct is embedded in shm_stats_file_object struct, so proceed with caution
* and change shm stats file version if needed
*/
struct fe_counters_shared_tg {
COUNTERS_SHARED_TG;
long long denied_sess; /* denied session requests (tcp-req-sess rules) */
long long denied_conn; /* denied connection requests (tcp-req-conn rules) */
long long intercepted_req; /* number of monitoring or stats requests intercepted by the frontend */
long long cum_conn; /* cumulated number of received connections */
struct freq_ctr conn_per_sec; /* received connections per second on the frontend */
struct freq_ctr req_per_sec; /* HTTP requests per second on the frontend */
long long cum_sess_ver[3]; /* cumulated number of h1/h2/h3 sessions */
union {
struct {
long long cum_req[4]; /* cumulated number of processed other/h1/h2/h3 requests */
long long cache_hits; /* cache hits */
long long cache_lookups;/* cache lookups */
long long comp_rsp; /* number of compressed responses */
long long rsp[6]; /* http response codes */
} http;
} p; /* protocol-specific stats */
long long failed_req; /* failed requests (eg: invalid or timeout) */
} ALIGNED(8);
struct fe_counters_shared {
COUNTERS_SHARED;
struct fe_counters_shared_tg **tg;
};
/* counters used by listeners and frontends */ /* counters used by listeners and frontends */
struct fe_counters { struct fe_counters {
struct fe_counters_shared shared; /* shared counters */
unsigned int conn_max; /* max # of active sessions */ unsigned int conn_max; /* max # of active sessions */
long long cum_conn; /* cumulated number of received connections */
long long cum_sess; /* cumulated number of accepted connections */
long long cum_sess_ver[3]; /* cumulated number of h1/h2/h3 sessions */
unsigned int cps_max; /* maximum of new connections received per second */ unsigned int cps_max; /* maximum of new connections received per second */
unsigned int sps_max; /* maximum of new connections accepted per second (sessions) */ unsigned int sps_max; /* maximum of new connections accepted per second (sessions) */
struct freq_ctr _sess_per_sec; /* sessions per second on this frontend, used to compute sps_max (internal use only) */
struct freq_ctr _conn_per_sec; /* connections per second on this frontend, used to compute cps_max (internal use only) */ long long bytes_in; /* number of bytes transferred from the client to the server */
long long bytes_out; /* number of bytes transferred from the server to the client */
/* compression counters, index 0 for requests, 1 for responses */
long long comp_in[2]; /* input bytes fed to the compressor */
long long comp_out[2]; /* output bytes emitted by the compressor */
long long comp_byp[2]; /* input bytes that bypassed the compressor (cpu/ram/bw limitation) */
long long denied_req; /* blocked requests because of security concerns */
long long denied_resp; /* blocked responses because of security concerns */
long long failed_req; /* failed requests (eg: invalid or timeout) */
long long denied_conn; /* denied connection requests (tcp-req-conn rules) */
long long denied_sess; /* denied session requests (tcp-req-sess rules) */
long long failed_rewrites; /* failed rewrites (warning) */
long long internal_errors; /* internal processing errors */
long long cli_aborts; /* aborted responses during DATA phase caused by the client */
long long srv_aborts; /* aborted responses during DATA phase caused by the server */
long long intercepted_req; /* number of monitoring or stats requests intercepted by the frontend */
union { union {
struct { struct {
unsigned int rps_max; /* maximum of new HTTP requests second observed */ long long cum_req[4]; /* cumulated number of processed other/h1/h2/h3 requests */
struct freq_ctr _req_per_sec; /* HTTP requests per second on the frontend, only used to compute rps_max */
} http;
} p; /* protocol-specific stats */
};
/* /!\ any change performed here will impact shm-stats-file mapping because the
* struct is embedded in shm_stats_file_object struct, so proceed with caution
* and change shm stats file version if needed. Pay attention to keeping the
* struct 64b-aligned.
*/
struct be_counters_shared_tg {
COUNTERS_SHARED_TG;
long long cum_lbconn; /* cumulated number of sessions processed by load balancing (BE only) */
long long connect; /* number of connection establishment attempts */
long long reuse; /* number of connection reuses */
long long failed_checks, failed_hana; /* failed health checks and health analyses for servers */
long long down_trans; /* up->down transitions */
union {
struct {
long long cum_req; /* cumulated number of processed HTTP requests */
long long cache_hits; /* cache hits */
long long cache_lookups;/* cache lookups */
long long comp_rsp; /* number of compressed responses */ long long comp_rsp; /* number of compressed responses */
unsigned int rps_max; /* maximum of new HTTP requests second observed */
long long rsp[6]; /* http response codes */ long long rsp[6]; /* http response codes */
long long cache_lookups;/* cache lookups */
long long cache_hits; /* cache hits */
} http; } http;
} p; /* protocol-specific stats */ } p; /* protocol-specific stats */
long long redispatches; /* retried and redispatched connections (BE only) */ struct freq_ctr sess_per_sec; /* sessions per second on this server */
long long retries; /* retried and redispatched connections (BE only) */ struct freq_ctr req_per_sec; /* HTTP requests per second on the frontend */
long long failed_resp; /* failed responses (BE only) */ struct freq_ctr conn_per_sec; /* received connections per second on the frontend */
long long failed_conns; /* failed connect() attempts (BE only) */
unsigned int last_sess; /* last session time */
/* 32-bit hole here */
} ALIGNED(8);
struct be_counters_shared { unsigned long last_change; /* last time, when the state was changed */
COUNTERS_SHARED;
struct be_counters_shared_tg **tg;
}; };
/* counters used by servers and backends */ /* counters used by servers and backends */
struct be_counters { struct be_counters {
struct be_counters_shared shared; /* shared counters */
unsigned int conn_max; /* max # of active sessions */ unsigned int conn_max; /* max # of active sessions */
long long cum_sess; /* cumulated number of accepted connections */
long long cum_lbconn; /* cumulated number of sessions processed by load balancing (BE only) */
unsigned int cps_max; /* maximum of new connections received per second */ unsigned int cps_max; /* maximum of new connections received per second */
unsigned int sps_max; /* maximum of new connections accepted per second (sessions) */ unsigned int sps_max; /* maximum of new connections accepted per second (sessions) */
unsigned int nbpend_max; /* max number of pending connections with no server assigned yet */ unsigned int nbpend_max; /* max number of pending connections with no server assigned yet */
unsigned int cur_sess_max; /* max number of currently active sessions */ unsigned int cur_sess_max; /* max number of currently active sessions */
struct freq_ctr _sess_per_sec; /* sessions per second on this frontend, used to compute sps_max (internal use only) */ long long bytes_in; /* number of bytes transferred from the client to the server */
long long bytes_out; /* number of bytes transferred from the server to the client */
/* compression counters, index 0 for requests, 1 for responses */
long long comp_in[2]; /* input bytes fed to the compressor */
long long comp_out[2]; /* output bytes emitted by the compressor */
long long comp_byp[2]; /* input bytes that bypassed the compressor (cpu/ram/bw limitation) */
long long denied_req; /* blocked requests because of security concerns */
long long denied_resp; /* blocked responses because of security concerns */
long long connect; /* number of connection establishment attempts */
long long reuse; /* number of connection reuses */
long long failed_conns; /* failed connect() attempts (BE only) */
long long failed_resp; /* failed responses (BE only) */
long long cli_aborts; /* aborted responses during DATA phase caused by the client */
long long srv_aborts; /* aborted responses during DATA phase caused by the server */
long long retries; /* retried and redispatched connections (BE only) */
long long redispatches; /* retried and redispatched connections (BE only) */
long long failed_rewrites; /* failed rewrites (warning) */
long long internal_errors; /* internal processing errors */
long long failed_checks, failed_hana; /* failed health checks and health analyses for servers */
long long down_trans; /* up->down transitions */
unsigned int q_time, c_time, d_time, t_time; /* sums of conn_time, queue_time, data_time, total_time */ unsigned int q_time, c_time, d_time, t_time; /* sums of conn_time, queue_time, data_time, total_time */
unsigned int qtime_max, ctime_max, dtime_max, ttime_max; /* maximum of conn_time, queue_time, data_time, total_time observed */ unsigned int qtime_max, ctime_max, dtime_max, ttime_max; /* maximum of conn_time, queue_time, data_time, total_time observed */
union { union {
struct { struct {
long long cum_req; /* cumulated number of processed HTTP requests */
long long comp_rsp; /* number of compressed responses */
unsigned int rps_max; /* maximum of new HTTP requests second observed */ unsigned int rps_max; /* maximum of new HTTP requests second observed */
long long rsp[6]; /* http response codes */
long long cache_lookups;/* cache lookups */
long long cache_hits; /* cache hits */
} http; } http;
} p; /* protocol-specific stats */ } p; /* protocol-specific stats */
struct freq_ctr sess_per_sec; /* sessions per second on this server */
unsigned long last_sess; /* last session time */
unsigned long last_change; /* last time, when the state was changed */
}; };
#endif /* _HAPROXY_COUNTERS_T_H */ #endif /* _HAPROXY_COUNTERS_T_H */


@@ -1,104 +0,0 @@
/*
* include/haproxy/counters.h
* objects counters management
*
* Copyright 2025 HAProxy Technologies
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation, version 2.1
* exclusively.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _HAPROXY_COUNTERS_H
# define _HAPROXY_COUNTERS_H
#include <stddef.h>
#include <haproxy/counters-t.h>
#include <haproxy/guid-t.h>
int counters_fe_shared_prepare(struct fe_counters_shared *counters, const struct guid_node *guid, char **errmsg);
int counters_be_shared_prepare(struct be_counters_shared *counters, const struct guid_node *guid, char **errmsg);
void counters_fe_shared_drop(struct fe_counters_shared *counters);
void counters_be_shared_drop(struct be_counters_shared *counters);
/* time oriented helper: get last time (relative to current time) on a given
* <scounter> array, for <elem> member (one member per thread group) which is
* assumed to be unsigned long type.
*
* wrapping is handled by taking the lowest diff between now and last counter.
* But since wrapping is expected once every ~136 years (starting 01/01/1970),
* perhaps it's not worth the extra CPU cost... let's see.
*/
#define COUNTERS_SHARED_LAST_OFFSET(scounters, type, offset) \
({ \
unsigned long last = 0; \
unsigned long now_seconds = ns_to_sec(now_ns); \
int it; \
\
if (scounters) \
last = HA_ATOMIC_LOAD((type *)((char *)scounters[0] + offset));\
for (it = 1; (it < global.nbtgroups && scounters); it++) { \
unsigned long cur = HA_ATOMIC_LOAD((type *)((char *)scounters[it] + offset));\
if ((now_seconds - cur) < (now_seconds - last)) \
last = cur; \
} \
last; \
})
#define COUNTERS_SHARED_LAST(scounters, elem) \
({ \
int offset = offsetof(typeof(**scounters), elem); \
unsigned long last = COUNTERS_SHARED_LAST_OFFSET(scounters, typeof(scounters[0]->elem), offset); \
\
last; \
})
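
A usage sketch (the server pointer is hypothetical): picking the most recent
last_sess value across all thread groups of a server's shared counters.

	unsigned long last;

	last = COUNTERS_SHARED_LAST(srv->counters.shared.tg, last_sess);
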
/* generic unsigned integer addition for all <elem> members from
* <scounters> array (one member per thread group)
* <rfunc> is function taking pointer as parameter to read from the memory
* location pointed to scounters[it].elem
*/
#define COUNTERS_SHARED_TOTAL_OFFSET(scounters, type, offset, rfunc) \
({ \
uint64_t __ret = 0; \
int it; \
\
for (it = 0; (it < global.nbtgroups && scounters); it++) \
__ret += rfunc((type *)((char *)scounters[it] + offset)); \
__ret; \
})
#define COUNTERS_SHARED_TOTAL(scounters, elem, rfunc) \
({ \
int offset = offsetof(typeof(**scounters), elem); \
uint64_t __ret = COUNTERS_SHARED_TOTAL_OFFSET(scounters, typeof(scounters[0]->elem), offset, rfunc);\
\
__ret; \
})
/* same as COUNTERS_SHARED_TOTAL but with <rfunc> taking 2 extras arguments:
* <arg1> and <arg2>
*/
#define COUNTERS_SHARED_TOTAL_ARG2(scounters, elem, rfunc, arg1, arg2) \
({ \
uint64_t __ret = 0; \
int it; \
\
for (it = 0; (it < global.nbtgroups && scounters); it++) \
__ret += rfunc(&scounters[it]->elem, arg1, arg2); \
__ret; \
})
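
Matching usage sketches (pointers hypothetical): a plain sum reads each
per-thread-group member atomically, while the ARG2 form suits readers such as
freq_ctr_total() which take a period and a pending value (assumed signature).

	uint64_t sess = COUNTERS_SHARED_TOTAL(px->fe_counters.shared.tg, cum_sess,
	                                      HA_ATOMIC_LOAD);

	uint64_t rate = COUNTERS_SHARED_TOTAL_ARG2(px->fe_counters.shared.tg,
	                                           sess_per_sec, freq_ctr_total,
	                                           1000, -1); /* 1s period */
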
#endif /* _HAPROXY_COUNTERS_H */


@@ -1,68 +0,0 @@
#ifndef _HAPROXY_CPU_TOPO_T_H
#define _HAPROXY_CPU_TOPO_T_H
#include <haproxy/api-t.h>
#include <haproxy/cpuset-t.h>
/* CPU state flags used with CPU topology detection (ha_cpu_topo.st). We try
* hard to rely on known info. For example we don't claim a CPU is bound or
* online if we don't know, reason why instead we store offline or excluded.
* Other flags like DONT_USE indicate a user's choice while IGNORED indicates
* the result of an automated selection. Two marks are available for allocation
* algorithms to temporarily compare/select/evict CPUs. These must be cleared
* after use.
*/
#define HA_CPU_F_EXCLUDED 0x0001 // this CPU was excluded at boot
#define HA_CPU_F_OFFLINE 0x0002 // this CPU is known to be offline
#define HA_CPU_F_DONT_USE 0x0004 // this CPU must not be used
#define HA_CPU_F_IGNORED 0x0008 // this CPU will not be used
#define HA_CPU_F_EXCL_MASK 0x000F // mask of bits that exclude a CPU
#define HA_CPU_F_MARK1 0x0010 // for temporary internal use only
#define HA_CPU_F_MARK2 0x0020 // for temporary internal use only
#define HA_CPU_F_MARK_MASK 0x0030 // mask to drop the two marks above
/* CPU topology descriptor. All the ID and IDX fields are initialized to -1
* when not known. The identifiers there are mostly assigned on the fly using
* increments and have no particular representation except the fact that CPUs
* having the same ID there share the same designated resource. The flags are
* preset to zero.
*/
struct ha_cpu_topo {
ushort st; // state flags (HA_CPU_F_*)
short idx; // CPU index as passed to the OS. Initially the entry index.
short ca_id[5]; // cache ID for each level (L0 to L4)
short ts_id; // thread-set identifier (generally core number)
short cl_gid; // cluster global identifier (group of more intimate cores)
short cl_lid; // cluster local identifier (per {pkg,node})
short no_id; // NUMA node identifier
short pk_id; // package identifier
short th_cnt; // number of siblings threads
short th_id; // thread ID among siblings of the same core
short capa; // estimated CPU relative capacity; more is better
};
/* Description of a CPU cluster. */
struct ha_cpu_cluster {
uint idx; /* used when sorting, normally the entry index */
uint capa; /* total capacity */
uint nb_cores; /* total distinct cores */
uint nb_cpu; /* total CPUs */
};
/* Description of a CPU selection policy. For now it only associates an option
* name with a callback function that is supposed to adjust the global.nbthread
* and global.nbtgroups based on the policy, the topology, and the constraints
* on the number of threads which must be between tmin and tmax included, and
* the number of thread groups which must be between gmin and gmax included.
* The callback also takes the policy number (cpu_policy) and a pointer to a
* string to write an error to in case of failure (in which case ret must be
* < 0 and the caller will free the location). More settings might come later.
*/
struct ha_cpu_policy {
const char *name; /* option name in the configuration */
const char *desc; /* short description for help messages */
int (*fct)(int policy, int tmin, int tmax, int gmin, int gmax, char **err);
int arg; /* optional arg for the function */
};
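
A sketch of a table entry satisfying this contract (the callback body is left
out and its name is hypothetical):

	static int cpu_policy_group_by_cluster(int policy, int tmin, int tmax,
	                                       int gmin, int gmax, char **err);

	static struct ha_cpu_policy ha_cpu_policy[] = {
		{ .name = "group-by-cluster",
		  .desc = "one thread group per CPU cluster",
		  .fct  = cpu_policy_group_by_cluster,
		  .arg  = 0 },
	};
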
#endif /* _HAPROXY_CPU_TOPO_T_H */


@@ -1,85 +0,0 @@
#ifndef _HAPROXY_CPU_TOPO_H
#define _HAPROXY_CPU_TOPO_H
#include <haproxy/api.h>
#include <haproxy/chunk.h>
#include <haproxy/cpuset-t.h>
#include <haproxy/cpu_topo-t.h>
extern int cpu_topo_maxcpus;
extern int cpu_topo_lastcpu;
extern struct ha_cpu_topo *ha_cpu_topo;
/* non-zero if we're certain that taskset or similar was used to force CPUs */
extern int cpu_mask_forced;
/* Detects CPUs that are online on the system. It may rely on FS access (e.g.
* /sys on Linux). Returns the number of CPUs detected or 0 if the detection
* failed.
*/
int ha_cpuset_detect_online(struct hap_cpuset *set);
/* Detects the CPUs that will be used based on the ones the process is bound to.
* Returns non-zero on success, zero on failure. Note that it may not be
* performed in the function above because some calls may rely on other items
* being allocated (e.g. trash).
*/
int cpu_detect_usable(void);
/* detect the CPU topology based on info in /sys */
int cpu_detect_topology(void);
/* fix missing info in the CPU topology */
void cpu_fixup_topology(void);
/* compose clusters */
void cpu_compose_clusters(void);
/* apply remaining topology-based cpu set restrictions */
void cpu_refine_cpusets(void);
/* apply the chosen CPU policy. Returns < 0 on failure with a message in *err
* that must be freed by the caller if non-null.
*/
int cpu_apply_policy(int tmin, int tmax, int gmin, int gmax, char **err);
/* Detects CPUs that are bound to the current process. Returns the number of
* CPUs detected or 0 if the detection failed.
*/
int ha_cpuset_detect_bound(struct hap_cpuset *set);
/* Returns true if at least one cpu-map directive was configured, otherwise
* false.
*/
int cpu_map_configured(void);
/* Dump the CPU topology <topo> for up to cpu_topo_maxcpus CPUs for
* debugging purposes. Offline CPUs are skipped.
*/
void cpu_topo_debug(const struct ha_cpu_topo *topo);
/* Dump the summary of CPU topology <topo>, i.e. clusters info and thread-cpu
* bindings.
*/
void cpu_topo_dump_summary(const struct ha_cpu_topo *topo, struct buffer *trash);
/* re-order a CPU topology array by locality to help form groups. */
void cpu_reorder_by_locality(struct ha_cpu_topo *topo, int entries);
/* re-order a CPU topology array by CPU index only, to undo the function above,
* in case other calls need to be made on top of this.
*/
void cpu_reorder_by_index(struct ha_cpu_topo *topo, int entries);
/* re-order a CPU topology array by cluster id. */
void cpu_reorder_by_cluster(struct ha_cpu_topo *topo, int entries);
/* Functions used by qsort to compare hardware CPUs (not meant to be used from
* outside cpu_topo).
*/
int _cmp_cpu_index(const void *a, const void *b);
int _cmp_cpu_locality(const void *a, const void *b);
int _cmp_cpu_cluster(const void *a, const void *b);
int _cmp_cpu_cluster_capa(const void *a, const void *b);
#endif /* _HAPROXY_CPU_TOPO_H */


@@ -44,13 +44,15 @@ int ha_cpuset_ffs(const struct hap_cpuset *set);
*/ */
void ha_cpuset_assign(struct hap_cpuset *dst, struct hap_cpuset *src); void ha_cpuset_assign(struct hap_cpuset *dst, struct hap_cpuset *src);
/* returns true if the sets are equal */
int ha_cpuset_isequal(const struct hap_cpuset *dst, const struct hap_cpuset *src);
/* Returns the biggest index plus one usable on the platform. /* Returns the biggest index plus one usable on the platform.
*/ */
int ha_cpuset_size(void); int ha_cpuset_size(void);
/* Detects CPUs that are bound to the current process. Returns the number of
* CPUs detected or 0 if the detection failed.
*/
int ha_cpuset_detect_bound(struct hap_cpuset *set);
/* Parse cpu sets. Each CPU set is either a unique number between 0 and /* Parse cpu sets. Each CPU set is either a unique number between 0 and
* ha_cpuset_size() - 1 or a range with two such numbers delimited by a dash * ha_cpuset_size() - 1 or a range with two such numbers delimited by a dash
* ('-'). Each CPU set can be a list of unique numbers or ranges separated by * ('-'). Each CPU set can be a list of unique numbers or ranges separated by
@@ -60,18 +62,15 @@ int ha_cpuset_size(void);
*/ */
int parse_cpu_set(const char **args, struct hap_cpuset *cpu_set, char **err); int parse_cpu_set(const char **args, struct hap_cpuset *cpu_set, char **err);
/* Print a cpu-set as compactly as possible and returns the output length.
* Returns >size if it cannot emit anything due to length constraints, in which
* case it will match what is at least needed to go further, and may return 0
* for an empty set. It will emit series of comma-delimited ranges in the form
* "beg[-end]".
*/
int print_cpu_set(char *output, size_t size, const struct hap_cpuset *cpu_set);
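
A round-trip sketch tying the two together (error handling elided, buffer size
arbitrary):

	const char *args[] = { "0-3,8", NULL };
	struct hap_cpuset set;
	char buf[64];
	char *err = NULL;

	parse_cpu_set(args, &set, &err);       /* fills <set> from the string */
	print_cpu_set(buf, sizeof(buf), &set); /* emits "0-3,8" back into buf */
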
/* Parse a linux cpu map string representing a numeric cpu mask map /* Parse a linux cpu map string representing a numeric cpu mask map
* The cpu map string is a list of 4-byte hex strings separated by commas, with * The cpu map string is a list of 4-byte hex strings separated by commas, with
* most-significant byte first, one bit per cpu number. * most-significant byte first, one bit per cpu number.
*/ */
void parse_cpumap(char *cpumap_str, struct hap_cpuset *cpu_set); void parse_cpumap(char *cpumap_str, struct hap_cpuset *cpu_set);
/* Returns true if at least one cpu-map directive was configured, otherwise
* false.
*/
int cpu_map_configured(void);
#endif /* _HAPROXY_CPUSET_H */ #endif /* _HAPROXY_CPUSET_H */
