Mirror of https://github.com/haproxy/haproxy.git (synced 2026-02-10 06:13:20 -05:00)

Compare commits: "master" ... "v3.1-dev10"
No commits in common: "master" and "v3.1-dev10" have entirely different histories.

915 changed files with 29083 additions and 85007 deletions
@@ -1,7 +1,7 @@
 FreeBSD_task:
   freebsd_instance:
     matrix:
-      image_family: freebsd-14-3
+      image_family: freebsd-14-1
   only_if: $CIRRUS_BRANCH =~ 'master|next'
   install_script:
     - pkg update -f && pkg upgrade -y && pkg install -y openssl git gmake lua54 socat pcre2
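The Cirrus task above drives a plain GNU-make build on FreeBSD. For reference, a rough local equivalent under the same package set; the target and flag selection here is an illustrative sketch, not taken from this diff:

    # same toolchain the CI image installs
    pkg update -f && pkg install -y openssl git gmake lua54 socat pcre2
    # HAProxy is built with GNU make ("gmake" on BSD); flags mirror the CI packages
    gmake TARGET=freebsd CC=clang USE_OPENSSL=1 USE_LUA=1 USE_PCRE2=1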
34  .github/actions/setup-vtest/action.yml  vendored
@@ -1,34 +0,0 @@
-name: 'setup VTest'
-description: 'ssss'
-
-runs:
-  using: "composite"
-  steps:
-
-    - name: Setup coredumps
-      if: ${{ startsWith(matrix.os, 'ubuntu-') }}
-      shell: bash
-      run: |
-        sudo sysctl -w fs.suid_dumpable=1
-        sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
-
-    - name: Setup ulimit for core dumps
-      shell: bash
-      run: |
-        # This is required for macOS which does not actually allow to increase
-        # the '-n' soft limit to the hard limit, thus failing to run.
-        ulimit -n 65536
-        ulimit -c unlimited
-
-    - name: Install VTest
-      shell: bash
-      run: |
-        scripts/build-vtest.sh
-
-    - name: Install problem matcher for VTest
-      shell: bash
-      # This allows one to more easily see which tests fail.
-      run: echo "::add-matcher::.github/vtest.json"
-
-
6  .github/h2spec.config  vendored
@@ -19,9 +19,9 @@ defaults

 frontend h2
     mode http
-    bind 127.0.0.1:8443 ssl crt reg-tests/ssl/certs/common.pem alpn h2,http/1.1
-    default_backend h2b
+    bind 127.0.0.1:8443 ssl crt reg-tests/ssl/common.pem alpn h2,http/1.1
+    default_backend h2

-backend h2b
+backend h2
     errorfile 200 .github/errorfile
     http-request deny deny_status 200
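This config is what the compliance workflow (further down) points h2spec at: every request is answered by the 200 errorfile, so only the HTTP/2 protocol layer is exercised. A sketch of driving it by hand, assuming h2spec's usual TLS options:

    $ ./haproxy -D -f .github/h2spec.config
    $ h2spec -h 127.0.0.1 -p 8443 -t -k    # -t TLS, -k skip certificate verification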
125  .github/matrix.py  vendored
@@ -67,37 +67,6 @@ def determine_latest_aws_lc(ssl):
     latest_tag = max(valid_tags, key=aws_lc_version_string_to_num)
     return "AWS_LC_VERSION={}".format(latest_tag[1:])

-def aws_lc_fips_version_string_to_num(version_string):
-    return tuple(map(int, version_string[12:].split('.')))
-
-def aws_lc_fips_version_valid(version_string):
-    return re.match('^AWS-LC-FIPS-[0-9]+(\.[0-9]+)*$', version_string)
-
-@functools.lru_cache(5)
-def determine_latest_aws_lc_fips(ssl):
-    # the AWS-LC-FIPS tags are at the end of the list, so let's get a lot
-    tags = get_all_github_tags("https://api.github.com/repos/aws/aws-lc/tags?per_page=200")
-    if not tags:
-        return "AWS_LC_FIPS_VERSION=failed_to_detect"
-    valid_tags = list(filter(aws_lc_fips_version_valid, tags))
-    latest_tag = max(valid_tags, key=aws_lc_fips_version_string_to_num)
-    return "AWS_LC_FIPS_VERSION={}".format(latest_tag[12:])
-
-def wolfssl_version_string_to_num(version_string):
-    return tuple(map(int, version_string[1:].removesuffix('-stable').split('.')))
-
-def wolfssl_version_valid(version_string):
-    return re.match('^v[0-9]+(\.[0-9]+)*-stable$', version_string)
-
-@functools.lru_cache(5)
-def determine_latest_wolfssl(ssl):
-    tags = get_all_github_tags("https://api.github.com/repos/wolfssl/wolfssl/tags")
-    if not tags:
-        return "WOLFSSL_VERSION=failed_to_detect"
-    valid_tags = list(filter(wolfssl_version_valid, tags))
-    latest_tag = max(valid_tags, key=wolfssl_version_string_to_num)
-    return "WOLFSSL_VERSION={}".format(latest_tag[1:].removesuffix('-stable'))
-
 @functools.lru_cache(5)
 def determine_latest_libressl(ssl):
     try:
@@ -125,11 +94,9 @@ def main(ref_name):
     # Ubuntu

     if "haproxy-" in ref_name:
-        os = "ubuntu-24.04"         # stable branch
-        os_arm = "ubuntu-24.04-arm" # stable branch
+        os = "ubuntu-22.04"         # stable branch
     else:
-        os = "ubuntu-24.04"         # development branch
-        os_arm = "ubuntu-24.04-arm" # development branch
+        os = "ubuntu-latest"        # development branch

     TARGET = "linux-glibc"
     for CC in ["gcc", "clang"]:
@@ -160,6 +127,7 @@ def main(ref_name):
                 "USE_PCRE2_JIT=1",
                 "USE_LUA=1",
                 "USE_OPENSSL=1",
+                "USE_SYSTEMD=1",
                 "USE_WURFL=1",
                 "WURFL_INC=addons/wurfl/dummy",
                 "WURFL_LIB=addons/wurfl/dummy",
@@ -174,37 +142,37 @@ def main(ref_name):

     # ASAN

-    for os_asan in [os, os_arm]:
-        matrix.append(
-            {
-                "name": "{}, {}, ASAN, all features".format(os_asan, CC),
-                "os": os_asan,
-                "TARGET": TARGET,
-                "CC": CC,
-                "FLAGS": [
-                    "USE_OBSOLETE_LINKER=1",
-                    'ARCH_FLAGS="-g -fsanitize=address"',
-                    'OPT_CFLAGS="-O1"',
-                    "USE_ZLIB=1",
-                    "USE_OT=1",
-                    "OT_INC=${HOME}/opt-ot/include",
-                    "OT_LIB=${HOME}/opt-ot/lib",
-                    "OT_RUNPATH=1",
-                    "USE_PCRE2=1",
-                    "USE_PCRE2_JIT=1",
-                    "USE_LUA=1",
-                    "USE_OPENSSL=1",
-                    "USE_WURFL=1",
-                    "WURFL_INC=addons/wurfl/dummy",
-                    "WURFL_LIB=addons/wurfl/dummy",
-                    "USE_DEVICEATLAS=1",
-                    "DEVICEATLAS_SRC=addons/deviceatlas/dummy",
-                    "USE_PROMEX=1",
-                    "USE_51DEGREES=1",
-                    "51DEGREES_SRC=addons/51degrees/dummy/pattern",
-                ],
-            }
-        )
+    matrix.append(
+        {
+            "name": "{}, {}, ASAN, all features".format(os, CC),
+            "os": os,
+            "TARGET": TARGET,
+            "CC": CC,
+            "FLAGS": [
+                "USE_OBSOLETE_LINKER=1",
+                'ARCH_FLAGS="-g -fsanitize=address"',
+                'OPT_CFLAGS="-O1"',
+                "USE_ZLIB=1",
+                "USE_OT=1",
+                "OT_INC=${HOME}/opt-ot/include",
+                "OT_LIB=${HOME}/opt-ot/lib",
+                "OT_RUNPATH=1",
+                "USE_PCRE2=1",
+                "USE_PCRE2_JIT=1",
+                "USE_LUA=1",
+                "USE_OPENSSL=1",
+                "USE_SYSTEMD=1",
+                "USE_WURFL=1",
+                "WURFL_INC=addons/wurfl/dummy",
+                "WURFL_LIB=addons/wurfl/dummy",
+                "USE_DEVICEATLAS=1",
+                "DEVICEATLAS_SRC=addons/deviceatlas/dummy",
+                "USE_PROMEX=1",
+                "USE_51DEGREES=1",
+                "51DEGREES_SRC=addons/51degrees/dummy/pattern",
+            ],
+        }
+    )

     for compression in ["USE_ZLIB=1"]:
         matrix.append(
@@ -221,10 +189,9 @@ def main(ref_name):
         "stock",
         "OPENSSL_VERSION=1.0.2u",
         "OPENSSL_VERSION=1.1.1s",
-        "OPENSSL_VERSION=3.5.1",
-        "QUICTLS_VERSION=OpenSSL_1_1_1w-quic1",
+        "QUICTLS=yes",
         "WOLFSSL_VERSION=5.7.0",
-        "AWS_LC_VERSION=1.39.0",
+        "AWS_LC_VERSION=1.29.0",
         # "BORINGSSL=yes",
     ]

@@ -236,7 +203,8 @@ def main(ref_name):

     for ssl in ssl_versions:
         flags = ["USE_OPENSSL=1"]
-        skipdup=0
+        if ssl == "BORINGSSL=yes" or ssl == "QUICTLS=yes" or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl:
+            flags.append("USE_QUIC=1")
         if "WOLFSSL" in ssl:
             flags.append("USE_OPENSSL_WOLFSSL=1")
         if "AWS_LC" in ssl:
@@ -246,23 +214,8 @@ def main(ref_name):
             flags.append("SSL_INC=${HOME}/opt/include")
         if "LIBRESSL" in ssl and "latest" in ssl:
             ssl = determine_latest_libressl(ssl)
-            skipdup=1
         if "OPENSSL" in ssl and "latest" in ssl:
             ssl = determine_latest_openssl(ssl)
-            skipdup=1
-
-        # if "latest" equals a version already in the list
-        if ssl in ssl_versions and skipdup == 1:
-            continue
-
-        openssl_supports_quic = False
-        try:
-            openssl_supports_quic = version.Version(ssl.split("OPENSSL_VERSION=",1)[1]) >= version.Version("3.5.0")
-        except:
-            pass
-
-        if ssl == "BORINGSSL=yes" or "QUICTLS" in ssl or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl or openssl_supports_quic:
-            flags.append("USE_QUIC=1")

         matrix.append(
             {
@@ -280,7 +233,7 @@ def main(ref_name):
     if "haproxy-" in ref_name:
         os = "macos-13"    # stable branch
     else:
-        os = "macos-26"    # development branch
+        os = "macos-14"    # development branch

     TARGET = "osx"
     for CC in ["clang"]:
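The version helpers above are also callable standalone; the AWS-LC workflows below invoke them exactly this way to pin the newest upstream tag. A sketch (the printed value is illustrative):

    $ cd .github
    $ python3 -c "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))"
    AWS_LC_VERSION=1.39.0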
12  .github/workflows/aws-lc-fips.yml  vendored
@@ -1,12 +0,0 @@
-name: AWS-LC-FIPS
-
-on:
-  schedule:
-    - cron: "0 0 * * 4"
-  workflow_dispatch:
-
-jobs:
-  test:
-    uses: ./.github/workflows/aws-lc-template.yml
-    with:
-      command: "from matrix import determine_latest_aws_lc_fips; print(determine_latest_aws_lc_fips(''))"
94  .github/workflows/aws-lc-template.yml  vendored
@@ -1,94 +0,0 @@
-name: AWS-LC template
-
-on:
-  workflow_call:
-    inputs:
-      command:
-        required: true
-        type: string
-
-permissions:
-  contents: read
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    steps:
-      - uses: actions/checkout@v5
-      - name: Determine latest AWS-LC release
-        id: get_aws_lc_release
-        run: |
-          result=$(cd .github && python3 -c "${{ inputs.command }}")
-          echo $result
-          echo "result=$result" >> $GITHUB_OUTPUT
-      - name: Cache AWS-LC
-        id: cache_aws_lc
-        uses: actions/cache@v4
-        with:
-          path: '~/opt/'
-          key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
-      - name: Install apt dependencies
-        run: |
-          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
-          sudo apt-get --no-install-recommends -y install socat gdb jose
-      - name: Install AWS-LC
-        if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
-        run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
-      - name: Compile HAProxy
-        run: |
-          make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
-            USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
-            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
-            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
-          sudo make install
-      - name: Show HAProxy version
-        id: show-version
-        run: |
-          ldd $(which haproxy)
-          haproxy -vv
-          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-      - uses: ./.github/actions/setup-vtest
-      - name: Run VTest for HAProxy
-        id: vtest
-        run: |
-          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
-      - name: Show VTest results
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
-            printf "::group::"
-            cat $folder/INFO
-            cat $folder/LOG
-            echo "::endgroup::"
-          done
-          exit 1
-      - name: Show coredumps
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          failed=false
-          shopt -s nullglob
-          for file in /tmp/core.*; do
-            failed=true
-            printf "::group::"
-            gdb -ex 'thread apply all bt full' ./haproxy $file
-            echo "::endgroup::"
-          done
-          if [ "$failed" = true ]; then
-            exit 1;
-          fi
-      - name: Show Unit-Tests results
-        if: ${{ failure() && steps.unittests.outcome == 'failure' }}
-        run: |
-          for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
-            printf "::group::"
-            cat $result
-            echo "::endgroup::"
-          done
-          exit 1
60  .github/workflows/aws-lc.yml  vendored
@@ -5,8 +5,62 @@ on:
     - cron: "0 0 * * 4"
   workflow_dispatch:

+permissions:
+  contents: read
+
 jobs:
   test:
-    uses: ./.github/workflows/aws-lc-template.yml
-    with:
-      command: "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))"
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install VTest
+        run: |
+          scripts/build-vtest.sh
+      - name: Determine latest AWS-LC release
+        id: get_aws_lc_release
+        run: |
+          result=$(cd .github && python3 -c "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))")
+          echo $result
+          echo "result=$result" >> $GITHUB_OUTPUT
+      - name: Cache AWS-LC
+        id: cache_aws_lc
+        uses: actions/cache@v4
+        with:
+          path: '~/opt/'
+          key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
+      - name: Install AWS-LC
+        if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
+        run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
+      - name: Compile HAProxy
+        run: |
+          make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
+            USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
+            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
+            DEBUG="-DDEBUG_POOL_INTEGRITY" \
+            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
+          sudo make install
+      - name: Show HAProxy version
+        id: show-version
+        run: |
+          ldd $(which haproxy)
+          haproxy -vv
+          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
+      - name: Install problem matcher for VTest
+        run: echo "::add-matcher::.github/vtest.json"
+      - name: Run VTest for HAProxy
+        id: vtest
+        run: |
+          # This is required for macOS which does not actually allow to increase
+          # the '-n' soft limit to the hard limit, thus failing to run.
+          ulimit -n 65536
+          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
+      - name: Show VTest results
+        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
+        run: |
+          for folder in ${TMPDIR}/haregtests-*/vtc.*; do
+            printf "::group::"
+            cat $folder/INFO
+            cat $folder/LOG
+            echo "::endgroup::"
+          done
+          exit 1
7  .github/workflows/codespell.yml  vendored
@@ -3,7 +3,6 @@ name: Spelling Check
 on:
   schedule:
     - cron: "0 0 * * 2"
-  workflow_dispatch:

 permissions:
   contents: read
@@ -11,12 +10,12 @@ permissions:
 jobs:
   codespell:
     runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - uses: codespell-project/codespell-problem-matcher@v1.2.0
       - uses: codespell-project/actions-codespell@master
         with:
          skip: CHANGELOG,Makefile,*.fig,*.pem,./doc/design-thoughts,./doc/internals
-          ignore_words_list: pres,ist,ists,hist,wan,ca,cas,que,ans,te,nd,referer,ot,uint,iif,fo,keep-alives,dosen,ifset,thrid,strack,ba,chck,hel,unx,mor,clen,collet,bu,htmp,siz,experim
+          ignore_words_list: ist,ists,hist,wan,ca,cas,que,ans,te,nd,referer,ot,uint,iif,fo,keep-alives,dosen,ifset,thrid,strack,ba,chck,hel,unx,mor,clen,collet,bu,htmp,siz,experim
           uri_ignore_words_list: trafic,ressources
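The action inputs map almost one-to-one onto the codespell CLI, so the same check can be approximated locally; a sketch (word list abbreviated):

    $ pip install codespell
    $ codespell --skip "CHANGELOG,Makefile,*.fig,*.pem,./doc/design-thoughts,./doc/internals" \
                --ignore-words-list pres,ist,ists,hist,wan,ca,cas,que,ans,te,nd,referer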
17  .github/workflows/compliance.yml  vendored
@@ -11,10 +11,15 @@ permissions:
 jobs:
   h2spec:
     name: h2spec
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        include:
+          - TARGET: linux-glibc
+            CC: gcc
+            os: ubuntu-latest
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Install h2spec
         id: install-h2spec
         run: |
@@ -23,12 +28,12 @@ jobs:
           tar xvf h2spec.tar.gz
           sudo install -m755 h2spec /usr/local/bin/h2spec
           echo "version=${H2SPEC_VERSION}" >> $GITHUB_OUTPUT
-      - name: Compile HAProxy with gcc
+      - name: Compile HAProxy with ${{ matrix.CC }}
        run: |
          make -j$(nproc) all \
            ERR=1 \
-            TARGET=linux-glibc \
-            CC=gcc \
+            TARGET=${{ matrix.TARGET }} \
+            CC=${{ matrix.CC }} \
            DEBUG="-DDEBUG_POOL_INTEGRITY" \
            USE_OPENSSL=1
          sudo make install
2  .github/workflows/contrib.yml  vendored
@@ -10,7 +10,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Compile admin/halog/halog
         run: |
           make admin/halog/halog
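halog is HAProxy's offline log analyzer; once built as above it reads logs on stdin. A usage sketch: the -srv report flag is standard halog, while the log path is hypothetical:

    $ make admin/halog/halog
    $ admin/halog/halog -srv < /var/log/haproxy.log    # per-server status/timing report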
9  .github/workflows/coverity.yml  vendored
@@ -15,19 +15,18 @@ permissions:
 jobs:
   scan:
     runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Install apt dependencies
         run: |
           sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
           sudo apt-get --no-install-recommends -y install \
             liblua5.4-dev \
             libpcre2-dev \
             libsystemd-dev
       - name: Install QUICTLS
         run: |
-          QUICTLS_VERSION=OpenSSL_1_1_1w-quic1 scripts/build-ssl.sh
+          QUICTLS=yes scripts/build-ssl.sh
       - name: Download Coverity build tool
         run: |
           wget -c -N https://scan.coverity.com/download/linux64 --post-data "token=${{ secrets.COVERITY_SCAN_TOKEN }}&project=Haproxy" -O coverity_tool.tar.gz
@@ -38,7 +37,7 @@ jobs:
       - name: Build with Coverity build tool
         run: |
           export PATH=`pwd`/coverity_tool/bin:$PATH
-          cov-build --dir cov-int make CC=clang TARGET=linux-glibc USE_ZLIB=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=2 DEBUG+=-DDEBUG_USE_ABORT=1
+          cov-build --dir cov-int make CC=clang TARGET=linux-glibc USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 USE_SYSTEMD=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=1 DEBUG+=-DDEBUG_USE_ABORT=1
       - name: Submit build result to Coverity Scan
         run: |
           tar czvf cov.tar.gz cov-int
7  .github/workflows/cross-zoo.yml  vendored
@@ -6,7 +6,6 @@ name: Cross Compile
 on:
   schedule:
     - cron: "0 0 21 * *"
-  workflow_dispatch:

 permissions:
   contents: read
@@ -91,7 +90,7 @@ jobs:
           }
         ]
     runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     steps:
       - name: install packages
         run: |
@@ -99,12 +98,12 @@ jobs:
           sudo apt-get -yq --force-yes install \
             gcc-${{ matrix.platform.arch }} \
             ${{ matrix.platform.libs }}
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4


       - name: install quictls
         run: |
-          QUICTLS_EXTRA_ARGS="--cross-compile-prefix=${{ matrix.platform.arch }}- ${{ matrix.platform.target }}" QUICTLS_VERSION=OpenSSL_1_1_1w-quic1 scripts/build-ssl.sh
+          QUICTLS_EXTRA_ARGS="--cross-compile-prefix=${{ matrix.platform.arch }}- ${{ matrix.platform.target }}" QUICTLS=yes scripts/build-ssl.sh

       - name: Build
         run: |
40  .github/workflows/fedora-rawhide.yml  vendored
@@ -1,9 +1,8 @@
-name: Fedora/Rawhide/OpenSSL
+name: Fedora/Rawhide/QuicTLS

 on:
   schedule:
     - cron: "0 0 25 * *"
-  workflow_dispatch:

 permissions:
   contents: read
@@ -13,24 +12,26 @@ jobs:
     strategy:
       matrix:
         platform: [
-          { name: x64, cc: gcc, ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
-          { name: x64, cc: clang, ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
-          { name: x86, cc: gcc, ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" },
-          { name: x86, cc: clang, ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }
+          { name: x64, cc: gcc, QUICTLS_EXTRA_ARGS: "", ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
+          { name: x64, cc: clang, QUICTLS_EXTRA_ARGS: "", ADDLIB_ATOMIC: "", ARCH_FLAGS: "" },
+          { name: x86, cc: gcc, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" },
+          { name: x86, cc: clang, QUICTLS_EXTRA_ARGS: "-m32 linux-generic32", ADDLIB_ATOMIC: "-latomic", ARCH_FLAGS: "-m32" }
         ]
       fail-fast: false
     name: ${{ matrix.platform.cc }}.${{ matrix.platform.name }}
     runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     container:
       image: fedora:rawhide
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Install dependencies
         run: |
-          dnf -y install awk diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang openssl-devel.x86_64
-          dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686 openssl-devel.i686
-      - uses: ./.github/actions/setup-vtest
+          dnf -y install diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang
+          dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686
+      - name: Install VTest
+        run: scripts/build-vtest.sh
+      - name: Install QuicTLS
+        run: QUICTLS=yes QUICTLS_EXTRA_ARGS="${{ matrix.platform.QUICTLS_EXTRA_ARGS }}" scripts/build-ssl.sh
       - name: Build contrib tools
         run: |
           make admin/halog/halog
@@ -39,7 +40,7 @@ jobs:
           make dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
       - name: Compile HAProxy with ${{ matrix.platform.cc }}
         run: |
-          make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" USE_PROMEX=1 USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }}" ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
+          make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 USE_SYSTEMD=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }} -Wl,-rpath,${HOME}/opt/lib" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
           make install
       - name: Show HAProxy version
         id: show-version
@@ -49,13 +50,6 @@ jobs:
           echo "::endgroup::"
           haproxy -vv
           echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-      #
-      # TODO: review this workaround later
-      - name: relax crypto policies
-        run: |
-          dnf -y install crypto-policies-scripts
-          echo LEGACY > /etc/crypto-policies/config
-          update-crypto-policies
       - name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
         id: vtest
         run: |
@@ -63,13 +57,9 @@ jobs:
       - name: Show VTest results
         if: ${{ failure() && steps.vtest.outcome == 'failure' }}
         run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
+          for folder in ${TMPDIR}/haregtests-*/vtc.*; do
             printf "::group::"
             cat $folder/INFO
             cat $folder/LOG
             echo "::endgroup::"
           done
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
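The x86 matrix entries above are 32-bit builds on a 64-bit host; outside CI the same combination reduces to one make invocation, assuming the i686 devel packages from the dnf line are installed:

    $ make -j$(nproc) TARGET=linux-glibc CC=gcc \
          ARCH_FLAGS="-m32" ADDLIB="-latomic" \
          USE_OPENSSL=1 USE_ZLIB=1 USE_LUA=1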
5  .github/workflows/illumos.yml  vendored
@@ -3,17 +3,16 @@ name: Illumos
 on:
   schedule:
     - cron: "0 0 25 * *"
-  workflow_dispatch:

 jobs:
   gcc:
     runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     permissions:
       contents: read
     steps:
       - name: "Checkout repository"
-        uses: actions/checkout@v5
+        uses: actions/checkout@v4

       - name: "Build on VM"
         uses: vmactions/solaris-vm@v1
20  .github/workflows/musl.yml  vendored
@@ -20,13 +20,13 @@ jobs:
         run: |
           ulimit -c unlimited
           echo '/tmp/core/core.%h.%e.%t' > /proc/sys/kernel/core_pattern
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Install dependencies
-        run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg jose
+        run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg
       - name: Install VTest
         run: scripts/build-vtest.sh
       - name: Build
-        run: make -j$(nproc) TARGET=linux-musl DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
+        run: make -j$(nproc) TARGET=linux-musl ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
       - name: Show version
         run: ./haproxy -vv
       - name: Show linked libraries
@@ -37,10 +37,6 @@ jobs:
       - name: Run VTest
         id: vtest
         run: make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
       - name: Show coredumps
         if: ${{ failure() && steps.vtest.outcome == 'failure' }}
         run: |
@@ -64,13 +60,3 @@ jobs:
             cat $folder/LOG
             echo "::endgroup::"
           done
-      - name: Show Unit-Tests results
-        if: ${{ failure() && steps.unittests.outcome == 'failure' }}
-        run: |
-          for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
-            printf "::group::"
-            cat $result
-            echo "::endgroup::"
-          done
-          exit 1
-
5  .github/workflows/netbsd.yml  vendored
@@ -3,17 +3,16 @@ name: NetBSD
 on:
   schedule:
     - cron: "0 0 25 * *"
-  workflow_dispatch:

 jobs:
   gcc:
     runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ github.repository_owner == 'haproxy' }}
     permissions:
       contents: read
     steps:
       - name: "Checkout repository"
-        uses: actions/checkout@v5
+        uses: actions/checkout@v4

       - name: "Build on VM"
         uses: vmactions/netbsd-vm@v1
82  .github/workflows/openssl-ech.yml  vendored
@@ -1,82 +0,0 @@
-name: openssl ECH
-
-on:
-  schedule:
-    - cron: "0 3 * * *"
-  workflow_dispatch:
-
-permissions:
-  contents: read
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    steps:
-      - uses: actions/checkout@v5
-      - name: Install VTest
-        run: |
-          scripts/build-vtest.sh
-      - name: Install apt dependencies
-        run: |
-          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
-          sudo apt-get --no-install-recommends -y install socat gdb
-          sudo apt-get --no-install-recommends -y install libpsl-dev
-      - name: Install OpenSSL+ECH
-        run: env OPENSSL_VERSION="git-feature/ech" GIT_TYPE="branch" scripts/build-ssl.sh
-      - name: Install curl+ECH
-        run: env SSL_LIB=${HOME}/opt/ scripts/build-curl.sh
-      - name: Compile HAProxy
-        run: |
-          make -j$(nproc) CC=gcc TARGET=linux-glibc \
-            USE_QUIC=1 USE_OPENSSL=1 USE_ECH=1 \
-            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
-            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
-            ARCH_FLAGS="-ggdb3 -fsanitize=address"
-          sudo make install
-      - name: Show HAProxy version
-        id: show-version
-        run: |
-          ldd $(which haproxy)
-          haproxy -vv
-          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-      - name: Install problem matcher for VTest
-        run: echo "::add-matcher::.github/vtest.json"
-      - name: Run VTest for HAProxy
-        id: vtest
-        run: |
-          # This is required for macOS which does not actually allow to increase
-          # the '-n' soft limit to the hard limit, thus failing to run.
-          ulimit -n 65536
-          # allow to catch coredumps
-          ulimit -c unlimited
-          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Show VTest results
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
-            printf "::group::"
-            cat $folder/INFO
-            cat $folder/LOG
-            echo "::endgroup::"
-          done
-          exit 1
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
-      - name: Show coredumps
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          failed=false
-          shopt -s nullglob
-          for file in /tmp/core.*; do
-            failed=true
-            printf "::group::"
-            gdb -ex 'thread apply all bt full' ./haproxy $file
-            echo "::endgroup::"
-          done
-          if [ "$failed" = true ]; then
-            exit 1;
-          fi
77  .github/workflows/openssl-master.yml  vendored
@@ -1,77 +0,0 @@
-name: openssl master
-
-on:
-  schedule:
-    - cron: "0 3 * * *"
-  workflow_dispatch:
-
-permissions:
-  contents: read
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    steps:
-      - uses: actions/checkout@v5
-      - name: Install apt dependencies
-        run: |
-          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
-          sudo apt-get --no-install-recommends -y install socat gdb
-          sudo apt-get --no-install-recommends -y install libpsl-dev
-      - uses: ./.github/actions/setup-vtest
-      - name: Install OpenSSL master
-        run: env OPENSSL_VERSION="git-master" GIT_TYPE="branch" scripts/build-ssl.sh
-      - name: Compile HAProxy
-        run: |
-          make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
-            USE_QUIC=1 USE_OPENSSL=1 \
-            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
-            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
-          sudo make install
-      - name: Show HAProxy version
-        id: show-version
-        run: |
-          ldd $(which haproxy)
-          haproxy -vv
-          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-      - name: Install problem matcher for VTest
-        run: echo "::add-matcher::.github/vtest.json"
-      - name: Run VTest for HAProxy
-        id: vtest
-        run: |
-          # This is required for macOS which does not actually allow to increase
-          # the '-n' soft limit to the hard limit, thus failing to run.
-          ulimit -n 65536
-          # allow to catch coredumps
-          ulimit -c unlimited
-          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Show VTest results
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
-            printf "::group::"
-            cat $folder/INFO
-            cat $folder/LOG
-            echo "::endgroup::"
-          done
-          exit 1
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
-      - name: Show coredumps
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          failed=false
-          shopt -s nullglob
-          for file in /tmp/core.*; do
-            failed=true
-            printf "::group::"
-            gdb -ex 'thread apply all bt full' ./haproxy $file
-            echo "::endgroup::"
-          done
-          if [ "$failed" = true ]; then
-            exit 1;
-          fi
33  .github/workflows/openssl-nodeprecated.yml  vendored  (new file)
@@ -0,0 +1,33 @@
+#
+# special purpose CI: test against OpenSSL built in "no-deprecated" mode
+# let us run those builds weekly
+#
+# for example, OpenWRT uses such OpenSSL builds (those builds are smaller)
+#
+#
+# some details might be found at NL: https://www.mail-archive.com/haproxy@formilux.org/msg35759.html
+#                                GH: https://github.com/haproxy/haproxy/issues/367
+
+name: openssl no-deprecated
+
+on:
+  schedule:
+    - cron: "0 0 * * 4"
+
+permissions:
+  contents: read
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install VTest
+        run: |
+          scripts/build-vtest.sh
+      - name: Compile HAProxy
+        run: |
+          make DEFINE="-DOPENSSL_API_COMPAT=0x10100000L -DOPENSSL_NO_DEPRECATED" -j3 CC=gcc ERR=1 TARGET=linux-glibc USE_OPENSSL=1
+      - name: Run VTest
+        run: |
+          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
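The DEFINE above only forbids deprecated OpenSSL APIs inside HAProxy itself. To reproduce the OpenWRT-style environment the header comment mentions, OpenSSL too would be configured without them; a sketch with an illustrative install prefix:

    $ ./Configure no-deprecated --prefix=$HOME/opt    # in an OpenSSL source tree
    $ make -j$(nproc) && make install_sw
    $ make CC=gcc TARGET=linux-glibc USE_OPENSSL=1 \
          SSL_INC=$HOME/opt/include SSL_LIB=$HOME/opt/lib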
22  .github/workflows/quic-interop-aws-lc.yml  vendored
@@ -13,13 +13,12 @@ on:
 jobs:
   build:
     runs-on: ubuntu-24.04
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
     permissions:
       contents: read
       packages: write

     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4

       - name: Log in to the Container registry
         uses: docker/login-action@v3
@@ -28,24 +27,18 @@ jobs:
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}

+      - name: Clone docker image definition
+        run: |
+          git clone -b aws-lc https://github.com/haproxytech/haproxy-qns
+
       - name: Build and push Docker image
         id: push
         uses: docker/build-push-action@v5
         with:
-          context: https://github.com/haproxytech/haproxy-qns.git
+          context: haproxy-qns
           push: true
           build-args: |
             SSLLIB=AWS-LC
           tags: ghcr.io/${{ github.repository }}:aws-lc

-      - name: Cleanup registry
-        uses: actions/delete-package-versions@v5
-        with:
-          owner: ${{ github.repository_owner }}
-          package-name: 'haproxy'
-          package-type: container
-          min-versions-to-keep: 1
-          delete-only-untagged-versions: 'true'
-
   run:
     needs: build
@@ -61,10 +54,9 @@ jobs:

     name: ${{ matrix.suite.client }}
     runs-on: ubuntu-24.04
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}

     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4

       - name: Log in to the Container registry
         uses: docker/login-action@v3
29  .github/workflows/quic-interop-libressl.yml  vendored
@@ -1,4 +1,7 @@
 #
+# chacha20 test must be enabled after LibreSSL-4.0
+# issue for tracking: https://github.com/haproxy/haproxy/issues/2569
+#
 # goodput,crosstraffic are not run on purpose, those tests are intended to bandwidth measurement, we currently do not want to use GitHub runners for that
 #

@@ -13,13 +16,12 @@ on:
 jobs:
   build:
     runs-on: ubuntu-24.04
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
     permissions:
       contents: read
       packages: write

     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4

       - name: Log in to the Container registry
         uses: docker/login-action@v3
@@ -28,41 +30,34 @@ jobs:
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}

+      - name: Clone docker image definition
+        run: |
+          git clone -b libressl https://github.com/haproxytech/haproxy-qns
+
       - name: Build and push Docker image
         id: push
         uses: docker/build-push-action@v5
         with:
-          context: https://github.com/haproxytech/haproxy-qns.git
+          context: haproxy-qns
           push: true
           build-args: |
             SSLLIB=LibreSSL
           tags: ghcr.io/${{ github.repository }}:libressl

-      - name: Cleanup registry
-        uses: actions/delete-package-versions@v5
-        with:
-          owner: ${{ github.repository_owner }}
-          package-name: 'haproxy'
-          package-type: container
-          min-versions-to-keep: 1
-          delete-only-untagged-versions: 'true'
-
   run:
     needs: build
     strategy:
       matrix:
         suite: [
-          { client: picoquic, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,v2" },
-          { client: quic-go, tests: "handshake,transfer,longrtt,chacha20,multiplexing,retry,http3,blackhole,amplificationlimit,transferloss,transfercorruption,v2" }
+          { client: picoquic, tests: "handshake,transfer,longrtt,multiplexing,retry,http3,blackhole,amplificationlimit,handshakeloss,transferloss,handshakecorruption,transfercorruption,v2" },
+          { client: quic-go, tests: "handshake,transfer,longrtt,multiplexing,retry,http3,blackhole,amplificationlimit,transferloss,transfercorruption,v2" }
         ]
       fail-fast: false

     name: ${{ matrix.suite.client }}
     runs-on: ubuntu-24.04
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}

     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4

       - name: Log in to the Container registry
         uses: docker/login-action@v3
74  .github/workflows/quictls.yml  vendored
@@ -1,74 +0,0 @@
-#
-# weekly run against modern QuicTLS branch, i.e. https://github.com/quictls/quictls
-#
-
-name: QuicTLS
-
-on:
-  schedule:
-    - cron: "0 0 * * 4"
-  workflow_dispatch:
-
-permissions:
-  contents: read
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    steps:
-      - uses: actions/checkout@v5
-      - name: Install apt dependencies
-        run: |
-          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
-          sudo apt-get --no-install-recommends -y install socat gdb
-      - name: Install QuicTLS
-        run: env QUICTLS_VERSION=main QUICTLS_URL=https://github.com/quictls/quictls scripts/build-ssl.sh
-      - name: Compile HAProxy
-        run: |
-          make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
-            USE_QUIC=1 USE_OPENSSL=1 \
-            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
-            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
-            ARCH_FLAGS="-ggdb3 -fsanitize=address"
-          sudo make install
-      - name: Show HAProxy version
-        id: show-version
-        run: |
-          ldd $(which haproxy)
-          haproxy -vv
-          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-      - uses: ./.github/actions/setup-vtest
-      - name: Run VTest for HAProxy
-        id: vtest
-        run: |
-          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Show VTest results
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
-            printf "::group::"
-            cat $folder/INFO
-            cat $folder/LOG
-            echo "::endgroup::"
-          done
-          exit 1
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
-      - name: Show coredumps
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          failed=false
-          shopt -s nullglob
-          for file in /tmp/core.*; do
-            failed=true
-            printf "::group::"
-            gdb -ex 'thread apply all bt full' ./haproxy $file
-            echo "::endgroup::"
-          done
-          if [ "$failed" = true ]; then
-            exit 1;
-          fi
65  .github/workflows/vtest.yml  vendored
@@ -23,7 +23,7 @@ jobs:
     outputs:
       matrix: ${{ steps.set-matrix.outputs.matrix }}
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - name: Generate Build Matrix
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -44,10 +44,16 @@ jobs:
       TMPDIR: /tmp
       OT_CPP_VERSION: 1.6.0
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
        with:
          fetch-depth: 100

+      - name: Setup coredumps
+        if: ${{ startsWith(matrix.os, 'ubuntu-') }}
+        run: |
+          sudo sysctl -w fs.suid_dumpable=1
+          sudo sysctl kernel.core_pattern=/tmp/core.%h.%e.%t
+
       #
       # Github Action cache key cannot contain comma, so we calculate it based on job name
       #
@@ -57,7 +63,7 @@ jobs:
           echo "key=$(echo ${{ matrix.name }} | sha256sum | awk '{print $1}')" >> $GITHUB_OUTPUT

       - name: Cache SSL libs
-        if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 'BORINGSSL=yes' && !contains(matrix.ssl, 'QUICTLS') }}
+        if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 'BORINGSSL=yes' && matrix.ssl != 'QUICTLS=yes' }}
         id: cache_ssl
         uses: actions/cache@v4
         with:
@@ -70,7 +76,7 @@ jobs:
         uses: actions/cache@v4
         with:
           path: '~/opt-ot/'
-          key: ${{ matrix.os }}-ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
+          key: ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
       - name: Install apt dependencies
         if: ${{ startsWith(matrix.os, 'ubuntu-') }}
         run: |
@@ -80,14 +86,15 @@ jobs:
           ${{ contains(matrix.FLAGS, 'USE_PCRE2=1') && 'libpcre2-dev' || '' }} \
           ${{ contains(matrix.ssl, 'BORINGSSL=yes') && 'ninja-build' || '' }} \
           socat \
-          gdb \
-          jose
+          gdb
       - name: Install brew dependencies
         if: ${{ startsWith(matrix.os, 'macos-') }}
         run: |
           brew install socat
           brew install lua
-      - uses: ./.github/actions/setup-vtest
+      - name: Install VTest
+        run: |
+          scripts/build-vtest.sh
       - name: Install SSL ${{ matrix.ssl }}
         if: ${{ matrix.ssl && matrix.ssl != 'stock' && steps.cache_ssl.outputs.cache-hit != 'true' }}
         run: env ${{ matrix.ssl }} scripts/build-ssl.sh
@@ -110,19 +117,10 @@ jobs:
           ERR=1 \
           TARGET=${{ matrix.TARGET }} \
           CC=${{ matrix.CC }} \
-          DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
-          ${{ join(matrix.FLAGS, ' ') }} \
-          ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
-        sudo make install-bin
-      - name: Compile admin/halog/halog
-        run: |
-          make -j$(nproc) admin/halog/halog \
-            ERR=1 \
-            TARGET=${{ matrix.TARGET }} \
-            CC=${{ matrix.CC }} \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
+          DEBUG="-DDEBUG_POOL_INTEGRITY" \
           ${{ join(matrix.FLAGS, ' ') }} \
           ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
         sudo make install
       - name: Show HAProxy version
         id: show-version
         run: |
@@ -137,33 +135,36 @@ jobs:
           echo "::endgroup::"
           haproxy -vv
           echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
+      - name: Install problem matcher for VTest
+        # This allows one to more easily see which tests fail.
+        run: echo "::add-matcher::.github/vtest.json"
       - name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
         id: vtest
         run: |
           # This is required for macOS which does not actually allow to increase
           # the '-n' soft limit to the hard limit, thus failing to run.
           ulimit -n 65536
           ulimit -c unlimited
           make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
       - name: Config syntax check memleak smoke testing
         if: ${{ contains(matrix.name, 'ASAN') }}
         run: |
           ./haproxy -dI -f .github/h2spec.config -c
           ./haproxy -dI -f examples/content-sw-sample.cfg -c
           ./haproxy -dI -f examples/option-http_proxy.cfg -c
           ./haproxy -dI -f examples/quick-test.cfg -c
           ./haproxy -dI -f examples/transparent_proxy.cfg -c
       - name: Show VTest results
         if: ${{ failure() && steps.vtest.outcome == 'failure' }}
         run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
+          for folder in ${TMPDIR}/haregtests-*/vtc.*; do
             printf "::group::"
             cat $folder/INFO
             cat $folder/LOG
             echo "::endgroup::"
           done
           exit 1
       - name: Run Unit tests
         id: unittests
         run: |
           make unit-tests
       - name: Show Unit-Tests results
         if: ${{ failure() && steps.unittests.outcome == 'failure' }}
         run: |
           for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
             printf "::group::"
             cat $result
             echo "::endgroup::"
           done
           exit 1

       - name: Show coredumps
         if: ${{ failure() && steps.vtest.outcome == 'failure' }}
         run: |
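The reg-test recipe used throughout these workflows also runs locally; restricting it to one test file is handy when bisecting a failure. A sketch (REG_TEST_FILES is the standard reg-tests hook; the file picked here is only an example):

    $ scripts/build-vtest.sh
    $ make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
    $ make reg-tests REG_TEST_FILES=reg-tests/ssl/set_ssl_crt.vtc    # single test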
2  .github/workflows/windows.yml  vendored
@@ -35,7 +35,7 @@ jobs:
           - USE_THREAD=1
           - USE_ZLIB=1
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v4
       - uses: msys2/setup-msys2@v2
         with:
           install: >-
80  .github/workflows/wolfssl.yml  vendored
@@ -1,80 +0,0 @@
-name: WolfSSL
-
-on:
-  schedule:
-    - cron: "0 0 * * 4"
-  workflow_dispatch:
-
-permissions:
-  contents: read
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
-    steps:
-      - uses: actions/checkout@v5
-      - name: Install apt dependencies
-        run: |
-          sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
-          sudo apt-get --no-install-recommends -y install socat gdb jose
-      - name: Install WolfSSL
-        run: env WOLFSSL_VERSION=git-master WOLFSSL_DEBUG=1 scripts/build-ssl.sh
-      - name: Compile HAProxy
-        run: |
-          make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
-            USE_OPENSSL_WOLFSSL=1 USE_QUIC=1 \
-            SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
-            DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
-            ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
-            ARCH_FLAGS="-ggdb3 -fsanitize=address"
-          sudo make install
-      - name: Show HAProxy version
-        id: show-version
-        run: |
-          ldd $(which haproxy)
-          haproxy -vv
-          echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
-      - uses: ./.github/actions/setup-vtest
-      - name: Run VTest for HAProxy
-        id: vtest
-        run: |
-          make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
-      - name: Run Unit tests
-        id: unittests
-        run: |
-          make unit-tests
-      - name: Show VTest results
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
-            printf "::group::"
-            cat $folder/INFO
-            cat $folder/LOG
-            echo "::endgroup::"
-          done
-          exit 1
-      - name: Show coredumps
-        if: ${{ failure() && steps.vtest.outcome == 'failure' }}
-        run: |
-          failed=false
-          shopt -s nullglob
-          for file in /tmp/core.*; do
-            failed=true
-            printf "::group::"
-            gdb -ex 'thread apply all bt full' ./haproxy $file
-            echo "::endgroup::"
-          done
-          if [ "$failed" = true ]; then
-            exit 1;
-          fi
-      - name: Show Unit-Tests results
-        if: ${{ failure() && steps.unittests.outcome == 'failure' }}
-        run: |
-          for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
-            printf "::group::"
-            cat $result
-            echo "::endgroup::"
-          done
-          exit 1
1  .gitignore  vendored
@@ -57,4 +57,3 @@ dev/udp/udp-perturb
 /src/dlmalloc.c
 /tests/test_hashes
 doc/lua-api/_build
-dev/term_events/term_events
@@ -8,7 +8,7 @@ branches:

 env:
   global:
-    - FLAGS="USE_LUA=1 USE_OPENSSL=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_ZLIB=1"
+    - FLAGS="USE_LUA=1 USE_OPENSSL=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_SYSTEMD=1 USE_ZLIB=1"
     - TMPDIR=/tmp

 addons:
12  BRANCHES
@@ -171,17 +171,7 @@ feedback for developers:
   as the previous releases that had 6 months to stabilize. In terms of
   stability it really means that the point zero version already accumulated
   6 months of fixes and that it is much safer to use even just after it is
-  released. There is one exception though, features marked as "experimental"
-  are not guaranteed to be maintained beyond the release of the next LTS
-  branch. The rationale here is that the experimental status is made to
-  expose an early preview of a feature, that is often incomplete, not always
-  in its definitive form regarding configuration, and for which developers
-  are seeking feedback from the users. It is even possible that changes will
-  be brought within the stable branch and it may happen that the feature
-  breaks. It is not imaginable to always be able to backport bug fixes too
-  far in this context since the code and configuration may change quite a
-  bit. Users who want to try experimental features are expected to upgrade
-  quickly to benefit from the improvements made to that feature.
+  released.

 - for developers, given that the odd versions are solely used by highly
   skilled users, it's easier to get advanced traces and captures, and there
@@ -1010,7 +1010,7 @@ you notice you're already practising some of them:
   - continue to send pull requests after having been explained why they are not
     welcome.

-  - give wrong advice to people asking for help, or sending them patches to
+  - give wrong advices to people asking for help, or sending them patches to
     try which make no sense, waste their time, and give them a bad impression
     of the people working on the project.
|||
83
INSTALL
83
INSTALL
|
|
@ -111,22 +111,20 @@ HAProxy requires a working GCC or Clang toolchain and GNU make :
|
|||
may want to retry with "gmake" which is the name commonly used for GNU make
|
||||
on BSD systems.
|
||||
|
||||
- GCC >= 4.7 (up to 15 tested). Older versions are no longer supported due to
|
||||
the latest mt_list update which only uses c11-like atomics. Newer versions
|
||||
may sometimes break due to compiler regressions or behaviour changes. The
|
||||
version shipped with your operating system is very likely to work with no
|
||||
trouble. Clang >= 3.0 is also known to work as an alternative solution, and
|
||||
versions up to 19 were successfully tested. Recent versions may emit a bit
|
||||
more warnings that are worth reporting as they may reveal real bugs. TCC
|
||||
(https://repo.or.cz/tinycc.git) is also usable for developers but will not
|
||||
support threading and was found at least once to produce bad code in some
|
||||
rare corner cases (since fixed). But it builds extremely quickly (typically
|
||||
half a second for the whole project) and is very convenient to run quick
|
||||
tests during API changes or code refactoring.
|
||||
- GCC >= 4.2 (up to 13 tested). Older versions can be made to work with a
|
||||
few minor adaptations if really needed. Newer versions may sometimes break
|
||||
due to compiler regressions or behaviour changes. The version shipped with
|
||||
your operating system is very likely to work with no trouble. Clang >= 3.0
|
||||
is also known to work as an alternative solution. Recent versions may emit
|
||||
a bit more warnings that are worth reporting as they may reveal real bugs.
|
||||
TCC (https://repo.or.cz/tinycc.git) is also usable for developers but will
|
||||
not support threading and was found at least once to produce bad code in
|
||||
some rare corner cases (since fixed). But it builds extremely quickly
|
||||
(typically half a second for the whole project) and is very convenient to
|
||||
run quick tests during API changes or code refactoring.
|
||||
|
||||
- GNU ld (binutils package), with no particular version. Other linkers might
|
||||
work but were not tested. The default one from your operating system will
|
||||
normally work.
|
||||
work but were not tested.
|
||||
|
||||
On debian or Ubuntu systems and their derivatives, you may get all these tools
|
||||
at once by issuing the two following commands :
|
||||
|
|
@ -237,7 +235,7 @@ to forcefully enable it using "USE_LIBCRYPT=1".
|
|||
-----------------
|
||||
For SSL/TLS, it is necessary to use a cryptography library. HAProxy currently
|
||||
supports the OpenSSL library, and is known to build and work with branches
|
||||
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.6. It is recommended to use
|
||||
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.3. It is recommended to use
|
||||
at least OpenSSL 1.1.1 to have support for all SSL keywords and configuration
|
||||
in HAProxy. OpenSSL follows a long-term support cycle similar to HAProxy's,
|
||||
and each of the branches above receives its own fixes, without forcing you to
|
||||
|
|

@ -259,15 +257,11 @@ reported to work as well. While there are some efforts from the community to

ensure they work well, OpenSSL remains the primary target and this means that
in case of conflicting choices, OpenSSL support will be favored over other
options. Note that QUIC is not fully supported when haproxy is built with an
OpenSSL version older than 3.5.2. In this case, QUICTLS or AWS-LC are the
preferred alternatives. As of writing this, the QuicTLS project follows
OpenSSL very closely and provides updates simultaneously, but being a
volunteer-driven project, its long-term future does not look certain enough
to convince operating systems to package it, so it needs to be built locally.
Recent versions of AWS-LC (>= 1.22 and the FIPS branches) are pretty complete
and generally more performant than other OpenSSL derivatives, but may behave
slightly differently, particularly when dealing with outdated setups. See
the section about QUIC in this document.
OpenSSL. In this case, QUICTLS is the preferred alternative. As of writing
this, the QuicTLS project follows OpenSSL very closely and provides updates
simultaneously, but being a volunteer-driven project, its long-term future does
not look certain enough to convince operating systems to package it, so it
needs to be built locally. See the section about QUIC in this document.

A fifth option is wolfSSL (https://github.com/wolfSSL/wolfssl). It is the only
supported alternative stack not based on OpenSSL, yet which implements almost

@ -471,6 +465,12 @@ are the extra libraries that may be referenced at build time :

                      on Linux. It is automatically detected and may be disabled
                      using "USE_DL=", though it should never harm.

  - USE_SYSTEMD=1     enables support for the sdnotify features of systemd,
                      allowing better integration with systemd on Linux systems
                      which come with it. It is never enabled by default so there
                      is no need to disable it.
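
As an illustration (a sketch only; pick the TARGET and the other USE_* options
matching your system), such a build could be requested with:

    $ make -j$(nproc) TARGET=linux-glibc USE_SYSTEMD=1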

4.10) Common errors
-------------------
Some build errors may happen depending on the options combinations or the

@ -494,8 +494,8 @@ target. Common issues may include:

    other supported compatible library.

  - many "dereferencing pointer 'sa.985' does break strict-aliasing rules"
    => these warnings happen on old compilers (typically gcc before 7.x),
       and may safely be ignored; newer ones are better on these.
    => these warnings happen on old compilers (typically gcc-4.4), and may
       safely be ignored; newer ones are better on these.


4.11) QUIC

@ -504,11 +504,10 @@ QUIC is the new transport layer protocol and is required for HTTP/3. This

protocol stack is currently supported as an experimental feature in haproxy on
the frontend side. In order to enable it, use "USE_QUIC=1 USE_OPENSSL=1".

Note that QUIC is not always fully supported by the OpenSSL library, depending
on its version. Indeed, QUIC 0-RTT cannot be supported by OpenSSL for versions
before 3.5, contrary to other libraries with full QUIC support. The preferred
option is to use QUICTLS. This is a fork of OpenSSL with a QUIC-compatible API.
Its repository is available at this location:
Note that QUIC is not fully supported by the OpenSSL library. Indeed, QUIC 0-RTT
cannot be supported by OpenSSL, contrary to other libraries with full QUIC
support. The preferred option is to use QUICTLS. This is a fork of OpenSSL with
a QUIC-compatible API. Its repository is available at this location:

    https://github.com/quictls/openssl

@ -536,18 +535,14 @@ way assuming that wolfSSL was installed in /opt/wolfssl-5.6.0 as shown in 4.5:

    SSL_INC=/opt/wolfssl-5.6.0/include SSL_LIB=/opt/wolfssl-5.6.0/lib
    LDFLAGS="-Wl,-rpath,/opt/wolfssl-5.6.0/lib"

As a last resort, haproxy may be compiled against OpenSSL, with 0-RTT support
starting from OpenSSL version 3.5, as follows:

    $ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1

or as follows for all OpenSSL versions, but without 0-RTT support:
As a last resort, haproxy may be compiled against OpenSSL as follows:

    $ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1 USE_QUIC_OPENSSL_COMPAT=1

In addition to these requirements, the QUIC listener bindings must be
explicitly enabled with a specific QUIC tuning parameter (see the
"limited-quic" global parameter of the haproxy Configuration Manual).
Note that QUIC 0-RTT is not supported by the haproxy QUIC stack when built
against OpenSSL. In addition to these compilation requirements, the QUIC
listener bindings must be explicitly enabled with a specific QUIC tuning
parameter (see the "limited-quic" global parameter of the haproxy
Configuration Manual).
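
As a rough illustration (a sketch only: the exact directives are described in
the Configuration Manual, and the address and certificate path below are
placeholders), a QUIC listener with such a build could look like:

    global
        limited-quic

    frontend fe
        bind quic4@:8443 ssl crt /path/to/cert.pem alpn h3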

5) How to build HAProxy
|
|||
To build haproxy, you have to choose your target OS amongst the following ones
|
||||
and assign it to the TARGET variable :
|
||||
|
||||
- linux-glibc for Linux kernel 4.17 and above
|
||||
- linux-glibc for Linux kernel 2.6.28 and above
|
||||
- linux-glibc-legacy for Linux kernel 2.6.28 and above without new features
|
||||
- linux-musl for Linux kernel 4.17 and above with musl libc
|
||||
- linux-musl for Linux kernel 2.6.28 and above with musl libc
|
||||
- solaris for Solaris 10 and above
|
||||
- freebsd for FreeBSD 10 and above
|
||||
- dragonfly for DragonFlyBSD 4.3 and above
|

@ -765,8 +760,8 @@ forced to produce final binaries, and must not be used during bisect sessions,

as it will often lead to the wrong commit.

Examples:
    # silence strict-aliasing warnings with old gcc-5.5:
    $ make -j$(nproc) TARGET=linux-glibc CC=gcc-55 CFLAGS=-fno-strict-aliasing
    # silence strict-aliasing warnings with old gcc-4.4:
    $ make -j$(nproc) TARGET=linux-glibc CC=gcc-44 CFLAGS=-fno-strict-aliasing

    # disable all warning options:
    $ make -j$(nproc) TARGET=linux-glibc CC=mycc WARN_CFLAGS= NOWARN_CFLAGS=

211  Makefile

@ -35,7 +35,6 @@

# USE_OPENSSL             : enable use of OpenSSL. Recommended, but see below.
# USE_OPENSSL_AWSLC       : enable use of AWS-LC
# USE_OPENSSL_WOLFSSL     : enable use of wolfSSL with the OpenSSL API
# USE_ECH                 : enable use of ECH with the OpenSSL API
# USE_QUIC                : enable use of QUIC with the quictls API (quictls, libressl, boringssl)
# USE_QUIC_OPENSSL_COMPAT : enable use of QUIC with the standard openssl API (limited features)
# USE_ENGINE              : enable use of OpenSSL Engine.

@ -57,14 +56,14 @@

# USE_DEVICEATLAS         : enable DeviceAtlas api.
# USE_51DEGREES           : enable third party device detection library from 51Degrees
# USE_WURFL               : enable WURFL detection library from Scientiamobile
# USE_SYSTEMD             : enable sd_notify() support.
# USE_OBSOLETE_LINKER     : use when the linker fails to emit __start_init/__stop_init
# USE_THREAD_DUMP         : use the more advanced thread state dump system. Automatic.
# USE_OT                  : enable the OpenTracing filter
# USE_MEMORY_PROFILING    : enable the memory profiler. Linux-glibc only.
# USE_LIBATOMIC           : force to link with/without libatomic. Automatic.
# USE_PTHREAD_EMULATION   : replace pthread's rwlocks with ours
# USE_SHM_OPEN            : use shm_open() for features that can make use of shared memory
# USE_KTLS                : use kTLS (requires at least Linux 4.17).
# USE_SHM_OPEN            : use shm_open() for the startup-logs
#
# Options can be forced by specifying "USE_xxx=1" or can be disabled by using
# "USE_xxx=" (empty string). The list of enabled and disabled options for a

@ -136,12 +135,7 @@

# VTEST_PROGRAM : location of the vtest program to run reg-tests.
# DEBUG_USE_ABORT: use abort() for program termination, see include/haproxy/bug.h for details

#### Add -Werror when set to non-empty, and make Makefile stop on warnings.
#### It must be declared before includes because it's used there.
ERR =

include include/make/verbose.mk
include include/make/errors.mk
include include/make/compiler.mk
include include/make/options.mk

@ -165,7 +159,7 @@ TARGET =

CPU =
ifneq ($(CPU),)
ifneq ($(CPU),generic)
$(call $(complain),the "CPU" variable was forced to "$(CPU)" but is no longer \
$(warning Warning: the "CPU" variable was forced to "$(CPU)" but is no longer \
  used and will be ignored. For native builds, modern compilers generally \
  prefer that the string "-march=native" is passed in CPU_CFLAGS or CFLAGS. \
  For other CPU-specific options, please read suggestions in the INSTALL file.)

@ -175,7 +169,7 @@ endif

#### No longer used
ARCH =
ifneq ($(ARCH),)
$(call $(complain),the "ARCH" variable was forced to "$(ARCH)" but is no \
$(warning Warning: the "ARCH" variable was forced to "$(ARCH)" but is no \
  longer used and will be ignored. Please check the INSTALL file for other \
  options, but usually in order to pass arch-specific options, ARCH_FLAGS, \
  CFLAGS or LDFLAGS are preferred.)

@ -193,7 +187,7 @@ OPT_CFLAGS = -O2

#### No longer used
DEBUG_CFLAGS =
ifneq ($(DEBUG_CFLAGS),)
$(call $(complain),DEBUG_CFLAGS was forced to "$(DEBUG_CFLAGS)" but is no \
$(warning Warning: DEBUG_CFLAGS was forced to "$(DEBUG_CFLAGS)" but is no \
  longer used and will be ignored. If you have ported this build setting from \
  an older version, it is likely that you just want to pass these options \
  to the CFLAGS variable. If you are passing some debugging-related options \

@ -201,10 +195,12 @@ $(call $(complain),DEBUG_CFLAGS was forced to "$(DEBUG_CFLAGS)" but is no \

  both the compilation and linking stages.)
endif

#### Add -Werror when set to non-empty
ERR =

#### May be used to force running a specific set of reg-tests
REG_TEST_FILES =
REG_TEST_SCRIPT=./scripts/run-regtests.sh
UNIT_TEST_SCRIPT=./scripts/run-unittests.sh

#### Standard C definition
# Compiler-specific flags that may be used to set the standard behavior we

@ -214,8 +210,7 @@ UNIT_TEST_SCRIPT=./scripts/run-unittests.sh

# undefined behavior to silently produce invalid code. For this reason we have
# to use -fwrapv or -fno-strict-overflow to guarantee the intended behavior.
# It is preferable not to change this option in order to avoid breakage.
STD_CFLAGS := $(call cc-opt-alt,-fwrapv,-fno-strict-overflow) \
              $(call cc-opt,-fvect-cost-model=very-cheap)
STD_CFLAGS := $(call cc-opt-alt,-fwrapv,-fno-strict-overflow)

#### Compiler-specific flags to enable certain classes of warnings.
# Some are hard-coded, others are enabled only if supported.

@ -252,7 +247,7 @@ endif

#### No longer used
SMALL_OPTS =
ifneq ($(SMALL_OPTS),)
$(call $(complain),SMALL_OPTS was forced to "$(SMALL_OPTS)" but is no longer \
$(warning Warning: SMALL_OPTS was forced to "$(SMALL_OPTS)" but is no longer \
  used and will be ignored. Please check if this setting is still relevant, \
  and move it either to DEFINE or to CFLAGS instead.)
endif

@ -265,9 +260,8 @@ endif

# without appearing here. Currently defined DEBUG macros include DEBUG_FULL,
# DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_FD, DEBUG_POOL_INTEGRITY,
# DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_ACTION=[0-3], DEBUG_HPACK,
# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD=0-2, DEBUG_STRICT, DEBUG_DEV,
# DEBUG_TASK, DEBUG_MEMORY_POOLS, DEBUG_POOL_TRACING, DEBUG_QPACK, DEBUG_LIST,
# DEBUG_COUNTERS=[0-2], DEBUG_STRESS, DEBUG_UNIT.
# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD, DEBUG_STRICT, DEBUG_DEV,
# DEBUG_TASK, DEBUG_MEMORY_POOLS, DEBUG_POOL_TRACING, DEBUG_QPACK, DEBUG_LIST.
DEBUG =

#### Trace options

@ -342,16 +336,14 @@ use_opts = USE_EPOLL USE_KQUEUE USE_NETFILTER USE_POLL \

           USE_TPROXY USE_LINUX_TPROXY USE_LINUX_CAP \
           USE_LINUX_SPLICE USE_LIBCRYPT USE_CRYPT_H USE_ENGINE \
           USE_GETADDRINFO USE_OPENSSL USE_OPENSSL_WOLFSSL USE_OPENSSL_AWSLC \
           USE_ECH \
           USE_SSL USE_LUA USE_ACCEPT4 USE_CLOSEFROM USE_ZLIB USE_SLZ \
           USE_CPU_AFFINITY USE_TFO USE_NS USE_DL USE_RT USE_LIBATOMIC \
           USE_MATH USE_DEVICEATLAS USE_51DEGREES \
           USE_WURFL USE_OBSOLETE_LINKER USE_PRCTL USE_PROCCTL \
           USE_WURFL USE_SYSTEMD USE_OBSOLETE_LINKER USE_PRCTL USE_PROCCTL \
           USE_THREAD_DUMP USE_EVPORTS USE_OT USE_QUIC USE_PROMEX \
           USE_MEMORY_PROFILING USE_SHM_OPEN \
           USE_STATIC_PCRE USE_STATIC_PCRE2 \
           USE_PCRE USE_PCRE_JIT USE_PCRE2 USE_PCRE2_JIT \
           USE_QUIC_OPENSSL_COMPAT USE_KTLS
           USE_PCRE USE_PCRE_JIT USE_PCRE2 USE_PCRE2_JIT USE_QUIC_OPENSSL_COMPAT

# preset all variables for all supported build options among use_opts
$(reset_opts_vars)

@ -382,13 +374,13 @@ ifeq ($(TARGET),haiku)

set_target_defaults = $(call default_opts,USE_POLL USE_TPROXY USE_OBSOLETE_LINKER)
endif

# For linux >= 4.17 and glibc
# For linux >= 2.6.28 and glibc
ifeq ($(TARGET),linux-glibc)
  set_target_defaults = $(call default_opts, \
    USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
    USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
    USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
    USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_KTLS)
    USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_SYSTEMD)
  INSTALL = install -v
endif

@ -401,13 +393,13 @@ ifeq ($(TARGET),linux-glibc-legacy)

  INSTALL = install -v
endif

# For linux >= 4.17 and musl
# For linux >= 2.6.28 and musl
ifeq ($(TARGET),linux-musl)
  set_target_defaults = $(call default_opts, \
    USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
    USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
    USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
    USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN USE_KTLS)
    USE_GETADDRINFO USE_SHM_OPEN)
  INSTALL = install -v
endif

@ -424,7 +416,7 @@ endif

ifeq ($(TARGET),freebsd)
  set_target_defaults = $(call default_opts, \
    USE_POLL USE_TPROXY USE_LIBCRYPT USE_THREAD USE_CPU_AFFINITY USE_KQUEUE \
    USE_ACCEPT4 USE_CLOSEFROM USE_GETADDRINFO USE_PROCCTL)
    USE_ACCEPT4 USE_CLOSEFROM USE_GETADDRINFO USE_PROCCTL USE_SHM_OPEN)
endif

# kFreeBSD glibc

@ -598,16 +590,10 @@ endif

ifneq ($(USE_BACKTRACE:0=),)
  BACKTRACE_LDFLAGS = -Wl,$(if $(EXPORT_SYMBOL),$(EXPORT_SYMBOL),--export-dynamic)
  BACKTRACE_CFLAGS = -fno-omit-frame-pointer
endif

ifneq ($(USE_MEMORY_PROFILING:0=),)
  MEMORY_PROFILING_CFLAGS = -fno-optimize-sibling-calls
endif

ifneq ($(USE_CPU_AFFINITY:0=),)
  OPTIONS_OBJS += src/cpuset.o
  OPTIONS_OBJS += src/cpu_topo.o
endif

# OpenSSL is packaged in various forms and with various dependencies.

@ -640,10 +626,7 @@ ifneq ($(USE_OPENSSL:0=),)

  SSL_LDFLAGS := $(if $(SSL_LIB),-L$(SSL_LIB)) -lssl -lcrypto
endif
USE_SSL := $(if $(USE_SSL:0=),$(USE_SSL:0=),implicit)
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o \
                src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o \
                src/ssl_utils.o src/jwt.o src/ssl_clienthello.o src/jws.o src/acme.o \
                src/ssl_trace.o src/jwe.o
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o src/ssl_utils.o src/jwt.o src/ssl_clienthello.o
endif

ifneq ($(USE_ENGINE:0=),)

@ -656,21 +639,20 @@ endif

ifneq ($(USE_QUIC:0=),)
OPTIONS_OBJS += src/mux_quic.o src/h3.o src/quic_rx.o src/quic_tx.o \
OPTIONS_OBJS += src/quic_rx.o src/mux_quic.o src/h3.o src/quic_tx.o \
                src/quic_conn.o src/quic_frame.o src/quic_sock.o \
                src/quic_tls.o src/quic_ssl.o src/proto_quic.o \
                src/quic_cli.o src/quic_trace.o src/quic_tp.o \
                src/quic_cid.o src/quic_stream.o \
                src/quic_retransmit.o src/quic_loss.o \
                src/hq_interop.o src/quic_cc_cubic.o \
                src/quic_cc_bbr.o src/quic_retry.o \
                src/cfgparse-quic.o src/xprt_quic.o src/quic_token.o \
                src/quic_ack.o src/qpack-dec.o src/quic_cc_newreno.o \
                src/qmux_http.o src/qmux_trace.o src/quic_rules.o \
                src/quic_cc_nocc.o src/quic_cc.o src/quic_pacing.o \
                src/h3_stats.o src/quic_stats.o src/qpack-enc.o \
                src/qpack-tbl.o src/quic_cc_drs.o src/quic_fctl.o \
                src/quic_enc.o
                src/quic_ssl.o src/quic_tls.o src/proto_quic.o \
                src/quic_trace.o src/quic_cli.o src/quic_tp.o \
                src/quic_cid.o src/quic_retransmit.o src/quic_retry.o \
                src/quic_loss.o src/quic_cc_cubic.o src/quic_stream.o \
                src/xprt_quic.o src/quic_ack.o src/hq_interop.o \
                src/quic_cc_newreno.o src/qmux_http.o \
                src/quic_cc_nocc.o src/qpack-dec.o src/quic_cc.o \
                src/cfgparse-quic.o src/qmux_trace.o src/qpack-enc.o \
                src/qpack-tbl.o src/h3_stats.o src/quic_stats.o \
                src/quic_fctl.o src/cbuf.o src/quic_rules.o \
                src/quic_token.o
endif

ifneq ($(USE_QUIC_OPENSSL_COMPAT:0=),)

@ -782,6 +764,10 @@ ifneq ($(USE_WURFL:0=),)

  WURFL_LDFLAGS = $(if $(WURFL_LIB),-L$(WURFL_LIB)) -lwurfl
endif

ifneq ($(USE_SYSTEMD:0=),)
  OPTIONS_OBJS += src/systemd.o
endif

ifneq ($(USE_PCRE:0=)$(USE_STATIC_PCRE:0=)$(USE_PCRE_JIT:0=),)
ifneq ($(USE_PCRE2:0=)$(USE_STATIC_PCRE2:0=)$(USE_PCRE2_JIT:0=),)
$(error cannot compile both PCRE and PCRE2 support)

@ -951,7 +937,7 @@ all:

        @echo
        @exit 1
else
all: dev/flags/flags haproxy $(EXTRA)
all: haproxy dev/flags/flags $(EXTRA)
endif # obsolete targets
endif # TARGET

@ -961,49 +947,47 @@ ifneq ($(EXTRA_OBJS),)

OBJS += $(EXTRA_OBJS)
endif

OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
        src/server.o src/stream.o src/tcpcheck.o src/http_ana.o \
        src/stick_table.o src/tools.o src/mux_spop.o src/sample.o \
        src/activity.o src/cfgparse.o src/peers.o src/cli.o \
        src/backend.o src/connection.o src/resolvers.o src/proxy.o \
        src/cache.o src/stconn.o src/http_htx.o src/debug.o \
        src/check.o src/stats-html.o src/haproxy.o src/listener.o \
        src/applet.o src/pattern.o src/cfgparse-listen.o \
        src/flt_spoe.o src/cebis_tree.o src/http_ext.o \
        src/http_act.o src/http_fetch.o src/cebs_tree.o \
        src/cebib_tree.o src/http_client.o src/dns.o \
        src/cebb_tree.o src/vars.o src/event_hdl.o src/tcp_rules.o \
        src/trace.o src/stats-proxy.o src/pool.o src/stats.o \
        src/cfgparse-global.o src/filters.o src/mux_pt.o \
        src/flt_http_comp.o src/sock.o src/h1.o src/sink.o \
        src/ceba_tree.o src/session.o src/payload.o src/htx.o \
        src/cebl_tree.o src/ceb32_tree.o src/ceb64_tree.o \
        src/server_state.o src/proto_rhttp.o src/flt_trace.o src/fd.o \
        src/task.o src/map.o src/fcgi-app.o src/h2.o src/mworker.o \
        src/tcp_sample.o src/mjson.o src/h1_htx.o src/tcp_act.o \
        src/ring.o src/flt_bwlim.o src/acl.o src/thread.o src/queue.o \
        src/http_rules.o src/http.o src/channel.o src/proto_tcp.o \
        src/mqtt.o src/lb_chash.o src/extcheck.o src/dns_ring.o \
        src/errors.o src/ncbuf.o src/compression.o src/http_conv.o \
        src/frontend.o src/stats-json.o src/proto_sockpair.o \
        src/raw_sock.o src/action.o src/stats-file.o src/buf.o \
        src/xprt_handshake.o src/proto_uxst.o src/lb_fwrr.o \
        src/uri_normalizer.o src/mailers.o src/protocol.o \
        src/cfgcond.o src/proto_udp.o src/lb_fwlc.o src/ebmbtree.o \
        src/proto_uxdg.o src/cfgdiag.o src/sock_unix.o src/sha1.o \
        src/lb_fas.o src/clock.o src/sock_inet.o src/ev_select.o \
        src/lb_map.o src/shctx.o src/hpack-dec.o src/net_helper.o \
        src/arg.o src/signal.o src/fix.o src/dynbuf.o src/guid.o \
        src/cfgparse-tcp.o src/lb_ss.o src/chunk.o src/counters.o \
        src/cfgparse-unix.o src/regex.o src/fcgi.o src/uri_auth.o \
        src/eb64tree.o src/eb32tree.o src/eb32sctree.o src/lru.o \
        src/limits.o src/ebimtree.o src/wdt.o src/hpack-tbl.o \
        src/ebistree.o src/base64.o src/auth.o src/time.o \
        src/ebsttree.o src/freq_ctr.o src/systemd.o src/init.o \
        src/http_acl.o src/dict.o src/dgram.o src/pipe.o \
        src/hpack-huff.o src/hpack-enc.o src/ebtree.o src/hash.o \
        src/httpclient_cli.o src/version.o src/ncbmbuf.o src/ech.o \
        src/cfgparse-peers.o
OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/stream.o \
        src/log.o src/server.o src/tcpcheck.o src/http_ana.o \
        src/stick_table.o src/tools.o src/sample.o src/flt_spoe.o \
        src/cfgparse.o src/peers.o src/cli.o src/resolvers.o \
        src/connection.o src/backend.o src/cache.o src/http_htx.o \
        src/proxy.o src/stconn.o src/check.o src/haproxy.o \
        src/stats-html.o src/listener.o src/pattern.o src/debug.o \
        src/cfgparse-listen.o src/http_client.o src/activity.o \
        src/applet.o src/http_act.o src/http_fetch.o src/http_ext.o \
        src/dns.o src/vars.o src/tcp_rules.o src/pool.o src/stats.o \
        src/stats-proxy.o src/sink.o src/filters.o src/mux_pt.o \
        src/event_hdl.o src/server_state.o src/h1_htx.o src/h1.o \
        src/flt_http_comp.o src/task.o src/payload.o src/fcgi-app.o \
        src/map.o src/trace.o src/tcp_sample.o src/tcp_act.o \
        src/session.o src/htx.o src/cfgparse-global.o src/mjson.o \
        src/h2.o src/ring.o src/fd.o src/sock.o src/mworker.o \
        src/flt_trace.o src/thread.o src/proto_rhttp.o src/acl.o \
        src/http.o src/flt_bwlim.o src/channel.o src/queue.o \
        src/mqtt.o src/proto_tcp.o src/lb_chash.o src/http_rules.o \
        src/errors.o src/extcheck.o src/dns_ring.o src/stats-json.o \
        src/http_conv.o src/frontend.o src/proto_sockpair.o \
        src/compression.o src/ncbuf.o src/stats-file.o src/raw_sock.o \
        src/lb_fwrr.o src/action.o src/uri_normalizer.o src/buf.o \
        src/proto_uxst.o src/ebmbtree.o src/xprt_handshake.o \
        src/protocol.o src/proto_udp.o src/lb_fwlc.o src/sha1.o \
        src/proto_uxdg.o src/mailers.o src/lb_fas.o src/cfgcond.o \
        src/cfgdiag.o src/sock_unix.o src/sock_inet.o \
        src/mworker-prog.o src/lb_map.o src/ev_select.o src/shctx.o \
        src/hpack-dec.o src/fix.o src/clock.o src/cfgparse-tcp.o \
        src/arg.o src/signal.o src/fcgi.o src/dynbuf.o src/regex.o \
        src/lru.o src/lb_ss.o src/eb64tree.o src/chunk.o \
        src/cfgparse-unix.o src/guid.o src/ebimtree.o src/eb32tree.o \
        src/eb32sctree.o src/base64.o src/uri_auth.o src/time.o \
        src/hpack-tbl.o src/ebsttree.o src/ebistree.o src/auth.o \
        src/hpack-huff.o src/freq_ctr.o src/dict.o src/wdt.o \
        src/pipe.o src/init.o src/http_acl.o src/hpack-enc.o \
        src/cebu32_tree.o src/cebu64_tree.o src/cebua_tree.o \
        src/cebub_tree.o src/cebuib_tree.o src/cebuis_tree.o \
        src/cebul_tree.o src/cebus_tree.o \
        src/ebtree.o src/dgram.o src/hash.o src/version.o \
        src/limits.o src/mux_spop.o

ifneq ($(TRACE),)
OBJS += src/calltrace.o

@ -1038,9 +1022,8 @@ help:

# TARGET variable is not set since we're not building, by definition.
IGNORE_OPTS=help install install-man install-doc install-bin \
            uninstall clean tags cscope tar git-tar version update-version \
            opts reg-tests reg-tests-help unit-tests admin/halog/halog dev/flags/flags \
            dev/haring/haring dev/ncpu/ncpu dev/poll/poll dev/tcploop/tcploop \
            dev/term_events/term_events
            opts reg-tests reg-tests-help admin/halog/halog dev/flags/flags \
            dev/haring/haring dev/poll/poll dev/tcploop/tcploop

ifneq ($(TARGET),)
ifeq ($(filter $(firstword $(MAKECMDGOALS)),$(IGNORE_OPTS)),)

@ -1077,9 +1060,6 @@ dev/haring/haring: dev/haring/haring.o

dev/hpack/%: dev/hpack/%.o
        $(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)

dev/ncpu/ncpu:
        $(cmd_MAKE) -C dev/ncpu ncpu V='$(V)'

dev/poll/poll:
        $(cmd_MAKE) -C dev/poll poll CC='$(CC)' OPTIMIZE='$(COPTS)' V='$(V)'

@ -1092,16 +1072,13 @@ dev/tcploop/tcploop:

dev/udp/udp-perturb: dev/udp/udp-perturb.o
        $(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)

dev/term_events/term_events: dev/term_events/term_events.o
        $(cmd_LD) $(ARCH_FLAGS) $(LDFLAGS) -o $@ $^ $(LDOPTS)

# rebuild it every time
.PHONY: src/version.c dev/ncpu/ncpu dev/poll/poll dev/tcploop/tcploop
.PHONY: src/version.c dev/poll/poll dev/tcploop/tcploop

src/calltrace.o: src/calltrace.c $(DEP)
        $(cmd_CC) $(TRACE_COPTS) -c -o $@ $<

src/version.o: src/version.c $(DEP)
src/haproxy.o: src/haproxy.c $(DEP)
        $(cmd_CC) $(COPTS) \
                  -DBUILD_TARGET='"$(strip $(TARGET))"' \
                  -DBUILD_CC='"$(strip $(CC))"' \

@ -1124,11 +1101,6 @@ install-doc:

                $(INSTALL) -m 644 doc/$$x.txt "$(DESTDIR)$(DOCDIR)" ; \
        done

install-admin:
        $(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
        $(Q)$(INSTALL) admin/cli/haproxy-dump-certs "$(DESTDIR)$(SBINDIR)"
        $(Q)$(INSTALL) admin/cli/haproxy-reload "$(DESTDIR)$(SBINDIR)"

install-bin:
        $(Q)for i in haproxy $(EXTRA); do \
                if ! [ -e "$$i" ]; then \

@ -1139,7 +1111,7 @@ install-bin:

        $(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
        $(Q)$(INSTALL) haproxy $(EXTRA) "$(DESTDIR)$(SBINDIR)"

install: install-bin install-admin install-man install-doc
install: install-bin install-man install-doc

uninstall:
        $(Q)rm -f "$(DESTDIR)$(MANDIR)"/man1/haproxy.1

@ -1161,13 +1133,10 @@ clean:

        $(Q)rm -f addons/ot/src/*.[oas]
        $(Q)rm -f addons/wurfl/*.[oas] addons/wurfl/dummy/*.[oas]
        $(Q)rm -f admin/*/*.[oas] admin/*/*/*.[oas]
        $(Q)rm -f dev/*/*.[oas]
        $(Q)rm -f dev/flags/flags

distclean: clean
        $(Q)rm -f admin/iprange/iprange admin/iprange/ip6range admin/halog/halog
        $(Q)rm -f admin/dyncookie/dyncookie
        $(Q)rm -f dev/haring/haring dev/ncpu/ncpu{,.so} dev/poll/poll dev/tcploop/tcploop
        $(Q)rm -f dev/*/*.[oas]
        $(Q)rm -f dev/flags/flags dev/haring/haring dev/poll/poll dev/tcploop/tcploop
        $(Q)rm -f dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
        $(Q)rm -f dev/qpack/decode

@ -1287,17 +1256,10 @@ reg-tests-help:

.PHONY: reg-tests reg-tests-help

unit-tests:
        $(Q)$(UNIT_TEST_SCRIPT)
.PHONY: unit-tests

# "make range" iteratively builds using "make all" and the exact same build
# options for all commits within RANGE. RANGE may be either a git range
# such as ref1..ref2 or a single commit, in which case all commits from
# the master branch to this one will be tested.
# Will execute TEST_CMD for each commit if defined, and will stop in case of
# failure.
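#
# As a sketch (the range and the test command below are placeholders):
#   $ make range RANGE=v3.0.0..HEAD TEST_CMD='make reg-tests'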

range:
        $(Q)[ -d .git/. ] || { echo "## Fatal: \"make $@\" may only be used inside a Git repository."; exit 1; }

@ -1323,7 +1285,6 @@ range:

                echo "[ $$index/$$count ] $$commit #############################"; \
                git checkout -q $$commit || die 1; \
                $(MAKE) all || die 1; \
                [ -z "$(TEST_CMD)" ] || $(TEST_CMD) || die 1; \
                index=$$((index + 1)); \
        done; \
        echo;echo "Done! $${count} commit(s) built successfully for RANGE $${RANGE}" ; \

2  VERDATE

@ -1,2 +1,2 @@

$Format:%ci$
2026/02/04
2024/10/16

2  VERSION

@ -1 +1 @@

3.4-dev4
3.1-dev10

@ -5,8 +5,7 @@ CXX := c++

CXXLIB := -lstdc++

ifeq ($(DEVICEATLAS_SRC),)
OPTIONS_CFLAGS  += -I$(DEVICEATLAS_INC)
OPTIONS_LDFLAGS += -Wl,-rpath,$(DEVICEATLAS_LIB) -L$(DEVICEATLAS_LIB) -lda
OPTIONS_LDFLAGS += -lda
else
DEVICEATLAS_INC = $(DEVICEATLAS_SRC)
DEVICEATLAS_LIB = $(DEVICEATLAS_SRC)

@ -212,7 +212,7 @@ da_status_t da_atlas_compile(void *ctx, da_read_fn readfn, da_setpos_fn setposfn

 * da_getpropid on the atlas, and if generated by the search, the ID will be consistent across
 * different calls to search.
 * Properties added by a search that are neither in the compiled atlas, nor in the extra_props list
 * are assigned an ID within the context that is not transferable through different search results
 * are assigned an ID within the context that is not transferrable through different search results
 * within the same atlas.
 * @param atlas Atlas instance
 * @param extra_props properties

@ -47,12 +47,6 @@ via the OpenTracing API with OpenTracing compatible servers (tracers).

Currently, tracers that support this API include Datadog, Jaeger, LightStep
and Zipkin.

Note: The OpenTracing filter shouldn't be used for new designs as OpenTracing
      itself is no longer maintained nor supported by its authors. A
      replacement filter based on OpenTelemetry is currently under development
      and is expected to be ready around HAProxy 3.2. As such, OpenTracing will
      be deprecated in 3.3 and removed in 3.5.

The OT filter was primarily tested with the Jaeger tracer, while configurations
for both Datadog and Zipkin tracers were also set in the test directory.

@ -718,7 +718,7 @@ static void flt_ot_check_timeouts(struct stream *s, struct filter *f)

    if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, -1)))
        FLT_OT_RETURN();

    s->pending_events |= STRM_EVT_MSG;
    s->pending_events |= TASK_WOKEN_MSG;

    flt_ot_return_void(f, &err);

@ -39,21 +39,14 @@

 */
static void flt_ot_vars_scope_dump(struct vars *vars, const char *scope)
{
    int i;
    const struct var *var;

    if (vars == NULL)
        return;

    vars_rdlock(vars);
    for (i = 0; i < VAR_NAME_ROOTS; i++) {
        struct ceb_node *node = cebu64_first(&(vars->name_root[i]));

        for ( ; node != NULL; node = cebu64_next(&(vars->name_root[i]), node)) {
            struct var *var = container_of(node, struct var, node);

            FLT_OT_DBG(2, "'%s.%016" PRIx64 "' -> '%.*s'", scope, var->name_hash, (int)b_data(&(var->data.u.str)), b_orig(&(var->data.u.str)));
        }
    }
    list_for_each_entry(var, &(vars->head), l)
        FLT_OT_DBG(2, "'%s.%016" PRIx64 "' -> '%.*s'", scope, var->name_hash, (int)b_data(&(var->data.u.str)), b_orig(&(var->data.u.str)));
    vars_rdunlock(vars);
}

@ -91,18 +91,6 @@ name must be preceded by a minus character ('-'). Here are examples:

  # Only dump frontends, backends and servers status
  /metrics?metrics=haproxy_frontend_status,haproxy_backend_status,haproxy_server_status

* Add section description as label for all metrics

It is possible to set a description in global and proxy sections, via the
"description" directive. The global description is exposed, if it is defined,
via the "haproxy_process_description" metric. But the descriptions provided in
proxy sections are not dumped. However, it is possible to add them as a label
for all metrics of the corresponding section, including the global one. To do
so, the "desc-labels" parameter must be set:

  /metrics?desc-labels

  /metrics?scope=frontend&desc-labels
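
As a quick sketch of how such a query can be issued (the address and port
below are placeholders for wherever the exporter is bound):

  $ curl 'http://127.0.0.1:8405/metrics?scope=frontend&desc-labels'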

* Dump extra counters

@ -205,8 +193,6 @@ listed below. Metrics from extra counters are not listed.

| haproxy_process_current_tasks     |
| haproxy_process_current_run_queue |
| haproxy_process_idle_time_percent |
| haproxy_process_node              |
| haproxy_process_description       |
| haproxy_process_stopping          |
| haproxy_process_jobs              |
| haproxy_process_unstoppable_jobs  |

@ -389,9 +375,6 @@ listed below. Metrics from extra counters are not listed.

| haproxy_server_max_connect_time_seconds        |
| haproxy_server_max_response_time_seconds       |
| haproxy_server_max_total_time_seconds          |
| haproxy_server_agent_status                    |
| haproxy_server_agent_code                      |
| haproxy_server_agent_duration_seconds          |
| haproxy_server_internal_errors_total           |
| haproxy_server_unsafe_idle_connections_current |
| haproxy_server_safe_idle_connections_current   |

@ -32,11 +32,11 @@

/* Prometheus exporter flags (ctx->flags) */
#define PROMEX_FL_METRIC_HDR    0x00000001
#define PROMEX_FL_BODYLESS_RESP 0x00000002
/* unused: 0x00000004 */
/* unused: 0x00000008 */
/* unused: 0x00000010 */
/* unused: 0x00000020 */
#define PROMEX_FL_INFO_METRIC   0x00000002
#define PROMEX_FL_FRONT_METRIC  0x00000004
#define PROMEX_FL_BACK_METRIC   0x00000008
#define PROMEX_FL_SRV_METRIC    0x00000010
#define PROMEX_FL_LI_METRIC     0x00000020
#define PROMEX_FL_MODULE_METRIC 0x00000040
#define PROMEX_FL_SCOPE_GLOBAL  0x00000080
#define PROMEX_FL_SCOPE_FRONT   0x00000100

@ -47,7 +47,6 @@

#define PROMEX_FL_NO_MAINT_SRV          0x00002000
#define PROMEX_FL_EXTRA_COUNTERS        0x00004000
#define PROMEX_FL_INC_METRIC_BY_DEFAULT 0x00008000
#define PROMEX_FL_DESC_LABELS           0x00010000

#define PROMEX_FL_SCOPE_ALL (PROMEX_FL_SCOPE_GLOBAL | PROMEX_FL_SCOPE_FRONT | \
                             PROMEX_FL_SCOPE_LI | PROMEX_FL_SCOPE_BACK | \

File diff suppressed because it is too large

@ -1,235 +0,0 @@
#!/bin/bash
#
# Dump certificates from the HAProxy stats or master socket to the filesystem
# Experimental script
#

set -e

export BASEPATH=${BASEPATH:-/etc/haproxy}/
export SOCKET=${SOCKET:-/var/run/haproxy-master.sock}
export DRY_RUN=0
export DEBUG=
export VERBOSE=
export M="@1 "
export TMP

vecho() {
    [ -n "$VERBOSE" ] && echo "$@"
    return 0
}

read_certificate() {
    name=$1
    crt_filename=
    key_filename=

    OFS=$IFS
    IFS=":"

    while read -r key value; do
        case "$key" in
            "Crt filename")
                crt_filename="${value# }"
                key_filename="${value# }"
                ;;
            "Key filename")
                key_filename="${value# }"
                ;;
        esac
    done < <(echo "${M}show ssl cert ${name}" | socat "${SOCKET}" -)
    IFS=$OFS

    if [ -z "$crt_filename" ] || [ -z "$key_filename" ]; then
        return 1
    fi

    # handle fields without a crt-base/key-base
    [ "${crt_filename:0:1}" != "/" ] && crt_filename="${BASEPATH}${crt_filename}"
    [ "${key_filename:0:1}" != "/" ] && key_filename="${BASEPATH}${key_filename}"

    vecho "name:$name"
    vecho "crt:$crt_filename"
    vecho "key:$key_filename"

    export NAME="$name"
    export CRT_FILENAME="$crt_filename"
    export KEY_FILENAME="$key_filename"

    return 0
}

cmp_certkey() {
    prev=$1
    new=$2

    if [ ! -f "$prev" ]; then
        return 1;
    fi

    if ! cmp -s <(openssl x509 -in "$prev" -noout -fingerprint -sha256) <(openssl x509 -in "$new" -noout -fingerprint -sha256); then
        return 1
    fi

    return 0
}

dump_certificate() {
    name=$1
    prev_crt=$2
    prev_key=$3
    r="tmp.${RANDOM}"
    d="old.$(date +%s)"
    new_crt="$TMP/$(basename "$prev_crt").${r}"
    new_key="$TMP/$(basename "$prev_key").${r}"

    if ! touch "${new_crt}" || ! touch "${new_key}"; then
        echo "[ALERT] ($$) : can't dump \"$name\", can't create tmp files" >&2
        return 1
    fi

    echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl pkey >> "${new_key}"
    # use crl2pkcs7 as a way to dump multiple x509, storeutl could be used in modern versions of openssl
    echo "${M}dump ssl cert ${name}" | socat "${SOCKET}" - | openssl crl2pkcs7 -nocrl -certfile /dev/stdin | openssl pkcs7 -print_certs >> "${new_crt}"

    if ! cmp -s <(openssl x509 -in "${new_crt}" -pubkey -noout) <(openssl pkey -in "${new_key}" -pubout); then
        echo "[ALERT] ($$) : Private key \"${new_key}\" and public key \"${new_crt}\" don't match" >&2
        return 1
    fi

    if cmp_certkey "${prev_crt}" "${new_crt}"; then
        echo "[NOTICE] ($$) : ${crt_filename} is already up to date" >&2
        return 0
    fi

    # dry run will just return before trying to move the files
    if [ "${DRY_RUN}" != "0" ]; then
        return 0
    fi

    # move the current certificates to ".old.timestamp"
    if [ -f "${prev_crt}" ] && [ -f "${prev_key}" ]; then
        mv "${prev_crt}" "${prev_crt}.${d}"
        [ "${prev_crt}" != "${prev_key}" ] && mv "${prev_key}" "${prev_key}.${d}"
    fi

    # move the new certificates to old place
    mv "${new_crt}" "${prev_crt}"
    [ "${prev_crt}" != "${prev_key}" ] && mv "${new_key}" "${prev_key}"

    return 0
}

dump_all_certificates() {
    echo "${M}show ssl cert" | socat "${SOCKET}" - | grep -v '^#' | grep -v '^$' | while read -r line; do
        export NAME
        export CRT_FILENAME
        export KEY_FILENAME

        if read_certificate "$line"; then
            dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
        else
            echo "[WARNING] ($$) : can't dump \"$name\", crt/key filename details not found in \"show ssl cert\"" >&2
        fi
    done
}

usage() {
    echo "Usage:"
    echo "  $0 [options]* [cert]*"
    echo ""
    echo "  Dump certificates from the HAProxy stats or master socket to the filesystem"
    echo "  Requires socat and openssl"
    echo "  EXPERIMENTAL script, backup your files!"
    echo "  The script will move your previous files to FILE.old.unixtimestamp (ex: foo.com.pem.old.1759044998)"
    echo ""
    echo "Options:"
    echo "  -S, --master-socket <path>  Use the master socket at <path> (default: ${SOCKET})"
    echo "  -s, --socket <path>         Use the stats socket at <path>"
    echo "  -p, --path <path>           Specify a base path for relative files (default: ${BASEPATH})"
    echo "  -n, --dry-run               Read certificates on the socket but don't dump them"
    echo "  -d, --debug                 Debug mode, set -x"
    echo "  -v, --verbose               Verbose mode"
    echo "  -h, --help                  This help"
    echo "  --                          End of options"
    echo ""
    echo "Examples:"
    echo "  $0 -v -p ${BASEPATH} -S ${SOCKET}"
    echo "  $0 -v -p ${BASEPATH} -S ${SOCKET} bar.com.rsa.pem"
    echo "  $0 -v -p ${BASEPATH} -S ${SOCKET} -- foo.com.ecdsa.pem bar.com.rsa.pem"
}

main() {
    while [ -n "$1" ]; do
        case "$1" in
            -S|--master-socket)
                SOCKET="$2"
                M="@1 "
                shift 2
                ;;
            -s|--socket)
                SOCKET="$2"
                M=
                shift 2
                ;;
            -p|--path)
                BASEPATH="$2/"
                shift 2
                ;;
            -n|--dry-run)
                DRY_RUN=1
                shift
                ;;
            -d|--debug)
                DEBUG=1
                shift
                ;;
            -v|--verbose)
                VERBOSE=1
                shift
                ;;
            -h|--help)
                usage "$@"
                exit 0
                ;;
            --)
                shift
                break
                ;;
            -*)
                echo "[ALERT] ($$) : Unknown option '$1'" >&2
                usage "$@"
                exit 1
                ;;
            *)
                break
                ;;
        esac
    done

    if [ -n "$DEBUG" ]; then
        set -x
    fi

    TMP=${TMP:-$(mktemp -d)}

    if [ -z "$1" ]; then
        dump_all_certificates
    else
        # compute the certificates names at the end of the command
        while [ -n "$1" ]; do
            if ! read_certificate "$1"; then
                echo "[ALERT] ($$) : can't dump \"$1\", crt/key filename details not found in \"show ssl cert\"" >&2
                exit 1
            fi
            [ "${DRY_RUN}" = "0" ] && dump_certificate "$NAME" "$CRT_FILENAME" "$KEY_FILENAME"
            shift
        done
    fi
}

trap 'rm -rf -- "$TMP"' EXIT
main "$@"

@ -1,113 +0,0 @@
#!/bin/bash

set -e

export VERBOSE=1
export TIMEOUT=90
export MASTER_SOCKET=${MASTER_SOCKET:-/var/run/haproxy-master.sock}
export RET=

alert() {
    if [ "$VERBOSE" -ge "1" ]; then
        echo "[ALERT] $*" >&2
    fi
}

reload() {
    while read -r line; do
        if [ "$line" = "Success=0" ]; then
            RET=1
        elif [ "$line" = "Success=1" ]; then
            RET=0
        elif [ "$line" = "Another reload is still in progress." ]; then
            alert "$line"
        elif [ "$line" = "--" ]; then
            continue;
        else
            if [ "$RET" = 1 ] && [ "$VERBOSE" = "2" ]; then
                echo "$line" >&2
            elif [ "$VERBOSE" = "3" ]; then
                echo "$line" >&2
            fi
        fi
    done < <(echo "reload" | socat -t"${TIMEOUT}" "${MASTER_SOCKET}" -)

    if [ -z "$RET" ]; then
        alert "Couldn't finish the reload before the timeout (${TIMEOUT})."
        return 1
    fi

    return "$RET"
}

usage() {
    echo "Usage:"
    echo "  $0 [options]*"
    echo ""
    echo "  Trigger a reload from the master socket"
    echo "  Requires socat"
    echo "  EXPERIMENTAL script!"
    echo ""
    echo "Options:"
    echo "  -S, --master-socket <path>  Use the master socket at <path> (default: ${MASTER_SOCKET})"
    echo "  -d, --debug                 Debug mode, set -x"
    echo "  -t, --timeout               Timeout (socat -t) (default: ${TIMEOUT})"
    echo "  -s, --silent                Silent mode (no output)"
    echo "  -v, --verbose               Verbose output (output from haproxy on failure)"
    echo "  -vv                         Even more verbose output (output from haproxy on success and failure)"
    echo "  -h, --help                  This help"
    echo ""
    echo "Examples:"
    echo "  $0 -S ${MASTER_SOCKET} -t ${TIMEOUT}"
}

main() {
    while [ -n "$1" ]; do
        case "$1" in
            -S|--master-socket)
                MASTER_SOCKET="$2"
                shift 2
                ;;
            -t|--timeout)
                TIMEOUT="$2"
                shift 2
                ;;
            -s|--silent)
                VERBOSE=0
                shift
                ;;
            -v|--verbose)
                VERBOSE=2
                shift
                ;;
            -vv)
                VERBOSE=3
                shift
                ;;
            -d|--debug)
                DEBUG=1
                shift
                ;;
            -h|--help)
                usage "$@"
                exit 0
                ;;
            *)
                echo "[ALERT] ($$) : Unknown option '$1'" >&2
                usage "$@"
                exit 1
                ;;
        esac
    done

    if [ -n "$DEBUG" ]; then
        set -x
    fi
}

main "$@"
reload

@ -123,22 +123,6 @@ struct url_stat {

#define FILT2_PRESERVE_QUERY   0x02
#define FILT2_EXTRACT_CAPTURE  0x04

#define FILT_OUTPUT_FMT  (FILT_COUNT_ONLY| \
                          FILT_COUNT_STATUS| \
                          FILT_COUNT_SRV_STATUS| \
                          FILT_COUNT_COOK_CODES| \
                          FILT_COUNT_TERM_CODES| \
                          FILT_COUNT_URL_ONLY| \
                          FILT_COUNT_URL_COUNT| \
                          FILT_COUNT_URL_ERR| \
                          FILT_COUNT_URL_TAVG| \
                          FILT_COUNT_URL_TTOT| \
                          FILT_COUNT_URL_TAVGO| \
                          FILT_COUNT_URL_TTOTO| \
                          FILT_COUNT_URL_BAVG| \
                          FILT_COUNT_URL_BTOT| \
                          FILT_COUNT_IP_COUNT)

unsigned int filter = 0;
unsigned int filter2 = 0;
unsigned int filter_invert = 0;

@ -208,7 +192,7 @@ void help()

"    you can also use -n to start from earlier than field %d\n"
"  -query preserve the query string for per-URL (-u*) statistics\n"
"\n"
"Output format - **only one** may be used at a time\n"
"Output format - only one may be used at a time\n"
" -c only report the number of lines that would have been printed\n"
" -pct output connect and response times percentiles\n"
" -st output number of requests per HTTP status code\n"

@ -914,9 +898,6 @@ int main(int argc, char **argv)

    if (!filter && !filter2)
        die("No action specified.\n");

    /* x & (x - 1) clears the lowest set bit: a non-zero result means that
     * more than one output format bit was set, which is not allowed.
     */
    if ((filter & FILT_OUTPUT_FMT) & ((filter & FILT_OUTPUT_FMT) - 1))
        die("Please, set only one output filter.\n");

    if (filter & FILT_ACC_COUNT && !filter_acc_count)
        filter_acc_count=1;

@ -1571,10 +1552,6 @@ void filter_count_srv_status(const char *accept_field, const char *time_field, s

    if (!srv_node) {
        /* server not yet in the tree, let's create it */
        srv = (void *)calloc(1, sizeof(struct srv_st) + e - b + 1);
        if (unlikely(!srv)) {
            fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
            exit(1);
        }
        srv_node = &srv->node;
        memcpy(&srv_node->key, b, e - b);
        srv_node->key[e - b] = '\0';

@ -1684,10 +1661,6 @@ void filter_count_url(const char *accept_field, const char *time_field, struct t

     */
    if (unlikely(!ustat))
        ustat = calloc(1, sizeof(*ustat));
    if (unlikely(!ustat)) {
        fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
        exit(1);
    }

    ustat->nb_err = err;
    ustat->nb_req = 1;

@ -6,9 +6,9 @@ Wants=network-online.target

[Service]
EnvironmentFile=-/etc/default/haproxy
EnvironmentFile=-/etc/sysconfig/haproxy
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "CFGDIR=/etc/haproxy/conf.d" "EXTRAOPTS=-S /run/haproxy-master.sock"
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -p $PIDFILE $EXTRAOPTS
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -c $EXTRAOPTS
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "EXTRAOPTS=-S /run/haproxy-master.sock"
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -p $PIDFILE $EXTRAOPTS
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -c $EXTRAOPTS
ExecReload=/bin/kill -USR2 $MAINPID
KillMode=mixed
Restart=always

@ -195,7 +195,7 @@ while read -r; do

    ! [[ "$REPLY" =~ [[:blank:]]h2c.*\.flg=([0-9a-fx]*) ]] || append_flag b.h2c.flg h2c "${BASH_REMATCH[1]}"
elif [ $ctx = cob ]; then
    ! [[ "$REPLY" =~ [[:blank:]]flags=([0-9a-fx]*) ]] || append_flag b.co.flg conn "${BASH_REMATCH[1]}"
    ! [[ "$REPLY" =~ [[:blank:]]fd.state=([0-9a-fx]*) ]] || append_flag b.co.fd.st fd 0x"${BASH_REMATCH[1]}"
    ! [[ "$REPLY" =~ [[:blank:]]fd.state=([0-9a-fx]*) ]] || append_flag b.co.fd.st fd "${BASH_REMATCH[1]}"
elif [ $ctx = res ]; then
    ! [[ "$REPLY" =~ [[:blank:]]\(f=([0-9a-fx]*) ]] || append_flag res.flg chn "${BASH_REMATCH[1]}"
    ! [[ "$REPLY" =~ [[:blank:]]an=([0-9a-fx]*) ]] || append_flag res.ana ana "${BASH_REMATCH[1]}"

@ -1,118 +0,0 @@
# sets $tag and $node from $arg0, for internal use only
define _ebtree_set_tag_node
    set $tag = (unsigned long)$arg0 & 0x1
    set $node = (unsigned long)$arg0 & 0xfffffffffffffffe
    set $node = (struct eb_node *)$node
end

# get root from any node (leaf of node), returns in $node
define ebtree_root
    set $node = (struct eb_root *)$arg0->node_p
    if $node == 0
        # sole node
        set $node = (struct eb_root *)$arg0->leaf_p
    end
    # walk up
    while 1
        _ebtree_set_tag_node $node
        if $node->branches.b[1] == 0
            break
        end
        set $node = $node->node_p
    end
    # root returned in $node
end

# returns $node filled with the first node of ebroot $arg0
define ebtree_first
    # browse ebtree left until encountering leaf
    set $node = (struct eb_node *)$arg0->b[0]
    while 1
        _ebtree_set_tag_node $node
        if $tag == 0
            loop_break
        end
        set $node = (struct eb_root *)$node->branches.b[0]
    end
    # extract last node
    _ebtree_set_tag_node $node
end

# finds next ebtree node after $arg0, and returns it in $node
define ebtree_next
    # get parent
    set $node = (struct eb_root *)$arg0->leaf_p
    # Walking up from right branch, so we cannot be below root
    # while (eb_gettag(t) != EB_LEFT) // #define EB_LEFT 0
    while 1
        _ebtree_set_tag_node $node
        if $tag == 0
            loop_break
        end
        set $node = (struct eb_root *)$node->node_p
    end
    set $node = (struct eb_root *)$node->branches.b[1]
    # walk down (left side => 0)
    # while (eb_gettag(start) == EB_NODE) // #define EB_NODE 1
    while 1
        _ebtree_set_tag_node $node
        if $node == 0
            loop_break
        end
        if $tag != 1
            loop_break
        end
        set $node = (struct eb_root *)$node->branches.b[0]
    end
end


# sets $tag and $node from $arg0, for internal use only
define _ebsctree_set_tag_node
    set $tag = (unsigned long)$arg0 & 0x1
    set $node = (unsigned long)$arg0 & 0xfffffffffffffffe
    set $node = (struct eb32sc_node *)$node
end

# returns $node filled with the first node of ebroot $arg0
define ebsctree_first
    # browse ebsctree left until encountering leaf
    set $node = (struct eb32sc_node *)$arg0->b[0]
    while 1
        _ebsctree_set_tag_node $node
        if $tag == 0
            loop_break
        end
        set $node = (struct eb_root *)$node->branches.b[0]
    end
    # extract last node
    _ebsctree_set_tag_node $node
end

# finds next ebtree node after $arg0, and returns it in $node
define ebsctree_next
    # get parent
    set $node = (struct eb_root *)$arg0->node.leaf_p
    # Walking up from right branch, so we cannot be below root
    # while (eb_gettag(t) != EB_LEFT) // #define EB_LEFT 0
    while 1
        _ebsctree_set_tag_node $node
        if $tag == 0
            loop_break
        end
        set $node = (struct eb_root *)$node->node.node_p
    end
    set $node = (struct eb_root *)$node->node.branches.b[1]
    # walk down (left side => 0)
    # while (eb_gettag(start) == EB_NODE) // #define EB_NODE 1
    while 1
        _ebsctree_set_tag_node $node
        if $node == 0
            loop_break
        end
        if $tag != 1
            loop_break
        end
        set $node = (struct eb_root *)$node->node.branches.b[0]
    end
end
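
# Usage sketch from a gdb session (the tree root below is a placeholder;
# point it at a real eb_root from your dump):
#   (gdb) ebtree_first &some_eb_root      # $node => first node of the tree
#   (gdb) ebtree_next $node               # $node => node following $node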

@ -1,26 +0,0 @@
# lists entries starting at list head $arg0
define list_dump
    set $h = $arg0
    set $p = *(void **)$h
    while ($p != $h)
        printf "%#lx\n", $p
        if ($p == 0)
            loop_break
        end
        set $p = *(void **)$p
    end
end

# list all entries starting at list head $arg0 until meeting $arg1
define list_find
    set $h = $arg0
    set $k = $arg1
    set $p = *(void **)$h
    while ($p != $h)
        printf "%#lx\n", $p
        if ($p == 0 || $p == $k)
            loop_break
        end
        set $p = *(void **)$p
    end
end

@ -1,19 +0,0 @@
# show non-null memprofile entries with method, alloc/free counts/tot and caller

define memprof_dump
    set $i = 0
    set $meth={ "UNKN", "MALL", "CALL", "REAL", "STRD", "FREE", "P_AL", "P_FR", "STND", "VALL", "ALAL", "PALG", "MALG", "PVAL" }
    while $i < sizeof(memprof_stats) / sizeof(memprof_stats[0])
        if memprof_stats[$i].alloc_calls || memprof_stats[$i].free_calls
            set $m = memprof_stats[$i].method
            printf "m:%s ac:%u fc:%u at:%u ft:%u ", $meth[$m], \
                memprof_stats[$i].alloc_calls, memprof_stats[$i].free_calls, \
                memprof_stats[$i].alloc_tot, memprof_stats[$i].free_tot
            output/a memprof_stats[$i].caller
            printf "\n"
        end
        set $i = $i + 1
    end
end

@ -1,21 +0,0 @@
# dump pool contents (2.9 and above, with buckets)
define pools_dump
    set $h = $po
    set $p = *(void **)$h
    while ($p != $h)
        set $e = (struct pool_head *)(((char *)$p) - (unsigned long)&((struct pool_head *)0)->list)

        set $total = 0
        set $used = 0
        set $idx = 0
        while $idx < sizeof($e->buckets) / sizeof($e->buckets[0])
            set $total=$total + $e->buckets[$idx].allocated
            set $used=$used + $e->buckets[$idx].used
            set $idx=$idx + 1
        end

        set $mem = $total * $e->size
        printf "list=%#lx pool_head=%p name=%s size=%u alloc=%u used=%u mem=%u\n", $p, $e, $e->name, $e->size, $total, $used, $mem
        set $p = *(void **)$p
    end
end

@ -1,47 +0,0 @@
# This script will set the post_mortem struct pointer ($pm) from the one found
# in the "post_mortem" symbol. If not found or if not correct, it's the same
# address as the "_post_mortem" section, which can be found using "info files"
# or "objdump -h" on the executable. The guessed value is set by a first call
# to pm_init, but if not correct, you just need to call pm_init again with the
# correct pointer, e.g:
#    pm_init 0xcfd400

define pm_init
    set $pm = (struct post_mortem*)$arg0
    set $g = $pm.global
    set $ti = $pm.thread_info
    set $tc = $pm.thread_ctx
    set $tgi = $pm.tgroup_info
    set $tgc = $pm.tgroup_ctx
    set $fd = $pm.fdtab
    set $pxh = *$pm.proxies
    set $po = $pm.pools
    set $ac = $pm.activity
end

# show basic info on the running process (OS, uid, etc)
define pm_show_info
    print $pm->platform
    print $pm->process
end

# show thread IDs to easily map between gdb threads and tid
define pm_show_threads
    set $t = 0
    while $t < $g.nbthread
        printf "Tid %4d: pthread_id=%#lx stack_top=%#lx\n", $t, $ti[$t].pth_id, $ti[$t].stack_top
        set $t = $t + 1
    end
end

# dump all threads' dump buffers
define pm_show_thread_dump
    set $t = 0
    while $t < $g.nbthread
        printf "%s\n", $tc[$t].thread_dump_buffer->area
        set $t = $t + 1
    end
end

# initialize the various pointers
pm_init &post_mortem
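
# Usage sketch once the pointers above are initialized:
#   (gdb) pm_show_info            # platform/process details
#   (gdb) pm_show_threads         # map gdb thread numbers to haproxy tids
#   (gdb) pm_show_thread_dump     # print all threads' dump buffers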

@ -1,25 +0,0 @@
# list proxies starting with the one in argument (typically $pxh)
define px_list
    set $p = (struct proxy *)$arg0
    while ($p != 0)
        printf "%p (", $p
        if $p->cap & 0x10
            printf "LB,"
        end
        if $p->cap & 0x1
            printf "FE,"
        end
        if $p->cap & 0x2
            printf "BE,"
        end
        printf "%s)", $p->id
        if $p->cap & 0x1
            printf " feconn=%u cmax=%u cum_conn=%llu cpsmax=%u", $p->feconn, $p->fe_counters.conn_max, $p->fe_counters.cum_conn, $p->fe_counters.cps_max
        end
        if $p->cap & 0x2
            printf " beconn=%u served=%u queued=%u qmax=%u cum_sess=%llu wact=%u", $p->beconn, $p->served, $p->queue.length, $p->be_counters.nbpend_max, $p->be_counters.cum_sess, $p->lbprm.tot_wact
        end
        printf "\n"
        set $p = ($p)->next
    end
end

@ -1,9 +0,0 @@
# list servers in a proxy whose pointer is passed in argument
define px_list_srv
    set $h = (struct proxy *)$arg0
    set $p = ($h)->srv
    while ($p != 0)
        printf "%#lx %s maxconn=%u cur_sess=%u max_sess=%u served=%u queued=%u st=%u->%u ew=%u sps_max=%u\n", $p, $p->id, $p->maxconn, $p->cur_sess, $p->counters.cur_sess_max, $p->served, $p->queue.length, $p->cur_state, $p->next_state, $p->cur_eweight, $p->counters.sps_max
        set $p = ($p)->next
    end
end
@ -1,18 +0,0 @@
# list all streams for all threads
define stream_dump
  set $t = 0
  while $t < $g.nbthread
    set $h = &$tc[$t].streams
    printf "Tid %4d: &streams=%p\n", $t, $h
    set $p = *(void **)$h
    while ($p != $h)
      set $s = (struct stream *)(((char *)$p) - (unsigned long)&((struct stream *)0)->list)
      printf "  &list=%#lx strm=%p uid=%u strm.fe=%s strm.flg=%#x strm.list={n=%p,p=%p}\n", $p, $s, $s->uniq_id, $s->sess->fe->id, $s->flags, $s->list.n, $s->list.p
      if ($p == 0)
        loop_break
      end
      set $p = *(void **)$p
    end
    set $t = $t + 1
  end
end
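The $s computation in stream_dump above is gdb's hand-written form of the
classic container_of idiom. For reference, a minimal C sketch of the same
computation (the macro is the conventional one, not something provided by
these scripts):

    #include <stddef.h>

    /* recover the enclosing structure from a pointer to one of its
     * members, exactly what the gdb expression does by hand
     */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

With a struct stream embedding a struct list member named "list",
container_of(p, struct stream, list) returns the stream holding that node.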
@ -1,247 +0,0 @@
-- This is an HTTP/2 tracer for a TCP proxy. It will decode the frames that are
-- exchanged between the client and the server and indicate their direction,
-- types, flags and lengths. Lines are prefixed with a connection number modulo
-- 4096 that allows to sort out multiplexed exchanges. In order to use this,
-- simply load this file in the global section and use it from a TCP proxy:
--
--   global
--     lua-load "dev/h2/h2-tracer.lua"
--
--   listen h2_sniffer
--     mode tcp
--     bind :8002
--     filter lua.h2-tracer #hex
--     server s1 127.0.0.1:8003
--

-- define the decoder's class here
Dec = {}
Dec.id = "Lua H2 tracer"
Dec.flags = 0
Dec.__index = Dec
Dec.args = {}   -- args passed by the filter's declaration
Dec.cid = 0     -- next connection ID

-- prefix to indent responses
res_pfx = " | "

-- H2 frame types
h2ft = {
    [0] = "DATA",
    [1] = "HEADERS",
    [2] = "PRIORITY",
    [3] = "RST_STREAM",
    [4] = "SETTINGS",
    [5] = "PUSH_PROMISE",
    [6] = "PING",
    [7] = "GOAWAY",
    [8] = "WINDOW_UPDATE",
    [9] = "CONTINUATION",
}

h2ff = {
    [0] = { [0] = "ES", [3] = "PADDED" },                               -- data
    [1] = { [0] = "ES", [2] = "EH", [3] = "PADDED", [5] = "PRIORITY" }, -- headers
    [2] = { },                                                          -- priority
    [3] = { },                                                          -- rst_stream
    [4] = { [0] = "ACK" },                                              -- settings
    [5] = { [2] = "EH", [3] = "PADDED" },                               -- push_promise
    [6] = { [0] = "ACK" },                                              -- ping
    [7] = { },                                                          -- goaway
    [8] = { },                                                          -- window_update
    [9] = { [2] = "EH" },                                               -- continuation
}

function Dec:new()
    local dec = {}

    setmetatable(dec, Dec)
    dec.do_hex = false
    if (Dec.args[1] == "hex") then
        dec.do_hex = true
    end

    Dec.cid = Dec.cid + 1
    -- mix the thread number when multithreading.
    dec.cid = Dec.cid + 64 * core.thread

    -- state per dir. [1]=req [2]=res
    dec.st = {
        [1] = {
            hdr  = { 0, 0, 0, 0, 0, 0, 0, 0, 0 },
            fofs = 0,
            flen = 0,
            ftyp = 0,
            fflg = 0,
            sid  = 0,
            tot  = 0,
        },
        [2] = {
            hdr  = { 0, 0, 0, 0, 0, 0, 0, 0, 0 },
            fofs = 0,
            flen = 0,
            ftyp = 0,
            fflg = 0,
            sid  = 0,
            tot  = 0,
        },
    }
    return dec
end

function Dec:start_analyze(txn, chn)
    if chn:is_resp() then
        io.write(string.format("[%03x] ", self.cid % 4096) .. res_pfx .. "### res start\n")
    else
        io.write(string.format("[%03x] ", self.cid % 4096) .. "### req start\n")
    end
    filter.register_data_filter(self, chn)
end

function Dec:end_analyze(txn, chn)
    if chn:is_resp() then
        io.write(string.format("[%03x] ", self.cid % 4096) .. res_pfx .. "### res end: " .. self.st[2].tot .. " bytes total\n")
    else
        io.write(string.format("[%03x] ", self.cid % 4096) .. "### req end: " .. self.st[1].tot .. " bytes total\n")
    end
end

function Dec:tcp_payload(txn, chn)
    local data = { }
    local dofs = 1
    local pfx = ""
    local dir = 1
    local sofs = 0
    local ft = ""
    local ff = ""

    if chn:is_resp() then
        pfx = res_pfx
        dir = 2
    end

    pfx = string.format("[%03x] ", self.cid % 4096) .. pfx

    -- stream offset before processing
    sofs = self.st[dir].tot

    if (chn:input() > 0) then
        data = chn:data()
        self.st[dir].tot = self.st[dir].tot + chn:input()
    end

    if (chn:input() > 0 and self.do_hex ~= false) then
        io.write("\n" .. pfx .. "Hex:\n")
        for i = 1, #data do
            if ((i & 7) == 1) then io.write(pfx) end
            io.write(string.format("0x%02x ", data:sub(i, i):byte()))
            if ((i & 7) == 0 or i == #data) then io.write("\n") end
        end
    end

    -- start at byte 1 in the <data> string
    dofs = 1

    -- the first 24 bytes are expected to be an H2 preface on the request
    if (dir == 1 and sofs < 24) then
        -- let's not check it for now
        local bytes = self.st[dir].tot - sofs
        if (sofs + self.st[dir].tot >= 24) then
            -- skip what was missing from the preface
            dofs = dofs + 24 - sofs
            sofs = 24
            io.write(pfx .. "[PREFACE len=24]\n")
        else
            -- consume more preface bytes
            sofs = sofs + self.st[dir].tot
            return
        end
    end

    -- parse contents as long as there are pending data

    while true do
        -- check if we need to consume data from the current frame
        -- flen is the number of bytes left before the frame's end.
        if (self.st[dir].flen > 0) then
            if dofs > #data then return end -- missing data
            if (#data - dofs + 1 < self.st[dir].flen) then
                -- insufficient data
                self.st[dir].flen = self.st[dir].flen - (#data - dofs + 1)
                io.write(pfx .. string.format("%32s\n", "... -" .. (#data - dofs + 1) .. " = " .. self.st[dir].flen))
                dofs = #data + 1
                return
            else
                -- enough data to finish
                if (dofs == 1) then
                    -- only print a partial size if the frame was interrupted
                    io.write(pfx .. string.format("%32s\n", "... -" .. self.st[dir].flen .. " = 0"))
                end
                dofs = dofs + self.st[dir].flen
                self.st[dir].flen = 0
            end
        end

        -- here, flen = 0, we're at the beginning of a new frame --

        -- read possibly missing header bytes until dec.fofs == 9
        while self.st[dir].fofs < 9 do
            if dofs > #data then return end -- missing data
            self.st[dir].hdr[self.st[dir].fofs + 1] = data:sub(dofs, dofs):byte()
            dofs = dofs + 1
            self.st[dir].fofs = self.st[dir].fofs + 1
        end

        -- we have a full frame header here
        if (self.do_hex ~= false) then
            io.write("\n" .. pfx .. string.format("hdr=%02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
                     self.st[dir].hdr[1], self.st[dir].hdr[2], self.st[dir].hdr[3],
                     self.st[dir].hdr[4], self.st[dir].hdr[5], self.st[dir].hdr[6],
                     self.st[dir].hdr[7], self.st[dir].hdr[8], self.st[dir].hdr[9]))
        end

        -- we have a full frame header, we'll be ready
        -- for a new frame once the data is gone
        self.st[dir].flen = self.st[dir].hdr[1] * 65536 +
                            self.st[dir].hdr[2] * 256 +
                            self.st[dir].hdr[3]
        self.st[dir].ftyp = self.st[dir].hdr[4]
        self.st[dir].fflg = self.st[dir].hdr[5]
        self.st[dir].sid  = self.st[dir].hdr[6] * 16777216 +
                            self.st[dir].hdr[7] * 65536 +
                            self.st[dir].hdr[8] * 256 +
                            self.st[dir].hdr[9]
        self.st[dir].fofs = 0

        -- decode frame type
        if self.st[dir].ftyp <= 9 then
            ft = h2ft[self.st[dir].ftyp]
        else
            ft = string.format("TYPE_0x%02x\n", self.st[dir].ftyp)
        end

        -- decode frame flags for frame type <ftyp>
        ff = ""
        for i = 7, 0, -1 do
            if (((self.st[dir].fflg >> i) & 1) ~= 0) then
                if self.st[dir].ftyp <= 9 and h2ff[self.st[dir].ftyp][i] ~= nil then
                    ff = ff .. ((ff == "") and "" or "+")
                    ff = ff .. h2ff[self.st[dir].ftyp][i]
                else
                    ff = ff .. ((ff == "") and "" or "+")
                    ff = ff .. string.format("0x%02x", 1 << i)
                end
            end
        end

        io.write(pfx .. string.format("[%s %ssid=%u len=%u (bytes=%u)]\n",
                 ft, (ff == "") and "" or ff .. " ",
                 self.st[dir].sid, self.st[dir].flen,
                 (#data - dofs + 1)))
    end
end

core.register_filter("h2-tracer", Dec, function(dec, args)
    Dec.args = args
    return dec
end)
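For readers following the header decoding in tcp_payload() above: the 9
bytes form a 24-bit payload length, an 8-bit type, an 8-bit flags field and
a 31-bit stream ID (RFC 9113). A minimal standalone C sketch of the same
decoding (not part of the tracer; the Lua code keeps the raw 32-bit stream
ID field while this sketch masks the reserved top bit):

    #include <stdint.h>

    struct h2_hdr {
            uint32_t len;   /* 24-bit payload length */
            uint8_t  type;  /* frame type */
            uint8_t  flags; /* frame flags */
            uint32_t sid;   /* 31-bit stream ID */
    };

    /* decode the fixed 9-byte HTTP/2 frame header */
    static struct h2_hdr h2_dec_hdr(const uint8_t h[9])
    {
            struct h2_hdr r;

            r.len   = ((uint32_t)h[0] << 16) | ((uint32_t)h[1] << 8) | h[2];
            r.type  = h[3];
            r.flags = h[4];
            r.sid   = (((uint32_t)h[5] << 24) | ((uint32_t)h[6] << 16) |
                       ((uint32_t)h[7] << 8) | h[8]) & 0x7fffffff;
            return r;
    }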
@ -59,9 +59,9 @@ struct ring_v2 {
 struct ring_v2a {
 	size_t size;          // storage size
 	size_t rsvd;          // header length (used for file-backed maps)
-	size_t tail ALIGNED(64);  // storage tail
-	size_t head ALIGNED(64);  // storage head
-	char area[0] ALIGNED(64); // storage area begins immediately here
+	size_t tail __attribute__((aligned(64)));  // storage tail
+	size_t head __attribute__((aligned(64)));  // storage head
+	char area[0] __attribute__((aligned(64))); // storage area begins immediately here
 };

 /* display the message and exit with the code */
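The hunk above merely switches between a compatibility macro and the raw
GCC attribute. The ALIGNED() macro is presumably defined along these lines
(a hedged sketch, to be checked against the real definition in the
compiler compatibility headers):

    /* expand to the compiler's alignment attribute where supported */
    #ifdef __GNUC__
    #define ALIGNED(x) __attribute__((aligned(x)))
    #else
    #define ALIGNED(x)
    #endif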
@ -1,31 +0,0 @@
include ../../include/make/verbose.mk

CC       = cc
OPTIMIZE = -O2 -g
DEFINE   =
INCLUDE  =
OBJS     = ncpu.so ncpu
OBJDUMP  = objdump

all: $(OBJS)

%.o: %.c
	$(cmd_CC) $(OPTIMIZE) $(DEFINE) $(INCLUDE) -shared -fPIC -c -o $@ $^

%.so: %.o
	$(cmd_CC) -pie -o $@ $^
	$(Q)rm -f $^

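# Note added for clarity (not in the original file): the rule below patches
# the freshly-linked PIE in place. The sed expression matches the 8-byte
# DT_FLAGS_1 tag (0x6ffffffb, little-endian) followed by a value whose
# DF_1_PIE bit is set, and clears that value, so the resulting binary can
# both be executed directly and be loaded as a library via LD_PRELOAD.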
%: %.so
	$(call qinfo, PATCHING)set -- $$($(OBJDUMP) -j .dynamic -h $^ | fgrep .dynamic); \
	ofs=$$6; size=$$3; \
	dd status=none bs=1 count=$$((0x$$ofs)) if=$^ of=$^-p1; \
	dd status=none bs=1 skip=$$((0x$$ofs)) count=$$((0x$$size)) if=$^ of=$^-p2; \
	dd status=none bs=1 skip=$$((0x$$ofs+0x$$size)) if=$^ of=$^-p3; \
	sed -e 's,\xfb\xff\xff\x6f\x00\x00\x00\x00\x00\x00\x00\x08,\xfb\xff\xff\x6f\x00\x00\x00\x00\x00\x00\x00\x00,g' < $^-p2 > $^-p2-patched; \
	cat $^-p1 $^-p2-patched $^-p3 > "$@"
	$(Q)rm -f $^-p*
	$(Q)chmod 755 "$@"

clean:
	rm -f $(OBJS) *.[oas] *.so-* *~
136	dev/ncpu/ncpu.c
@ -1,136 +0,0 @@
#define _GNU_SOURCE
#include <errno.h>
#include <limits.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

// gcc -fPIC -shared -O2 -o ncpu{.so,.c}
// NCPU=16 LD_PRELOAD=$PWD/ncpu.so command args...

static char prog_full_path[PATH_MAX];

long sysconf(int name)
{
	if (name == _SC_NPROCESSORS_ONLN ||
	    name == _SC_NPROCESSORS_CONF) {
		const char *ncpu = getenv("NCPU");
		int n;

		n = ncpu ? atoi(ncpu) : CPU_SETSIZE;
		if (n < 0 || n > CPU_SETSIZE)
			n = CPU_SETSIZE;
		return n;
	}
	errno = EINVAL;
	return -1;
}

/* return a cpu_set having the first $NCPU set */
int sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask)
{
	const char *ncpu;
	int i, n;

	CPU_ZERO_S(cpusetsize, mask);

	ncpu = getenv("NCPU");
	n = ncpu ? atoi(ncpu) : CPU_SETSIZE;
	if (n < 0 || n > CPU_SETSIZE)
		n = CPU_SETSIZE;

	for (i = 0; i < n; i++)
		CPU_SET_S(i, cpusetsize, mask);

	return 0;
}

/* silently ignore the operation */
int sched_setaffinity(pid_t pid, size_t cpusetsize, const cpu_set_t *mask)
{
	return 0;
}

void usage(const char *argv0)
{
	fprintf(stderr,
	        "Usage: %s [-n ncpu] [cmd [args...]]\n"
	        "  Will install itself in LD_PRELOAD before calling <cmd> with args.\n"
	        "  The number of CPUs may also come from variable NCPU or default to %d.\n"
	        "\n"
	        "",
	        argv0, CPU_SETSIZE);
	exit(1);
}

/* Called in wrapper mode, no longer supported on recent glibc */
int main(int argc, char **argv)
{
	const char *argv0 = argv[0];
	char *preload;
	int plen;

	prog_full_path[0] = 0;
	plen = readlink("/proc/self/exe", prog_full_path, sizeof(prog_full_path) - 1);
	if (plen != -1)
		prog_full_path[plen] = 0;
	else
		plen = snprintf(prog_full_path, sizeof(prog_full_path), "%s", argv[0]);

	while (1) {
		argc--;
		argv++;

		if (argc < 1)
			usage(argv0);

		if (strcmp(argv[0], "--") == 0) {
			argc--;
			argv++;
			break;
		}
		else if (strcmp(argv[0], "-n") == 0) {
			if (argc < 2)
				usage(argv0);

			if (setenv("NCPU", argv[1], 1) != 0)
				usage(argv0);
			argc--;
			argv++;
		}
		else {
			/* unknown arg, that's the command */
			break;
		}
	}

	/* here the only args left start with the cmd name */

	/* now we'll concatenate ourselves at the end of the LD_PRELOAD variable */
	preload = getenv("LD_PRELOAD");
	if (preload) {
		int olen = strlen(preload);
		preload = realloc(preload, olen + 1 + plen + 1);
		if (!preload) {
			perror("realloc");
			exit(2);
		}
		preload[olen] = ' ';
		memcpy(preload + olen + 1, prog_full_path, plen);
		preload[olen + 1 + plen] = 0;
	}
	else {
		preload = prog_full_path;
	}

	if (setenv("LD_PRELOAD", preload, 1) < 0) {
		perror("setenv");
		exit(2);
	}

	execvp(*argv, argv);
	perror("execve");
	exit(2);
}
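Since the library only intercepts standard calls, any consumer of
sysconf() can be used to observe it; a trivial, hedged example:

    #include <stdio.h>
    #include <unistd.h>

    /* prints the (possibly overridden) online CPU count; run it as:
     *   NCPU=4 LD_PRELOAD=$PWD/ncpu.so ./a.out
     */
    int main(void)
    {
            printf("%ld CPUs\n", sysconf(_SC_NPROCESSORS_ONLN));
            return 0;
    }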
@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT

HAProxy's development cycle consists in one development branch, and multiple
maintenance branches.

All the development is made into the development branch exclusively. This
includes mostly new features, doc updates, cleanups and of course, fixes.

The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.

Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development branch
is created with a new, higher version. The current development branch is
3.2-dev, and maintenance branches are 3.1 and below.

Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version till
that branch that introduced the issue: 3.1 first, then 3.0, then 2.9, then 2.8
and so on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regression in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.

Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
indistinctly talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.

It seldom happens that some fixes depend on changes that were brought by other
patches that were not in some branches and that will need to be backported as
well for the fix to work. In this case, such information is explicitly provided
in the commit message by the patch's author in natural language.

Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch is
"needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.

ENDCONTEXT

A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already expert
on HAProxy and everything related to Git, patch management, and the risks
associated with backports, so he doesn't want to be told how to proceed nor to
review the contents of the patch.

The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job, he
needs an accurate summary of the information and instructions found in each
commit message. Specifically he needs to figure if the patch fixes a problem
affecting an older branch or not, if it needs to be backported, if so to which
branches, and if other patches need to be backported along with it.

The indented text block below after an "id" line and starting with a Subject line
is a commit message from the HAProxy development branch that describes a patch
applied to that branch, starting with its subject line, please read it carefully.
@ -1,29 +0,0 @@

ENDINPUT
BEGININSTRUCTION

You are an AI assistant that follows instruction extremely well. Help as much
as you can, responding to a single question using a single response.

The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies might
be mentioned in the commit message. Carefully study the commit message and its
backporting instructions if any (otherwise it should probably not be backported),
then provide a very concise and short summary that will help the developer decide
to backport it, or simply to skip it.

Start by explaining in one or two sentences what you recommend for this one and why.
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
where X is a single word among:
  - "yes", if you recommend to backport the patch right now either because
    it explicitly states this or because it's a fix for a bug that affects
    a maintenance branch (3.1 or lower);
  - "wait", if this patch explicitly mentions that it must be backported, but
    only after waiting some time.
  - "no", if nothing clearly indicates a necessity to backport this patch (e.g.
    lack of explicit backport instructions, or it's just an improvement);
  - "uncertain" otherwise for cases not covered above

ENDINSTRUCTION

Explanation:
@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT

HAProxy's development cycle consists in one development branch, and multiple
maintenance branches.

All the development is made into the development branch exclusively. This
includes mostly new features, doc updates, cleanups and of course, fixes.

The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.

Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development branch
is created with a new, higher version. The current development branch is
3.3-dev, and maintenance branches are 3.2 and below.

Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version till
that branch that introduced the issue: 3.2 first, then 3.1, then 3.0, then 2.9
and so on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regression in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.

Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
indistinctly talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.

It seldom happens that some fixes depend on changes that were brought by other
patches that were not in some branches and that will need to be backported as
well for the fix to work. In this case, such information is explicitly provided
in the commit message by the patch's author in natural language.

Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch is
"needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.

ENDCONTEXT

A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already expert
on HAProxy and everything related to Git, patch management, and the risks
associated with backports, so he doesn't want to be told how to proceed nor to
review the contents of the patch.

The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job, he
needs an accurate summary of the information and instructions found in each
commit message. Specifically he needs to figure if the patch fixes a problem
affecting an older branch or not, if it needs to be backported, if so to which
branches, and if other patches need to be backported along with it.

The indented text block below after an "id" line and starting with a Subject line
is a commit message from the HAProxy development branch that describes a patch
applied to that branch, starting with its subject line, please read it carefully.
@ -1,29 +0,0 @@

ENDINPUT
BEGININSTRUCTION

You are an AI assistant that follows instruction extremely well. Help as much
as you can, responding to a single question using a single response.

The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies might
be mentioned in the commit message. Carefully study the commit message and its
backporting instructions if any (otherwise it should probably not be backported),
then provide a very concise and short summary that will help the developer decide
to backport it, or simply to skip it.

Start by explaining in one or two sentences what you recommend for this one and why.
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
where X is a single word among:
  - "yes", if you recommend to backport the patch right now either because
    it explicitly states this or because it's a fix for a bug that affects
    a maintenance branch (3.2 or lower);
  - "wait", if this patch explicitly mentions that it must be backported, but
    only after waiting some time.
  - "no", if nothing clearly indicates a necessity to backport this patch (e.g.
    lack of explicit backport instructions, or it's just an improvement);
  - "uncertain" otherwise for cases not covered above

ENDINSTRUCTION

Explanation:
@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT

HAProxy's development cycle consists in one development branch, and multiple
maintenance branches.

All the development is made into the development branch exclusively. This
includes mostly new features, doc updates, cleanups and of course, fixes.

The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.

Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development branch
is created with a new, higher version. The current development branch is
3.4-dev, and maintenance branches are 3.3 and below.

Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version till
that branch that introduced the issue: 3.3 first, then 3.2, then 3.1, then 3.0
and so on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regression in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.

Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
indistinctly talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.

It seldom happens that some fixes depend on changes that were brought by other
patches that were not in some branches and that will need to be backported as
well for the fix to work. In this case, such information is explicitly provided
in the commit message by the patch's author in natural language.

Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch is
"needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.

ENDCONTEXT

A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already expert
on HAProxy and everything related to Git, patch management, and the risks
associated with backports, so he doesn't want to be told how to proceed nor to
review the contents of the patch.

The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job, he
needs an accurate summary of the information and instructions found in each
commit message. Specifically he needs to figure if the patch fixes a problem
affecting an older branch or not, if it needs to be backported, if so to which
branches, and if other patches need to be backported along with it.

The indented text block below after an "id" line and starting with a Subject line
is a commit message from the HAProxy development branch that describes a patch
applied to that branch, starting with its subject line, please read it carefully.
@ -1,29 +0,0 @@

ENDINPUT
BEGININSTRUCTION

You are an AI assistant that follows instruction extremely well. Help as much
as you can, responding to a single question using a single response.

The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies might
be mentioned in the commit message. Carefully study the commit message and its
backporting instructions if any (otherwise it should probably not be backported),
then provide a very concise and short summary that will help the developer decide
to backport it, or simply to skip it.

Start by explaining in one or two sentences what you recommend for this one and why.
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
where X is a single word among:
  - "yes", if you recommend to backport the patch right now either because
    it explicitly states this or because it's a fix for a bug that affects
    a maintenance branch (3.3 or lower);
  - "wait", if this patch explicitly mentions that it must be backported, but
    only after waiting some time.
  - "no", if nothing clearly indicates a necessity to backport this patch (e.g.
    lack of explicit backport instructions, or it's just an improvement);
  - "uncertain" otherwise for cases not covered above

ENDINSTRUCTION

Explanation:
@ -22,8 +22,7 @@ STABLE=$(cd "$HAPROXY_DIR" && git describe --tags "v${BRANCH}-dev0^" |cut -f1,2 
 PATCHES_DIR="$PATCHES_PFX"-"$BRANCH"

 (cd "$HAPROXY_DIR"
- # avoid git pull, it chokes on forced push
- git remote update origin; git reset origin/master;git checkout -f
+ git pull
 last_file=$(ls -1 "$PATCHES_DIR"/*.patch 2>/dev/null | tail -n1)
 if [ -n "$last_file" ]; then
 	restart=$(head -n1 "$last_file" | cut -f2 -d' ')
@ -17,9 +17,9 @@
 //const int codes[CODES] = { 200,400,401,403,404,405,407,408,410,413,421,422,425,429,500,501,502,503,504};

 #define CODES 32
-const int codes[CODES] = { 200,400,401,403,404,405,407,408,410,413,414,421,422,425,429,431,500,501,502,503,504,
+const int codes[CODES] = { 200,400,401,403,404,405,407,408,410,413,421,422,425,429,500,501,502,503,504,
 	/* padding entries below, which will fall back to the default code */
-	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
+	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};

 unsigned mul, xor;
 unsigned bmul = 0, bxor = 0;
@ -1,233 +0,0 @@
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <haproxy/connection-t.h>
#include <haproxy/intops.h>

struct tevt_info {
	const char *loc;
	const char **types;
};


/* will be sufficient for even largest flag names */
static char buf[4096];
static size_t bsz = sizeof(buf);


static const char *tevt_unknown_types[16] = {
	[ 0] = "-", [ 1] = "-", [ 2] = "-", [ 3] = "-",
	[ 4] = "-", [ 5] = "-", [ 6] = "-", [ 7] = "-",
	[ 8] = "-", [ 9] = "-", [10] = "-", [11] = "-",
	[12] = "-", [13] = "-", [14] = "-", [15] = "-",
};

static const char *tevt_fd_types[16] = {
	[ 0] = "-",           [ 1] = "shutw",         [ 2] = "shutr",    [ 3] = "rcv_err",
	[ 4] = "snd_err",     [ 5] = "-",             [ 6] = "-",        [ 7] = "conn_err",
	[ 8] = "intercepted", [ 9] = "conn_poll_err", [10] = "poll_err", [11] = "poll_hup",
	[12] = "-",           [13] = "-",             [14] = "-",        [15] = "-",
};

static const char *tevt_hs_types[16] = {
	[ 0] = "-",       [ 1] = "-", [ 2] = "-", [ 3] = "rcv_err",
	[ 4] = "snd_err", [ 5] = "-", [ 6] = "-", [ 7] = "-",
	[ 8] = "-",       [ 9] = "-", [10] = "-", [11] = "-",
	[12] = "-",       [13] = "-", [14] = "-", [15] = "-",
};

static const char *tevt_xprt_types[16] = {
	[ 0] = "-",       [ 1] = "shutw", [ 2] = "shutr", [ 3] = "rcv_err",
	[ 4] = "snd_err", [ 5] = "-",     [ 6] = "-",     [ 7] = "-",
	[ 8] = "-",       [ 9] = "-",     [10] = "-",     [11] = "-",
	[12] = "-",       [13] = "-",     [14] = "-",     [15] = "-",
};

static const char *tevt_muxc_types[16] = {
	[ 0] = "-",             [ 1] = "shutw",           [ 2] = "shutr",             [ 3] = "rcv_err",
	[ 4] = "snd_err",       [ 5] = "truncated_shutr", [ 6] = "truncated_rcv_err", [ 7] = "tout",
	[ 8] = "goaway_rcvd",   [ 9] = "proto_err",       [10] = "internal_err",      [11] = "other_err",
	[12] = "graceful_shut", [13] = "-",               [14] = "-",                 [15] = "-",
};

static const char *tevt_se_types[16] = {
	[ 0] = "-",         [ 1] = "shutw",         [ 2] = "eos",               [ 3] = "rcv_err",
	[ 4] = "snd_err",   [ 5] = "truncated_eos", [ 6] = "truncated_rcv_err", [ 7] = "-",
	[ 8] = "rst_rcvd",  [ 9] = "proto_err",     [10] = "internal_err",      [11] = "other_err",
	[12] = "cancelled", [13] = "-",             [14] = "-",                 [15] = "-",
};

static const char *tevt_strm_types[16] = {
	[ 0] = "-",           [ 1] = "shutw",         [ 2] = "eos",               [ 3] = "rcv_err",
	[ 4] = "snd_err",     [ 5] = "truncated_eos", [ 6] = "truncated_rcv_err", [ 7] = "tout",
	[ 8] = "intercepted", [ 9] = "proto_err",     [10] = "internal_err",      [11] = "other_err",
	[12] = "aborted",     [13] = "-",             [14] = "-",                 [15] = "-",
};

static const struct tevt_info tevt_location[26] = {
	[ 0] = {.loc = "-",    .types = tevt_unknown_types}, [ 1] = {.loc = "-",    .types = tevt_unknown_types},
	[ 2] = {.loc = "-",    .types = tevt_unknown_types}, [ 3] = {.loc = "-",    .types = tevt_unknown_types},
	[ 4] = {.loc = "se",   .types = tevt_se_types},      [ 5] = {.loc = "fd",   .types = tevt_fd_types},
	[ 6] = {.loc = "-",    .types = tevt_unknown_types}, [ 7] = {.loc = "hs",   .types = tevt_hs_types},
	[ 8] = {.loc = "-",    .types = tevt_unknown_types}, [ 9] = {.loc = "-",    .types = tevt_unknown_types},
	[10] = {.loc = "-",    .types = tevt_unknown_types}, [11] = {.loc = "-",    .types = tevt_unknown_types},
	[12] = {.loc = "muxc", .types = tevt_muxc_types},    [13] = {.loc = "-",    .types = tevt_unknown_types},
	[14] = {.loc = "-",    .types = tevt_unknown_types}, [15] = {.loc = "-",    .types = tevt_unknown_types},
	[16] = {.loc = "-",    .types = tevt_unknown_types}, [17] = {.loc = "-",    .types = tevt_unknown_types},
	[18] = {.loc = "strm", .types = tevt_strm_types},    [19] = {.loc = "-",    .types = tevt_unknown_types},
	[20] = {.loc = "-",    .types = tevt_unknown_types}, [21] = {.loc = "-",    .types = tevt_unknown_types},
	[22] = {.loc = "-",    .types = tevt_unknown_types}, [23] = {.loc = "xprt", .types = tevt_xprt_types},
	[24] = {.loc = "-",    .types = tevt_unknown_types}, [25] = {.loc = "-",    .types = tevt_unknown_types},
};

void usage_exit(const char *name)
{
	fprintf(stderr, "Usage: %s { value* | - }\n", name);
	exit(1);
}

char *to_upper(char *dst, const char *src)
{
	int i;

	for (i = 0; src[i]; i++)
		dst[i] = toupper(src[i]);
	dst[i] = 0;
	return dst;
}

char *tevt_show_events(char *buf, size_t len, const char *delim, const char *value)
{
	char loc[5];
	int ret;

	if (!value || !*value) {
		snprintf(buf, len, "##NONE");
		goto end;
	}
	if (strcmp(value, "-") == 0) {
		snprintf(buf, len, "##UNK");
		goto end;
	}

	if (strlen(value) % 2 != 0) {
		snprintf(buf, len, "##INV");
		goto end;
	}

	while (*value) {
		struct tevt_info info;
		char l = value[0];
		char t = value[1];

		if (!isalpha(l) || !isxdigit(t)) {
			snprintf(buf, len, "##INV");
			goto end;
		}

		info = tevt_location[tolower(l) - 'a'];
		ret = snprintf(buf, len, "%s:%s%s",
		               isupper(l) ? to_upper(loc, info.loc) : info.loc,
		               info.types[hex2i(t)],
		               value[2] != 0 ? delim : "");
		if (ret < 0)
			break;
		len -= ret;
		buf += ret;
		value += 2;
	}

  end:
	return buf;
}

char *tevt_show_tuple_events(char *buf, size_t len, char *value)
{
	char *p = value;

	/* skip '{' */
	p++;
	while (*p) {
		char *v;
		char c;

		while (*p == ' ' || *p == '\t')
			p++;

		v = p;
		while (*p && *p != ',' && *p != '}')
			p++;
		c = *p;
		*p = 0;

		tevt_show_events(buf, len, " > ", v);
		printf("\t- %s\n", buf);

		*p = c;
		if (*p == ',')
			p++;
		else if (*p == '}')
			break;
		else {
			printf("\t- ##INV\n");
			break;
		}
	}

	*buf = 0;
	return buf;
}

int main(int argc, char **argv)
{
	const char *name = argv[0];
	char line[128];
	char *value;
	int multi = 0;
	int use_stdin = 0;
	char *err;

	while (argc == 1)
		usage_exit(name);

	argv++; argc--;
	if (argc > 1)
		multi = 1;

	if (strcmp(argv[0], "-") == 0)
		use_stdin = 1;

	while (argc > 0) {
		if (use_stdin) {
			value = fgets(line, sizeof(line), stdin);
			if (!value)
				break;

			/* skip common leading delimiters that slip from copy-paste */
			while (*value == ' ' || *value == '\t' || *value == ':' || *value == '=')
				value++;

			err = value;
			while (*err && *err != '\n')
				err++;
			*err = 0;
		}
		else {
			value = argv[0];
			argv++; argc--;
		}

		if (multi)
			printf("### %-8s : ", value);

		if (*value == '{') {
			if (!use_stdin)
				printf("\n");
			tevt_show_tuple_events(buf, bsz, value);
		}
		else
			tevt_show_events(buf, bsz, " > ", value);
		printf("%s\n", buf);
	}
	return 0;
}
@ -3,9 +3,7 @@ DeviceAtlas Device Detection

 In order to add DeviceAtlas Device Detection support, you would need to download
 the API source code from https://deviceatlas.com/deviceatlas-haproxy-module.
-Once extracted, two modes are supported :
-
-1/ Build HAProxy and DeviceAtlas in one command
+Once extracted :

 $ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder>

@ -16,6 +14,10 @@ directory. Also, in the case the api cache support is not needed and/or a C++ to

 $ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder> DEVICEATLAS_NOCACHE=1

+However, if the API had been installed beforehand, DEVICEATLAS_SRC
+can be omitted. Note that the DeviceAtlas C API version supported is from the 3.x
+releases series (3.2.1 minimum recommended).
+
 For HAProxy developers who need to verify that their changes didn't accidentally
 break the DeviceAtlas code, it is possible to build a dummy library provided in
 the addons/deviceatlas/dummy directory and to use it as an alternative for the

@ -25,29 +27,6 @@ validate API changes :

 $ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_SRC=$PWD/addons/deviceatlas/dummy

-2/ Build and install DeviceAtlas according to https://docs.deviceatlas.com/apis/enterprise/c/<release version>/README.html
-
-For example :
-In the deviceatlas library folder :
-$ cmake .
-$ make
-$ sudo make install
-
-In the HAProxy folder :
-$ make TARGET=<target> USE_DEVICEATLAS=1
-
-Note that if the -DCMAKE_INSTALL_PREFIX cmake option had been used, it is necessary to set as well DEVICEATLAS_LIB and
-DEVICEATLAS_INC as follow :
-$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_INC=<CMAKE_INSTALL_PREFIX value>/include DEVICEATLAS_LIB=<CMAKE_INSTALL_PREFIX value>/lib
-
-For example :
-$ cmake -DCMAKE_INSTALL_PREFIX=/opt/local
-$ make
-$ sudo make install
-$ make TARGET=<target> USE_DEVICEATLAS=1 DEVICEATLAS_INC=/opt/local/include DEVICEATLAS_LIB=/opt/local/lib
-
-Note that DEVICEATLAS_SRC is omitted in this case.
-
 These are supported DeviceAtlas directives (see doc/configuration.txt) :
 - deviceatlas-json-file <path to the DeviceAtlas JSON data file>.
 - deviceatlas-log-level <number> (0 to 3, level of information returned by
@ -362,7 +362,7 @@ option set-process-time <var name>
   latency added by the SPOE processing for the last handled event or group.

   If several events or groups are processed for the same stream, this value
-  will be overridden.
+  will be overrideen.

   See also: "option set-total-time".
@ -3,7 +3,7 @@

 A number of contributors are often embarrassed with coding style issues, they
 don't always know if they're doing it right, especially since the coding style
-has evolved along the years. What is explained here is not necessarily what is
+has elvoved along the years. What is explained here is not necessarily what is
 applied in the code, but new code should as much as possible conform to this
 style. Coding style fixes happen when code is replaced. It is useless to send
 patches to fix coding style only, they will be rejected, unless they belong to
File diff suppressed because it is too large
@ -1,114 +0,0 @@
2024-10-28 - error reporting
----------------------------

- rules:
  -> stream->current_rule ~= yielding rule or error
     pb: not always set.
  -> todo: curr_rule_in_progress points to &rule->conf (file+line)
     - set on ACT_RET_ERR, ACT_RET_YIELD, ACT_RET_INV.
     - sample_fetch: curr_rule

- filters:
  -> strm_flt.filters[2] (1 per direction) ~= yielding filter or error
  -> to check: what to do on forward filters (e.g. compression)
  -> check spoe / waf (stream data)
  -> sample_fetch: curr_filt

- cleanup:
  - last_rule_line + last_rule_file can point to &rule->conf

- xprt:
  - all handshakes use the dummy xprt "xprt_handshake" ("HS"). No data
    exchange is possible there. The ctx is of type xprt_handshake_ctx
    for all of them, and contains a wait_event.
    => conn->xprt_ctx->wait_event contains the sub for current handshake
       *if* xprt points to xprt_handshake.
  - at most 2 active xprt at once: top and bottom (bottom=raw_sock)

- proposal:
  - combine 2 bits for muxc, 2 bits for xprt, 4 bits for fd (active,ready).
    => 8 bits for muxc and below. QUIC uses something different TBD.

  - muxs uses 6 bits max (ex: h2 send_list, fctl_list, full etc; h1: full,
    blocked connect...).

  - 2 bits for sc's sub

  - mux_sctl to retrieve a 32-bit code padded right, limited to 16 bits
    for now.
    => [ 0000 | 0000 | 0000 | 0000 | SC | MUXS | MUXC | XPRT | FD ]
                                      2     6      2      2     4
  - sample-fetch for each side.

- shut / abort
  - history, almost human-readable.
  - event locations:
    - fd (detected by rawsock)
    - handshake (detected by xprt_handshake). Eg. parsing or address encoding
    - xprt (ssl)
    - muxc
    - se: muxs / applet
    - stream

    < 8 total. +8 to distinguish front from back at stream level.
    suggest:
      - F, H, X, M, E, S  front or back
      - f, h, x, m, e, s  back or front

  - event types:
    - 0 = no event yet
    - 1 = timeout
    - 2 = intercepted (rule, etc)
    - 3 unused

    // shutr / shutw: +1 if other side already shut
    - 4  = aligned shutr
    - 6  = aligned recv error
    - 8  = early shutr (truncation)
    - 10 = early error (truncation)
    - 12 = shutw
    - 14 = send error

  - event location = MSB
    event type     = LSB

    appending a single event:
      -- if code not full --
      code <<= 8;
      code |= location << 4;
      code |= event type;
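      As a minimal C illustration of the append rule above (helper name
      hypothetical, not part of the original notes):

        #include <stdint.h>

        /* shift the 32-bit log left and append one event made of a 4-bit
         * location and a 4-bit type; older events migrate toward the MSB
         */
        static inline uint32_t evt_append(uint32_t code, uint8_t loc, uint8_t type)
        {
                return (code << 8) | ((uint32_t)(loc & 0xf) << 4) | (type & 0xf);
        }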

- up to 4 events per connection in 32-bit mode stored on connection
  (since raw_sock & ssl_sock need to access it).

- SE (muxs/applet) store their event log in the SD: se_event_log (64 bits).

- muxs must aggregate the connection's flags with its own:
  - store last known connection state in SD: conn_event_log
  - detect changes at the connection level by comparing with SD conn_event_log
  - create a new SD event with difference(s) into SD se_event_log
  - update connection state in SD conn_event_log

- stream
  - store their event log in the stream: strm_event_log (64 bits).
  - for each side:
    - store last known SE state in SD: last_se_event_log
    - detect changes at the SE level by comparing with SD se_event_log
    - create a new STREAM event with difference(s) into STREAM strm_event_log
      and patch the location depending on front vs back (+8 for back).
    - update SE state in SD last_se_event_log

  => strm_event_log contains a composite of each side + stream.
  - converted to string using the location letters
  - if more event types needed later, can enlarge bits and use another letter.
  - note: also possible to create an exhaustive enumeration of all possible codes
    (types+locations).

- sample fetch to retrieve strm_event_log.

- Note that fc_err and fc_err_str are already usable

- questions:
  - htx layer needed ?
  - ability to map EOI/EOS etc to SE activity ?
  - we'd like to detect an HTTP response before end of POST.
File diff suppressed because it is too large
@ -5,7 +5,7 @@

 The buffer list API allows one to share a certain amount of buffers between
 multiple entities, which will each see their own as lists of buffers, while
-keeping a shared free list. The immediate use case is for muxes, which may
+keeping a sharedd free list. The immediate use case is for muxes, which may
 want to allocate up to a certain number of buffers per connection, shared
 among all streams. In this case, each stream will first request a new list
 for its own use, then may request extra entries from the free list. At any
@ -540,15 +540,14 @@ message. These functions are used by HTX analyzers or by multiplexers.
   the amount of data drained.

 - htx_xfer_blks() transfers HTX blocks from an HTX message to another,
-  stopping after the first block of a specified type is transferred or when
-  a specific amount of bytes, including meta-data, was moved. If the tail
-  block is a DATA block, it may be partially moved. All other block are
-  transferred at once or kept. This function returns a mixed value, with the
-  last block moved, or NULL if nothing was moved, and the amount of data
-  transferred. When HEADERS or TRAILERS blocks must be transferred, this
-  function transfers all of them. Otherwise, if it is not possible, it
-  triggers an error. It is the caller responsibility to transfer all headers
-  or trailers at once.
+  stopping on the first block of a specified type or when a specific amount
+  of bytes, including meta-data, was moved. If the tail block is a DATA
+  block, it may be partially moved. All other block are transferred at once
+  or kept. This function returns a mixed value, with the last block moved,
+  or NULL if nothing was moved, and the amount of data transferred. When
+  HEADERS or TRAILERS blocks must be transferred, this function transfers
+  all of them. Otherwise, if it is not possible, it triggers an error. It is
+  the caller responsibility to transfer all headers or trailers at once.

 - htx_append_msg() append an HTX message to another one. All the message is
   copied or nothing. So, if an error occurred, a rollback is performed. This
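As a hedged illustration of the htx_xfer_blks() behavior described in the
hunk above — assuming the usual signature taking a destination, a source, a
byte budget and a stop-block type, and returning a structure carrying the
last moved block and the transferred byte count (to be checked against
htx.h):

    /* move at most 4kB worth of blocks from src to dst, stopping after
     * an end-of-trailers block has been transferred
     */
    struct htx_ret hret = htx_xfer_blks(dst, src, 4096, HTX_BLK_EOT);
    if (!hret.blk) {
            /* nothing could be moved */
    }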
@ -314,16 +314,6 @@ alphanumerically ordered:
   call to cfg_register_section() with the three arguments at stage
   STG_REGISTER.

-  You can only register a section once, but you can register post callbacks
-  multiple time for this section with REGISTER_CONFIG_POST_SECTION().
-
-- REGISTER_CONFIG_POST_SECTION(name, post)
-
-  Registers a function which will be called after a section is parsed. This is
-  the same as the <post> argument in REGISTER_CONFIG_SECTION(), the difference
-  is that it allows to register multiple <post> callbacks and to register them
-  elsewhere in the code.
-
 - REGISTER_PER_THREAD_ALLOC(fct)

   Registers a call to register_per_thread_alloc(fct) at stage STG_REGISTER.
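For reference, a minimal usage sketch of the macro pair discussed in the
removed lines above (section and function names are hypothetical):

    /* parser called for each line of the section; the post callback runs
     * once the whole section has been parsed
     */
    static int parse_mysect(const char *file, int linenum, char **args, int kwm);
    static int post_mysect(void);

    REGISTER_CONFIG_SECTION("mysect", parse_mysect, NULL);
    REGISTER_CONFIG_POST_SECTION("mysect", post_mysect);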
@ -1,86 +0,0 @@
|
|||
2025-08-13 - Memory allocation in HAProxy 3.3
|
||||
|
||||
The vast majority of dynamic memory allocations are performed from pools. Pools
|
||||
are optimized to store pre-calibrated objects of the right size for a given
|
||||
usage, try to favor locality and hot objects as much as possible, and are
|
||||
heavily instrumented to detect and help debug a wide class of bugs including
|
||||
buffer overflows, use-after-free, etc.
|
||||
|
||||
For objects of random sizes, or those used only at configuration time, pools
|
||||
are not suited, and the regular malloc/free family is available, in addition of
|
||||
a few others.
|
||||
|
||||
The standard allocation calls are intercepted at the code level (#define) when
|
||||
the code is compiled with -DDEBUG_MEM_STATS. For this reason, these calls are
|
||||
redefined as macros in "bug.h", and one must not try to use the pointers to
|
||||
such functions, as this may break DEBUG_MEM_STATS. This provides fine-grained
|
||||
stats about allocation/free per line of source code using locally implemented
|
||||
counters that can be consulted by "debug dev memstats". The calls are
|
||||
categorized into one of "calloc", "free", "malloc", "realloc", "strdup",
|
||||
"p_alloc", "p_free", the latter two designating pools. Extra calls such as
|
||||
memalign() and similar are also intercepted and counted as malloc.
|
||||
|
||||
Due to the nature of this replacement, DEBUG_MEM_STATS cannot see operations
|
||||
performed in libraries or dependencies.
|
||||
|
||||
In addition to DEBUG_MEM_STATS, when haproxy is built with USE_MEMORY_PROFILING
|
||||
the standard functions are wrapped by new ones defined in "activity.c", which
|
||||
also hold counters by call place. These ones are able to trace activity in
|
||||
libraries because the functions check the return pointer to figure where the
|
||||
call was made. The approach is different and relies on a large hash table. The
|
||||
files, function names and line numbers are not know, but by passing the pointer
|
||||
to dladdr(), we can often resolve most of these symbols. These operations are
|
||||
consulted via "show profiling memory". It must first be enabled either in the
|
||||
global config "profiling.memory on" or the CLI using "set profiling memory on".
|
||||
Memory profiling can also track pool allocations and frees thanks to knowing
|
||||
the size of the element and knowing a place where to store it. Some future
|
||||
evolutions might consider making this possible as well for pure malloc/free
|
||||
too by leveraging malloc_usable_size() a bit more.
|
||||
|
||||
Finally, 3.3 brought aligned allocations. These are made available via a new
|
||||
family of functions around ha_aligned_alloc() that simply map to either
|
||||
posix_memalign(), memalign() or _aligned_malloc() for CYGWIN, depending on
|
||||
which one is available. This latter one requires to pass the pointer to
|
||||
_aligned_free() instead of free(), so for this reason, all aligned allocations
|
||||
have to be released using ha_aligned_free(). Since this mostly happens on
|
||||
configuration elements, in practice it's not as inconvenient as it can sound.
|
||||
These functions are in reality macros handled in "bug.h" like the previous
|
||||
ones in order to deal with DEBUG_MEM_STATS. All "alloc" variants are reported
|
||||
in memstats as "malloc". All "zalloc" variants are reported in memstats as
|
||||
"calloc".
|
||||
The currently available allocators are the following:

  - void *ha_aligned_alloc(size_t align, size_t size)
  - void *ha_aligned_zalloc(size_t align, size_t size)

    Equivalent of malloc() but aligned to <align> bytes. The alignment MUST be
    at least as large as one word and MUST be a power of two. The "zalloc"
    variant also zeroes the area on success. Both return NULL on failure.

  - void *ha_aligned_alloc_safe(size_t align, size_t size)
  - void *ha_aligned_zalloc_safe(size_t align, size_t size)

    Equivalent of malloc() but aligned to <align> bytes. The alignment is
    automatically adjusted to the nearest larger power of two that is at least
    as large as a word. The "zalloc" variant also zeroes the area on
    success. Both return NULL on failure.

  - (type *)ha_aligned_alloc_typed(size_t count, type)
    (type *)ha_aligned_zalloc_typed(size_t count, type)

    This macro returns an area aligned to the required alignment for type
    <type>, large enough for <count> objects of this type, and the result is a
    pointer of this type. The goal is to ease allocation of known structures
    whose alignment is not necessarily known to the developer (and to avoid
    encouraging hard-coded alignments). The cast in return also provides a
    last-minute control in case a wrong type is mistakenly used due to a poor
    copy-paste or an extra "*" after the type. When DEBUG_MEM_STATS is in use,
    the type is stored as a string in the ".extra" field so that it can be
    displayed in "debug dev memstats". The "zalloc" variant also zeroes the
    area on success. Both return NULL on failure.

  - void ha_aligned_free(void *ptr)

    Frees the area pointed to by ptr. It is the equivalent of free() but for
    objects allocated using one of the functions above.
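
As an illustration, a minimal sketch (not taken from this document) of
allocating a cache-line aligned array of structures and releasing it could
look like this, assuming a type with a 64-byte alignment requirement:

    struct blk {
        unsigned long counters[8];
    } __attribute__((aligned(64)));

    struct blk *blks;

    /* allocate and zero 4 aligned objects; result is typed "struct blk *" */
    blks = ha_aligned_zalloc_typed(4, struct blk);
    if (blks) {
        /* ... use blks[0..3] ... */
        ha_aligned_free(blks);  /* never plain free() on aligned areas */
    }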

@@ -245,30 +245,6 @@ mt_list_pop(l)
        #=========#


mt_list_pop_locked(l)
    Removes the list's first element and returns it locked. If the list was
    empty, NULL is returned. A macro MT_LIST_POP_LOCKED() is provided for more
    convenient use; instead of returning the list element, it will return the
    structure holding the element, taking care of preserving the NULL.

    before:
           +---+     +---+     +---+     +---+     +---+     +---+     +---+
        #=>| L |<===>| A |<===>| B |<===>| C |<===>| D |<===>| E |<===>| F |<=#
        #  +---+     +---+     +---+     +---+     +---+     +---+     +---+  #
        #=====================================================================#

    after:
           +---+     +---+     +---+     +---+     +---+     +---+
        #=>| L |<===>| B |<===>| C |<===>| D |<===>| E |<===>| F |<=#
        #  +---+     +---+     +---+     +---+     +---+     +---+  #
        #===========================================================#

           +---+
        # x| A |x #
        #  +---+  #
        #=========#


_mt_list_lock_next(elt)
    Locks the link that starts at the next pointer of the designated element.
    The link is replaced by two locked pointers, and a pointer to the next
@@ -400,9 +376,6 @@ mt_list_lock_prev(elt)
    Return    A    elt
    value:      <===>

mt_list_try_lock_prev(elt)
    Does the same thing as mt_list_lock_prev(), except if the list is
    already locked, it returns { NULL, NULL } instead of waiting.

mt_list_lock_elem(elt)
    Locks the element only. Both of its pointers are replaced by two locked

@@ -1,4 +1,4 @@
2025-08-11 - Pools structure and API

1. Background
-------------

@@ -204,14 +204,6 @@ the cache, when this option is set, objects are picked from the cache from the
oldest one instead of the freshest one. This way even late memory corruptions
have a chance to be detected.

Another non-destructive approach is to use "-dMbackup". A full copy of the
object is made after its end, which eases inspection (e.g. of the parts
scratched by the pool_item elements), and a comparison is made upon allocation
of that object, just like with "-dMintegrity", causing a crash on mismatch. The
initial 4 words corresponding to the list are ignored as well. Note that when
both "-dMbackup" and "-dMintegrity" are used, the copy is performed before
being scratched, and the comparison is done by "-dMintegrity" only.

When build option DEBUG_MEMORY_POOLS is set, or the boot-time option "-dMtag"
is passed on the executable's command line, pool objects are allocated with
one extra pointer compared to the requested size, so that the bytes that follow

@@ -239,6 +231,10 @@ currently in use:
  +------------+ +------------+  / is set at build time
                                   or -dMtag at boot time


4. Storage in the process-wide shared pool
------------------------------------------

@@ -346,25 +342,7 @@ struct pool_head *create_pool(char *name, uint size, uint flags)
        "-dMno-merge" is passed on the executable's command line, the pools
        also need to have the exact same name to be merged. In addition, unless
        MEM_F_EXACT is set in <flags>, the object size will usually be rounded
        up to the size of pointers (16 or 32 bytes). MEM_F_UAF may be set on a
        per-pool basis to enable the UAF detection only for this specific pool,
        saving the massive overhead of global usage. The name that will appear
        in the pool upon merging is the name of the first created pool. The
        returned pointer is the new (or reused) pool head, or NULL upon error.
        Pools created this way must be destroyed using pool_destroy().

struct pool_head *create_aligned_pool(char *name, uint size, uint align, uint flags)
        Create a new pool named <name> for objects of size <size> bytes and
        aligned to <align> bytes (0 meaning use the platform's default). Pool
        names are truncated to their first 11 characters. Pools of very similar
        size will usually be merged if both have set the flag MEM_F_SHARED in
        <flags>. When DEBUG_DONT_SHARE_POOLS was set at build time, or
        "-dMno-merge" is passed on the executable's command line, the pools
        also need to have the exact same name to be merged. In addition, unless
        MEM_F_EXACT is set in <flags>, the object size will usually be rounded
        up to the size of pointers (16 or 32 bytes). MEM_F_UAF may be set on a
        per-pool basis to enable the UAF detection only for this specific pool,
        saving the massive overhead of global usage. The name that will appear
        in the pool upon merging is the name of the first created pool. The
        returned pointer is the new (or reused) pool head, or NULL upon error.
        Pools created this way must be destroyed using pool_destroy().
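
For example, a hypothetical initialization sketch (pool name and sizes
invented for illustration) creating a 64-byte aligned shared pool and
exercising it once:

    static struct pool_head *pool_head_frm;

    /* 128-byte objects, aligned to 64 bytes, mergeable with similar pools */
    pool_head_frm = create_aligned_pool("frm", 128, 64, MEM_F_SHARED);
    if (!pool_head_frm)
        return 0; /* creation of the pool itself failed */

    void *frm = pool_alloc(pool_head_frm);   /* object aligned to 64 bytes */
    if (frm)
        pool_free(pool_head_frm, frm);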

@@ -482,20 +460,6 @@ complicate maintenance.

A few macros exist to ease the declaration of pools:

DECLARE_ALIGNED_POOL(ptr, name, size, align)
    Placed at the top level of a file, this declares a global memory pool
    as variable <ptr>, name <name> and size <size> bytes per element, all
    of which will be aligned to <align> bytes. The alignment will be
    rounded up to the next power of two and will be at least as large as a
    word on the platform. This is made via a call to REGISTER_ALIGNED_POOL()
    and by assigning the resulting pointer to variable <ptr>. <ptr> will be
    created of type "struct pool_head *". If the pool needs to be visible
    outside of the function (which is likely), it will also need to be
    declared somewhere as "extern struct pool_head *<ptr>;". It is
    recommended to place such declarations very early in the source file so
    that the variable is already known to all subsequent functions which
    may use it.

DECLARE_POOL(ptr, name, size)
    Placed at the top level of a file, this declares a global memory pool
    as variable <ptr>, name <name> and size <size> bytes per element. This

@@ -507,17 +471,6 @@ DECLARE_POOL(ptr, name, size)
    declarations very early in the source file so that the variable is
    already known to all subsequent functions which may use it.

DECLARE_STATIC_ALIGNED_POOL(ptr, name, size, align)
    Placed at the top level of a file, this declares a static memory pool
    as variable <ptr>, name <name> and size <size> bytes per element, all
    of which will be aligned to <align> bytes. The alignment will be
    rounded up to the next power of two and will be at least as large as a
    word on the platform. This is made via a call to REGISTER_ALIGNED_POOL()
    and by assigning the resulting pointer to local variable <ptr>. <ptr>
    will be created of type "static struct pool_head *". It is recommended
    to place such declarations very early in the source file so that the
    variable is already known to all subsequent functions which may use it.

DECLARE_STATIC_POOL(ptr, name, size)
    Placed at the top level of a file, this declares a static memory pool
    as variable <ptr>, name <name> and size <size> bytes per element. This

@@ -527,42 +480,6 @@ DECLARE_STATIC_POOL(ptr, name, size)
    early in the source file so that the variable is already known to all
    subsequent functions which may use it.

DECLARE_STATIC_TYPED_POOL(ptr, name, type[, extra[, align]])
    Placed at the top level of a file, this declares a static memory pool
    as variable <ptr>, name <name>, and configured to allocate objects of
    type <type>. It is optionally possible to grow these objects by <extra>
    bytes (e.g. if they contain some variable length data at the end), and
    to force them to be aligned to <align> bytes. If only alignment is
    desired without extra data, pass 0 as <extra>. Alignment must be at
    least as large as the type's, and a control is enforced at declaration
    time so that objects cannot be less aligned than what is promised to
    the compiler. The default alignment of zero indicates that the default
    one (from the type) should be used. This is made via a call to
    REGISTER_ALIGNED_POOL() and by assigning the resulting pointer to local
    variable <ptr>. <ptr> will be created of type "static struct pool_head
    *". It is recommended to place such declarations very early in the
    source file so that the variable is already known to all subsequent
    functions which may use it.

DECLARE_TYPED_POOL(ptr, name, type[, extra[, align]])
    Placed at the top level of a file, this declares a global memory pool
    as variable <ptr>, name <name>, and configured to allocate objects of
    type <type>. It is optionally possible to grow these objects by <extra>
    bytes (e.g. if they contain some variable length data at the end), and
    to force them to be aligned to <align> bytes. If only alignment is
    desired without extra data, pass 0 as <extra>. Alignment must be at
    least as large as the type's, and a control is enforced at declaration
    time so that objects cannot be less aligned than what is promised to
    the compiler. The default alignment of zero indicates that the default
    one (from the type) should be used. This is made via a call to
    REGISTER_ALIGNED_POOL() and by assigning the resulting pointer to
    variable <ptr>. <ptr> will be created of type "struct pool_head *". If
    the pool needs to be visible outside of the function (which is likely),
    it will also need to be declared somewhere as "extern struct pool_head
    *<ptr>;". It is recommended to place such declarations very early in
    the source file so that the variable is already known to all subsequent
    functions which may use it.
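
As an illustration, a hypothetical declaration (type and names invented) of a
global typed pool whose objects must start on a 64-byte boundary:

    struct conn_ctx {
        unsigned long stats[4];
        /* ... */
    };

    /* no extra bytes, 64-byte alignment enforced at declaration time */
    DECLARE_TYPED_POOL(pool_head_conn_ctx, "conn_ctx", struct conn_ctx, 0, 64);

    /* later, at run time: */
    struct conn_ctx *ctx = pool_alloc(pool_head_conn_ctx);
    if (ctx)
        pool_free(pool_head_conn_ctx, ctx);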


6. Build options
----------------

@@ -98,37 +98,19 @@ void task_set_thread(t, id)
        indicate "any thread". It's ignored and replaced by zero when threads
        are disabled.

void tasklet_wakeup(tl, [flags])
        Make sure that tasklet <tl> will wake up, that is, will execute at
        least once. The tasklet will run on its assigned thread, or on any
        thread if its TID is negative. An optional <flags> value may be passed
        to set a wakeup cause on the tasklet's flags, typically TASK_WOKEN_* or
        TASK_F_UEVT*. When not set, 0 is passed (i.e. no flags are changed).

struct list *tasklet_wakeup_after(head, tl, [flags])
        Schedule tasklet <tl> to run immediately after the current one if
        <head> is NULL, or after the last queued one if <head> is non-null.
        The new head is returned, to be passed to the next call. The purpose
        here is to permit instant wakeups of resumed tasklets that still
        preserve ordering between them. A typical use case is for a mux's I/O
        handler to instantly wake up a series of urgent streams before
        continuing with already queued tasklets. This may induce extra
        latencies for pending jobs and must only be used extremely carefully
        when it's certain that the processing will benefit from using fresh
        data from the L1 cache. An optional <flags> value may be passed to set
        a wakeup cause on the tasklet's flags, typically TASK_WOKEN_* or
        TASK_F_UEVT*. When not set, 0 is passed (i.e. no flags are changed).

void tasklet_wakeup_on(tl, thr, [flags])
        Make sure that tasklet <tl> will wake up on thread <thr>, that is, will
        execute at least once. The designated thread may only differ from the
        calling one if the tasklet is already configured to run on another
        thread, and it is not permitted to self-assign a tasklet if its tid is
        negative, as it may already be scheduled to run somewhere else. Just in
        case, only use tasklet_wakeup() which will pick the tasklet's assigned
        thread ID. An optional <flags> value may be passed to set a wakeup
        cause on the tasklet's flags, typically TASK_WOKEN_* or TASK_F_UEVT*.
        When not set, 0 is passed (i.e. no flags are changed).
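
As an illustration, a minimal hypothetical sketch (handler and context names
invented) allocating a tasklet and waking it up with a user event flag:

    struct tasklet *tl = tasklet_new();

    if (tl) {
        /* same prototype as a task handler:
         * struct task *my_io_cb(struct task *t, void *ctx, unsigned int state)
         */
        tl->process = my_io_cb;
        tl->context = my_ctx;
        tasklet_wakeup(tl, TASK_F_UEVT1);  /* wake it with user event 1 */
    }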

struct tasklet *tasklet_new()
        Allocate a new tasklet and set it to run by default on the calling
@@ -215,14 +197,6 @@ state field before the call to ->process()

  - TASK_WOKEN_OTHER   any other application-defined wake-up reason.

  - TASK_F_UEVT1       one-shot user-defined event type 1. This is application
                       specific, and reset to 0 when the handler is called.

  - TASK_F_UEVT2       one-shot user-defined event type 2. This is application
                       specific, and reset to 0 when the handler is called.

  - TASK_F_UEVT3       one-shot user-defined event type 3. This is application
                       specific, and reset to 0 when the handler is called.

In addition, a few persistent flags may be observed or manipulated by the
application, both for tasks and tasklets:

@@ -11,7 +11,7 @@ default init, this was controversial but fedora and archlinux already uses it.
At this time HAProxy still had a multi-process model, and the way haproxy is
working was incompatible with the daemon mode.

Systemd is compatible with traditional forking services, but somehow HAProxy
is different. To work correctly, systemd needs a main PID, this is the PID of
the process that systemd will supervise.

@@ -45,7 +45,7 @@ However the wrapper suffered from several problems:

### mworker V1

HAProxy 1.8 got rid of the wrapper which was replaced by the master worker
mode. This first version was basically a reintegration of the wrapper features
within HAProxy. HAProxy is launched with the -W flag, reads the configuration
and then forks. In mworker mode, the master is usually launched as a root process,

@@ -86,7 +86,7 @@ retrieved automatically.
The master is supervising the workers: when a current worker (not a previous one
from before the reload) exits without being asked for a reload, the master
will emit an "exit-on-failure" error, kill every worker with a SIGTERM,
and exit with the same error code as the failed worker. This behavior can be
changed by using the "no exit-on-failure" option in the global section.

While the master is supervising the workers using the wait() function, the

@@ -186,8 +186,8 @@ number that can be found in HAPROXY_PROCESSES. With this change the stats socket
in the configuration is less useful and everything can be done from the master
CLI.

With 2.7, the reload mechanism of the master CLI evolved. With previous
versions, this mechanism was asynchronous: once the `reload` command was
received, the master would reload, the active master CLI connection was closed,
and there was no way to return a status as a response to the `reload` command.
To achieve a synchronous reload, a dedicated sockpair is used, one side uses a master CLI

@@ -208,38 +208,3 @@ starts with -st to achieve a hard stop on the previous worker.
Version 3.0 got rid of the libsystemd dependency for sd_notify() after the
xz/openssh events; the function is now implemented directly in haproxy in
src/systemd.c.

### mworker V3

This version was implemented with HAProxy 3.1; the goal was to stop parsing and
applying the configuration in the master process.

One of the caveats of the previous implementation was that the parser could take
a lot of time, and the master process would be stuck in the parser instead of
handling its polling loop, signals etc. Some parts of the configuration parsing
could also be less reliable with third-party code (EXTRA_OBJS): it could, for
example, allow opening FDs and not closing them before the reload, which
would crash the master after a few reloads.

The startup of the master-worker was reorganized this way:

- the "discovery" mode, which is a lighter configuration parsing step, only
  applies the configuration which needs to be effective for the master process.
  For example, "master-worker", "mworker-max-reloads" and fewer than 20 other
  keywords that are identified by KWF_DISCOVERY in the code. It is really fast
  as it doesn't need all the configuration to be applied in the master process.

- the master will then fork a worker, with a PROC_O_INIT flag. This worker has
  a temporary sockpair connected to the master CLI. Once the worker is forked,
  the master initializes its configuration and starts its polling loop.

- The newly forked worker will try to parse the configuration, which could
  result in a failure (exit 1), or any bad error code. In case of success, the
  worker will send a "READY" message to the master CLI then close this FD. At
  this step everything was initialized and the worker can enter its polling
  loop.

- The master then waits for the worker; it could:
  * receive the READY message over the mCLI, resulting in a successful loading
    of haproxy
  * receive a SIGCHLD, meaning the worker exited and couldn't load
@@ -1,53 +0,0 @@
2025/09/16 - SHM stats file storage description and hints

The shm stats file (used to share thread-groupable statistics among multiple
processes through the "shm-stats-file" directive) is made of:

 - a main header which describes the file version, the processes making
   use of it, the common clock source and hints about the number of
   objects that are currently stored or provisioned in the file.
 - an indefinite number of "objects" blocks coming right after the
   main header; all blocks have the same size, which is the size of the
   largest underlying object that may be stored. The main header tells
   how many objects are stored in the file.

The file header looks like this (32/64 bit systems):

0       8        16               32                  48                  64
+-------+---------+----------------+-------------------+-------------------+
|     VERSION     |    2 bytes     | global_now_ms (global mono date in ms)|
| MAJOR |  MINOR  |      hole      |                                       |
+----------------------------------+---------------------------------------+
|              global_now_ns (global mono date in ns)                      |
+--------------------------------------------------------------------------+
|              now_offset (offset applied to global monotonic date         |
|                          on startup)                                     |
+--------------------------------------------------------------------------+
| Process slot :                                                           | 1 byte x 64
|    pid | heartbeat (ticks)                                               |
+----------------------------------+---------------------------------------+
| objects                          | objects slots                         |
| (used objects)                   | (available for use)                   |
+----------------------------------+---------------------------------------+
| padding (for future use)                                                 | 128 bytes
+--------------------------------------------------------------------------+

An object block looks like this:

0       8        16               32                  48                  64
+-------+---------+----------------+-------------------+-------------------+
|                                 GUID                                     | 128 bytes
+                           (zero terminated)                              +
|                                                                          |
+-------+---------+--------------------------------------------------------+
| tgid  |  type   |                      padding                           |
+-------+---------+--------------------------------------------------------+
|          users (bitmask of process slots making use of the obj)          |
+--------------------------------------------------------------------------+
|                              object data                                 |
|                           (version dependent)                            |
|                     struct be_counters_shared_tg or                      |
|                     struct fe_counters_shared_tg                         |
+--------------------------------------------------------------------------+
| padding (to anticipate evolutions)                                       | 64 bytes
+--------------------------------------------------------------------------+
@@ -1,144 +0,0 @@
2025-02-13 - Details of the watchdog's internals
------------------------------------------------

1. The watchdog timer
---------------------

The watchdog sets up a timer that triggers every 1 to 1000 ms. This is pre-
initialized by init_wdt() which positions wdt_handler() as the signal handler
of signal WDTSIG (SIGALRM).

But this is not sufficient, an alarm actually has to be set. This is done for
each thread by init_wdt_per_thread(), which calls clock_setup_signal_timer(),
which in turn enables a ticking timer for the current thread that delivers
the WDTSIG signal (SIGALRM) to the process. Since there's no notion of thread
at this point, there are as many timers as there are threads, and each signal
comes with an integer value which in fact contains the thread number as passed
to clock_setup_signal_timer() during initialization.

The timer preferably uses CLOCK_THREAD_CPUTIME_ID if available, otherwise it
falls back to CLOCK_REALTIME. The former is more accurate as it really counts
the time spent in the process, while the latter might also account for time
stuck on paging in etc.

Then wdt_ping() is called to arm the timer. It's set to trigger every
<wdt_warn_blocked_traffic_ns> interval. It is also called by wdt_handler()
to reprogram a new wakeup after it has ticked.

When wdt_handler() is called, it reads the thread number in si_value.sival_int,
as positioned during initialization. Most of the time the signal lands on the
wrong thread (typically thread 1 regardless of the reported thread). From this
point, the function retrieves the various info related to that thread's recent
activity (its current time and flags), and ignores corner cases such as if that
thread is already dumping another one, being dumped, in the poller, has quit,
etc.

If the thread was not marked as stuck, it's verified that no progress was made
for at least one second, in which case the TH_FL_STUCK flag is set. The lack of
progress is measured by the distance between the thread's current cpu_time and
its prev_cpu_time. If the lack of progress is at least as large as the warning
threshold, then the signal is bounced to the faulty thread if it's not the
current one. Since this bounce is based on the time spent without update, it
already doesn't happen often.

Once on the faulty thread, two checks are performed:
  1) if the thread was already marked as stuck, then the thread is considered
     as definitely stuck, and ha_panic() is called. It will not return.

  2) a check is made to verify if the scheduler is still ticking, by reading
     and setting a variable that only the scheduler can clear when leaving a
     task. If the scheduler didn't make any progress, ha_stuck_warning() is
     called to emit a warning about that thread.

Most of the time there's no panic of course, and a wdt_ping() is performed
before leaving the handler to reprogram a check for that thread.

2. The debug handler
--------------------

Both ha_panic() and ha_stuck_warning() are quite similar. In both cases, they
will first verify that no panic is in progress and just return if so. This is
verified using mark_tainted() which atomically sets a tainted bit and returns
the previous value. ha_panic() sets TAINTED_PANIC while ha_stuck_warning() will
set TAINTED_WARN_BLOCKED_TRAFFIC.

ha_panic() uses the current thread's trash buffer to produce the messages, as
we don't care about its contents since that thread will never return. However
ha_stuck_warning() instead uses a local 8kB buffer in the thread's stack.
ha_panic() will call ha_thread_dump_fill() for each thread, to complete the
buffer being filled with each thread's dump messages. ha_stuck_warning() only
calls ha_thread_dump_one(), which works on the current thread. In both cases
the message is then directly sent to fd #2 (stderr) and ha_thread_dump_done()
is called to release the dumped thread.

Both print a few extra messages, but ha_panic() just ends by looping on abort()
until the process dies.

ha_thread_dump_fill() uses a locking mechanism to make sure that each thread is
only dumped once at a time. For this it atomically sets its thread_dump_buffer
to point to the target buffer. The thread_dump_buffer has 4 possible values:
  - NULL: no dump in progress
  - a valid, even, pointer: this is the pointer to the buffer that's currently
    in the process of being filled by the thread
  - a valid pointer + 1: this is the pointer of the now filled buffer, that the
    caller can consume. The atomic |1 at the end marks the end of the dump.
  - 0x2: this indicates to the dumping function that it is responsible for
    assigning its own buffer itself (used by the debug_handler to pick one of
    its own trash buffers during a panic). The idea here is that each thread
    will keep its own copy of its own dump so that it can later be found in
    the core file for inspection.
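
The tagging protocol may be sketched as follows (a simplified illustration,
not code from the source; the exact field access is an assumption):

    /* dumping side: publish the buffer, fill it, then mark completion */
    HA_ATOMIC_STORE(&thread_dump_buffer, buf);          /* even pointer   */
    /* ... write the dump into buf ... */
    HA_ATOMIC_STORE(&thread_dump_buffer,
                    (void *)((uintptr_t)buf | 1));      /* completion mark */

    /* consuming side: spin until the low bit indicates completion */
    void *p;
    do {
        p = HA_ATOMIC_LOAD(&thread_dump_buffer);
    } while (!((uintptr_t)p & 1));
    p = (void *)((uintptr_t)p & ~(uintptr_t)1);         /* strip the tag */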

A copy of the last valid thread_dump_buffer used is kept in last_dump_buffer,
for easier post-mortem analysis. This one may be NULL or even invalid, but
usually during a panic it will be valid, and may reveal useful hints even if it
still contains the dump of the last warning. Usually this will point to a trash
buffer or to a stack area.

ha_thread_dump_fill() then either directly calls ha_thread_dump_one() if the
target thread is the current thread, or sends the target thread DEBUGSIG
(SIGURG) if it's a different thread. This signal is initialized at boot time
by init_debug() to call handler debug_handler().

debug_handler() then operates on the target thread and recognizes that it must
allocate its own buffer if the pointer is 0x2, calls ha_thread_dump_one(), then
waits forever (it does not return from the signal handler so as to make sure
the dumped thread will not badly interact with other ones).

ha_thread_dump_one() collects some info that it prints all along into the
target buffer. Depending on the situation, it will dump current tasks or not,
may note that Lua is involved and taint the process with TAINTED_LUA_STUCK and,
if running in shared mode, also with TAINTED_LUA_STUCK_SHARED. It calls
ha_dump_backtrace() before returning.

ha_dump_backtrace() produces a backtrace into a local buffer (100 entries max),
then dumps the code bytes near the crashing instruction, dumps pointers and
tries to resolve function names, and sends all of that into the target buffer.
On some architectures (x86_64, arm64), it will also try to detect and decode
call instructions and resolve them to called functions.

3. Improvements
---------------

The symbol resolution is extremely expensive, particularly for the warnings
which should be fast. But we need it, it's just unfortunate that it strikes at
the wrong moment. At least ha_dump_backtrace() does disable signals while it's
resolving, in order to avoid unwanted re-entrance. In addition, the called
function resolve_sym_name() uses some locking and refrains from calling the
dladdr family of functions in a re-entrant way (in the worst case only well
known symbols will be resolved).

In an ideal case, ha_dump_backtrace() would dump the pointers to a local array,
which would then later be resolved asynchronously in a tasklet. This can work
because the code bytes will not change either, so the dump can be done at once
there.

However the task dumps are not much compatible with this. For example
ha_task_dump() makes a number of tests and will itself call hlua_traceback() if
needed, so it might still need to be dumped in real time synchronously and
buffered. But then it's difficult to reassemble chunks of text between the
backtrace (that needs to be resolved later) and the tasks/lua parts. Or maybe
we can afford to disable Lua trace dumps in warnings and keep them only for
panics (where the asynchronous resolution is not needed)?

Also differentiating the call paths for warnings and panics is not something
easy either.
@@ -1,7 +1,7 @@
-----------------------
  HAProxy Starter Guide
-----------------------
                                version 3.4


This document is an introduction to HAProxy for all those who don't know it, as

@@ -1693,7 +1693,7 @@ A small team of trusted developers will receive it and will be able to propose
a fix. We usually don't use embargoes and once a fix is available it gets
merged. In some rare circumstances it can happen that a release is coordinated
with software vendors. Please note that this process usually messes up
everyone's work, and that rushed releases can sometimes introduce new bugs,
so it's best avoided unless strictly necessary; as such, there is often little
consideration for reports that needlessly cause such extra burden, and the best
way to see your work credited usually is to provide a working fix, which will

@@ -348,20 +348,8 @@ Core class
    end
..

.. js:function:: core.get_patref(name)

  **context**: init, task, action, sample-fetch, converter

  Find the pattern object *name* used by HAProxy. It corresponds to the
  generic pattern reference used to handle both ACLs and Maps.

  :param string name: reference name
  :returns: A :ref:`patref_class` object.

.. js:function:: core.add_acl(name, key)

  **LEGACY**

  **context**: init, task, action, sample-fetch, converter

  Add the ACL *key* in the ACLs list referenced by *name*.

@@ -369,14 +357,8 @@ Core class
  :param string name: the name that references the ACL entries.
  :param string key: the key which will be added.

  .. Note::
    This function is not optimal due to systematic Map reference lookup.
    It is recommended to use :js:func:`Patref.add()` instead.

.. js:function:: core.del_acl(name, key)

  **LEGACY**

  **context**: init, task, action, sample-fetch, converter

  Delete the ACL entry referenced by the key *key* in the list of ACLs

@@ -385,14 +367,8 @@ Core class
  :param string name: the name that references the ACL entries.
  :param string key: the key which will be deleted.

  .. Note::
    This function is not optimal due to systematic Map reference lookup.
    It is recommended to use :js:func:`Patref.del()` instead.

.. js:function:: core.del_map(name, key)

  **LEGACY**

  **context**: init, task, action, sample-fetch, converter

  Delete the map entry indexed with the specified key in the list of maps

@@ -401,10 +377,6 @@ Core class
  :param string name: the name that references the map entries.
  :param string key: the key which will be deleted.

  .. Note::
    This function is not optimal due to systematic Map reference lookup.
    It is recommended to use :js:func:`Patref.del()` instead.

.. js:function:: core.get_info()

  **context**: body, init, task, action, sample-fetch, converter

@@ -513,7 +485,7 @@ Core class

.. js:function:: core.msleep(milliseconds)

  **context**: task, action

  The `core.msleep()` function stops the Lua execution for the specified
  number of milliseconds.

@@ -858,8 +830,6 @@ Core class

.. js:function:: core.set_map(name, key, value)

  **LEGACY**

  **context**: init, task, action, sample-fetch, converter

  Set the value *value* associated to the key *key* in the map referenced by

@@ -869,13 +839,9 @@ Core class
  :param string key: the key to set or replace
  :param string value: the associated value

  .. Note::
    This function is not optimal due to systematic Map reference lookup.
    It is recommended to use :js:func:`Patref.set()` instead.

.. js:function:: core.sleep(int seconds)

  **context**: task, action

  The `core.sleep()` function stops the Lua execution for the specified
  number of seconds.

@@ -893,9 +859,7 @@ Core class

  **context**: init, task, action

  This function returns a new object of an *httpclient* class. An *httpclient*
  object must be used to process one and only one request. It must never be
  reused to process several requests.

  :returns: A :ref:`httpclient_class` object.

@@ -928,25 +892,12 @@ Core class
  its work and wants to give back the control to HAProxy without executing the
  remaining code. It can be seen as a multi-level "return".

.. js:function:: core.wait([milliseconds])

  **context**: task, action

  Give back the hand to the HAProxy scheduler. Unlike :js:func:`core.yield`,
  the task will not be woken up automatically to resume as fast as possible.
  Instead, it will wait for an event to wake the task. If the milliseconds
  argument is provided, then the Lua execution will be automatically resumed
  past this delay even if no event caused the task to wake itself up.

  :param integer milliseconds: automatic wakeup past this delay. (optional)

.. js:function:: core.yield()

  **context**: task, action

  Give back the hand to the HAProxy scheduler. It is used when the Lua
  processing consumes a lot of processing time. Lua execution will be resumed
  automatically (automatic reschedule).

.. js:function:: core.parse_addr(address)

@@ -1089,13 +1040,18 @@ Core class
  perform the heavy job in a dedicated task and allow remaining events to be
  processed more quickly.

.. js:function:: core.use_native_mailers_config()

  **context**: body

  Inform haproxy that the script will make use of the native "mailers"
  config section (although legacy). In other words, inform haproxy that
  :js:func:`Proxy.get_mailers()` will be used later in the program.

  Use this when sending email alerts directly from lua.

  :see: :js:func:`Proxy.get_mailers()`
.. _proxy_class:

@@ -1224,14 +1180,8 @@ Proxy class

  **LEGACY**

  Returns a table containing legacy mailers config (from the haproxy
  configuration file) for the current proxy, or nil if mailers are not
  available for the proxy.

  .. warning::
    When relying on :js:func:`Proxy.get_mailers()` to retrieve the mailers
    configuration, :js:func:`core.use_native_mailers_config()` must be called
    first from body or init context to inform haproxy that Lua makes use of
    the legacy mailers config.

  :param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
    proxy.

@@ -1248,6 +1198,10 @@ ProxyMailers class

  This class provides mailers config for a given proxy.

.. js:attribute:: ProxyMailers.track_server_health

  Boolean set to true if the option "log-health-checks" is configured on

@@ -1881,17 +1835,6 @@ Queue class

  Use :js:func:`core.queue` to get a new Queue object.

.. js:function:: Queue.alarm(queue)

  **context**: task, action, service

  Sets a wakeup alarm on the current Lua context so that when new data
  becomes available on the Queue, the current Lua context is woken up
  automatically. It can be combined with :js:func:`core.wait` to wait
  for Queue events.

  :param class_queue queue: A :ref:`queue_class` to the current queue
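
  As an illustration, here is a hypothetical task (a sketch, not from the
  official examples) consuming the queue with the alarm + wait pattern:

  .. code-block:: lua

    local q = core.queue()

    core.register_task(function()
      while true do
        q:alarm()                  -- wake this task when new data arrives
        local item = q:pop()
        if item ~= nil then
          core.Info("dequeued: " .. tostring(item))
        else
          core.wait()              -- sleep until the alarm fires
        end
      end
    end)

  ..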

.. js:function:: Queue.size(queue)

  This function returns the number of items within the Queue.

@@ -2580,9 +2523,7 @@ HTTPClient class
.. js:class:: HTTPClient

  The httpclient class allows issuing outbound HTTP requests through a simple
  API without knowledge of HAProxy internals. Any instance must be used to
  process one and only one request. It must never be reused to process
  several requests.

.. js:function:: HTTPClient.get(httpclient, request)
.. js:function:: HTTPClient.head(httpclient, request)

@@ -3471,178 +3412,6 @@ Map class
  :param string str: Is the string used as key.
  :returns: a string containing the result or empty string if no match.

.. _patref_class:

Patref class
=================

.. js:class:: Patref

  A Patref object corresponds to the internal HAProxy pat_ref element which
  is used to store ACL and MAP elements. It is identified by its name
  (reference) which often is a filename, unless it is prefixed by 'virt@'
  for virtual references or 'opt@' for references that don't necessarily
  point to a real file. From Lua, a :ref:`patref_class` object may be used to
  directly manipulate existing pattern reference storage. For convenience,
  Patref objects may be directly accessed and listed as a table thanks to the
  index and pairs metamethods. Note however that for the index metamethod,
  in case of duplicated entries, only the first matching entry is returned.

  .. Warning::
    Not meant to be shared between multiple contexts. If multiple contexts
    need to work on the same pattern reference, each context should have
    its own patref object.

  A Patref object is obtained using the :js:func:`core.get_patref()`
  function.

.. js:function:: Patref.get_name(ref)

  :returns: the name of the pattern reference object.

.. js:function:: Patref.is_map(ref)

  :returns: true if the pattern reference is used to handle maps instead
    of acl, false otherwise.

.. js:function:: Patref.purge(ref)

  Completely prune all pattern reference entries pointed to by the Patref
  object. This special operation doesn't require committing.

.. js:function:: Patref.prepare(ref)

  Create a new empty version for the Patref object. It can be used to
  manipulate the Patref object with update methods without applying the
  updates until the commit() method is called.

.. js:function:: Patref.commit(ref)

  Tries to commit pending Patref object updates, that is, updates made to the
  local object will be committed to the underlying pattern reference storage
  in an atomic manner upon success. Upon failure, local pending updates are
  lost. Upon success, all other pending updates on the pattern reference
  (e.g.: "prepare" from the cli or from other Patref Lua objects) started
  before the new one will be pruned.

  :returns: true on success and nil on failure (followed by an error message).

  See :js:func:`Patref.prepare()` and :js:func:`Patref.giveup()`

.. js:function:: Patref.giveup(ref)

  Drop the pending patref version created using Patref:prepare(): get back to
  the live dataset.

.. js:function:: Patref.add(ref, key[, value])

  Add a new key to the pattern reference, with associated value for maps.

  :param string key: the string used as a key
  :param string value: the string used as value to be associated with the key
    (only relevant for maps)
  :returns: true on success and nil on failure (followed by an error message).

  .. Note::
    Affects the live pattern reference version, unless :js:func:`Patref.prepare()`
    was called and is still ongoing (waiting for commit or giveup)
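
  For example, a small sketch (not from the official examples) staging several
  updates and applying them atomically:

  .. code-block:: lua

    core.register_task(function()
      local ref = core.get_patref("test.map")

      ref:prepare()                 -- start a new, empty pending version
      ref:add("10.0.0.1", "backend1")
      ref:add("10.0.0.2", "backend2")

      local ok, err = ref:commit()  -- atomically swap in the pending version
      if not ok then
        core.Warning("commit failed: " .. tostring(err))
      end
    end)

  ..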

.. js:function:: Patref.add_bulk(ref, table)

  Adds multiple entries at once to the pattern reference. It is recommended
  to use this one over :js:func:`Patref.add()` to add a lot of entries
  at once, because this one is more efficient.

  :param table table: For ACLs, a table of key strings: t[1] = "key1",
    t[2] = "key2"...

    For Maps, a table of key:value string pairs: t["key"] = "value"
  :returns: true on success and nil on failure (followed by an error message).

  .. Note::
    Affects the live pattern reference version, unless :js:func:`Patref.prepare()`
    was called and is still pending (waiting for commit or giveup)

.. js:function:: Patref.del(ref, key)

  Delete all entries matching the input key in the pattern reference. In
  case of duplicate keys, all keys are removed.

  :param string key: the string used as a key
  :returns: true on success and false on failure.

  .. Note::
    Affects the live pattern reference version, unless :js:func:`Patref.prepare()`
    was called and is still ongoing (waiting for commit or giveup)

.. js:function:: Patref.set(ref, key, value[, force])

  Only relevant for maps. Set existing entries matching key to the provided
  value. In case of duplicate keys, all matching keys will be set to the new
  value.

  :param string key: the string used as a key
  :param string value: the string used as value
  :param boolean force: create the entry if it doesn't exist (optional,
    defaults to false)
  :returns: true on success and nil on failure (followed by an error message)

  .. Note::
    Affects the live pattern reference version, unless :js:func:`Patref.prepare()`
    was called and is still ongoing (waiting for commit or giveup)

.. js:function:: Patref.event_sub(ref, event_types, func)

  Register a function that will be called on specific PAT_REF events.
  See :js:func:`core.event_sub()` for generalities. Please note however that
  for performance reasons pattern reference events can only be subscribed to
  per pattern reference (not globally). What this means is that the provided
  callback function will only be called for events affecting the pattern
  reference pointed to by the Patref object (ref) passed as parameter.

  If you want to be notified of events on a given set of pattern references,
  it is still possible to perform as many per-patref subscriptions as needed.

  Also, for PAT_REF events, no event data is provided (known as "event_data"
  in the callback function's prototype from :js:func:`core.event_sub()`)

  The list of the available event types for the PAT_REF family is:

  * **PAT_REF_ADD**: element was added to the current version of the pattern
    reference
  * **PAT_REF_DEL**: element was deleted from the current version of the
    pattern reference
  * **PAT_REF_SET**: element was modified in the current version of the
    pattern reference
  * **PAT_REF_CLEAR**: all elements were cleared from the current version of
    the pattern reference
  * **PAT_REF_COMMIT**: pending element(s) was/were committed in the current
    version of the pattern reference

  .. Note::
    Use **PAT_REF** in **event_types** to subscribe to all pattern reference
    event types at once.

  Here is a working example showing how to trigger a callback function for the
  pattern reference associated to file "test.map":

  .. code-block:: lua

    core.register_init(function()
      -- We assume that "test.map" is a map file referenced in haproxy config
      -- file, thus it is loaded during config parsing and is expected to be
      -- available at init Lua stage. Indeed, the below code wouldn't work if
      -- used directly within body context, as at that time the config is not
      -- fully parsed.
      local map_patref = core.get_patref("test.map")
      map_patref:event_sub({"PAT_REF_ADD"}, function(event, data, sub)
        -- in the patref event handler
        print("entry added!")
      end)
    end)

  ..

.. _applethttp_class:

AppletHTTP class
@@ -3911,31 +3680,16 @@ AppletTCP class
  :param class_AppletTCP applet: An :ref:`applettcp_class`
  :returns: a string. The string can be empty if we reach the end of the stream.

.. js:function:: AppletTCP.receive(applet, [size, [timeout]])

  Reads data from the TCP stream, according to the specified read *size*. If
  the *size* is missing, the function tries to read all the content of the
  stream until the end. An optional timeout may be specified in milliseconds.
  In this case the function will wait no longer than this delay, returning the
  amount of available data, or nil if there is no data. An empty string is
  returned if the connection is closed.

  :param class_AppletTCP applet: An :ref:`applettcp_class`
  :param integer size: the required read size.
  :returns: nil if the timeout has expired and no data was available but
    can still be received. Otherwise, a string is returned, possibly an empty
    string if the connection is closed.
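  For instance, a hypothetical echo service (a sketch only) using the timeout
  so that it can perform periodic work between reads:

  .. code-block:: lua

    core.register_service("echo", "tcp", function(applet)
      while true do
        local data = applet:receive(1024, 5000) -- wait at most 5 seconds
        if data == "" then
          return                                -- connection closed
        elseif data ~= nil then
          applet:send(data)
        end
        -- nil: timeout expired with no data, loop and try again
      end
    end)

  ..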

.. js:function:: AppletTCP.try_receive(applet)

  Reads available data from the TCP stream and returns immediately. Returns a
  string containing the read bytes, or nil if no bytes are available at that
  time. An empty string is returned if the connection is closed.

  :param class_AppletTCP applet: An :ref:`applettcp_class`
  :returns: nil if no data was available but can still be received. Otherwise,
    a string is returned, possibly an empty string if the connection is
    closed.

.. js:function:: AppletTCP.send(applet, msg)
@@ -4612,27 +4366,6 @@ HTTPMessage class
  data by default.
  :returns: an integer containing the amount of bytes copied or -1.

.. js:function:: HTTPMessage.set_body_len(http_msg, length)

  This function changes the expected payload length of the HTTP message
  **http_msg**. **length** can be an integer value. In that case, a
  "Content-Length" header is added with the given value. It is also possible
  to pass the **"chunked"** string instead of an integer value to force the
  HTTP message to be chunk-encoded. In that case, a "Transfer-Encoding" header
  is added with the "chunked" value. In both cases, all existing
  "Content-Length" and "Transfer-Encoding" headers are removed.

  This function should be used in the filter context to be able to alter the
  payload of the HTTP message. The internal state of the HTTP message is
  updated accordingly. The :js:func:`HTTPMessage.add_header()` or
  :js:func:`HTTPMessage.set_header()` functions must be used in that case.

  :param class_httpmessage http_msg: The manipulated HTTP message.
  :param type length: The new payload length to set. It can be an integer or
    the string "chunked".
  :returns: true if the payload length was successfully updated, false
    otherwise.

.. js:function:: HTTPMessage.set_eom(http_msg)

  This function sets the end of message for the HTTP message **http_msg**.

@@ -28,9 +28,7 @@ Revision history
              string encoding. With contributions from Andriy Palamarchuk
              (Amazon.com).
 2020/03/05 - added the unique ID TLV type (Tim Düsterhus)
 2025/09/09 - added SSL-related TLVs for key exchange group and signature
              scheme (Steven Collison)
 2026/01/15 - added SSL client certificate TLV (Simon Ser)


1. Background

@@ -537,21 +535,18 @@ the information they choose to publish.

The following types have already been registered for the <type> field :

  #define PP2_TYPE_ALPN               0x01
  #define PP2_TYPE_AUTHORITY          0x02
  #define PP2_TYPE_CRC32C             0x03
  #define PP2_TYPE_NOOP               0x04
  #define PP2_TYPE_UNIQUE_ID          0x05
  #define PP2_TYPE_SSL                0x20
  #define PP2_SUBTYPE_SSL_VERSION     0x21
  #define PP2_SUBTYPE_SSL_CN          0x22
  #define PP2_SUBTYPE_SSL_CIPHER      0x23
  #define PP2_SUBTYPE_SSL_SIG_ALG     0x24
  #define PP2_SUBTYPE_SSL_KEY_ALG     0x25
  #define PP2_SUBTYPE_SSL_GROUP       0x26
  #define PP2_SUBTYPE_SSL_SIG_SCHEME  0x27
  #define PP2_SUBTYPE_SSL_CLIENT_CERT 0x28
  #define PP2_TYPE_NETNS              0x30


2.2.1 PP2_TYPE_ALPN
@@ -659,25 +654,13 @@ of the used cipher, for example "ECDHE-RSA-AES128-GCM-SHA256".
The second level TLV PP2_SUBTYPE_SSL_SIG_ALG provides the US-ASCII string name
of the algorithm used to sign the certificate presented by the frontend when
the incoming connection was made over an SSL/TLS transport layer, for example
"RSA-SHA256".

The second level TLV PP2_SUBTYPE_SSL_KEY_ALG provides the US-ASCII string name
of the algorithm used to generate the key of the certificate presented by the
frontend when the incoming connection was made over an SSL/TLS transport layer,
for example "RSA2048".

The second level TLV PP2_SUBTYPE_SSL_GROUP provides the US-ASCII string name of
the key exchange algorithm used for the frontend TLS connection, for example
"secp256r1".

The second level TLV PP2_SUBTYPE_SSL_SIG_SCHEME provides the US-ASCII string
name of the algorithm the frontend used to sign the ServerKeyExchange or
CertificateVerify message, for example "rsa_pss_rsae_sha256".

The optional second level TLV PP2_SUBTYPE_SSL_CLIENT_CERT provides the raw
X.509 client certificate encoded in ASN.1 DER. The frontend may choose to omit
this TLV depending on configuration.

In all cases, the string representation (in UTF8) of the Common Name field
(OID: 2.5.4.3) of the client certificate's Distinguished Name, is appended
using the TLV format and the type PP2_SUBTYPE_SSL_CN. E.g. "example.com".
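
As an illustration, each TLV is simply a one-byte type followed by a two-byte
big-endian length and the value. A minimal C sketch (helper name invented, not
part of the specification):

    /* append one PP2 TLV at <dst> and return the number of bytes written */
    static size_t pp2_write_tlv(uint8_t *dst, uint8_t type,
                                const void *value, uint16_t len)
    {
        dst[0] = type;
        dst[1] = len >> 8;        /* length_hi */
        dst[2] = len & 0xff;      /* length_lo */
        memcpy(dst + 3, value, len);
        return 3 + len;
    }

    /* e.g. pp2_write_tlv(p, PP2_SUBTYPE_SSL_GROUP, "secp256r1", 9); */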

@@ -24,7 +24,7 @@ vtest installation
------------------------

To use vtest you will have to download and compile the recent vtest
sources found at https://github.com/vtest/VTest2.

To compile vtest:

@@ -1,14 +0,0 @@
global
    default-path config
    tune.lua.bool-sample-conversion normal
    # load all games here
    lua-load lua/trisdemo.lua

defaults
    timeout client 1h

# map one TCP port to each game
.notice 'use "socat TCP-CONNECT:0:7001 STDIO,raw,echo=0" to start playing'
frontend trisdemo
    bind :7001
    tcp-request content use-service lua.trisdemo

@@ -3,7 +3,7 @@
-- Provides a pure lua alternative to tcpcheck mailers.
--
-- To be loaded using "lua-load" from haproxy configuration to handle
-- email-alerts directly from lua

local SYSLOG_LEVEL = {
    ["EMERG"] = 0,

@@ -364,9 +364,9 @@ local function srv_event_add(event, data)
    mailers_track_server_events(data.reference)
end

-- tell haproxy that we do use the legacy native "mailers" config section
-- which allows us to retrieve mailers configuration using Proxy:get_mailers()
core.use_native_mailers_config()

-- event subscriptions are purposely performed in an init function to prevent
-- email alerts from being generated too early (when process is starting up)
|
|
|||
|
|

@@ -1,251 +0,0 @@
-- Example game of falling pieces for HAProxy CLI/Applet
local board_width = 10
local board_height = 20
local game_name = "Lua Tris Demo"

-- Shapes with IDs for color mapping
local pieces = {
    {id = 1, shape = {{1,1,1,1}}},       -- I (Cyan)
    {id = 2, shape = {{1,1},{1,1}}},     -- O (Yellow)
    {id = 3, shape = {{0,1,0},{1,1,1}}}, -- T (Purple)
    {id = 4, shape = {{0,1,1},{1,1,0}}}, -- S (Green)
    {id = 5, shape = {{1,1,0},{0,1,1}}}, -- Z (Red)
    {id = 6, shape = {{1,0,0},{1,1,1}}}, -- J (Blue)
    {id = 7, shape = {{0,0,1},{1,1,1}}}  -- L (Orange)
}

-- ANSI escape codes
local clear_screen = "\27[2J"
local cursor_home = "\27[H"
local cursor_hide = "\27[?25l"
local cursor_show = "\27[?25h"
local reset_color = "\27[0m"

local color_codes = {
    [1] = "\27[1;36m", -- I: Cyan
    [2] = "\27[1;37m", -- O: White
    [3] = "\27[1;35m", -- T: Purple
    [4] = "\27[1;32m", -- S: Green
    [5] = "\27[1;31m", -- Z: Red
    [6] = "\27[1;34m", -- J: Blue
    [7] = "\27[1;33m"  -- L: Yellow
}

local function init_board()
    local board = {}
    for y = 1, board_height do
        board[y] = {}
        for x = 1, board_width do
            board[y][x] = 0 -- 0 for empty, piece ID for placed blocks
        end
    end
    return board
end
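
-- A placement is refused when any occupied cell would cross the side or
-- bottom walls or overlap a settled block; rows above the top of the board
-- (board_y < 1) remain valid so a new piece can spawn partially hidden.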
local function can_place_piece(board, piece, px, py)
    for y = 1, #piece do
        for x = 1, #piece[1] do
            if piece[y][x] == 1 then
                local board_x = px + x - 1
                local board_y = py + y - 1
                if board_x < 1 or board_x > board_width or board_y > board_height or
                   (board_y >= 1 and board[board_y][board_x] ~= 0) then
                    return false
                end
            end
        end
    end
    return true
end

local function place_piece(board, piece, piece_id, px, py)
    for y = 1, #piece do
        for x = 1, #piece[1] do
            if piece[y][x] == 1 then
                local board_x = px + x - 1
                local board_y = py + y - 1
                if board_y >= 1 and board_y <= board_height then
                    board[board_y][board_x] = piece_id -- Store piece ID for color
                end
            end
        end
    end
end

local function clear_lines(board)
    local lines_cleared = 0
    local y = board_height
    while y >= 1 do
        local full = true
        for x = 1, board_width do
            if board[y][x] == 0 then
                full = false
                break
            end
        end
        if full then
            table.remove(board, y)
            table.insert(board, 1, {})
            for x = 1, board_width do
                board[1][x] = 0
            end
            lines_cleared = lines_cleared + 1
        else
            y = y - 1
        end
    end
    return lines_cleared
end
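
-- Rotate 90 degrees clockwise by transposing the shape matrix and reversing
-- the row order (new[x][#piece + 1 - y] = old[y][x]); the rotation is only
-- kept if the rotated shape still fits at (px, py).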
local function rotate_piece(piece, piece_id, px, py, board)
    local new_piece = {}
    for x = 1, #piece[1] do
        new_piece[x] = {}
        for y = 1, #piece do
            new_piece[x][#piece + 1 - y] = piece[y][x]
        end
    end
    if can_place_piece(board, new_piece, px, py) then
        return new_piece
    end
    return piece
end

function render(applet, board, piece, piece_id, px, py, score)
    local output = cursor_home
    output = output .. game_name .. " - Lines: " .. score .. "\r\n"
    output = output .. "+" .. string.rep("-", board_width * 2) .. "+\r\n"
    for y = 1, board_height do
        output = output .. "|"
        for x = 1, board_width do
            local char = "  "
            -- Current piece
            for py_idx = 1, #piece do
                for px_idx = 1, #piece[1] do
                    if piece[py_idx][px_idx] == 1 then
                        local board_x = px + px_idx - 1
                        local board_y = py + py_idx - 1
                        if board_x == x and board_y == y then
                            char = color_codes[piece_id] .. "[]" .. reset_color
                        end
                    end
                end
            end
            -- Placed blocks
            if board[y][x] ~= 0 then
                char = color_codes[board[y][x]] .. "[]" .. reset_color
            end
            output = output .. char
        end
        output = output .. "|\r\n"
    end
    output = output .. "+" .. string.rep("-", board_width * 2) .. "+\r\n"
    output = output .. "Use arrow keys to move, Up to rotate, q to quit"
    applet:send(output)
end

function handler(applet)
    local board = init_board()
    local piece_idx = math.random(#pieces)
    local current_piece = pieces[piece_idx].shape
    local piece_id = pieces[piece_idx].id
    local piece_x = math.floor(board_width / 2) - math.floor(#current_piece[1] / 2)
    local piece_y = 1
    local score = 0
    local game_over = false
    local delay = 500

    if not can_place_piece(board, current_piece, piece_x, piece_y) then
        game_over = true
    end

    applet:send(cursor_hide)
    applet:send(clear_screen)

    -- background task: make the piece fall by one line every <delay> ms
    local function fall_piece()
        while not game_over do
            piece_y = piece_y + 1
            if not can_place_piece(board, current_piece, piece_x, piece_y) then
                piece_y = piece_y - 1
                place_piece(board, current_piece, piece_id, piece_x, piece_y)
                score = score + clear_lines(board)
                piece_idx = math.random(#pieces)
                current_piece = pieces[piece_idx].shape
                piece_id = pieces[piece_idx].id
                piece_x = math.floor(board_width / 2) - math.floor(#current_piece[1] / 2)
                piece_y = 1
                if not can_place_piece(board, current_piece, piece_x, piece_y) then
                    game_over = true
                end
            end
            core.msleep(delay)
        end
    end

    core.register_task(fall_piece)

    local function drop_piece()
        while can_place_piece(board, current_piece, piece_x, piece_y) do
            piece_y = piece_y + 1
        end
        piece_y = piece_y - 1
        place_piece(board, current_piece, piece_id, piece_x, piece_y)
        score = score + clear_lines(board)
        piece_idx = math.random(#pieces)
        current_piece = pieces[piece_idx].shape
        piece_id = pieces[piece_idx].id
        piece_x = math.floor(board_width / 2) - math.floor(#current_piece[1] / 2)
        piece_y = 1
        if not can_place_piece(board, current_piece, piece_x, piece_y) then
            game_over = true
        end
        render(applet, board, current_piece, piece_id, piece_x, piece_y, score)
    end

    while not game_over do
        render(applet, board, current_piece, piece_id, piece_x, piece_y, score)

        -- update the delay based on the score: 500ms at 0 lines, shrinking by
        -- 4ms per cleared line down to 100ms at 100 lines and beyond
        if score >= 100 then
            delay = 100
        else
            delay = 500 - 4*score
        end

        local input = applet:receive(1, delay)
        if input then
            if input == "" or input == "q" then
                game_over = true
            elseif input == "\27" then
                local a = applet:receive(1, delay)
                if a == "[" then
                    local b = applet:receive(1, delay)
                    if b == "A" then -- Up arrow (rotate clockwise)
                        current_piece = rotate_piece(current_piece, piece_id, piece_x, piece_y, board)
                    elseif b == "B" then -- Down arrow (full drop)
                        drop_piece()
                    elseif b == "C" then -- Right arrow
                        piece_x = piece_x + 1
                        if not can_place_piece(board, current_piece, piece_x, piece_y) then
                            piece_x = piece_x - 1
                        end
                    elseif b == "D" then -- Left arrow
                        piece_x = piece_x - 1
                        if not can_place_piece(board, current_piece, piece_x, piece_y) then
                            piece_x = piece_x + 1
                        end
                    end
                end
            end
        end
    end

    applet:send(clear_screen .. cursor_home .. "Game Over! Lines: " .. score .. "\r\n" .. cursor_show)
end

-- works as a TCP applet
core.register_service("trisdemo", "tcp", handler)

-- may also work on the CLI but requires an unbuffered handler
core.register_cli({"trisdemo"}, "Play a simple falling pieces game", handler)

@@ -1,21 +0,0 @@
# This configuration example enables all available traces and outputs them
# on stderr.
# "stream" traces need haproxy to be compiled with -DDEBUG_DEV or -DDEBUG_FULL

.if version_atleast(3.1-dev8)
traces
    trace all sink stderr level developer

    trace pt verbosity complete start now
    trace h1 verbosity complete start now
    trace h2 verbosity complete start now
    trace fcgi verbosity complete start now
    trace spop verbosity complete start now
    trace stream verbosity complete start now
    trace check verbosity complete start now
    trace applet verbosity complete start now
    trace h3 start now
    trace quic start now
    trace qmux start now
    trace peers start now
.endif

@@ -50,9 +50,6 @@ static inline int acl_pass(enum acl_test_res res)
 * NULL if not found.
 */
struct acl *find_acl_by_name(const char *name, struct list *head);
struct acl *find_acl_default(const char *acl_name, struct list *known_acl,
                             char **err, struct arg_list *al,
                             const char *file, int line);

/* Return a pointer to the ACL keyword <kw> within the list starting at <head>,
 * or NULL if not found. Note that if <kw> contains an opening parenthesis,

@@ -104,26 +101,6 @@ struct acl_cond *build_acl_cond(const char *file, int line, struct list *known_a
 */
enum acl_test_res acl_exec_cond(struct acl_cond *cond, struct proxy *px, struct session *sess, struct stream *strm, unsigned int opt);


/* helper that combines acl_exec_cond() and acl_pass(), and also takes into
 * account cond->pol in order to return either 1 if the cond should pass and
 * 0 otherwise
 * <cond> may be NULL, in which case 1 is returned as the cond cannot fail
 */
static inline int acl_match_cond(struct acl_cond *cond, struct proxy *px, struct session *sess, struct stream *strm, unsigned int opt)
{
    int ret;

    if (!cond)
        return 1;

    ret = acl_pass(acl_exec_cond(cond, px, sess, strm, opt));
    if (cond->pol == ACL_COND_UNLESS)
        ret = !ret;

    return ret;
}

/* Returns a pointer to the first ACL conflicting with usage at place <where>
 * which is one of the SMP_VAL_* bits indicating a check place, or NULL if
 * no conflict is found. Only full conflicts are detected (ACL is not usable).

@@ -1,104 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#ifndef _ACME_T_H_
#define _ACME_T_H_

#include <haproxy/istbuf.h>
#include <haproxy/openssl-compat.h>

#define ACME_RETRY 5

/* acme section configuration */
struct acme_cfg {
    char *filename;  /* config filename */
    int linenum;     /* config linenum */
    char *name;      /* section name */
    int reuse_key;   /* do we need to renew the private key */
    char *directory; /* directory URL */
    char *map;       /* storage for tokens + thumbprint */
    struct {
        char *contact;    /* email associated to account */
        char *file;       /* account key filename */
        EVP_PKEY *pkey;   /* account PKEY */
        char *thumbprint; /* account PKEY JWS thumbprint */
    } account;

    struct {
        int type;   /* EVP_PKEY_EC or EVP_PKEY_RSA */
        int bits;   /* bits for RSA */
        int curves; /* NID of curves */
    } key;
    char *challenge; /* HTTP-01, DNS-01, etc */
    char *vars;      /* variables put in the dpapi sink */
    char *provider;  /* DNS provider put in the dpapi sink */
    struct acme_cfg *next;
};
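
/* ACME task state machine: one state per step of the RFC 8555 order
 * workflow, from nonce and account setup through order, authorizations,
 * challenges and finalization, up to certificate retrieval.
 */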
enum acme_st {
    ACME_RESOURCES = 0,
    ACME_NEWNONCE,
    ACME_CHKACCOUNT,
    ACME_NEWACCOUNT,
    ACME_NEWORDER,
    ACME_AUTH,
    ACME_CHALLENGE,
    ACME_CHKCHALLENGE,
    ACME_FINALIZE,
    ACME_CHKORDER,
    ACME_CERTIFICATE,
    ACME_END
};

enum http_st {
    ACME_HTTP_REQ,
    ACME_HTTP_RES,
};

struct acme_auth {
    struct ist dns;   /* dns entry */
    struct ist auth;  /* auth URI */
    struct ist chall; /* challenge URI */
    struct ist token; /* token */
    int ready;        /* is the challenge ready ? */
    void *next;
};

/* acme task context */
struct acme_ctx {
    enum acme_st state;
    enum http_st http_state;
    int retries;
    int retryafter;
    struct httpclient *hc;
    struct acme_cfg *cfg;
    struct ckch_store *store;
    struct {
        struct ist newNonce;
        struct ist newAccount;
        struct ist newOrder;
    } resources;
    struct ist nonce;
    struct ist kid;
    struct ist order;
    struct acme_auth *auths;
    struct acme_auth *next_auth;
    X509_REQ *req;
    struct ist finalize;
    struct ist certificate;
    struct task *task;
    struct ebmb_node node;
    char name[VAR_ARRAY];
};

#define ACME_EV_SCHED (1ULL << 0) /* scheduling wakeup */
#define ACME_EV_NEW   (1ULL << 1) /* new task */
#define ACME_EV_TASK  (1ULL << 2) /* Task handler */
#define ACME_EV_REQ   (1ULL << 3) /* HTTP Request */
#define ACME_EV_RES   (1ULL << 4) /* HTTP Response */

#define ACME_VERB_CLEAN    1
#define ACME_VERB_MINIMAL  2
#define ACME_VERB_SIMPLE   3
#define ACME_VERB_ADVANCED 4
#define ACME_VERB_COMPLETE 5

#endif

@@ -1,12 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#ifndef _ACME_H_
#define _ACME_H_

#include <haproxy/ssl_ckch-t.h>

int ckch_conf_acme_init(void *value, char *buf, struct ckch_store *s, int cli, const char *filename, int linenum, char **err);
EVP_PKEY *acme_gen_tmp_pkey();
X509 *acme_gen_tmp_x509();


#endif

@@ -66,8 +66,7 @@ enum act_parse_ret {
enum act_opt {
    ACT_OPT_NONE        = 0x00000000, /* no flag */
    ACT_OPT_FINAL       = 0x00000001, /* last call, cannot yield */
    ACT_OPT_FINAL_EARLY = 0x00000002, /* set in addition to ACT_OPT_FINAL if last call occurs earlier than normal due to unexpected IO/error */
    ACT_OPT_FIRST       = 0x00000004, /* first call for this action */
    ACT_OPT_FIRST       = 0x00000002, /* first call for this action */
};

/* Flags used to describe the action. */

@@ -42,28 +42,12 @@ enum memprof_method {
    MEMPROF_METH_MALLOC,
    MEMPROF_METH_CALLOC,
    MEMPROF_METH_REALLOC,
    MEMPROF_METH_STRDUP,
    MEMPROF_METH_FREE,
    MEMPROF_METH_P_ALLOC,        // pool_alloc()
    MEMPROF_METH_P_FREE,         // pool_free()
    MEMPROF_METH_STRNDUP,        // _POSIX_C_SOURCE >= 200809L || glibc >= 2.10
    MEMPROF_METH_VALLOC,         // _BSD_SOURCE || _XOPEN_SOURCE>=500 || glibc >= 2.12
    MEMPROF_METH_ALIGNED_ALLOC,  // _ISOC11_SOURCE
    MEMPROF_METH_POSIX_MEMALIGN, // _POSIX_C_SOURCE >= 200112L
    MEMPROF_METH_MEMALIGN,       // obsolete
    MEMPROF_METH_PVALLOC,        // obsolete
    MEMPROF_METH_METHODS         /* count, must be last */
};

/* mask of 1 << method to match those which free. Note that we don't count
 * p_alloc among them since p_alloc only has an optionally valid free counter,
 * which is in any case reported by another call, as p_alloc itself does
 * not free.
 */
#define MEMPROF_FREE_MASK ((1UL << MEMPROF_METH_REALLOC) | \
                           (1UL << MEMPROF_METH_FREE)    | \
                           (1UL << MEMPROF_METH_P_FREE))

/* stats:
 * - malloc increases alloc
 * - free increases free (if non null)

@@ -76,12 +60,12 @@ struct memprof_stats {
    const void *caller;
    enum memprof_method method;
    /* 4-7 bytes hole here */
    unsigned long long locked_calls;
    unsigned long long alloc_calls;
    unsigned long long free_calls;
    unsigned long long alloc_tot;
    unsigned long long free_tot;
    void *info; // for pools, ptr to the pool
    void *pad;  // pad to 64
};
#endif

@@ -125,8 +109,8 @@ struct activity {
    unsigned int ctr2; // general purpose debug counter
#endif
    char __pad[0]; // unused except to check remaining room
    char __end[0] THREAD_ALIGNED();
} THREAD_ALIGNED();
    char __end[0] __attribute__((aligned(64))); // align size to 64.
};

/* 256 entries for callers * callees should be highly sufficient (~45 seen usually) */
#define SCHED_ACT_HASH_BITS 8

@@ -143,10 +127,7 @@ struct sched_activity {
    uint64_t calls;
    uint64_t cpu_time;
    uint64_t lat_time;
    uint64_t lkw_time; /* lock waiting time */
    uint64_t lkd_time; /* locked time */
    uint64_t mem_time; /* memory ops wait time */
} THREAD_ALIGNED();
};

#endif /* _HAPROXY_ACTIVITY_T_H */

@@ -35,12 +35,6 @@ struct sched_activity *sched_activity_entry(struct sched_activity *array, const

#ifdef USE_MEMORY_PROFILING
struct memprof_stats *memprof_get_bin(const void *ra, enum memprof_method meth);
void memprof_remove_stale_info(const void *info);
#else
static inline void memprof_remove_stale_info(const void *info)
{
    /* nothing to do */
}
#endif

#endif /* _HAPROXY_ACTIVITY_H */

@@ -34,7 +34,7 @@
/* flags for appctx->state */

/* Room for per-command context (mostly CLI commands but not only) */
#define APPLET_MAX_SVCCTX 256
#define APPLET_MAX_SVCCTX 128

/* Appctx Flags */
#define APPCTX_FL_INBLK_ALLOC 0x00000001

@@ -47,7 +47,7 @@
#define APPCTX_FL_ERROR        0x00000080
#define APPCTX_FL_SHUTDOWN     0x00000100 /* applet was shut down (->release() called if any). No more data exchange with SCs */
#define APPCTX_FL_WANT_DIE     0x00000200 /* applet was running and requested to die */
/* unused: 0x00000400 */
#define APPCTX_FL_INOUT_BUFS   0x00000400 /* applet uses its own buffers */
#define APPCTX_FL_FASTFWD      0x00000800 /* zero-copy forwarding is in-use, don't fill the outbuf */
#define APPCTX_FL_IN_MAYALLOC  0x00001000 /* applet may try again to allocate its inbuf */
#define APPCTX_FL_OUT_MAYALLOC 0x00002000 /* applet may try again to allocate its outbuf */

@@ -73,22 +73,17 @@ static forceinline char *appctx_show_flags(char *buf, size_t len, const char *de
    _(APPCTX_FL_OUTBLK_ALLOC, _(APPCTX_FL_OUTBLK_FULL,
    _(APPCTX_FL_EOI, _(APPCTX_FL_EOS,
    _(APPCTX_FL_ERR_PENDING, _(APPCTX_FL_ERROR,
    _(APPCTX_FL_SHUTDOWN, _(APPCTX_FL_WANT_DIE,
    _(APPCTX_FL_FASTFWD, _(APPCTX_FL_IN_MAYALLOC, _(APPCTX_FL_OUT_MAYALLOC)))))))))))));
    _(APPCTX_FL_SHUTDOWN, _(APPCTX_FL_WANT_DIE, _(APPCTX_FL_INOUT_BUFS,
    _(APPCTX_FL_FASTFWD, _(APPCTX_FL_IN_MAYALLOC, _(APPCTX_FL_OUT_MAYALLOC))))))))))))));
    /* epilogue */
    _(~0U);
    return buf;
#undef _
}

#define APPLET_FL_NEW_API 0x00000001 /* Set if the applet is based on the new API (using applet's buffers) */
#define APPLET_FL_WARNED  0x00000002 /* Set when warning was already emitted about a legacy applet */
#define APPLET_FL_HTX     0x00000004 /* Set if the applet is using HTX buffers */

/* Applet descriptor */
struct applet {
    enum obj_type obj_type; /* object type = OBJ_TYPE_APPLET */
    unsigned int flags;     /* APPLET_FL_* flags */
    /* 3 unused bytes here */
    char *name;             /* applet's name to report in logs */
    int (*init)(struct appctx *); /* callback to init resources, may be NULL.

@@ -106,36 +101,29 @@ struct applet {
struct appctx {
    enum obj_type obj_type; /* OBJ_TYPE_APPCTX */
    /* 3 unused bytes here */
    unsigned int st0; /* Main applet state. May be used by any applet */
    unsigned int st1; /* Applet substate. May be used by any applet */
    unsigned int st0; /* CLI state for stats, session state for peers */
    unsigned int st1; /* prompt/payload (bitwise OR of APPCTX_CLI_ST1_*) for stats, session error for peers */

    unsigned int flags; /* APPCTX_FL_* */
    struct buffer inbuf;
    struct buffer outbuf;
    size_t to_forward;

    struct buffer *chunk;  /* used to store unfinished commands */
    struct applet *applet; /* applet this context refers to */
    struct session *sess;  /* session for frontend applets (NULL for backend applets) */
    struct sedesc *sedesc; /* stream endpoint descriptor the applet is attached to */

    struct {
        struct buffer *cmdline; /* used to store unfinished commands */

        int severity_output; /* used within the cli_io_handler to format severity output of informational feedback */
        int level;           /* the level of CLI which can be lowered dynamically */
        char payload_pat[8]; /* Payload pattern */
        char *payload;       /* Pointer on the payload. NULL if no payload */
        uint32_t anon_key;   /* the key to anonymise with the hash in cli */
        /* XXX 4 unused bytes here */
        int (*io_handler)(struct appctx *appctx);  /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK */
        void (*io_release)(struct appctx *appctx); /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK,
                                                      if the command is terminated or the session released */
    } cli_ctx; /* context dedicated to the CLI applet */

    struct act_rule *rule; /* rule associated with the applet. */
    int (*io_handler)(struct appctx *appctx);  /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK */
    void (*io_release)(struct appctx *appctx); /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK,
                                                  if the command is terminated or the session released */
    int cli_severity_output; /* used within the cli_io_handler to format severity output of informational feedback */
    int cli_level;           /* the level of CLI which can be lowered dynamically */
    char cli_payload_pat[8]; /* Payload pattern */
    uint32_t cli_anon_key;   /* the key to anonymise with the hash in cli */
    struct buffer_wait buffer_wait; /* position in the list of objects waiting for a buffer */
    struct task *t;                 /* task associated to the applet */
    struct freq_ctr call_rate;      /* appctx call rate */
    /* XXX 4 unused bytes here */
    struct mt_list wait_entry;      /* entry in a list of waiters for an event (e.g. ring events) */

    /* The pointer seen by application code is appctx->svcctx. In 2.7 the

Some files were not shown because too many files have changed in this diff.