Compare commits
5 Commits
master
...
20250217-c
Author | SHA1 | Date | |
---|---|---|---|
|
a69203a3a9 | ||
|
af32dc61f4 | ||
|
5316923f6b | ||
|
5821eb062a | ||
|
6e91a7872e |
4
.github/h2spec.config
vendored
4
.github/h2spec.config
vendored
@ -20,8 +20,8 @@ defaults
|
||||
frontend h2
|
||||
mode http
|
||||
bind 127.0.0.1:8443 ssl crt reg-tests/ssl/common.pem alpn h2,http/1.1
|
||||
default_backend h2b
|
||||
default_backend h2
|
||||
|
||||
backend h2b
|
||||
backend h2
|
||||
errorfile 200 .github/errorfile
|
||||
http-request deny deny_status 200
|
||||
|
80
.github/workflows/aws-lc-fips.yml
vendored
80
.github/workflows/aws-lc-fips.yml
vendored
@ -5,8 +5,82 @@ on:
|
||||
- cron: "0 0 * * 4"
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test:
|
||||
uses: ./.github/workflows/aws-lc-template.yml
|
||||
with:
|
||||
command: "from matrix import determine_latest_aws_lc_fips; print(determine_latest_aws_lc_fips(''))"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install VTest
|
||||
run: |
|
||||
scripts/build-vtest.sh
|
||||
- name: Determine latest AWS-LC release
|
||||
id: get_aws_lc_release
|
||||
run: |
|
||||
result=$(cd .github && python3 -c "from matrix import determine_latest_aws_lc_fips; print(determine_latest_aws_lc_fips(''))")
|
||||
echo $result
|
||||
echo "result=$result" >> $GITHUB_OUTPUT
|
||||
- name: Cache AWS-LC
|
||||
id: cache_aws_lc
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: '~/opt/'
|
||||
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
|
||||
- name: Install apt dependencies
|
||||
run: |
|
||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
||||
sudo apt-get --no-install-recommends -y install socat gdb
|
||||
- name: Install AWS-LC
|
||||
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
|
||||
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
|
||||
- name: Compile HAProxy
|
||||
run: |
|
||||
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
||||
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
|
||||
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
||||
DEBUG="-DDEBUG_POOL_INTEGRITY" \
|
||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
||||
sudo make install
|
||||
- name: Show HAProxy version
|
||||
id: show-version
|
||||
run: |
|
||||
ldd $(which haproxy)
|
||||
haproxy -vv
|
||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||
- name: Install problem matcher for VTest
|
||||
run: echo "::add-matcher::.github/vtest.json"
|
||||
- name: Run VTest for HAProxy
|
||||
id: vtest
|
||||
run: |
|
||||
# This is required for macOS which does not actually allow to increase
|
||||
# the '-n' soft limit to the hard limit, thus failing to run.
|
||||
ulimit -n 65536
|
||||
# allow to catch coredumps
|
||||
ulimit -c unlimited
|
||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||
- name: Show VTest results
|
||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||
run: |
|
||||
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
||||
printf "::group::"
|
||||
cat $folder/INFO
|
||||
cat $folder/LOG
|
||||
echo "::endgroup::"
|
||||
done
|
||||
exit 1
|
||||
- name: Show coredumps
|
||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||
run: |
|
||||
failed=false
|
||||
shopt -s nullglob
|
||||
for file in /tmp/core.*; do
|
||||
failed=true
|
||||
printf "::group::"
|
||||
gdb -ex 'thread apply all bt full' ./haproxy $file
|
||||
echo "::endgroup::"
|
||||
done
|
||||
if [ "$failed" = true ]; then
|
||||
exit 1;
|
||||
fi
|
||||
|
103
.github/workflows/aws-lc-template.yml
vendored
103
.github/workflows/aws-lc-template.yml
vendored
@ -1,103 +0,0 @@
|
||||
name: AWS-LC template
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
command:
|
||||
required: true
|
||||
type: string
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install VTest
|
||||
run: |
|
||||
scripts/build-vtest.sh
|
||||
- name: Determine latest AWS-LC release
|
||||
id: get_aws_lc_release
|
||||
run: |
|
||||
result=$(cd .github && python3 -c "${{ inputs.command }}")
|
||||
echo $result
|
||||
echo "result=$result" >> $GITHUB_OUTPUT
|
||||
- name: Cache AWS-LC
|
||||
id: cache_aws_lc
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: '~/opt/'
|
||||
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
|
||||
- name: Install apt dependencies
|
||||
run: |
|
||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
||||
sudo apt-get --no-install-recommends -y install socat gdb jose
|
||||
- name: Install AWS-LC
|
||||
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
|
||||
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
|
||||
- name: Compile HAProxy
|
||||
run: |
|
||||
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
||||
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
|
||||
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
||||
sudo make install
|
||||
- name: Show HAProxy version
|
||||
id: show-version
|
||||
run: |
|
||||
ldd $(which haproxy)
|
||||
haproxy -vv
|
||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||
- name: Install problem matcher for VTest
|
||||
run: echo "::add-matcher::.github/vtest.json"
|
||||
- name: Run VTest for HAProxy
|
||||
id: vtest
|
||||
run: |
|
||||
# This is required for macOS which does not actually allow to increase
|
||||
# the '-n' soft limit to the hard limit, thus failing to run.
|
||||
ulimit -n 65536
|
||||
# allow to catch coredumps
|
||||
ulimit -c unlimited
|
||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||
- name: Run Unit tests
|
||||
id: unittests
|
||||
run: |
|
||||
make unit-tests
|
||||
- name: Show VTest results
|
||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||
run: |
|
||||
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
||||
printf "::group::"
|
||||
cat $folder/INFO
|
||||
cat $folder/LOG
|
||||
echo "::endgroup::"
|
||||
done
|
||||
exit 1
|
||||
- name: Show coredumps
|
||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||
run: |
|
||||
failed=false
|
||||
shopt -s nullglob
|
||||
for file in /tmp/core.*; do
|
||||
failed=true
|
||||
printf "::group::"
|
||||
gdb -ex 'thread apply all bt full' ./haproxy $file
|
||||
echo "::endgroup::"
|
||||
done
|
||||
if [ "$failed" = true ]; then
|
||||
exit 1;
|
||||
fi
|
||||
- name: Show Unit-Tests results
|
||||
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
|
||||
run: |
|
||||
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
|
||||
printf "::group::"
|
||||
cat $result
|
||||
echo "::endgroup::"
|
||||
done
|
||||
exit 1
|
||||
|
80
.github/workflows/aws-lc.yml
vendored
80
.github/workflows/aws-lc.yml
vendored
@ -5,8 +5,82 @@ on:
|
||||
- cron: "0 0 * * 4"
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test:
|
||||
uses: ./.github/workflows/aws-lc-template.yml
|
||||
with:
|
||||
command: "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install VTest
|
||||
run: |
|
||||
scripts/build-vtest.sh
|
||||
- name: Determine latest AWS-LC release
|
||||
id: get_aws_lc_release
|
||||
run: |
|
||||
result=$(cd .github && python3 -c "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))")
|
||||
echo $result
|
||||
echo "result=$result" >> $GITHUB_OUTPUT
|
||||
- name: Cache AWS-LC
|
||||
id: cache_aws_lc
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: '~/opt/'
|
||||
key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
|
||||
- name: Install apt dependencies
|
||||
run: |
|
||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
||||
sudo apt-get --no-install-recommends -y install socat gdb
|
||||
- name: Install AWS-LC
|
||||
if: ${{ steps.cache_ssl.outputs.cache-hit != 'true' }}
|
||||
run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
|
||||
- name: Compile HAProxy
|
||||
run: |
|
||||
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
||||
USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
|
||||
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
||||
DEBUG="-DDEBUG_POOL_INTEGRITY" \
|
||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
||||
sudo make install
|
||||
- name: Show HAProxy version
|
||||
id: show-version
|
||||
run: |
|
||||
ldd $(which haproxy)
|
||||
haproxy -vv
|
||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||
- name: Install problem matcher for VTest
|
||||
run: echo "::add-matcher::.github/vtest.json"
|
||||
- name: Run VTest for HAProxy
|
||||
id: vtest
|
||||
run: |
|
||||
# This is required for macOS which does not actually allow to increase
|
||||
# the '-n' soft limit to the hard limit, thus failing to run.
|
||||
ulimit -n 65536
|
||||
# allow to catch coredumps
|
||||
ulimit -c unlimited
|
||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||
- name: Show VTest results
|
||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||
run: |
|
||||
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
||||
printf "::group::"
|
||||
cat $folder/INFO
|
||||
cat $folder/LOG
|
||||
echo "::endgroup::"
|
||||
done
|
||||
exit 1
|
||||
- name: Show coredumps
|
||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||
run: |
|
||||
failed=false
|
||||
shopt -s nullglob
|
||||
for file in /tmp/core.*; do
|
||||
failed=true
|
||||
printf "::group::"
|
||||
gdb -ex 'thread apply all bt full' ./haproxy $file
|
||||
echo "::endgroup::"
|
||||
done
|
||||
if [ "$failed" = true ]; then
|
||||
exit 1;
|
||||
fi
|
||||
|
5
.github/workflows/codespell.yml
vendored
5
.github/workflows/codespell.yml
vendored
@ -3,7 +3,6 @@ name: Spelling Check
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 0 * * 2"
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
@ -11,12 +10,12 @@ permissions:
|
||||
jobs:
|
||||
codespell:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||
if: ${{ github.repository_owner == 'haproxy' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: codespell-project/codespell-problem-matcher@v1.2.0
|
||||
- uses: codespell-project/actions-codespell@master
|
||||
with:
|
||||
skip: CHANGELOG,Makefile,*.fig,*.pem,./doc/design-thoughts,./doc/internals
|
||||
ignore_words_list: pres,ist,ists,hist,wan,ca,cas,que,ans,te,nd,referer,ot,uint,iif,fo,keep-alives,dosen,ifset,thrid,strack,ba,chck,hel,unx,mor,clen,collet,bu,htmp,siz,experim
|
||||
ignore_words_list: ist,ists,hist,wan,ca,cas,que,ans,te,nd,referer,ot,uint,iif,fo,keep-alives,dosen,ifset,thrid,strack,ba,chck,hel,unx,mor,clen,collet,bu,htmp,siz,experim
|
||||
uri_ignore_words_list: trafic,ressources
|
||||
|
15
.github/workflows/compliance.yml
vendored
15
.github/workflows/compliance.yml
vendored
@ -11,8 +11,13 @@ permissions:
|
||||
jobs:
|
||||
h2spec:
|
||||
name: h2spec
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- TARGET: linux-glibc
|
||||
CC: gcc
|
||||
os: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install h2spec
|
||||
@ -23,12 +28,12 @@ jobs:
|
||||
tar xvf h2spec.tar.gz
|
||||
sudo install -m755 h2spec /usr/local/bin/h2spec
|
||||
echo "version=${H2SPEC_VERSION}" >> $GITHUB_OUTPUT
|
||||
- name: Compile HAProxy with gcc
|
||||
- name: Compile HAProxy with ${{ matrix.CC }}
|
||||
run: |
|
||||
make -j$(nproc) all \
|
||||
ERR=1 \
|
||||
TARGET=linux-glibc \
|
||||
CC=gcc \
|
||||
TARGET=${{ matrix.TARGET }} \
|
||||
CC=${{ matrix.CC }} \
|
||||
DEBUG="-DDEBUG_POOL_INTEGRITY" \
|
||||
USE_OPENSSL=1
|
||||
sudo make install
|
||||
|
2
.github/workflows/coverity.yml
vendored
2
.github/workflows/coverity.yml
vendored
@ -15,7 +15,7 @@ permissions:
|
||||
jobs:
|
||||
scan:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||
if: ${{ github.repository_owner == 'haproxy' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install apt dependencies
|
||||
|
2
.github/workflows/cross-zoo.yml
vendored
2
.github/workflows/cross-zoo.yml
vendored
@ -91,7 +91,7 @@ jobs:
|
||||
}
|
||||
]
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||
if: ${{ github.repository_owner == 'haproxy' }}
|
||||
steps:
|
||||
- name: install packages
|
||||
run: |
|
||||
|
10
.github/workflows/fedora-rawhide.yml
vendored
10
.github/workflows/fedora-rawhide.yml
vendored
@ -20,14 +20,14 @@ jobs:
|
||||
]
|
||||
name: ${{ matrix.platform.cc }}.${{ matrix.platform.name }}
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||
if: ${{ github.repository_owner == 'haproxy' }}
|
||||
container:
|
||||
image: fedora:rawhide
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
dnf -y install awk diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang
|
||||
dnf -y install diffutils git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang
|
||||
dnf -y install 'perl(FindBin)' 'perl(File::Compare)' perl-IPC-Cmd 'perl(File::Copy)' glibc-devel.i686 lua-devel.i686 lua-devel.x86_64 systemd-devel.i686 zlib-ng-compat-devel.i686 pcre-devel.i686 libatomic.i686
|
||||
- name: Install VTest
|
||||
run: scripts/build-vtest.sh
|
||||
@ -41,7 +41,7 @@ jobs:
|
||||
make dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
|
||||
- name: Compile HAProxy with ${{ matrix.platform.cc }}
|
||||
run: |
|
||||
make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }} -Wl,-rpath,${HOME}/opt/lib" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
|
||||
make -j3 CC=${{ matrix.platform.cc }} V=1 ERR=1 TARGET=linux-glibc USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 ADDLIB="${{ matrix.platform.ADDLIB_ATOMIC }} -Wl,-rpath,${HOME}/opt/lib" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include ARCH_FLAGS="${{ matrix.platform.ARCH_FLAGS }}"
|
||||
make install
|
||||
- name: Show HAProxy version
|
||||
id: show-version
|
||||
@ -64,7 +64,3 @@ jobs:
|
||||
cat $folder/LOG
|
||||
echo "::endgroup::"
|
||||
done
|
||||
- name: Run Unit tests
|
||||
id: unittests
|
||||
run: |
|
||||
make unit-tests
|
2
.github/workflows/illumos.yml
vendored
2
.github/workflows/illumos.yml
vendored
@ -8,7 +8,7 @@ on:
|
||||
jobs:
|
||||
gcc:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||
if: ${{ github.repository_owner == 'haproxy' }}
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
|
18
.github/workflows/musl.yml
vendored
18
.github/workflows/musl.yml
vendored
@ -22,11 +22,11 @@ jobs:
|
||||
echo '/tmp/core/core.%h.%e.%t' > /proc/sys/kernel/core_pattern
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install dependencies
|
||||
run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg jose
|
||||
run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg
|
||||
- name: Install VTest
|
||||
run: scripts/build-vtest.sh
|
||||
- name: Build
|
||||
run: make -j$(nproc) TARGET=linux-musl DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
|
||||
run: make -j$(nproc) TARGET=linux-musl ARCH_FLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
|
||||
- name: Show version
|
||||
run: ./haproxy -vv
|
||||
- name: Show linked libraries
|
||||
@ -37,10 +37,6 @@ jobs:
|
||||
- name: Run VTest
|
||||
id: vtest
|
||||
run: make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||
- name: Run Unit tests
|
||||
id: unittests
|
||||
run: |
|
||||
make unit-tests
|
||||
- name: Show coredumps
|
||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||
run: |
|
||||
@ -64,13 +60,3 @@ jobs:
|
||||
cat $folder/LOG
|
||||
echo "::endgroup::"
|
||||
done
|
||||
- name: Show Unit-Tests results
|
||||
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
|
||||
run: |
|
||||
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
|
||||
printf "::group::"
|
||||
cat $result
|
||||
echo "::endgroup::"
|
||||
done
|
||||
exit 1
|
||||
|
||||
|
2
.github/workflows/netbsd.yml
vendored
2
.github/workflows/netbsd.yml
vendored
@ -8,7 +8,7 @@ on:
|
||||
jobs:
|
||||
gcc:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||
if: ${{ github.repository_owner == 'haproxy' }}
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
|
13
.github/workflows/quic-interop-aws-lc.yml
vendored
13
.github/workflows/quic-interop-aws-lc.yml
vendored
@ -13,7 +13,7 @@ on:
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-24.04
|
||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||
if: ${{ github.repository_owner == 'haproxy' }}
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
@ -38,15 +38,6 @@ jobs:
|
||||
SSLLIB: AWS-LC
|
||||
tags: ghcr.io/${{ github.repository }}:aws-lc
|
||||
|
||||
- name: Cleanup registry
|
||||
uses: actions/delete-package-versions@v5
|
||||
with:
|
||||
owner: ${{ github.repository_owner }}
|
||||
package-name: 'haproxy'
|
||||
package-type: container
|
||||
min-versions-to-keep: 1
|
||||
delete-only-untagged-versions: 'true'
|
||||
|
||||
run:
|
||||
needs: build
|
||||
strategy:
|
||||
@ -61,7 +52,7 @@ jobs:
|
||||
|
||||
name: ${{ matrix.suite.client }}
|
||||
runs-on: ubuntu-24.04
|
||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||
if: ${{ github.repository_owner == 'haproxy' }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
13
.github/workflows/quic-interop-libressl.yml
vendored
13
.github/workflows/quic-interop-libressl.yml
vendored
@ -13,7 +13,7 @@ on:
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-24.04
|
||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||
if: ${{ github.repository_owner == 'haproxy' }}
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
@ -38,15 +38,6 @@ jobs:
|
||||
SSLLIB: LibreSSL
|
||||
tags: ghcr.io/${{ github.repository }}:libressl
|
||||
|
||||
- name: Cleanup registry
|
||||
uses: actions/delete-package-versions@v5
|
||||
with:
|
||||
owner: ${{ github.repository_owner }}
|
||||
package-name: 'haproxy'
|
||||
package-type: container
|
||||
min-versions-to-keep: 1
|
||||
delete-only-untagged-versions: 'true'
|
||||
|
||||
run:
|
||||
needs: build
|
||||
strategy:
|
||||
@ -59,7 +50,7 @@ jobs:
|
||||
|
||||
name: ${{ matrix.suite.client }}
|
||||
runs-on: ubuntu-24.04
|
||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||
if: ${{ github.repository_owner == 'haproxy' }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
83
.github/workflows/quictls.yml
vendored
83
.github/workflows/quictls.yml
vendored
@ -1,83 +0,0 @@
|
||||
#
|
||||
# weekly run against modern QuicTLS branch, i.e. https://github.com/quictls/quictls
|
||||
#
|
||||
|
||||
name: QuicTLS
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 0 * * 4"
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install VTest
|
||||
run: |
|
||||
scripts/build-vtest.sh
|
||||
- name: Install apt dependencies
|
||||
run: |
|
||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
||||
sudo apt-get --no-install-recommends -y install socat gdb
|
||||
- name: Install QuicTLS
|
||||
run: env QUICTLS=yes QUICTLS_URL=https://github.com/quictls/quictls scripts/build-ssl.sh
|
||||
- name: Compile HAProxy
|
||||
run: |
|
||||
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
||||
USE_QUIC=1 USE_OPENSSL=1 \
|
||||
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
|
||||
ARCH_FLAGS="-ggdb3 -fsanitize=address"
|
||||
sudo make install
|
||||
- name: Show HAProxy version
|
||||
id: show-version
|
||||
run: |
|
||||
ldd $(which haproxy)
|
||||
haproxy -vv
|
||||
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
|
||||
- name: Install problem matcher for VTest
|
||||
run: echo "::add-matcher::.github/vtest.json"
|
||||
- name: Run VTest for HAProxy
|
||||
id: vtest
|
||||
run: |
|
||||
# This is required for macOS which does not actually allow to increase
|
||||
# the '-n' soft limit to the hard limit, thus failing to run.
|
||||
ulimit -n 65536
|
||||
# allow to catch coredumps
|
||||
ulimit -c unlimited
|
||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||
- name: Show VTest results
|
||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||
run: |
|
||||
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
|
||||
printf "::group::"
|
||||
cat $folder/INFO
|
||||
cat $folder/LOG
|
||||
echo "::endgroup::"
|
||||
done
|
||||
exit 1
|
||||
- name: Run Unit tests
|
||||
id: unittests
|
||||
run: |
|
||||
make unit-tests
|
||||
- name: Show coredumps
|
||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||
run: |
|
||||
failed=false
|
||||
shopt -s nullglob
|
||||
for file in /tmp/core.*; do
|
||||
failed=true
|
||||
printf "::group::"
|
||||
gdb -ex 'thread apply all bt full' ./haproxy $file
|
||||
echo "::endgroup::"
|
||||
done
|
||||
if [ "$failed" = true ]; then
|
||||
exit 1;
|
||||
fi
|
27
.github/workflows/vtest.yml
vendored
27
.github/workflows/vtest.yml
vendored
@ -86,8 +86,7 @@ jobs:
|
||||
${{ contains(matrix.FLAGS, 'USE_PCRE2=1') && 'libpcre2-dev' || '' }} \
|
||||
${{ contains(matrix.ssl, 'BORINGSSL=yes') && 'ninja-build' || '' }} \
|
||||
socat \
|
||||
gdb \
|
||||
jose
|
||||
gdb
|
||||
- name: Install brew dependencies
|
||||
if: ${{ startsWith(matrix.os, 'macos-') }}
|
||||
run: |
|
||||
@ -118,7 +117,7 @@ jobs:
|
||||
ERR=1 \
|
||||
TARGET=${{ matrix.TARGET }} \
|
||||
CC=${{ matrix.CC }} \
|
||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
||||
DEBUG="-DDEBUG_POOL_INTEGRITY" \
|
||||
${{ join(matrix.FLAGS, ' ') }} \
|
||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
|
||||
sudo make install
|
||||
@ -147,6 +146,14 @@ jobs:
|
||||
ulimit -n 65536
|
||||
ulimit -c unlimited
|
||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||
- name: Config syntax check memleak smoke testing
|
||||
if: ${{ contains(matrix.name, 'ASAN') }}
|
||||
run: |
|
||||
./haproxy -dI -f .github/h2spec.config -c
|
||||
./haproxy -dI -f examples/content-sw-sample.cfg -c
|
||||
./haproxy -dI -f examples/option-http_proxy.cfg -c
|
||||
./haproxy -dI -f examples/quick-test.cfg -c
|
||||
./haproxy -dI -f examples/transparent_proxy.cfg -c
|
||||
- name: Show VTest results
|
||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||
run: |
|
||||
@ -157,19 +164,7 @@ jobs:
|
||||
echo "::endgroup::"
|
||||
done
|
||||
exit 1
|
||||
- name: Run Unit tests
|
||||
id: unittests
|
||||
run: |
|
||||
make unit-tests
|
||||
- name: Show Unit-Tests results
|
||||
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
|
||||
run: |
|
||||
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
|
||||
printf "::group::"
|
||||
cat $result
|
||||
echo "::endgroup::"
|
||||
done
|
||||
exit 1
|
||||
|
||||
- name: Show coredumps
|
||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||
run: |
|
||||
|
19
.github/workflows/wolfssl.yml
vendored
19
.github/workflows/wolfssl.yml
vendored
@ -11,7 +11,6 @@ permissions:
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install VTest
|
||||
@ -20,7 +19,7 @@ jobs:
|
||||
- name: Install apt dependencies
|
||||
run: |
|
||||
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
|
||||
sudo apt-get --no-install-recommends -y install socat gdb jose
|
||||
sudo apt-get --no-install-recommends -y install socat gdb
|
||||
- name: Install WolfSSL
|
||||
run: env WOLFSSL_VERSION=git-master WOLFSSL_DEBUG=1 scripts/build-ssl.sh
|
||||
- name: Compile HAProxy
|
||||
@ -28,7 +27,7 @@ jobs:
|
||||
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
|
||||
USE_OPENSSL_WOLFSSL=1 USE_QUIC=1 \
|
||||
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
|
||||
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
|
||||
DEBUG="-DDEBUG_POOL_INTEGRITY" \
|
||||
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/" \
|
||||
ARCH_FLAGS="-ggdb3 -fsanitize=address"
|
||||
sudo make install
|
||||
@ -49,10 +48,6 @@ jobs:
|
||||
# allow to catch coredumps
|
||||
ulimit -c unlimited
|
||||
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
|
||||
- name: Run Unit tests
|
||||
id: unittests
|
||||
run: |
|
||||
make unit-tests
|
||||
- name: Show VTest results
|
||||
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
|
||||
run: |
|
||||
@ -77,13 +72,3 @@ jobs:
|
||||
if [ "$failed" = true ]; then
|
||||
exit 1;
|
||||
fi
|
||||
- name: Show Unit-Tests results
|
||||
if: ${{ failure() && steps.unittests.outcome == 'failure' }}
|
||||
run: |
|
||||
for result in ${TMPDIR:-/tmp}/ha-unittests-*/results/res.*; do
|
||||
printf "::group::"
|
||||
cat $result
|
||||
echo "::endgroup::"
|
||||
done
|
||||
exit 1
|
||||
|
||||
|
859
CHANGELOG
859
CHANGELOG
@ -1,865 +1,6 @@
|
||||
ChangeLog :
|
||||
===========
|
||||
|
||||
2025/06/11 : 3.3-dev1
|
||||
- BUILD: tools: properly define ha_dump_backtrace() to avoid a build warning
|
||||
- DOC: config: Fix a typo in 2.7 (Name format for maps and ACLs)
|
||||
- REGTESTS: Do not use REQUIRE_VERSION for HAProxy 2.5+ (5)
|
||||
- REGTESTS: Remove REQUIRE_VERSION=2.3 from all tests
|
||||
- REGTESTS: Remove REQUIRE_VERSION=2.4 from all tests
|
||||
- REGTESTS: Remove tests with REQUIRE_VERSION_BELOW=2.4
|
||||
- REGTESTS: Remove support for REQUIRE_VERSION and REQUIRE_VERSION_BELOW
|
||||
- MINOR: server: group postinit server tasks under _srv_postparse()
|
||||
- MINOR: stats: add stat_col flags
|
||||
- MINOR: stats: add ME_NEW_COMMON() helper
|
||||
- MINOR: proxy: collect per-capability stat in proxy_cond_disable()
|
||||
- MINOR: proxy: add a true list containing all proxies
|
||||
- MINOR: log: only run postcheck_log_backend() checks on backend
|
||||
- MEDIUM: proxy: use global proxy list for REGISTER_POST_PROXY_CHECK() hook
|
||||
- MEDIUM: server: automatically add server to proxy list in new_server()
|
||||
- MEDIUM: server: add and use srv_init() function
|
||||
- BUG/MAJOR: leastconn: Protect tree_elt with the lbprm lock
|
||||
- BUG/MEDIUM: check: Requeue healthchecks on I/O events to handle check timeout
|
||||
- CLEANUP: applet: Update comment for applet_put* functions
|
||||
- DEBUG: check: Add the healthcheck's expiration date in the trace messags
|
||||
- BUG/MINOR: mux-spop: Fix null-pointer deref on SPOP stream allocation failure
|
||||
- CLEANUP: sink: remove useless cleanup in sink_new_from_logger()
|
||||
- MAJOR: counters: add shared counters base infrastructure
|
||||
- MINOR: counters: add shared counters helpers to get and drop shared pointers
|
||||
- MINOR: counters: add common struct and flags to {fe,be}_counters_shared
|
||||
- MEDIUM: counters: manage shared counters using dedicated helpers
|
||||
- CLEANUP: counters: merge some common counters between {fe,be}_counters_shared
|
||||
- MINOR: counters: add local-only internal rates to compute some maxes
|
||||
- MAJOR: counters: dispatch counters over thread groups
|
||||
- BUG/MEDIUM: cli: Properly parse empty lines and avoid crashed
|
||||
- BUG/MINOR: config: emit warning for empty args only in discovery mode
|
||||
- BUG/MINOR: config: fix arg number reported on empty arg warning
|
||||
- BUG/MINOR: quic: Missing SSL session object freeing
|
||||
- MINOR: applet: Add API functions to manipulate input and output buffers
|
||||
- MINOR: applet: Add API functions to get data from the input buffer
|
||||
- CLEANUP: applet: Simplify a bit comments for applet_put* functions
|
||||
- MEDIUM: hlua: Update TCP applet functions to use the new applet API
|
||||
- BUG/MEDIUM: fd: Use the provided tgid in fd_insert() to get tgroup_info
|
||||
- BUG/MINIR: h1: Fix doc of 'accept-unsafe-...-request' about URI parsing
|
||||
|
||||
2025/05/28 : 3.3-dev0
|
||||
- MINOR: version: mention that it's development again
|
||||
|
||||
2025/05/28 : 3.2.0
|
||||
- MINOR: promex: Add agent check status/code/duration metrics
|
||||
- MINOR: ssl: support strict-sni in ssl-default-bind-options
|
||||
- MINOR: ssl: also provide the "tls-tickets" bind option
|
||||
- MINOR: server: define CLI I/O handler for "add server"
|
||||
- MINOR: server: implement "add server help"
|
||||
- MINOR: server: use stress mode for "add server help"
|
||||
- BUG/MEDIUM: server: fix crash after duplicate GUID insertion
|
||||
- BUG/MEDIUM: server: fix potential null-deref after previous fix
|
||||
- MINOR: config: list recently added sections with -dKcfg
|
||||
- BUG/MAJOR: cache: Crash because of wrong cache entry deleted
|
||||
- DOC: configuration: fix the example in crt-store
|
||||
- DOC: config: clarify the wording around single/double quotes
|
||||
- DOC: config: clarify the legacy cookie and header captures
|
||||
- DOC: config: fix alphabetical ordering of layer 7 sample fetch functions
|
||||
- DOC: config: fix alphabetical ordering of layer 6 sample fetch functions
|
||||
- DOC: config: fix alphabetical ordering of layer 5 sample fetch functions
|
||||
- DOC: config: fix alphabetical ordering of layer 4 sample fetch functions
|
||||
- DOC: config: fix alphabetical ordering of internal sample fetch functions
|
||||
- BUG/MINOR: h3: Set HTX flags corresponding to the scheme found in the request
|
||||
- BUG/MEDIUM: h3: Declare absolute URI as normalized when a :authority is found
|
||||
- DOC: config: mention in bytes_in and bytes_out that they're read on input
|
||||
- DOC: config: clarify the basics of ACLs (call point, multi-valued etc)
|
||||
- REGTESTS: Make the script testing conditional set-var compatible with Vtest2
|
||||
- REGTESTS: Explicitly allow failing shell commands in some scripts
|
||||
- MINOR: listeners: Add support for a label on bind line
|
||||
- BUG/MEDIUM: cli/ring: Properly handle shutdown in "show event" I/O handler
|
||||
- BUG/MEDIUM: hlua: Properly detect shudowns for TCP applets based on the new API
|
||||
- BUG/MEDIUM: hlua: Fix getline() for TCP applets to work with applet's buffers
|
||||
- BUG/MEDIUM: hlua: Fix receive API for TCP applets to properly handle shutdowns
|
||||
- CI: vtest: Rely on VTest2 to run regression tests
|
||||
- CI: vtest: Fix the build script to properly work on MaOS
|
||||
- CI: combine AWS-LC and AWS-LC-FIPS by template
|
||||
- BUG/MEDIUM: httpclient: Throw an error if an lua httpclient instance is reused
|
||||
- DOC: hlua: Add a note to warn user about httpclient object reuse
|
||||
- DOC: hlua: fix a few typos in HTTPMessage.set_body_len() documentation
|
||||
- DEV: patchbot: prepare for new version 3.3-dev
|
||||
- MINOR: version: mention that it's 3.2 LTS now.
|
||||
|
||||
2025/05/21 : 3.2-dev17
|
||||
- DOC: configuration: explicit multi-choice on bind shards option
|
||||
- BUG/MINOR: sink: detect and warn when using "send-proxy" options with ring servers
|
||||
- BUG/MEDIUM: peers: also limit the number of incoming updates
|
||||
- MEDIUM: hlua: Add function to change the body length of an HTTP Message
|
||||
- BUG/MEDIUM: stconn: Disable 0-copy forwarding for filters altering the payload
|
||||
- BUG/MINOR: h3: don't insert more than one Host header
|
||||
- BUG/MEDIUM: h1/h2/h3: reject forbidden chars in the Host header field
|
||||
- DOC: config: properly index "table and "stick-table" in their section
|
||||
- DOC: management: change reference to configuration manual
|
||||
- BUILD: debug: mark ha_crash_now() as attribute(noreturn)
|
||||
- IMPORT: slz: avoid multiple shifts on 64-bits
|
||||
- IMPORT: slz: support crc32c for lookup hash on sse4 but only if requested
|
||||
- IMPORT: slz: use a better hash for machines with a fast multiply
|
||||
- IMPORT: slz: fix header used for empty zlib message
|
||||
- IMPORT: slz: silence a build warning on non-x86 non-arm
|
||||
- BUG/MAJOR: leastconn: do not loop forever when facing saturated servers
|
||||
- BUG/MAJOR: queue: properly keep count of the queue length
|
||||
- BUG/MINOR: quic: fix crash on quic_conn alloc failure
|
||||
- BUG/MAJOR: leastconn: never reuse the node after dropping the lock
|
||||
- MINOR: acme: renewal notification over the dpapi sink
|
||||
- CLEANUP: quic: Useless BIO_METHOD initialization
|
||||
- MINOR: quic: Add useful error traces about qc_ssl_sess_init() failures
|
||||
- MINOR: quic: Allow the use of the new OpenSSL 3.5.0 QUIC TLS API (to be completed)
|
||||
- MINOR: quic: implement all remaining callbacks for OpenSSL 3.5 QUIC API
|
||||
- MINOR: quic: OpenSSL 3.5 internal QUIC custom extension for transport parameters reset
|
||||
- MINOR: quic: OpenSSL 3.5 trick to support 0-RTT
|
||||
- DOC: update INSTALL for QUIC with OpenSSL 3.5 usages
|
||||
- DOC: management: update 'acme status'
|
||||
- BUG/MEDIUM: wdt: always ignore the first watchdog wakeup
|
||||
- CLEANUP: wdt: clarify the comments on the common exit path
|
||||
- BUILD: ssl: avoid possible printf format warning in traces
|
||||
- BUILD: acme: fix build issue on 32-bit archs with 64-bit time_t
|
||||
- DOC: management: precise some of the fields of "show servers conn"
|
||||
- BUG/MEDIUM: mux-quic: fix BUG_ON() on rxbuf alloc error
|
||||
- DOC: watchdog: update the doc to reflect the recent changes
|
||||
- BUG/MEDIUM: acme: check if acme domains are configured
|
||||
- BUG/MINOR: acme: fix formatting issue in error and logs
|
||||
- EXAMPLES: lua: avoid screen refresh effect in "trisdemo"
|
||||
- CLEANUP: quic: remove unused cbuf module
|
||||
- MINOR: quic: move function to check stream type in utils
|
||||
- MINOR: quic: refactor handling of streams after MUX release
|
||||
- MINOR: quic: add some missing includes
|
||||
- MINOR: quic: adjust quic_conn-t.h include list
|
||||
- CLEANUP: cfgparse: alphabetically sort the global keywords
|
||||
- MINOR: glitches: add global setting "tune.glitches.kill.cpu-usage"
|
||||
|
||||
2025/05/14 : 3.2-dev16
|
||||
- BUG/MEDIUM: mux-quic: fix crash on invalid fctl frame dereference
|
||||
- DEBUG: pool: permit per-pool UAF configuration
|
||||
- MINOR: acme: add the global option 'acme.scheduler'
|
||||
- DEBUG: pools: add a new integrity mode "backup" to copy the released area
|
||||
- MEDIUM: sock-inet: re-check IPv6 connectivity every 30s
|
||||
- BUG/MINOR: ssl: doesn't fill conf->crt with first arg
|
||||
- BUG/MINOR: ssl: prevent multiple 'crt' on the same ssl-f-use line
|
||||
- BUG/MINOR: ssl/ckch: always free() the previous entry during parsing
|
||||
- MINOR: tools: ha_freearray() frees an array of string
|
||||
- BUG/MINOR: ssl/ckch: always ha_freearray() the previous entry during parsing
|
||||
- MINOR: ssl/ckch: warn when the same keyword was used twice
|
||||
- BUG/MINOR: threads: fix soft-stop without multithreading support
|
||||
- BUG/MINOR: tools: improve parse_line()'s robustness against empty args
|
||||
- BUG/MINOR: cfgparse: improve the empty arg position report's robustness
|
||||
- BUG/MINOR: server: dont depend on proxy for server cleanup in srv_drop()
|
||||
- BUG/MINOR: server: perform lbprm deinit for dynamic servers
|
||||
- MINOR: http: add a function to validate characters of :authority
|
||||
- BUG/MEDIUM: h2/h3: reject some forbidden chars in :authority before reassembly
|
||||
- MINOR: quic: account Tx data per stream
|
||||
- MINOR: mux-quic: account Rx data per stream
|
||||
- MINOR: quic: add stream format for "show quic"
|
||||
- MINOR: quic: display QCS info on "show quic stream"
|
||||
- MINOR: quic: display stream age
|
||||
- BUG/MINOR: cpu-topo: fix group-by-cluster policy for disordered clusters
|
||||
- MINOR: cpu-topo: add a new "group-by-ccx" CPU policy
|
||||
- MINOR: cpu-topo: provide a function to sort clusters by average capacity
|
||||
- MEDIUM: cpu-topo: change "performance" to consider per-core capacity
|
||||
- MEDIUM: cpu-topo: change "efficiency" to consider per-core capacity
|
||||
- MEDIUM: cpu-topo: prefer grouping by CCX for "performance" and "efficiency"
|
||||
- MEDIUM: config: change default limits to 1024 threads and 32 groups
|
||||
- BUG/MINOR: hlua: Fix Channel:data() and Channel:line() to respect documentation
|
||||
- DOC: config: Fix a typo in the "term_events" definition
|
||||
- BUG/MINOR: spoe: Don't report error on applet release if filter is in DONE state
|
||||
- BUG/MINOR: mux-spop: Don't report error for stream if ACK was already received
|
||||
- BUG/MINOR: mux-spop: Make the demux stream ID a signed integer
|
||||
- BUG/MINOR: mux-spop: Don't open new streams for SPOP connection on error
|
||||
- MINOR: mux-spop: Don't set SPOP connection state to FRAME_H after ACK parsing
|
||||
- BUG/MEDIUM: mux-spop: Remove frame parsing states from the SPOP connection state
|
||||
- BUG/MEDIUM: mux-spop: Properly handle CLOSING state
|
||||
- BUG/MEDIUM: spop-conn: Report short read for partial frames payload
|
||||
- BUG/MEDIUM: mux-spop: Properly detect truncated frames on demux to report error
|
||||
- BUG/MEDIUM: mux-spop; Don't report a read error if there are pending data
|
||||
- DEBUG: mux-spop: Review some trace messages to adjust the message or the level
|
||||
- DOC: config: move address formats definition to section 2
|
||||
- DOC: config: move stick-tables and peers to their own section
|
||||
- DOC: config: move the extraneous sections out of the "global" definition
|
||||
- CI: AWS-LC(fips): enable unit tests
|
||||
- CI: AWS-LC: enable unit tests
|
||||
- CI: compliance: limit run on forks only to manual + cleanup
|
||||
- CI: musl: enable unit tests
|
||||
- CI: QuicTLS (weekly): limit run on forks only to manual dispatch
|
||||
- CI: WolfSSL: enable unit tests
|
||||
|
||||
2025/05/09 : 3.2-dev15
|
||||
- BUG/MEDIUM: stktable: fix sc_*(<ctr>) BUG_ON() regression with ctx > 9
|
||||
- BUG/MINOR: acme/cli: don't output error on success
|
||||
- BUG/MINOR: tools: do not create an empty arg from trailing spaces
|
||||
- MEDIUM: config: warn about the consequences of empty arguments on a config line
|
||||
- MINOR: tools: make parse_line() provide hints about empty args
|
||||
- MINOR: cfgparse: visually show the input line on empty args
|
||||
- BUG/MINOR: tools: always terminate empty lines
|
||||
- BUG/MINOR: tools: make parseline report the required space for the trailing 0
|
||||
- DEBUG: threads: don't keep lock label "OTHER" in the per-thread history
|
||||
- DEBUG: threads: merge successive idempotent lock operations in history
|
||||
- DEBUG: threads: display held locks in threads dumps
|
||||
- BUG/MINOR: proxy: only use proxy_inc_fe_cum_sess_ver_ctr() with frontends
|
||||
- Revert "BUG/MEDIUM: mux-spop: Handle CLOSING state and wait for AGENT DISCONNECT frame"
|
||||
- MINOR: acme/cli: 'acme status' show the status acme-configured certificates
|
||||
- MEDIUM: acme/ssl: remove 'acme ps' in favor of 'acme status'
|
||||
- DOC: configuration: add "acme" section to the keywords list
|
||||
- DOC: configuration: add the "crt-store" keyword
|
||||
- BUG/MAJOR: queue: lock around the call to pendconn_process_next_strm()
|
||||
- MINOR: ssl: add filename and linenum for ssl-f-use errors
|
||||
- BUG/MINOR: ssl: can't use crt-store some certificates in ssl-f-use
|
||||
- BUG/MINOR: tools: only fill first empty arg when not out of range
|
||||
- MINOR: debug: bump the dump buffer to 8kB
|
||||
- MINOR: stick-tables: add "ipv4" as an alias for the "ip" type
|
||||
- MINOR: quic: extend return value during TP parsing
|
||||
- BUG/MINOR: quic: use proper error code on missing CID in TPs
|
||||
- BUG/MINOR: quic: use proper error code on invalid server TP
|
||||
- BUG/MINOR: quic: reject retry_source_cid TP on server side
|
||||
- BUG/MINOR: quic: use proper error code on invalid received TP value
|
||||
- BUG/MINOR: quic: fix TP reject on invalid max-ack-delay
|
||||
- BUG/MINOR: quic: reject invalid max_udp_payload size
|
||||
- BUG/MEDIUM: peers: hold the refcnt until updating ts->seen
|
||||
- BUG/MEDIUM: stick-tables: close a tiny race in __stksess_kill()
|
||||
- BUG/MINOR: cli: fix too many args detection for commands
|
||||
- MINOR: server: ensure server postparse tasks are run for dynamic servers
|
||||
- BUG/MEDIUM: stick-table: always remove update before adding a new one
|
||||
- BUG/MEDIUM: quic: free stream_desc on all data acked
|
||||
- BUG/MINOR: cfgparse: consider the special case of empty arg caused by \x00
|
||||
- DOC: config: recommend disabling libc-based resolution with resolvers
|
||||
|
||||
2025/05/02 : 3.2-dev14
|
||||
- MINOR: acme: retry label always do a request
|
||||
- MINOR: acme: does not leave task for next request
|
||||
- BUG/MINOR: acme: reinit the retries only at next request
|
||||
- MINOR: acme: change the default max retries to 5
|
||||
- MINOR: acme: allow a delay after a valid response
|
||||
- MINOR: acme: wait 5s before checking the challenges results
|
||||
- MINOR: acme: emit a log when starting
|
||||
- MINOR: acme: delay of 5s after the finalize
|
||||
- BUG/MEDIUM: quic: Let it be known if the tasklet has been released.
|
||||
- BUG/MAJOR: tasks: fix task accounting when killed
|
||||
- CLEANUP: tasks: use the local state, not t->state, to check for tasklets
|
||||
- DOC: acme: external account binding is not supported
|
||||
- MINOR: hlua: ignore "tune.lua.bool-sample-conversion" if set after "lua-load"
|
||||
- MEDIUM: peers: Give up if we fail to take locks in hot path
|
||||
- MEDIUM: stick-tables: defer adding updates to a tasklet
|
||||
- MEDIUM: stick-tables: Limit the number of old entries we remove
|
||||
- MEDIUM: stick-tables: Limit the number of entries we expire
|
||||
- MINOR: cfgparse-global: add explicit error messages in cfg_parse_global_env_opts
|
||||
- MINOR: ssl: add function to extract X509 notBefore date in time_t
|
||||
- BUILD: acme: need HAVE_ASN1_TIME_TO_TM
|
||||
- MINOR: acme: move the acme task init in a dedicated function
|
||||
- MEDIUM: acme: add a basic scheduler
|
||||
- MINOR: acme: emit a log when the scheduler can't start the task
|
||||
|
||||
2025/04/30 : 3.2-dev13
|
||||
- MEDIUM: checks: Make sure we return the tasklet from srv_chk_io_cb
|
||||
- MEDIUM: listener: Make sure w ereturn the tasklet from accept_queue_process
|
||||
- MEDIUM: mux_fcgi: Make sure we return the tasklet from fcgi_deferred_shut
|
||||
- MEDIUM: quic: Make sure we return the tasklet from qcc_io_cb
|
||||
- MEDIUM: quic: Make sure we return NULL in quic_conn_app_io_cb if needed
|
||||
- MEDIUM: quic: Make sure we return the tasklet from quic_accept_run
|
||||
- BUG/MAJOR: tasklets: Make sure he tasklet can't run twice
|
||||
- BUG/MAJOR: listeners: transfer connection accounting when switching listeners
|
||||
- MINOR: ssl/cli: add a '-t' option to 'show ssl sni'
|
||||
- DOC: config: fix ACME paragraph rendering issue
|
||||
- DOC: config: clarify log-forward "host" option
|
||||
- MINOR: promex: expose ST_I_PX_RATE (current_session_rate)
|
||||
- BUILD: acme: use my_strndup() instead of strndup()
|
||||
- BUILD: leastconn: fix build warning when building without threads on old machines
|
||||
- MINOR: threads: prepare DEBUG_THREAD to receive more values
|
||||
- MINOR: threads: turn the full lock debugging to DEBUG_THREAD=2
|
||||
- MEDIUM: threads: keep history of taken locks with DEBUG_THREAD > 0
|
||||
- MINOR: threads/cli: display the lock history on "show threads"
|
||||
- MEDIUM: thread: set DEBUG_THREAD to 1 by default
|
||||
- BUG/MINOR: ssl/acme: free EVP_PKEY upon error
|
||||
- MINOR: acme: separate the code generating private keys
|
||||
- MINOR: acme: failure when no directory is specified
|
||||
- MEDIUM: acme: generate the account file when not found
|
||||
- MEDIUM: acme: use 'crt-base' to load the account key
|
||||
- MINOR: compiler: add more macros to detect macro definitions
|
||||
- MINOR: cli: split APPCTX_CLI_ST1_PROMPT into two distinct flags
|
||||
- MEDIUM: cli: make the prompt mode configurable between n/i/p
|
||||
- MEDIUM: mcli: make the prompt mode configurable between i/p
|
||||
- MEDIUM: mcli: replicate the current mode when enterin the worker process
|
||||
- DOC: configuration: acme account key are auto generated
|
||||
- CLEANUP: acme: remove old TODO for account key
|
||||
- DOC: configuration: add quic4 to the ssl-f-use example
|
||||
- BUG/MINOR: acme: does not try to unlock after a failed trylock
|
||||
- BUG/MINOR: mux-h2: fix the offset of the pattern for the ping frame
|
||||
- MINOR: tcp: add support for setting TCP_NOTSENT_LOWAT on both sides
|
||||
- BUG/MINOR: acme: creating an account should not end the task
|
||||
- MINOR: quic: rename min/max fields for congestion window algo
|
||||
- MINOR: quic: refactor BBR API
|
||||
- BUG/MINOR: quic: ensure cwnd limits are always enforced
|
||||
- MINOR: thread: define cshared type
|
||||
- MINOR: quic: account for global congestion window
|
||||
- MEDIUM: quic: limit global Tx memory
|
||||
- MEDIUM: acme: use a map to store tokens and thumbprints
|
||||
- BUG/MINOR: acme: remove references to virt@acme
|
||||
- MINOR: applet: add appctx_schedule() macro
|
||||
- BUG/MINOR: dns: add tempo between 2 connection attempts for dns servers
|
||||
- CLEANUP: dns: remove unused dns_stream_server struct member
|
||||
- BUG/MINOR: dns: prevent ds accumulation within dss
|
||||
- CLEANUP: proxy: mention that px->conn_retries isn't relevant in some cases
|
||||
- DOC: ring: refer to newer RFC5424
|
||||
- MINOR: tools: make my_strndup() take a size_t len instead of and int
|
||||
- MINOR: Add "sigalg" to "sigalg name" helper function
|
||||
- MINOR: ssl: Add traces to ssl init/close functions
|
||||
- MINOR: ssl: Add traces to recv/send functions
|
||||
- MINOR: ssl: Add traces to ssl_sock_io_cb function
|
||||
- MINOR: ssl: Add traces around SSL_do_handshake call
|
||||
- MINOR: ssl: Add traces to verify callback
|
||||
- MINOR: ssl: Add ocsp stapling callback traces
|
||||
- MINOR: ssl: Add traces to the switchctx callback
|
||||
- MINOR: ssl: Add traces about sigalg extension parsing in clientHello callback
|
||||
- MINOR: Add 'conn' param to ssl_sock_chose_sni_ctx
|
||||
- BUG/MEDIUM: mux-spop: Wait end of handshake to declare a spop connection ready
|
||||
- BUG/MEDIUM: mux-spop: Handle CLOSING state and wait for AGENT DISCONNECT frame
|
||||
- BUG/MINOR: mux-h1: Don't pretend connection was released for TCP>H1>H2 upgrade
|
||||
- BUG/MINOR: mux-h1: Fix trace message in h1_detroy() to not relay on connection
|
||||
- BUILD: ssl: Fix wolfssl build
|
||||
- BUG/MINOR: mux-spop: Use the right bitwise operator in spop_ctl()
|
||||
- MEDIUM: mux-quic: increase flow-control on each bufsize
|
||||
- MINOR: mux-quic: limit emitted MSD frames count per qcs
|
||||
- MINOR: add hlua_yield_asap() helper
|
||||
- MINOR: hlua_fcn: enforce yield after *_get_stats() methods
|
||||
- DOC: config: restore default values for resolvers hold directive
|
||||
- MINOR: ssl/cli: "acme ps" shows the acme tasks
|
||||
- MINOR: acme: acme_ctx_destroy() returns upon NULL
|
||||
- MINOR: acme: use acme_ctx_destroy() upon error
|
||||
- MEDIUM: tasks: Mutualize code between tasks and tasklets.
|
||||
- MEDIUM: tasks: More code factorization
|
||||
- MEDIUM: tasks: Remove TASK_IN_LIST and use TASK_QUEUED instead.
|
||||
- MINOR: tasks: Remove unused tasklet_remove_from_tasklet_list
|
||||
- MEDIUM: tasks: Mutualize the TASK_KILLED code between tasks and tasklets
|
||||
- BUG/MEDIUM: connections: Report connection closing in conn_create_mux()
|
||||
- BUILD/MEDIUM: quic: Make sure we build with recent changes
|
||||
|
||||
2025/04/25 : 3.2-dev12
|
||||
- BUG/MINOR: quic: do not crash on CRYPTO ncbuf alloc failure
|
||||
- BUG/MINOR: proxy: always detach a proxy from the names tree on free()
|
||||
- CLEANUP: proxy: detach the name node in proxy_free_common() instead
|
||||
- CLEANUP: Slightly reorder some proxy option flags to free slots
|
||||
- MINOR: proxy: Add options to drop HTTP trailers during message forwarding
|
||||
- MINOR: h1-htx: Skip C-L and T-E headers for 1xx and 204 messages during parsing
|
||||
- MINOR: mux-h1: Keep custom "Content-Length: 0" header in 1xx and 204 messages
|
||||
- MINOR: hlua/h1: Use http_parse_cont_len_header() to parse content-length value
|
||||
- CLEANUP: h1: Remove now useless h1_parse_cont_len_header() function
|
||||
- BUG/MEDIUM: mux-spop: Respect the negociated max-frame-size value to send frames
|
||||
- MINOR: http-act: Add 'pause' action to temporarily suspend the message analysis
|
||||
- MINOR: acme/cli: add the 'acme renew' command to the help message
|
||||
- MINOR: httpclient: add an "https" log-format
|
||||
- MEDIUM: acme: use a customized proxy
|
||||
- MEDIUM: acme: rename "uri" into "directory"
|
||||
- MEDIUM: acme: rename "account" into "account-key"
|
||||
- MINOR: stick-table: use a separate lock label for updates
|
||||
- MINOR: h3: simplify h3_rcv_buf return path
|
||||
- BUG/MINOR: mux-quic: fix possible infinite loop during decoding
|
||||
- BUG/MINOR: mux-quic: do not decode if conn in error
|
||||
- BUG/MINOR: cli: Issue an error when too many args are passed for a command
|
||||
- MINOR: cli: Use a full prompt command for bidir connections with workers
|
||||
- MAJOR: cli: Refacor parsing and execution of pipelined commands
|
||||
- MINOR: cli: Rename some CLI applet states to reflect recent refactoring
|
||||
- CLEANUP: applet: Update st0/st1 comment in appctx structure
|
||||
- BUG/MINOR: hlua: Fix I/O handler of lua CLI commands to not rely on the SC
|
||||
- BUG/MINOR: ring: Fix I/O handler of "show event" command to not rely on the SC
|
||||
- MINOR: cli/applet: Move appctx fields only used by the CLI in a private context
|
||||
- MINOR: cache: Add a pointer on the cache in the cache applet context
|
||||
- MINOR: hlua: Use the applet name in error messages for lua services
|
||||
- MINOR: applet: Save the "use-service" rule in the stream to init a service applet
|
||||
- CLEANUP: applet: Remove unsued rule pointer in appctx structure
|
||||
- BUG/MINOR: master/cli: properly trim the '@@' process name in error messages
|
||||
- MEDIUM: resolvers: add global "dns-accept-family" directive
|
||||
- MINOR: resolvers: add command-line argument -4 to force IPv4-only DNS
|
||||
- MINOR: sock-inet: detect apparent IPv6 connectivity
|
||||
- MINOR: resolvers: add "dns-accept-family auto" to rely on detected IPv6
|
||||
- MEDIUM: acme: use Retry-After value for retries
|
||||
- MEDIUM: acme: reset the remaining retries
|
||||
- MEDIUM: acme: better error/retry management of the challenge checks
|
||||
- BUG/MEDIUM: cli: Handle applet shutdown when waiting for a command line
|
||||
- Revert "BUG/MINOR: master/cli: properly trim the '@@' process name in error messages"
|
||||
- BUG/MINOR: master/cli: only parse the '@@' prefix on complete lines
|
||||
- MINOR: resolvers: use the runtime IPv6 status instead of boot time one
|
||||
|
||||
2025/04/18 : 3.2-dev11
|
||||
- CI: enable weekly QuicTLS build
|
||||
- DOC: management: slightly clarify the prefix role of the '@' command
|
||||
- DOC: management: add a paragraph about the limitations of the '@' prefix
|
||||
- MINOR: master/cli: support bidirectional communications with workers
|
||||
- MEDIUM: ssl/ckch: add filename and linenum argument to crt-store parsing
|
||||
- MINOR: acme: add the acme section in the configuration parser
|
||||
- MINOR: acme: add configuration for the crt-store
|
||||
- MINOR: acme: add private key configuration
|
||||
- MINOR: acme/cli: add the 'acme renew' command
|
||||
- MINOR: acme: the acme section is experimental
|
||||
- MINOR: acme: get the ACME directory
|
||||
- MINOR: acme: handle the nonce
|
||||
- MINOR: acme: check if the account exist
|
||||
- MINOR: acme: generate new account
|
||||
- MINOR: acme: newOrder request retrieve authorizations URLs
|
||||
- MINOR: acme: allow empty payload in acme_jws_payload()
|
||||
- MINOR: acme: get the challenges object from the Auth URL
|
||||
- MINOR: acme: send the request for challenge ready
|
||||
- MINOR: acme: implement a check on the challenge status
|
||||
- MINOR: acme: generate the CSR in a X509_REQ
|
||||
- MINOR: acme: finalize by sending the CSR
|
||||
- MINOR: acme: verify the order status once finalized
|
||||
- MINOR: acme: implement retrieval of the certificate
|
||||
- BUG/MINOR: acme: ckch_conf_acme_init() when no filename
|
||||
- MINOR: ssl/ckch: handle ckch_conf in ckchs_dup() and ckch_conf_clean()
|
||||
- MINOR: acme: copy the original ckch_store
|
||||
- MEDIUM: acme: replace the previous ckch instance with new ones
|
||||
- MINOR: acme: schedule retries with a timer
|
||||
- BUILD: acme: enable the ACME feature when JWS is present
|
||||
- BUG/MINOR: cpu-topo: check the correct variable for NULL after malloc()
|
||||
- BUG/MINOR: acme: key not restored upon error in acme_res_certificate()
|
||||
- BUG/MINOR: thread: protect thread_cpus_enabled_at_boot with USE_THREAD
|
||||
- MINOR: acme: default to 2048bits for RSA
|
||||
- DOC: acme: explain how to configure and run ACME
|
||||
- BUG/MINOR: debug: remove the trailing \n from BUG_ON() statements
|
||||
- DOC: config: add the missing "profiling.memory" to the global kw index
|
||||
- DOC: config: add the missing "force-cfg-parser-pause" to the global kw index
|
||||
- DEBUG: init: report invalid characters in debug description strings
|
||||
- DEBUG: rename DEBUG_GLITCHES to DEBUG_COUNTERS and enable it by default
|
||||
- DEBUG: counters: make COUNT_IF() only appear at DEBUG_COUNTERS>=1
|
||||
- DEBUG: counters: add the ability to enable/disable updating the COUNT_IF counters
|
||||
- MINOR: tools: let dump_addr_and_bytes() support dumping before the offset
|
||||
- MINOR: debug: in call traces, dump the 8 bytes before the return address, not after
|
||||
- MINOR: debug: detect call instructions and show the branch target in backtraces
|
||||
- BUG/MINOR: acme: fix possible NULL deref
|
||||
- CLEANUP: acme: stored value is overwritten before it can be used
|
||||
- BUILD: incompatible pointer type suspected with -DDEBUG_UNIT
|
||||
- BUG/MINOR: http-ana: Properly detect client abort when forwarding the response
|
||||
- BUG/MEDIUM: http-ana: Report 502 from req analyzer only during rsp forwarding
|
||||
- CI: fedora rawhide: enable unit tests
|
||||
- DOC: configuration: fix a typo in ACME documentation
|
||||
- MEDIUM: sink: add a new dpapi ring buffer
|
||||
- Revert "BUG/MINOR: acme: key not restored upon error in acme_res_certificate()"
|
||||
- BUG/MINOR: acme: key not restored upon error in acme_res_certificate() V2
|
||||
- BUG/MINOR: acme: fix the exponential backoff of retries
|
||||
- DOC: configuration: specify limitations of ACME for 3.2
|
||||
- MINOR: acme: emit logs instead of ha_notice
|
||||
- MINOR: acme: add a success message to the logs
|
||||
- BUG/MINOR: acme/cli: fix certificate name in error message
|
||||
- MINOR: acme: register the task in the ckch_store
|
||||
- MINOR: acme: free acme_ctx once the task is done
|
||||
- BUG/MEDIUM: h3: trim whitespaces when parsing headers value
|
||||
- BUG/MEDIUM: h3: trim whitespaces in header value prior to QPACK encoding
|
||||
- BUG/MINOR: h3: filter upgrade connection header
|
||||
- BUG/MINOR: h3: reject invalid :path in request
|
||||
- BUG/MINOR: h3: reject request URI with invalid characters
|
||||
- MEDIUM: h3: use absolute URI form with :authority
|
||||
- BUG/MEDIUM: hlua: fix hlua_applet_{http,tcp}_fct() yield regression (lost data)
|
||||
- BUG/MINOR: mux-h2: prevent past scheduling with idle connections
|
||||
- BUG/MINOR: rhttp: fix reconnect if timeout connect unset
|
||||
- BUG/MINOR: rhttp: ensure GOAWAY can be emitted after reversal
|
||||
- BUG/MINOR: mux-h2: do not apply timer on idle backend connection
|
||||
- MINOR: mux-h2: refactor idle timeout calculation
|
||||
- MINOR: mux-h2: prepare to support PING emission
|
||||
- MEDIUM: server/mux-h2: implement idle-ping on backend side
|
||||
- MEDIUM: listener/mux-h2: implement idle-ping on frontend side
|
||||
- MINOR: mux-h2: do not emit GOAWAY on idle ping expiration
|
||||
- MINOR: mux-h2: handle idle-ping on conn reverse
|
||||
- BUILD: makefile: enable backtrace by default on musl
|
||||
- BUG/MINOR: threads: set threads_idle and threads_harmless even with no threads
|
||||
- BUG/MINOR debug: fix !USE_THREAD_DUMP in ha_thread_dump_fill()
|
||||
- BUG/MINOR: wdt/debug: avoid signal re-entrance between debugger and watchdog
|
||||
- BUG/MINOR: debug: detect and prevent re-entrance in ha_thread_dump_fill()
|
||||
- MINOR: debug: do not statify a few debugging functions often used with wdt/dbg
|
||||
- MINOR: tools: also protect the library name resolution against concurrent accesses
|
||||
- MINOR: tools: protect dladdr() against reentrant calls from the debug handler
|
||||
- MINOR: debug: protect ha_dump_backtrace() against risks of re-entrance
|
||||
- MINOR: tinfo: keep a copy of the pointer to the thread dump buffer
|
||||
- MINOR: debug: always reset the dump pointer when done
|
||||
- MINOR: debug: remove unused case of thr!=tid in ha_thread_dump_one()
|
||||
- MINOR: pass a valid buffer pointer to ha_thread_dump_one()
|
||||
- MEDIUM: wdt: always make the faulty thread report its own warnings
|
||||
- MINOR: debug: make ha_stuck_warning() only work for the current thread
|
||||
- MINOR: debug: make ha_stuck_warning() print the whole message at once
|
||||
- CLEANUP: debug: no longer set nor use TH_FL_DUMPING_OTHERS
|
||||
- MINOR: sched: add a new function is_sched_alive() to report scheduler's health
|
||||
- MINOR: wdt: use is_sched_alive() instead of keeping a local ctxsw copy
|
||||
- MINOR: sample: add 4 new sample fetches for clienthello parsing
|
||||
- REGTEST: add new reg-test for the 4 new clienthello fetches
|
||||
- MINOR: servers: Move the per-thread server initialization earlier
|
||||
- MINOR: proxies: Initialize the per-thread structure earlier.
|
||||
- MINOR: servers: Provide a pointer to the server in srv_per_tgroup.
|
||||
- MINOR: lb_fwrr: Move the next weight out of fwrr_group.
|
||||
- MINOR: proxies: Add a per-thread group lbprm struct.
|
||||
- MEDIUM: lb_fwrr: Use one ebtree per thread group.
|
||||
- MEDIUM: lb_fwrr: Don't start all thread groups on the same server.
|
||||
- MINOR: proxies: Do stage2 initialization for sinks too
|
||||
|
||||
2025/04/11 : 3.2-dev10
|
||||
- REORG: ssl: move curves2nid and nid2nist to ssl_utils
|
||||
- BUG/MEDIUM: stream: Fix a possible freeze during a forced shut on a stream
|
||||
- MEDIUM: stream: Save SC and channel flags earlier in process_stream()
|
||||
- BUG/MINOR: peers: fix expire learned from a peer not converted from ms to ticks
|
||||
- BUG/MEDIUM: peers: prevent learning expiration too far in the future from unsync node
|
||||
- CI: spell check: allow manual trigger
|
||||
- CI: codespell: add "pres" to spellcheck whitelist
|
||||
- CLEANUP: assorted typo fixes in the code, commits and doc
|
||||
- CLEANUP: atomics: remove support for gcc < 4.7
|
||||
- CLEANUP: atomics: also replace __sync_synchronize() with __atomic_thread_fence()
|
||||
- TESTS: Fix build for filltab25.c
|
||||
- MEDIUM: ssl: replace "crt" lines by "ssl-f-use" lines
|
||||
- DOC: configuration: replace "crt" by "ssl-f-use" in listeners
|
||||
- MINOR: backend: mark srv as nonnull in alloc_dst_address()
|
||||
- BUG/MINOR: server: ensure check-reuse-pool is copied from default-server
|
||||
- MINOR: server: activate automatically check reuse for rhttp@ protocol
|
||||
- MINOR: check/backend: support conn reuse with SNI
|
||||
- MINOR: check: implement check-pool-conn-name srv keyword
|
||||
- MINOR: task: add thread safe notification_new and notification_wake variants
|
||||
- BUG/MINOR: hlua_fcn: fix potential UAF with Queue:pop_wait()
|
||||
- MINOR: hlua_fcn: register queue class using hlua_register_metatable()
|
||||
- MINOR: hlua: add core.wait()
|
||||
- MINOR: hlua: core.wait() takes optional delay parameter
|
||||
- MINOR: hlua: split hlua_applet_tcp_recv_yield() in two functions
|
||||
- MINOR: hlua: add AppletTCP:try_receive()
|
||||
- MINOR: hlua_fcn: add Queue:alarm()
|
||||
- MEDIUM: task: make notification_* API thread safe by default
|
||||
- CLEANUP: log: adjust _lf_cbor_encode_byte() comment
|
||||
- MEDIUM: ssl/crt-list: warn on negative wildcard filters
|
||||
- MEDIUM: ssl/crt-list: warn on negative filters only
|
||||
- BUILD: atomics: fix build issue on non-x86/non-arm systems
|
||||
- BUG/MINOR: log: fix CBOR encoding with LOG_VARTEXT_START() + lf_encode_chunk()
|
||||
- BUG/MEDIUM: sample: fix risk of overflow when replacing multiple regex back-refs
|
||||
- DOC: configuration: rework the crt-list section
|
||||
- MINOR: ring: support arbitrary delimiters through ring_dispatch_messages()
|
||||
- MINOR: ring/cli: support delimiting events with a trailing \0 on "show events"
|
||||
- DEV: h2: fix h2-tracer.lua nil value index
|
||||
- BUG/MINOR: backend: do not use the source port when hashing clientip
|
||||
- BUG/MINOR: hlua: fix invalid errmsg use in hlua_init()
|
||||
- MINOR: proxy: add setup_new_proxy() function
|
||||
- MINOR: checks: mark CHECKS-FE dummy frontend as internal
|
||||
- MINOR: flt_spoe: mark spoe agent frontend as internal
|
||||
- MEDIUM: tree-wide: avoid manually initializing proxies
|
||||
- MINOR: proxy: add deinit_proxy() helper func
|
||||
- MINOR: checks: deinit checks_fe upon deinit
|
||||
- MINOR: flt_spoe: deinit spoe agent proxy upon agent release
|
||||
|
||||
2025/04/02 : 3.2-dev9
|
||||
- MINOR: quic: move global tune options into quic_tune
|
||||
- CLEANUP: quic: reorganize TP flow-control initialization
|
||||
- MINOR: quic: ignore uni-stream for initial max data TP
|
||||
- MINOR: mux-quic: define config for max-data
|
||||
- MINOR: quic: define max-stream-data configuration as a ratio
|
||||
- MEDIUM: lb-chash: add directive hash-preserve-affinity
|
||||
- MEDIUM: pools: be a bit smarter when merging comparable size pools
|
||||
- REGTESTS: disable the test balance/balance-hash-maxqueue
|
||||
- BUG/MINOR: log: fix gcc warn about truncating NUL terminator while init char arrays
|
||||
- CI: fedora rawhide: allow "on: workflow_dispatch" in forks
|
||||
- CI: fedora rawhide: install "awk" as a dependency
|
||||
- CI: spellcheck: allow "on: workflow_dispatch" in forks
|
||||
- CI: coverity scan: allow "on: workflow_dispatch" in forks
|
||||
- CI: cross compile: allow "on: workflow_dispatch" in forks
|
||||
- CI: Illumos: allow "on: workflow_dispatch" in forks
|
||||
- CI: NetBSD: allow "on: workflow_dispatch" in forks
|
||||
- CI: QUIC Interop on AWS-LC: allow "on: workflow_dispatch" in forks
|
||||
- CI: QUIC Interop on LibreSSL: allow "on: workflow_dispatch" in forks
|
||||
- MINOR: compiler: add __nonstring macro
|
||||
- MINOR: thread: dump the CPU topology in thread_map_to_groups()
|
||||
- MINOR: cpu-set: compare two cpu sets with ha_cpuset_isequal()
|
||||
- MINOR: cpu-set: add a new function to print cpu-sets in human-friendly mode
|
||||
- MINOR: cpu-topo: add a dump of thread-to-CPU mapping to -dc
|
||||
- MINOR: cpu-topo: pass an extra argument to ha_cpu_policy
|
||||
- MINOR: cpu-topo: add new cpu-policies "group-by-2-clusters" and above
|
||||
- BUG/MINOR: config: silence .notice/.warning/.alert in discovery mode
|
||||
- EXAMPLES: add "games.cfg" and an example game in Lua
|
||||
- MINOR: jws: emit the JWK thumbprint
|
||||
- TESTS: jws: change the jwk format
|
||||
- MINOR: ssl/ckch: add substring parser for ckch_conf
|
||||
- MINOR: mt_list: Implement mt_list_try_lock_prev().
|
||||
- MINOR: lbprm: Add method to deinit server and proxy
|
||||
- MINOR: threads: Add HA_RWLOCK_TRYRDTOWR()
|
||||
- MAJOR: leastconn: Revamp the way servers are ordered.
|
||||
- BUG/MINOR: ssl/ckch: leak in error path
|
||||
- BUILD: ssl/ckch: potential null pointer dereference
|
||||
- MINOR: log: support "raw" logformat node typecast
|
||||
- CLEANUP: assorted typo fixes in the code and comments
|
||||
- DOC: config: fix two missing "content" in "tcp-request" examples
|
||||
- MINOR: cpu-topo: cpu_dump_topology() SMT info check little optimisation
|
||||
- BUILD: compiler: undefine the CONCAT() macro if already defined
|
||||
- BUG/MEDIUM: leastconn: Don't try to reposition if the server is down
|
||||
- BUG/MINOR: rhttp: fix incorrect dst/dst_port values
|
||||
- BUG/MINOR: backend: do not overwrite srv dst address on reuse
|
||||
- BUG/MEDIUM: backend: fix reuse with set-dst/set-dst-port
|
||||
- MINOR: sample: define bc_reused fetch
|
||||
- REGTESTS: extend conn reuse test with transparent proxy
|
||||
- MINOR: backend: fix comment when killing idle conns
|
||||
- MINOR: backend: adjust conn_backend_get() API
|
||||
- MINOR: backend: extract conn hash calculation from connect_server()
|
||||
- MINOR: backend: extract conn reuse from connect_server()
|
||||
- MINOR: backend: remove stream usage on connection reuse
|
||||
- MINOR: check: define check-reuse-pool server keyword
|
||||
- MEDIUM: check: implement check-reuse-pool
|
||||
- BUILD: backend: silence a build warning when not using ssl
|
||||
- BUILD: quic_sock: address a strict-aliasing build warning with gcc 5 and 6
|
||||
- BUILD: ssl_ckch: use my_strndup() instead of strndup()
|
||||
- DOC: update INSTALL to reflect the minimum compiler version
|
||||
|
||||
2025/03/21 : 3.2-dev8
|
||||
- MINOR: jws: implement JWS signing
|
||||
- TESTS: jws: implement a test for JWS signing
|
||||
- CI: github: add "jose" to apt dependencies
|
||||
- CLEANUP: log-forward: remove useless options2 init
|
||||
- CLEANUP: log: add syslog_process_message() helper
|
||||
- MINOR: proxy: add proxy->options3
|
||||
- MINOR: log: migrate log-forward options from proxy->options2 to options3
|
||||
- MINOR: log: provide source address information in syslog_process_message()
|
||||
- MINOR: tools: only print address in sa2str() when port == -1
|
||||
- MINOR: log: add "option host" log-forward option
|
||||
- MINOR: log: handle log-forward "option host"
|
||||
- MEDIUM: log: change default "host" strategy for log-forward section
|
||||
- BUG/MEDIUM: thread: use pthread_self() not ha_pthread[tid] in set_affinity
|
||||
- MINOR: compiler: add a simple macro to concatenate resolved strings
|
||||
- MINOR: compiler: add a new __decl_thread_var() macro to declare local variables
|
||||
- BUILD: tools: silence a build warning when USE_THREAD=0
|
||||
- BUILD: backend: silence a build warning when threads are disabled
|
||||
- DOC: management: rename some last occurrences from domain "dns" to "resolvers"
|
||||
- BUG/MINOR: stats: fix capabilities and hide settings for some generic metrics
|
||||
- MINOR: cli: export cli_io_handler() to ease symbol resolution
|
||||
- MINOR: tools: improve symbol resolution without dl_addr
|
||||
- MINOR: tools: ease the declaration of known symbols in resolve_sym_name()
|
||||
- MINOR: tools: teach resolve_sym_name() a few more common symbols
|
||||
- BUILD: tools: avoid a build warning on gcc-4.8 in resolve_sym_name()
|
||||
- DEV: ncpu: also emulate sysconf() for _SC_NPROCESSORS_*
|
||||
- DOC: design-thoughts: commit numa-auto.txt
|
||||
- MINOR: cpuset: make the API support negative CPU IDs
|
||||
- MINOR: thread: rely on the cpuset functions to count bound CPUs
|
||||
- MINOR: cpu-topo: add ha_cpu_topo definition
|
||||
- MINOR: cpu-topo: allocate and initialize the ha_cpu_topo array.
|
||||
- MINOR: cpu-topo: rely on _SC_NPROCESSORS_CONF to trim maxcpus
|
||||
- MINOR: cpu-topo: add a function to dump CPU topology
|
||||
- MINOR: cpu-topo: update CPU topology from excluded CPUs at boot
|
||||
- REORG: cpu-topo: move bound cpu detection from cpuset to cpu-topo
|
||||
- MINOR: cpu-topo: add detection of online CPUs on Linux
|
||||
- MINOR: cpu-topo: add detection of online CPUs on FreeBSD
|
||||
- MINOR: cpu-topo: try to detect offline cpus at boot
|
||||
- MINOR: cpu-topo: add CPU topology detection for linux
|
||||
- MINOR: cpu-topo: also store the sibling ID with SMT
|
||||
- MINOR: cpu-topo: add NUMA node identification to CPUs on Linux
|
||||
- MINOR: cpu-topo: add NUMA node identification to CPUs on FreeBSD
|
||||
- MINOR: thread: turn thread_cpu_mask_forced() into an init-time variable
|
||||
- MINOR: cfgparse: move the binding detection into numa_detect_topology()
|
||||
- MINOR: cfgparse: use already known offline CPU information
|
||||
- MINOR: global: add a command-line option to enable CPU binding debugging
|
||||
- MINOR: cpu-topo: add a new "cpu-set" global directive to choose cpus
|
||||
- MINOR: cpu-topo: add "drop-cpu" and "only-cpu" to cpu-set
|
||||
- MEDIUM: thread: start to detect thread groups and threads min/max
|
||||
- MEDIUM: cpu-topo: make sure to properly assign CPUs to threads as a fallback
|
||||
- MEDIUM: thread: reimplement first numa node detection
|
||||
- MEDIUM: cfgparse: remove now unused numa & thread-count detection
|
||||
- MINOR: cpu-topo: refine cpu dump output to better show kept/dropped CPUs
|
||||
- MINOR: cpu-topo: fall back to nominal_perf and scaling_max_freq for the capacity
|
||||
- MINOR: cpu-topo: use cpufreq before acpi cppc
|
||||
- MINOR: cpu-topo: boost the capacity of performance cores with cpufreq
|
||||
- MINOR: cpu-topo: skip CPU detection when /sys/.../cpu does not exist
|
||||
- MINOR: cpu-topo: skip identification of non-existing CPUs
|
||||
- MINOR: cpu-topo: skip CPU properties that we've verified do not exist
|
||||
- MINOR: cpu-topo: implement a sorting mechanism for CPU index
|
||||
- MINOR: cpu-topo: implement a sorting mechanism by CPU locality
|
||||
- MINOR: cpu-topo: implement a CPU sorting mechanism by cluster ID
|
||||
- MINOR: cpu-topo: ignore single-core clusters
|
||||
- MINOR: cpu-topo: assign clusters to cores without and renumber them
|
||||
- MINOR: cpu-topo: make sure we don't leave unassigned IDs in the cpu_topo
|
||||
- MINOR: cpu-topo: assign an L3 cache if more than 2 L2 instances
|
||||
- MINOR: cpu-topo: renumber cores to avoid holes and make them contiguous
|
||||
- MINOR: cpu-topo: add a function to sort by cluster+capacity
|
||||
- MINOR: cpu-topo: consider capacity when forming clusters
|
||||
- MINOR: cpu-topo: create an array of the clusters
|
||||
- MINOR: cpu-topo: ignore excess of too small clusters
|
||||
- MINOR: cpu-topo: add "only-node" and "drop-node" to cpu-set
|
||||
- MINOR: cpu-topo: add "only-thread" and "drop-thread" to cpu-set
|
||||
- MINOR: cpu-topo: add "only-core" and "drop-core" to cpu-set
|
||||
- MINOR: cpu-topo: add "only-cluster" and "drop-cluster" to cpu-set
|
||||
- MINOR: cpu-topo: add a CPU policy setting to the global section
|
||||
- MINOR: cpu-topo: add a 'first-usable-node' cpu policy
|
||||
- MEDIUM: cpu-topo: use the "first-usable-node" cpu-policy by default
|
||||
- CLEANUP: thread: now remove the temporary CPU node binding code
|
||||
- MINOR: cpu-topo: add cpu-policy "group-by-cluster"
|
||||
- MEDIUM: cpu-topo: let the "group-by-cluster" split groups
|
||||
- MINOR: cpu-topo: add a new "performance" cpu-policy
|
||||
- MINOR: cpu-topo: add a new "efficiency" cpu-policy
|
||||
- MINOR: cpu-topo: add a new "resource" cpu-policy
|
||||
- MINOR: jws: add new functions in jws.h
|
||||
- MINOR: cpu-topo: fix unused stack var 'cpu2' reported by coverity
|
||||
- MINOR: hlua: add an optional timeout to AppletTCP:receive()
|
||||
- MINOR: jws: use jwt_alg type instead of a char
|
||||
- BUG/MINOR: log: prevent saddr NULL deref in syslog_io_handler()
|
||||
- MINOR: stream: decrement srv->served after detaching from the list
|
||||
- BUG/MINOR: hlua: fix optional timeout argument index for AppletTCP:receive()
|
||||
- MINOR: server: simplify srv_has_streams()
|
||||
- CLEANUP: server: make it clear that srv_check_for_deletion() is thread-safe
|
||||
- MINOR: cli/server: don't take thread isolation to check for srv-removable
|
||||
- BUG/MINOR: limits: compute_ideal_maxconn: don't cap remain if fd_hard_limit=0
|
||||
- MINOR: limits: fix check_if_maxsock_permitted description
|
||||
- BUG/MEDIUM: hlua/cli: fix cli applet UAF in hlua_applet_wakeup()
|
||||
- MINOR: tools: path_base() concatenates a path with a base path
|
||||
- MEDIUM: ssl/ckch: make the ckch_conf more generic
|
||||
- BUG/MINOR: mux-h2: Reset streams with NO_ERROR code if full response was already sent
|
||||
- MINOR: stats: add .generic explicit field in stat_col struct
|
||||
- MINOR: stats: STATS_PX_CAP___B_ macro
|
||||
- MINOR: stats: add .cap for some static metrics
|
||||
- MINOR: stats: use stat_col storage stat_cols_info
|
||||
- MEDIUM: promex: switch to using stat_cols_info for global metrics
|
||||
- MINOR: promex: expose ST_I_INF_WARNINGS (AKA total_warnings) metric
|
||||
- MEDIUM: promex: switch to using stat_cols_px for front/back/server metrics
|
||||
- MINOR: stats: explicitly add frontend cap for ST_I_PX_REQ_TOT
|
||||
- CLEANUP: promex: remove unused PROMEX_FL_{INFO,FRONT,BACK,LI,SRV} flags
|
||||
- BUG/MEDIUM: mux-quic: fix crash on RS/SS emission if already close local
|
||||
- BUG/MINOR: mux-quic: remove extra BUG_ON() in _qcc_send_stream()
|
||||
- MEDIUM: mt_list: Reduce the max number of loops with exponential backoff
|
||||
- MINOR: stats: add alt_name field to stat_col struct
|
||||
- MINOR: stats: add alt name info to stat_cols_info where relevant
|
||||
- MINOR: promex: get rid of promex_global_metric array
|
||||
- MINOR: stats-proxy: add alt_name field for ME_NEW_{FE,BE,PX} helpers
|
||||
- MINOR: stats-proxy: add alt name info to stat_cols_px where relevant
|
||||
- MINOR: promex: get rid of promex_st_metrics array
|
||||
- MINOR: pools: rename the "by_what" field of the show pools context to "how"
|
||||
- MINOR: cli/pools: record the list of pool registrations even when merging them
|
||||
|
||||
2025/03/07 : 3.2-dev7
|
||||
- BUG/MEDIUM: applet: Don't handle EOI/EOS/ERROR is applet is waiting for room
|
||||
- BUG/MEDIUM: spoe/mux-spop: Introduce an NOOP action to deal with empty ACK
|
||||
- BUG/MINOR: cfgparse: fix NULL ptr dereference in cfg_parse_peers
|
||||
- BUG/MEDIUM: uxst: fix outgoing abns address family in connect()
|
||||
- REGTESTS: fix reg-tests/server/abnsz.vtc
|
||||
- BUG/MINOR: log: fix outgoing abns address family
|
||||
- BUG/MINOR: sink: add tempo between 2 connection attempts for sft servers
|
||||
- MINOR: clock: always use atomic ops for global_now_ms
|
||||
- CI: QUIC Interop: clean old docker images
|
||||
- BUG/MINOR: stream: do not call co_data() from __strm_dump_to_buffer()
|
||||
- BUG/MINOR: mux-h1: always make sure h1s->sd exists in h1_dump_h1s_info()
|
||||
- MINOR: tinfo: add a new thread flag to indicate a call from a sig handler
|
||||
- BUG/MEDIUM: stream: never allocate connection addresses from signal handler
|
||||
- MINOR: freq_ctr: provide non-blocking read functions
|
||||
- BUG/MEDIUM: stream: use non-blocking freq_ctr calls from the stream dumper
|
||||
- MINOR: tools: use only opportunistic symbols resolution
|
||||
- CLEANUP: task: move the barrier after clearing th_ctx->current
|
||||
- MINOR: compression: Introduce minimum size
|
||||
- BUG/MINOR: h2: always trim leading and trailing LWS in header values
|
||||
- MINOR: tinfo: split the signal handler report flags into 3
|
||||
- BUG/MEDIUM: stream: don't use localtime in dumps from a signal handler
|
||||
- OPTIM: connection: don't try to kill other threads' connection when !shared
|
||||
- BUILD: add possibility to use different QuicTLS variants
|
||||
- MEDIUM: fd: Wait if locked in fd_grab_tgid() and fd_take_tgid().
|
||||
- MINOR: fd: Add fd_lock_tgid_cur().
|
||||
- MEDIUM: epoll: Make sure we can add a new event
|
||||
- MINOR: pollers: Add a fixup_tgid_takeover() method.
|
||||
- MEDIUM: pollers: Drop fd events after a takeover to another tgid.
|
||||
- MEDIUM: connections: Allow taking over connections from other tgroups.
|
||||
- MEDIUM: servers: Add strict-maxconn.
|
||||
- BUG/MEDIUM: server: properly initialize PROXY v2 TLVs
|
||||
- BUG/MINOR: server: fix the "server-template" prefix memory leak
|
||||
- BUG/MINOR: h3: do not report transfer as aborted on preemptive response
|
||||
- CLEANUP: h3: fix documentation of h3_rcv_buf()
|
||||
- MINOR: hq-interop: properly handle incomplete request
|
||||
- BUG/MEDIUM: mux-fcgi: Try to fully fill demux buffer on receive if not empty
|
||||
- MINOR: h1: permit to relax the websocket checks for missing mandatory headers
|
||||
- BUG/MINOR: hq-interop: fix leak in case of rcv_buf early return
|
||||
- BUG/MINOR: server: check for either proxy-protocol v1 or v2 to send header
|
||||
- MINOR: jws: implement a JWK public key converter
|
||||
- DEBUG: init: add a way to register functions for unit tests
|
||||
- TESTS: add a unit test runner in the Makefile
|
||||
- TESTS: jws: register a unittest for jwk
|
||||
- CI: github: run make unit-tests on the CI
|
||||
- TESTS: add config smoke checks in the unit tests
|
||||
- MINOR: jws: conversion to NIST curves name
|
||||
- CI: github: remove smoke tests from vtest.yml
|
||||
- TESTS: ist: fix wrong array size
|
||||
- TESTS: ist: use the exit code to return a verdict
|
||||
- TESTS: ist: add a ist.sh to launch in make unit-tests
|
||||
- CI: github: fix h2spec.config proxy names
|
||||
- DEBUG: init: Add a macro to register unit tests
|
||||
- MINOR: sample: allow custom date format in error-log-format
|
||||
- CLEANUP: log: removing "log-balance" references
|
||||
- BUG/MINOR: log: set proper smp size for balance log-hash
|
||||
- MINOR: log: use __send_log() with exact payload length
|
||||
- MEDIUM: log: postpone the decision to send or not log with empty messages
|
||||
- MINOR: proxy: make pr_mode enum bitfield compatible
|
||||
- MINOR: cfgparse-listen: add and use cfg_parse_listen_match_option() helper
|
||||
- MINOR: log: add options eval for log-forward
|
||||
- MINOR: log: detach prepare from parse message
|
||||
- MINOR: log: add dont-parse-log and assume-rfc6587-ntf options
|
||||
- BUG/MEDIUM: startup: return to initial cwd only after check_config_validity()
|
||||
- TESTS: change the output of run-unittests.sh
|
||||
- TESTS: unit-tests: store sh -x in a result file
|
||||
- CI: github: show results of the Unit tests
|
||||
- BUG/MINOR: cfgparse/peers: fix inconsistent check for missing peer server
|
||||
- BUG/MINOR: cfgparse/peers: properly handle ignored local peer case
|
||||
- BUG/MINOR: server: dont return immediately from parse_server() when skipping checks
|
||||
- MINOR: cfgparse/peers: provide more info when ignoring invalid "peer" or "server" lines
|
||||
- BUG/MINOR: stream: fix age calculation in "show sess" output
|
||||
- MINOR: stream/cli: rework "show sess" to better consider optional arguments
|
||||
- MINOR: stream/cli: make "show sess" support filtering on front/back/server
|
||||
- TESTS: quic: create first quic unittest
|
||||
- MINOR: h3/hq-interop: restore function for standalone FIN receive
|
||||
- MINOR/OPTIM: mux-quic: do not allocate rxbuf on standalone FIN
|
||||
- MINOR: mux-quic: refine reception of standalone STREAM FIN
|
||||
- MINOR: mux-quic: define globally stream rxbuf size
|
||||
- MINOR: mux-quic: define rxbuf wrapper
|
||||
- MINOR: mux-quic: store QCS Rx buf in a single-entry tree
|
||||
- MINOR: mux-quic: adjust Rx data consumption API
|
||||
- MINOR: mux-quic: adapt return value of qcc_decode_qcs()
|
||||
- MAJOR: mux-quic: support multiple QCS RX buffers
|
||||
- MEDIUM: mux-quic: handle too short data split on multiple rxbuf
|
||||
- MAJOR: mux-quic: increase stream flow-control for multi-buffer alloc
|
||||
- BUG/MINOR: cfgparse-tcp: relax namespace bind check
|
||||
- MINOR: startup: adjust alert messages, when capabilities are missed
|
||||
|
||||
2025/02/19 : 3.2-dev6
|
||||
- BUG/MEDIUM: debug: close a possible race between thread dump and panic()
|
||||
- DEBUG: thread: report the spin lock counters as seek locks
|
||||
- DEBUG: thread: make lock time computation more consistent
|
||||
- DEBUG: thread: report the wait time buckets for lock classes
|
||||
- DEBUG: thread: don't keep the redundant _locked counter
|
||||
- DEBUG: thread: make lock_stat per operation instead of for all operations
|
||||
- DEBUG: thread: reduce the struct lock_stat to store only 30 buckets
|
||||
- MINOR: lbprm: add a new callback ->server_requeue to the lbprm
|
||||
- MEDIUM: server: allocate a tasklet for asynchronous requeuing
|
||||
- MAJOR: leastconn: postpone the server's repositioning under contention
|
||||
- BUG/MINOR: quic: reserve length field for long header encoding
|
||||
- BUG/MINOR: quic: fix CRYPTO payload size calculation for encoding
|
||||
- MINOR: quic: simplify length calculation for STREAM/CRYPTO frames
|
||||
- BUG/MINOR: mworker: section ignored in discovery after a post_section_parser
|
||||
- BUG/MINOR: mworker: post_section_parser for the last section in discovery
|
||||
- CLEANUP: mworker: "program" section does not have a post_section_parser anymore
|
||||
- MEDIUM: initcall: allow to register multiple post_section_parsers per section
|
||||
- CI: cirrus-ci: bump FreeBSD image to 14-2
|
||||
- DOC: initcall: name correctly REGISTER_CONFIG_POST_SECTION()
|
||||
- REGTESTS: stop using truncated.vtc on freebsd
|
||||
- MINOR: quic: refactor STREAM encoding and splitting
|
||||
- MINOR: quic: refactor CRYPTO encoding and splitting
|
||||
- BUG/MEDIUM: fd: mark FD transferred to another process as FD_CLONED
|
||||
- BUG/MINOR: ssl/cli: "show ssl crt-list" lacks client-sigals
|
||||
- BUG/MINOR: ssl/cli: "show ssl crt-list" lacks sigals
|
||||
- MINOR: ssl/cli: display more filenames in 'show ssl cert'
|
||||
- DOC: watchdog: document the sequence of the watchdog and panic
|
||||
- MINOR: ssl: store the filenames resulting from a lookup in ckch_conf
|
||||
- MINOR: startup: allow hap_register_feature() to enable a feature in the list
|
||||
- MINOR: quic: support frame type as a varint
|
||||
- BUG/MINOR: startup: leave at first post_section_parser which fails
|
||||
- BUG/MINOR: startup: hap_register_feature() fix for partial feature name
|
||||
- BUG/MEDIUM: cli: Be sure to drop all input data in END state
|
||||
- BUG/MINOR: cli: Wait for the last ACK when FDs are xferred from the old worker
|
||||
- BUG/MEDIUM: filters: Handle filters registered on data with no payload callback
|
||||
- BUG/MINOR: fcgi: Don't set the status to 302 if it is already set
|
||||
- MINOR: ssl/crtlist: split the ckch_conf loading from the crtlist line parsing
|
||||
- MINOR: ssl/crtlist: handle crt_path == cc->crt in crtlist_load_crt()
|
||||
- MINOR: ssl/ckch: return from ckch_conf_clean() when conf is NULL
|
||||
- MEDIUM: ssl/crtlist: "crt" keyword in frontend
|
||||
- DOC: configuration: document the "crt" frontend keyword
|
||||
- DEV: h2: add a Lua-based HTTP/2 connection tracer
|
||||
- BUG/MINOR: quic: prevent crash on conn access after MUX init failure
|
||||
- BUG/MINOR: mux-quic: prevent crash after MUX init failure
|
||||
- DEV: h2: fix flags for the continuation frame
|
||||
- REGTESTS: Fix truncated.vtc to send 0-CRLF
|
||||
- BUG/MINOR: mux-h2: Properly handle full or truncated HTX messages on shut
|
||||
- Revert "REGTESTS: stop using truncated.vtc on freebsd"
|
||||
- MINOR: mux-quic: define a QCC application state member
|
||||
- MINOR: mux-quic/h3: emit SETTINGS via MUX tasklet handler
|
||||
- MINOR: mux-quic/h3: support temporary blocking on control stream sending
|
||||
|
||||
2025/02/08 : 3.2-dev5
|
||||
- BUG/MINOR: ssl: put ssl_sock_load_ca under SSL_NO_GENERATE_CERTIFICATES
|
||||
- CLEANUP: ssl: rename ssl_sock_load_ca to ssl_sock_gencert_load_ca
|
||||
|
@ -1010,7 +1010,7 @@ you notice you're already practising some of them:
|
||||
- continue to send pull requests after having been explained why they are not
|
||||
welcome.
|
||||
|
||||
- give wrong advice to people asking for help, or sending them patches to
|
||||
- give wrong advices to people asking for help, or sending them patches to
|
||||
try which make no sense, waste their time, and give them a bad impression
|
||||
of the people working on the project.
|
||||
|
||||
|
67
INSTALL
67
INSTALL
@ -111,22 +111,20 @@ HAProxy requires a working GCC or Clang toolchain and GNU make :
|
||||
may want to retry with "gmake" which is the name commonly used for GNU make
|
||||
on BSD systems.
|
||||
|
||||
- GCC >= 4.7 (up to 14 tested). Older versions are no longer supported due to
|
||||
the latest mt_list update which only uses c11-like atomics. Newer versions
|
||||
may sometimes break due to compiler regressions or behaviour changes. The
|
||||
version shipped with your operating system is very likely to work with no
|
||||
trouble. Clang >= 3.0 is also known to work as an alternative solution, and
|
||||
versions up to 19 were successfully tested. Recent versions may emit a bit
|
||||
more warnings that are worth reporting as they may reveal real bugs. TCC
|
||||
(https://repo.or.cz/tinycc.git) is also usable for developers but will not
|
||||
support threading and was found at least once to produce bad code in some
|
||||
rare corner cases (since fixed). But it builds extremely quickly (typically
|
||||
half a second for the whole project) and is very convenient to run quick
|
||||
tests during API changes or code refactoring.
|
||||
- GCC >= 4.2 (up to 14 tested). Older versions can be made to work with a
|
||||
few minor adaptations if really needed. Newer versions may sometimes break
|
||||
due to compiler regressions or behaviour changes. The version shipped with
|
||||
your operating system is very likely to work with no trouble. Clang >= 3.0
|
||||
is also known to work as an alternative solution. Recent versions may emit
|
||||
a bit more warnings that are worth reporting as they may reveal real bugs.
|
||||
TCC (https://repo.or.cz/tinycc.git) is also usable for developers but will
|
||||
not support threading and was found at least once to produce bad code in
|
||||
some rare corner cases (since fixed). But it builds extremely quickly
|
||||
(typically half a second for the whole project) and is very convenient to
|
||||
run quick tests during API changes or code refactoring.
|
||||
|
||||
- GNU ld (binutils package), with no particular version. Other linkers might
|
||||
work but were not tested. The default one from your operating system will
|
||||
normally work.
|
||||
work but were not tested.
|
||||
|
||||
On debian or Ubuntu systems and their derivatives, you may get all these tools
|
||||
at once by issuing the two following commands :
|
||||
@ -237,7 +235,7 @@ to forcefully enable it using "USE_LIBCRYPT=1".
|
||||
-----------------
|
||||
For SSL/TLS, it is necessary to use a cryptography library. HAProxy currently
|
||||
supports the OpenSSL library, and is known to build and work with branches
|
||||
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.5. It is recommended to use
|
||||
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.4. It is recommended to use
|
||||
at least OpenSSL 1.1.1 to have support for all SSL keywords and configuration
|
||||
in HAProxy. OpenSSL follows a long-term support cycle similar to HAProxy's,
|
||||
and each of the branches above receives its own fixes, without forcing you to
|
||||
@ -259,10 +257,10 @@ reported to work as well. While there are some efforts from the community to
|
||||
ensure they work well, OpenSSL remains the primary target and this means that
|
||||
in case of conflicting choices, OpenSSL support will be favored over other
|
||||
options. Note that QUIC is not fully supported when haproxy is built with
|
||||
OpenSSL < 3.5 version. In this case, QUICTLS is the preferred alternative.
|
||||
As of writing this, the QuicTLS project follows OpenSSL very closely and provides
|
||||
update simultaneously, but being a volunteer-driven project, its long-term future
|
||||
does not look certain enough to convince operating systems to package it, so it
|
||||
OpenSSL. In this case, QUICTLS is the preferred alternative. As of writing
|
||||
this, the QuicTLS project follows OpenSSL very closely and provides update
|
||||
simultaneously, but being a volunteer-driven project, its long-term future does
|
||||
not look certain enough to convince operating systems to package it, so it
|
||||
needs to be build locally. See the section about QUIC in this document.
|
||||
|
||||
A fifth option is wolfSSL (https://github.com/wolfSSL/wolfssl). It is the only
|
||||
@ -490,8 +488,8 @@ target. Common issues may include:
|
||||
other supported compatible library.
|
||||
|
||||
- many "dereferencing pointer 'sa.985' does break strict-aliasing rules"
|
||||
=> these warnings happen on old compilers (typically gcc before 7.x),
|
||||
and may safely be ignored; newer ones are better on these.
|
||||
=> these warnings happen on old compilers (typically gcc-4.4), and may
|
||||
safely be ignored; newer ones are better on these.
|
||||
|
||||
|
||||
4.11) QUIC
|
||||
@ -500,11 +498,10 @@ QUIC is the new transport layer protocol and is required for HTTP/3. This
|
||||
protocol stack is currently supported as an experimental feature in haproxy on
|
||||
the frontend side. In order to enable it, use "USE_QUIC=1 USE_OPENSSL=1".
|
||||
|
||||
Note that QUIC is not always fully supported by the OpenSSL library depending on
|
||||
its version. Indeed QUIC 0-RTT cannot be supported by OpenSSL for versions before
|
||||
3.5 contrary to others libraries with full QUIC support. The preferred option is
|
||||
to use QUICTLS. This is a fork of OpenSSL with a QUIC-compatible API. Its
|
||||
repository is available at this location:
|
||||
Note that QUIC is not fully supported by the OpenSSL library. Indeed QUIC 0-RTT
|
||||
cannot be supported by OpenSSL contrary to others libraries with full QUIC
|
||||
support. The preferred option is to use QUICTLS. This is a fork of OpenSSL with
|
||||
a QUIC-compatible API. Its repository is available at this location:
|
||||
|
||||
https://github.com/quictls/openssl
|
||||
|
||||
@ -532,18 +529,14 @@ way assuming that wolfSSL was installed in /opt/wolfssl-5.6.0 as shown in 4.5:
|
||||
SSL_INC=/opt/wolfssl-5.6.0/include SSL_LIB=/opt/wolfssl-5.6.0/lib
|
||||
LDFLAGS="-Wl,-rpath,/opt/wolfssl-5.6.0/lib"
|
||||
|
||||
As last resort, haproxy may be compiled against OpenSSL as follows from 3.5
|
||||
version with 0-RTT support:
|
||||
|
||||
$ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1
|
||||
|
||||
or as follows for all OpenSSL versions but without O-RTT support:
|
||||
As last resort, haproxy may be compiled against OpenSSL as follows:
|
||||
|
||||
$ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1 USE_QUIC_OPENSSL_COMPAT=1
|
||||
|
||||
In addition to this requirements, the QUIC listener bindings must be explicitly
|
||||
enabled with a specific QUIC tuning parameter. (see "limited-quic" global
|
||||
parameter of haproxy Configuration Manual).
|
||||
Note that QUIC 0-RTT is not supported by haproxy QUIC stack when built against
|
||||
OpenSSL. In addition to this compilation requirements, the QUIC listener
|
||||
bindings must be explicitly enabled with a specific QUIC tuning parameter.
|
||||
(see "limited-quic" global parameter of haproxy Configuration Manual).
|
||||
|
||||
|
||||
5) How to build HAProxy
|
||||
@ -761,8 +754,8 @@ forced to produce final binaries, and must not be used during bisect sessions,
|
||||
as it will often lead to the wrong commit.
|
||||
|
||||
Examples:
|
||||
# silence strict-aliasing warnings with old gcc-5.5:
|
||||
$ make -j$(nproc) TARGET=linux-glibc CC=gcc-55 CFLAGS=-fno-strict-aliasing
|
||||
# silence strict-aliasing warnings with old gcc-4.4:
|
||||
$ make -j$(nproc) TARGET=linux-glibc CC=gcc-44 CFLAGS=-fno-strict-aliasing
|
||||
|
||||
# disable all warning options:
|
||||
$ make -j$(nproc) TARGET=linux-glibc CC=mycc WARN_CFLAGS= NOWARN_CFLAGS=
|
||||
|
25
Makefile
25
Makefile
@ -201,7 +201,6 @@ endif
|
||||
#### May be used to force running a specific set of reg-tests
|
||||
REG_TEST_FILES =
|
||||
REG_TEST_SCRIPT=./scripts/run-regtests.sh
|
||||
UNIT_TEST_SCRIPT=./scripts/run-unittests.sh
|
||||
|
||||
#### Standard C definition
|
||||
# Compiler-specific flags that may be used to set the standard behavior we
|
||||
@ -261,9 +260,9 @@ endif
|
||||
# without appearing here. Currently defined DEBUG macros include DEBUG_FULL,
|
||||
# DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_FD, DEBUG_POOL_INTEGRITY,
|
||||
# DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_ACTION=[0-3], DEBUG_HPACK,
|
||||
# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD=0-2, DEBUG_STRICT, DEBUG_DEV,
|
||||
# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD, DEBUG_STRICT, DEBUG_DEV,
|
||||
# DEBUG_TASK, DEBUG_MEMORY_POOLS, DEBUG_POOL_TRACING, DEBUG_QPACK, DEBUG_LIST,
|
||||
# DEBUG_COUNTERS=[0-2], DEBUG_STRESS, DEBUG_UNIT.
|
||||
# DEBUG_GLITCHES, DEBUG_STRESS.
|
||||
DEBUG =
|
||||
|
||||
#### Trace options
|
||||
@ -401,7 +400,7 @@ ifeq ($(TARGET),linux-musl)
|
||||
USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
|
||||
USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
|
||||
USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
|
||||
USE_GETADDRINFO USE_BACKTRACE)
|
||||
USE_GETADDRINFO)
|
||||
INSTALL = install -v
|
||||
endif
|
||||
|
||||
@ -592,12 +591,10 @@ endif
|
||||
|
||||
ifneq ($(USE_BACKTRACE:0=),)
|
||||
BACKTRACE_LDFLAGS = -Wl,$(if $(EXPORT_SYMBOL),$(EXPORT_SYMBOL),--export-dynamic)
|
||||
BACKTRACE_CFLAGS = -fno-omit-frame-pointer
|
||||
endif
|
||||
|
||||
ifneq ($(USE_CPU_AFFINITY:0=),)
|
||||
OPTIONS_OBJS += src/cpuset.o
|
||||
OPTIONS_OBJS += src/cpu_topo.o
|
||||
endif
|
||||
|
||||
# OpenSSL is packaged in various forms and with various dependencies.
|
||||
@ -630,10 +627,7 @@ ifneq ($(USE_OPENSSL:0=),)
|
||||
SSL_LDFLAGS := $(if $(SSL_LIB),-L$(SSL_LIB)) -lssl -lcrypto
|
||||
endif
|
||||
USE_SSL := $(if $(USE_SSL:0=),$(USE_SSL:0=),implicit)
|
||||
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o \
|
||||
src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o \
|
||||
src/ssl_utils.o src/jwt.o src/ssl_clienthello.o src/jws.o src/acme.o \
|
||||
src/ssl_trace.o
|
||||
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o src/ssl_utils.o src/jwt.o src/ssl_clienthello.o
|
||||
endif
|
||||
|
||||
ifneq ($(USE_ENGINE:0=),)
|
||||
@ -660,7 +654,7 @@ OPTIONS_OBJS += src/mux_quic.o src/h3.o src/quic_rx.o src/quic_tx.o \
|
||||
src/quic_cc_nocc.o src/quic_cc.o src/quic_pacing.o \
|
||||
src/h3_stats.o src/quic_stats.o src/qpack-enc.o \
|
||||
src/qpack-tbl.o src/quic_cc_drs.o src/quic_fctl.o \
|
||||
src/quic_enc.o
|
||||
src/cbuf.o
|
||||
endif
|
||||
|
||||
ifneq ($(USE_QUIC_OPENSSL_COMPAT:0=),)
|
||||
@ -984,7 +978,7 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
|
||||
src/lb_fas.o src/clock.o src/sock_inet.o src/ev_select.o \
|
||||
src/lb_map.o src/shctx.o src/mworker-prog.o src/hpack-dec.o \
|
||||
src/arg.o src/signal.o src/fix.o src/dynbuf.o src/guid.o \
|
||||
src/cfgparse-tcp.o src/lb_ss.o src/chunk.o src/counters.o \
|
||||
src/cfgparse-tcp.o src/lb_ss.o src/chunk.o \
|
||||
src/cfgparse-unix.o src/regex.o src/fcgi.o src/uri_auth.o \
|
||||
src/eb64tree.o src/eb32tree.o src/eb32sctree.o src/lru.o \
|
||||
src/limits.o src/ebimtree.o src/wdt.o src/hpack-tbl.o \
|
||||
@ -1027,7 +1021,7 @@ help:
|
||||
# TARGET variable is not set since we're not building, by definition.
|
||||
IGNORE_OPTS=help install install-man install-doc install-bin \
|
||||
uninstall clean tags cscope tar git-tar version update-version \
|
||||
opts reg-tests reg-tests-help unit-tests admin/halog/halog dev/flags/flags \
|
||||
opts reg-tests reg-tests-help admin/halog/halog dev/flags/flags \
|
||||
dev/haring/haring dev/ncpu/ncpu dev/poll/poll dev/tcploop/tcploop \
|
||||
dev/term_events/term_events
|
||||
|
||||
@ -1271,11 +1265,6 @@ reg-tests-help:
|
||||
|
||||
.PHONY: reg-tests reg-tests-help
|
||||
|
||||
unit-tests:
|
||||
$(Q)$(UNIT_TEST_SCRIPT)
|
||||
.PHONY: unit-tests
|
||||
|
||||
|
||||
# "make range" iteratively builds using "make all" and the exact same build
|
||||
# options for all commits within RANGE. RANGE may be either a git range
|
||||
# such as ref1..ref2 or a single commit, in which case all commits from
|
||||
|
@ -212,7 +212,7 @@ da_status_t da_atlas_compile(void *ctx, da_read_fn readfn, da_setpos_fn setposfn
|
||||
* da_getpropid on the atlas, and if generated by the search, the ID will be consistent across
|
||||
* different calls to search.
|
||||
* Properties added by a search that are neither in the compiled atlas, nor in the extra_props list
|
||||
* Are assigned an ID within the context that is not transferable through different search results
|
||||
* Are assigned an ID within the context that is not transferrable through different search results
|
||||
* within the same atlas.
|
||||
* @param atlas Atlas instance
|
||||
* @param extra_props properties
|
||||
|
@ -94,7 +94,7 @@ name must be preceded by a minus character ('-'). Here are examples:
|
||||
* Add section description as label for all metrics
|
||||
|
||||
It is possible to set a description in global and proxy sections, via the
|
||||
"description" directive. The global description is exposed if it is define via
|
||||
"description" directive. The global descrption is exposed if it is define via
|
||||
the "haproxy_process_description" metric. But the descriptions provided in proxy
|
||||
sections are not dumped. However, it is possible to add it as a label for all
|
||||
metrics of the corresponding section, including the global one. To do so,
|
||||
@ -389,9 +389,6 @@ listed below. Metrics from extra counters are not listed.
|
||||
| haproxy_server_max_connect_time_seconds |
|
||||
| haproxy_server_max_response_time_seconds |
|
||||
| haproxy_server_max_total_time_seconds |
|
||||
| haproxy_server_agent_status |
|
||||
| haproxy_server_agent_code |
|
||||
| haproxy_server_agent_duration_seconds |
|
||||
| haproxy_server_internal_errors_total |
|
||||
| haproxy_server_unsafe_idle_connections_current |
|
||||
| haproxy_server_safe_idle_connections_current |
|
||||
|
@ -32,11 +32,11 @@
|
||||
|
||||
/* Prometheus exporter flags (ctx->flags) */
|
||||
#define PROMEX_FL_METRIC_HDR 0x00000001
|
||||
/* unused: 0x00000002 */
|
||||
/* unused: 0x00000004 */
|
||||
/* unused: 0x00000008 */
|
||||
/* unused: 0x00000010 */
|
||||
/* unused: 0x00000020 */
|
||||
#define PROMEX_FL_INFO_METRIC 0x00000002
|
||||
#define PROMEX_FL_FRONT_METRIC 0x00000004
|
||||
#define PROMEX_FL_BACK_METRIC 0x00000008
|
||||
#define PROMEX_FL_SRV_METRIC 0x00000010
|
||||
#define PROMEX_FL_LI_METRIC 0x00000020
|
||||
#define PROMEX_FL_MODULE_METRIC 0x00000040
|
||||
#define PROMEX_FL_SCOPE_GLOBAL 0x00000080
|
||||
#define PROMEX_FL_SCOPE_FRONT 0x00000100
|
||||
|
@ -97,53 +97,187 @@ struct promex_ctx {
|
||||
*/
|
||||
#define PROMEX_MAX_METRIC_LENGTH 512
|
||||
|
||||
static inline enum promex_mt_type promex_global_gettype(int index, enum field_nature nature)
|
||||
{
|
||||
enum promex_mt_type type;
|
||||
/* Global metrics */
|
||||
const struct promex_metric promex_global_metrics[ST_I_INF_MAX] = {
|
||||
//[ST_I_INF_NAME] ignored
|
||||
//[ST_I_INF_VERSION], ignored
|
||||
//[ST_I_INF_RELEASE_DATE] ignored
|
||||
[ST_I_INF_NBTHREAD] = { .n = IST("nbthread"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_NBPROC] = { .n = IST("nbproc"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_PROCESS_NUM] = { .n = IST("relative_process_id"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
//[ST_I_INF_PID] ignored
|
||||
//[ST_I_INF_UPTIME] ignored
|
||||
[ST_I_INF_UPTIME_SEC] = { .n = IST("uptime_seconds"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_START_TIME_SEC] = { .n = IST("start_time_seconds"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
//[ST_I_INF_MEMMAX_MB] ignored
|
||||
[ST_I_INF_MEMMAX_BYTES] = { .n = IST("max_memory_bytes"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
//[ST_I_INF_POOL_ALLOC_MB] ignored
|
||||
[ST_I_INF_POOL_ALLOC_BYTES] = { .n = IST("pool_allocated_bytes"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
//[ST_I_INF_POOL_USED_MB] ignored
|
||||
[ST_I_INF_POOL_USED_BYTES] = { .n = IST("pool_used_bytes"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_POOL_FAILED] = { .n = IST("pool_failures_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_ULIMIT_N] = { .n = IST("max_fds"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_MAXSOCK] = { .n = IST("max_sockets"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_MAXCONN] = { .n = IST("max_connections"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_HARD_MAXCONN] = { .n = IST("hard_max_connections"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_CURR_CONN] = { .n = IST("current_connections"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_CUM_CONN] = { .n = IST("connections_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_CUM_REQ] = { .n = IST("requests_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_MAX_SSL_CONNS] = { .n = IST("max_ssl_connections"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_CURR_SSL_CONNS] = { .n = IST("current_ssl_connections"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_CUM_SSL_CONNS] = { .n = IST("ssl_connections_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_MAXPIPES] = { .n = IST("max_pipes"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_PIPES_USED] = { .n = IST("pipes_used_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_PIPES_FREE] = { .n = IST("pipes_free_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_CONN_RATE] = { .n = IST("current_connection_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_CONN_RATE_LIMIT] = { .n = IST("limit_connection_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_MAX_CONN_RATE] = { .n = IST("max_connection_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_SESS_RATE] = { .n = IST("current_session_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_SESS_RATE_LIMIT] = { .n = IST("limit_session_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_MAX_SESS_RATE] = { .n = IST("max_session_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_SSL_RATE] = { .n = IST("current_ssl_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_SSL_RATE_LIMIT] = { .n = IST("limit_ssl_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_MAX_SSL_RATE] = { .n = IST("max_ssl_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_SSL_FRONTEND_KEY_RATE] = { .n = IST("current_frontend_ssl_key_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_SSL_FRONTEND_MAX_KEY_RATE] = { .n = IST("max_frontend_ssl_key_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_SSL_FRONTEND_SESSION_REUSE_PCT] = { .n = IST("frontend_ssl_reuse"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_SSL_BACKEND_KEY_RATE] = { .n = IST("current_backend_ssl_key_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_SSL_BACKEND_MAX_KEY_RATE] = { .n = IST("max_backend_ssl_key_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_SSL_CACHE_LOOKUPS] = { .n = IST("ssl_cache_lookups_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_SSL_CACHE_MISSES] = { .n = IST("ssl_cache_misses_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_COMPRESS_BPS_IN] = { .n = IST("http_comp_bytes_in_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_COMPRESS_BPS_OUT] = { .n = IST("http_comp_bytes_out_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_COMPRESS_BPS_RATE_LIM] = { .n = IST("limit_http_comp"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_ZLIB_MEM_USAGE] = { .n = IST("current_zlib_memory"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_MAX_ZLIB_MEM_USAGE] = { .n = IST("max_zlib_memory"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_TASKS] = { .n = IST("current_tasks"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_RUN_QUEUE] = { .n = IST("current_run_queue"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_IDLE_PCT] = { .n = IST("idle_time_percent"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_NODE] = { .n = IST("node"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_DESCRIPTION] = { .n = IST("description"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_STOPPING] = { .n = IST("stopping"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_JOBS] = { .n = IST("jobs"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_UNSTOPPABLE_JOBS] = { .n = IST("unstoppable_jobs"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_LISTENERS] = { .n = IST("listeners"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_ACTIVE_PEERS] = { .n = IST("active_peers"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_CONNECTED_PEERS] = { .n = IST("connected_peers"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_DROPPED_LOGS] = { .n = IST("dropped_logs_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_BUSY_POLLING] = { .n = IST("busy_polling_enabled"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_FAILED_RESOLUTIONS] = { .n = IST("failed_resolutions"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_TOTAL_BYTES_OUT] = { .n = IST("bytes_out_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_TOTAL_SPLICED_BYTES_OUT] = { .n = IST("spliced_bytes_out_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_BYTES_OUT_RATE] = { .n = IST("bytes_out_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
//[ST_I_INF_DEBUG_COMMANDS_ISSUED] ignored
|
||||
[ST_I_INF_CUM_LOG_MSGS] = { .n = IST("recv_logs_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
|
||||
[ST_I_INF_BUILD_INFO] = { .n = IST("build_info"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
|
||||
};
|
||||
|
||||
/* general rule that fits most types
|
||||
*/
|
||||
type = (nature == FN_COUNTER) ? PROMEX_MT_COUNTER : PROMEX_MT_GAUGE;
|
||||
|
||||
/* historically we used to consider some metrics as counters while haproxy
|
||||
* doesn't consider them as such
|
||||
* FIXME: maybe this is no longer needed
|
||||
*/
|
||||
switch (index) {
|
||||
case ST_I_INF_POOL_FAILED:
|
||||
case ST_I_INF_CUM_CONN:
|
||||
case ST_I_INF_CUM_REQ:
|
||||
case ST_I_INF_CUM_SSL_CONNS:
|
||||
case ST_I_INF_PIPES_USED:
|
||||
case ST_I_INF_PIPES_FREE:
|
||||
case ST_I_INF_SSL_CACHE_LOOKUPS:
|
||||
case ST_I_INF_SSL_CACHE_MISSES:
|
||||
case ST_I_INF_COMPRESS_BPS_IN:
|
||||
case ST_I_INF_COMPRESS_BPS_OUT:
|
||||
case ST_I_INF_DROPPED_LOGS:
|
||||
case ST_I_INF_FAILED_RESOLUTIONS:
|
||||
case ST_I_INF_TOTAL_BYTES_OUT:
|
||||
case ST_I_INF_TOTAL_SPLICED_BYTES_OUT:
|
||||
case ST_I_INF_CUM_LOG_MSGS:
|
||||
type = PROMEX_MT_COUNTER;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return type;
|
||||
}
|
||||
|
||||
static inline enum promex_mt_type promex_st_gettype(int index, enum field_nature nature)
|
||||
{
|
||||
enum promex_mt_type type;
|
||||
|
||||
/* general rule that fits most types
|
||||
*/
|
||||
type = (nature == FN_COUNTER) ? PROMEX_MT_COUNTER : PROMEX_MT_GAUGE;
|
||||
|
||||
return type;
|
||||
}
|
||||
/* frontend/backend/server fields */
|
||||
const struct promex_metric promex_st_metrics[ST_I_PX_MAX] = {
|
||||
//[ST_I_PX_PXNAME] ignored
|
||||
//[ST_I_PX_SVNAME] ignored
|
||||
[ST_I_PX_QCUR] = { .n = IST("current_queue"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_QMAX] = { .n = IST("max_queue"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_SCUR] = { .n = IST("current_sessions"), .type = PROMEX_MT_GAUGE, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_SMAX] = { .n = IST("max_sessions"), .type = PROMEX_MT_GAUGE, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_SLIM] = { .n = IST("limit_sessions"), .type = PROMEX_MT_GAUGE, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_STOT] = { .n = IST("sessions_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_BIN] = { .n = IST("bytes_in_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_BOUT] = { .n = IST("bytes_out_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_DREQ] = { .n = IST("requests_denied_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC ) },
|
||||
[ST_I_PX_DRESP] = { .n = IST("responses_denied_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_EREQ] = { .n = IST("request_errors_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC ) },
|
||||
[ST_I_PX_ECON] = { .n = IST("connection_errors_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_ERESP] = { .n = IST("response_errors_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_WRETR] = { .n = IST("retry_warnings_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_WREDIS] = { .n = IST("redispatch_warnings_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_STATUS] = { .n = IST("status"), .type = PROMEX_MT_GAUGE, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_WEIGHT] = { .n = IST("weight"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_ACT] = { .n = IST("active_servers"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_BCK] = { .n = IST("backup_servers"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_CHKFAIL] = { .n = IST("check_failures_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_CHKDOWN] = { .n = IST("check_up_down_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_LASTCHG] = { .n = IST("check_last_change_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_DOWNTIME] = { .n = IST("downtime_seconds_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_QLIMIT] = { .n = IST("queue_limit"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
|
||||
//[ST_I_PX_PID] ignored
|
||||
//[ST_I_PX_IID] ignored
|
||||
//[ST_I_PX_SID] ignored
|
||||
[ST_I_PX_THROTTLE] = { .n = IST("current_throttle"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_LBTOT] = { .n = IST("loadbalanced_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
//[ST_I_PX_TRACKED] ignored
|
||||
//[ST_I_PX_TYPE] ignored
|
||||
//[ST_I_PX_RATE] ignored
|
||||
[ST_I_PX_RATE_LIM] = { .n = IST("limit_session_rate"), .type = PROMEX_MT_GAUGE, .flags = (PROMEX_FL_FRONT_METRIC ) },
|
||||
[ST_I_PX_RATE_MAX] = { .n = IST("max_session_rate"), .type = PROMEX_MT_GAUGE, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_CHECK_STATUS] = { .n = IST("check_status"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_CHECK_CODE] = { .n = IST("check_code"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_CHECK_DURATION] = { .n = IST("check_duration_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_HRSP_1XX] = { .n = IST("http_responses_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_HRSP_2XX] = { .n = IST("http_responses_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_HRSP_3XX] = { .n = IST("http_responses_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_HRSP_4XX] = { .n = IST("http_responses_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_HRSP_5XX] = { .n = IST("http_responses_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_HRSP_OTHER] = { .n = IST("http_responses_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
//[ST_I_PX_HANAFAIL] ignored
|
||||
//[ST_I_PX_REQ_RATE] ignored
|
||||
[ST_I_PX_REQ_RATE_MAX] = { .n = IST("http_requests_rate_max"), .type = PROMEX_MT_GAUGE, .flags = (PROMEX_FL_FRONT_METRIC ) },
|
||||
[ST_I_PX_REQ_TOT] = { .n = IST("http_requests_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC ) },
|
||||
[ST_I_PX_CLI_ABRT] = { .n = IST("client_aborts_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_SRV_ABRT] = { .n = IST("server_aborts_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_COMP_IN] = { .n = IST("http_comp_bytes_in_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC ) },
|
||||
[ST_I_PX_COMP_OUT] = { .n = IST("http_comp_bytes_out_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC ) },
|
||||
[ST_I_PX_COMP_BYP] = { .n = IST("http_comp_bytes_bypassed_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC ) },
|
||||
[ST_I_PX_COMP_RSP] = { .n = IST("http_comp_responses_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC ) },
|
||||
[ST_I_PX_LASTSESS] = { .n = IST("last_session_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
//[ST_I_PX_LAST_CHK] ignored
|
||||
//[ST_I_PX_LAST_AGT] ignored
|
||||
[ST_I_PX_QTIME] = { .n = IST("queue_time_average_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_CTIME] = { .n = IST("connect_time_average_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_RTIME] = { .n = IST("response_time_average_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_TTIME] = { .n = IST("total_time_average_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
//[ST_I_PX_AGENT_STATUS] ignored
|
||||
//[ST_I_PX_AGENT_CODE] ignored
|
||||
//[ST_I_PX_AGENT_DURATION] ignored
|
||||
//[ST_I_PX_CHECK_DESC] ignored
|
||||
//[ST_I_PX_AGENT_DESC] ignored
|
||||
//[ST_I_PX_CHECK_RISE] ignored
|
||||
//[ST_I_PX_CHECK_FALL] ignored
|
||||
//[ST_I_PX_CHECK_HEALTH] ignored
|
||||
//[ST_I_PX_AGENT_RISE] ignored
|
||||
//[ST_I_PX_AGENT_FALL] ignored
|
||||
//[ST_I_PX_AGENT_HEALTH] ignored
|
||||
//[ST_I_PX_ADDR] ignored
|
||||
//[ST_I_PX_COOKIE] ignored
|
||||
//[ST_I_PX_MODE] ignored
|
||||
//[ST_I_PX_ALGO] ignored
|
||||
//[ST_I_PX_CONN_RATE] ignored
|
||||
[ST_I_PX_CONN_RATE_MAX] = { .n = IST("connections_rate_max"), .type = PROMEX_MT_GAUGE, .flags = (PROMEX_FL_FRONT_METRIC ) },
|
||||
[ST_I_PX_CONN_TOT] = { .n = IST("connections_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC ) },
|
||||
[ST_I_PX_INTERCEPTED] = { .n = IST("intercepted_requests_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC ) },
|
||||
[ST_I_PX_DCON] = { .n = IST("denied_connections_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC ) },
|
||||
[ST_I_PX_DSES] = { .n = IST("denied_sessions_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC ) },
|
||||
[ST_I_PX_WREW] = { .n = IST("failed_header_rewriting_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_CONNECT] = { .n = IST("connection_attempts_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_REUSE] = { .n = IST("connection_reuses_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_CACHE_LOOKUPS] = { .n = IST("http_cache_lookups_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC ) },
|
||||
[ST_I_PX_CACHE_HITS] = { .n = IST("http_cache_hits_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC ) },
|
||||
[ST_I_PX_SRV_ICUR] = { .n = IST("idle_connections_current"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_SRV_ILIM] = { .n = IST("idle_connections_limit"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_QT_MAX] = { .n = IST("max_queue_time_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_CT_MAX] = { .n = IST("max_connect_time_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_RT_MAX] = { .n = IST("max_response_time_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_TT_MAX] = { .n = IST("max_total_time_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_EINT] = { .n = IST("internal_errors_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_IDLE_CONN_CUR] = { .n = IST("unsafe_idle_connections_current"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_SAFE_CONN_CUR] = { .n = IST("safe_idle_connections_current"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_USED_CONN_CUR] = { .n = IST("used_connections_current"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_NEED_CONN_EST] = { .n = IST("need_connections_current"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_UWEIGHT] = { .n = IST("uweight"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
|
||||
[ST_I_PX_AGG_SRV_CHECK_STATUS] = { .n = IST("agg_server_check_status"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC ) },
|
||||
[ST_I_PX_AGG_SRV_STATUS ] = { .n = IST("agg_server_status"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC ) },
|
||||
[ST_I_PX_AGG_CHECK_STATUS] = { .n = IST("agg_check_status"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC ) },
|
||||
};
|
||||
|
||||
/* Specialized frontend metric names, to override default ones */
|
||||
const struct ist promex_st_front_metrics_names[ST_I_PX_MAX] = {
|
||||
@ -173,8 +307,6 @@ const struct ist promex_st_metric_desc[ST_I_PX_MAX] = {
|
||||
[ST_I_PX_CTIME] = IST("Avg. connect time for last 1024 successful connections."),
|
||||
[ST_I_PX_RTIME] = IST("Avg. response time for last 1024 successful connections."),
|
||||
[ST_I_PX_TTIME] = IST("Avg. total time for last 1024 successful connections."),
|
||||
[ST_I_PX_AGENT_STATUS] = IST("Status of last agent check, per state label value."),
|
||||
[ST_I_PX_AGENT_DURATION] = IST("Total duration of the latest server agent check, in seconds."),
|
||||
[ST_I_PX_QT_MAX] = IST("Maximum observed time spent in the queue"),
|
||||
[ST_I_PX_CT_MAX] = IST("Maximum observed time spent waiting for a connection to complete"),
|
||||
[ST_I_PX_RT_MAX] = IST("Maximum observed time spent waiting for a server response"),
|
||||
@ -437,21 +569,17 @@ static int promex_dump_global_metrics(struct appctx *appctx, struct htx *htx)
|
||||
|
||||
for (; ctx->field_num < ST_I_INF_MAX; ctx->field_num++) {
|
||||
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
||||
enum promex_mt_type type;
|
||||
int lb_idx = 0;
|
||||
|
||||
if (!stat_cols_info[ctx->field_num].alt_name)
|
||||
if (!(promex_global_metrics[ctx->field_num].flags & ctx->flags))
|
||||
continue;
|
||||
|
||||
name = ist(stat_cols_info[ctx->field_num].alt_name);
|
||||
name = promex_global_metrics[ctx->field_num].n;
|
||||
desc = ist(stat_cols_info[ctx->field_num].desc);
|
||||
|
||||
if (promex_filter_metric(appctx, prefix, name))
|
||||
continue;
|
||||
|
||||
val = stat_line_info[ctx->field_num];
|
||||
type = promex_global_gettype(ctx->field_num, (val.type & FN_MASK));
|
||||
|
||||
switch (ctx->field_num) {
|
||||
case ST_I_INF_NODE:
|
||||
labels[lb_idx].name = ist("node");
|
||||
@ -474,7 +602,7 @@ static int promex_dump_global_metrics(struct appctx *appctx, struct htx *htx)
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
val = stat_line_info[ctx->field_num];
|
||||
}
|
||||
|
||||
if (global.desc && ((ctx->field_num == ST_I_INF_DESCRIPTION) || (ctx->flags & PROMEX_FL_DESC_LABELS))) {
|
||||
@ -484,7 +612,7 @@ static int promex_dump_global_metrics(struct appctx *appctx, struct htx *htx)
|
||||
}
|
||||
|
||||
if (!promex_dump_ts(appctx, prefix, name, desc,
|
||||
type,
|
||||
promex_global_metrics[ctx->field_num].type,
|
||||
&val, labels, &out, max))
|
||||
goto full;
|
||||
|
||||
@ -520,15 +648,14 @@ static int promex_dump_front_metrics(struct appctx *appctx, struct htx *htx)
|
||||
enum promex_front_state state;
|
||||
|
||||
for (;ctx->field_num < ST_I_PX_MAX; ctx->field_num++) {
|
||||
if (!stat_cols_px[ctx->field_num].alt_name ||
|
||||
!(stat_cols_px[ctx->field_num].cap & STATS_PX_CAP_FE))
|
||||
if (!(promex_st_metrics[ctx->field_num].flags & ctx->flags))
|
||||
continue;
|
||||
|
||||
name = promex_st_front_metrics_names[ctx->field_num];
|
||||
desc = promex_st_metric_desc[ctx->field_num];
|
||||
|
||||
if (!isttest(name))
|
||||
name = ist(stat_cols_px[ctx->field_num].alt_name);
|
||||
name = promex_st_metrics[ctx->field_num].n;
|
||||
if (!isttest(desc))
|
||||
desc = ist(stat_cols_px[ctx->field_num].desc);
|
||||
|
||||
@ -540,7 +667,6 @@ static int promex_dump_front_metrics(struct appctx *appctx, struct htx *htx)
|
||||
|
||||
while (px) {
|
||||
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
||||
enum promex_mt_type type;
|
||||
int lb_idx = 0;
|
||||
|
||||
labels[lb_idx].name = ist("proxy");
|
||||
@ -560,9 +686,6 @@ static int promex_dump_front_metrics(struct appctx *appctx, struct htx *htx)
|
||||
if (!stats_fill_fe_line(px, 0, stats, ST_I_PX_MAX, &(ctx->field_num)))
|
||||
return -1;
|
||||
|
||||
val = stats[ctx->field_num];
|
||||
type = promex_st_gettype(ctx->field_num, (val.type & FN_MASK));
|
||||
|
||||
switch (ctx->field_num) {
|
||||
case ST_I_PX_STATUS:
|
||||
state = !(px->flags & PR_FL_STOPPED);
|
||||
@ -572,7 +695,7 @@ static int promex_dump_front_metrics(struct appctx *appctx, struct htx *htx)
|
||||
val = mkf_u32(FO_STATUS, state == ctx->obj_state);
|
||||
|
||||
if (!promex_dump_ts(appctx, prefix, name, desc,
|
||||
type,
|
||||
promex_st_metrics[ctx->field_num].type,
|
||||
&val, labels, &out, max))
|
||||
goto full;
|
||||
}
|
||||
@ -589,6 +712,7 @@ static int promex_dump_front_metrics(struct appctx *appctx, struct htx *htx)
|
||||
case ST_I_PX_COMP_RSP:
|
||||
if (px->mode != PR_MODE_HTTP)
|
||||
goto next_px;
|
||||
val = stats[ctx->field_num];
|
||||
break;
|
||||
case ST_I_PX_HRSP_1XX:
|
||||
case ST_I_PX_HRSP_2XX:
|
||||
@ -602,14 +726,15 @@ static int promex_dump_front_metrics(struct appctx *appctx, struct htx *htx)
|
||||
ctx->flags &= ~PROMEX_FL_METRIC_HDR;
|
||||
labels[lb_idx].name = ist("code");
|
||||
labels[lb_idx].value = promex_hrsp_code[ctx->field_num - ST_I_PX_HRSP_1XX];
|
||||
val = stats[ctx->field_num];
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
val = stats[ctx->field_num];
|
||||
}
|
||||
|
||||
if (!promex_dump_ts(appctx, prefix, name, desc,
|
||||
type,
|
||||
promex_st_metrics[ctx->field_num].type,
|
||||
&val, labels, &out, max))
|
||||
goto full;
|
||||
next_px:
|
||||
@ -724,15 +849,14 @@ static int promex_dump_listener_metrics(struct appctx *appctx, struct htx *htx)
|
||||
enum li_status status;
|
||||
|
||||
for (;ctx->field_num < ST_I_PX_MAX; ctx->field_num++) {
|
||||
if (!stat_cols_px[ctx->field_num].alt_name ||
|
||||
!(stat_cols_px[ctx->field_num].cap & STATS_PX_CAP_LI))
|
||||
if (!(promex_st_metrics[ctx->field_num].flags & ctx->flags))
|
||||
continue;
|
||||
|
||||
name = promex_st_li_metrics_names[ctx->field_num];
|
||||
desc = promex_st_metric_desc[ctx->field_num];
|
||||
|
||||
if (!isttest(name))
|
||||
name = ist(stat_cols_px[ctx->field_num].alt_name);
|
||||
name = promex_st_metrics[ctx->field_num].n;
|
||||
if (!isttest(desc))
|
||||
desc = ist(stat_cols_px[ctx->field_num].desc);
|
||||
|
||||
@ -765,8 +889,6 @@ static int promex_dump_listener_metrics(struct appctx *appctx, struct htx *htx)
|
||||
li = LIST_NEXT(&px->conf.listeners, struct listener *, by_fe);
|
||||
|
||||
list_for_each_entry_from(li, &px->conf.listeners, by_fe) {
|
||||
enum promex_mt_type type;
|
||||
|
||||
if (!li->counters)
|
||||
continue;
|
||||
|
||||
@ -777,9 +899,6 @@ static int promex_dump_listener_metrics(struct appctx *appctx, struct htx *htx)
|
||||
ST_I_PX_MAX, &(ctx->field_num)))
|
||||
return -1;
|
||||
|
||||
val = stats[ctx->field_num];
|
||||
type = promex_st_gettype(ctx->field_num, (val.type & FN_MASK));
|
||||
|
||||
switch (ctx->field_num) {
|
||||
case ST_I_PX_STATUS:
|
||||
status = get_li_status(li);
|
||||
@ -788,18 +907,18 @@ static int promex_dump_listener_metrics(struct appctx *appctx, struct htx *htx)
|
||||
labels[lb_idx+1].name = ist("state");
|
||||
labels[lb_idx+1].value = ist(li_status_st[ctx->obj_state]);
|
||||
if (!promex_dump_ts(appctx, prefix, name, desc,
|
||||
type,
|
||||
promex_st_metrics[ctx->field_num].type,
|
||||
&val, labels, &out, max))
|
||||
goto full;
|
||||
}
|
||||
ctx->obj_state = 0;
|
||||
continue;
|
||||
default:
|
||||
break;
|
||||
val = stats[ctx->field_num];
|
||||
}
|
||||
|
||||
if (!promex_dump_ts(appctx, prefix, name, desc,
|
||||
type,
|
||||
promex_st_metrics[ctx->field_num].type,
|
||||
&val, labels, &out, max))
|
||||
goto full;
|
||||
}
|
||||
@ -932,15 +1051,14 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||
enum healthcheck_status srv_check_status;
|
||||
|
||||
for (;ctx->field_num < ST_I_PX_MAX; ctx->field_num++) {
|
||||
if (!stat_cols_px[ctx->field_num].alt_name ||
|
||||
!(stat_cols_px[ctx->field_num].cap & STATS_PX_CAP_BE))
|
||||
if (!(promex_st_metrics[ctx->field_num].flags & ctx->flags))
|
||||
continue;
|
||||
|
||||
name = promex_st_back_metrics_names[ctx->field_num];
|
||||
desc = promex_st_metric_desc[ctx->field_num];
|
||||
|
||||
if (!isttest(name))
|
||||
name = ist(stat_cols_px[ctx->field_num].alt_name);
|
||||
name = promex_st_metrics[ctx->field_num].n;
|
||||
if (!isttest(desc))
|
||||
desc = ist(stat_cols_px[ctx->field_num].desc);
|
||||
|
||||
@ -954,7 +1072,6 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
||||
unsigned int srv_state_count[PROMEX_SRV_STATE_COUNT] = { 0 };
|
||||
unsigned int srv_check_count[HCHK_STATUS_SIZE] = { 0 };
|
||||
enum promex_mt_type type;
|
||||
const char *check_state;
|
||||
int lb_idx = 0;
|
||||
|
||||
@ -976,9 +1093,6 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||
if (!stats_fill_be_line(px, 0, stats, ST_I_PX_MAX, &(ctx->field_num)))
|
||||
return -1;
|
||||
|
||||
val = stats[ctx->field_num];
|
||||
type = promex_st_gettype(ctx->field_num, (val.type & FN_MASK));
|
||||
|
||||
switch (ctx->field_num) {
|
||||
case ST_I_PX_AGG_SRV_CHECK_STATUS: // DEPRECATED
|
||||
case ST_I_PX_AGG_SRV_STATUS:
|
||||
@ -995,7 +1109,7 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||
labels[lb_idx].name = ist("state");
|
||||
labels[lb_idx].value = promex_srv_st[ctx->obj_state];
|
||||
if (!promex_dump_ts(appctx, prefix, name, desc,
|
||||
type,
|
||||
promex_st_metrics[ctx->field_num].type,
|
||||
&val, labels, &out, max))
|
||||
goto full;
|
||||
}
|
||||
@ -1020,7 +1134,7 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||
labels[lb_idx].name = ist("state");
|
||||
labels[lb_idx].value = ist(check_state);
|
||||
if (!promex_dump_ts(appctx, prefix, name, desc,
|
||||
type,
|
||||
promex_st_metrics[ctx->field_num].type,
|
||||
&val, labels, &out, max))
|
||||
goto full;
|
||||
}
|
||||
@ -1033,7 +1147,7 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||
labels[lb_idx].value = promex_back_st[ctx->obj_state];
|
||||
val = mkf_u32(FO_STATUS, bkd_state == ctx->obj_state);
|
||||
if (!promex_dump_ts(appctx, prefix, name, desc,
|
||||
type,
|
||||
promex_st_metrics[ctx->field_num].type,
|
||||
&val, labels, &out, max))
|
||||
goto full;
|
||||
}
|
||||
@ -1080,6 +1194,7 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||
case ST_I_PX_COMP_RSP:
|
||||
if (px->mode != PR_MODE_HTTP)
|
||||
goto next_px;
|
||||
val = stats[ctx->field_num];
|
||||
break;
|
||||
case ST_I_PX_HRSP_1XX:
|
||||
case ST_I_PX_HRSP_2XX:
|
||||
@ -1093,14 +1208,15 @@ static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
|
||||
ctx->flags &= ~PROMEX_FL_METRIC_HDR;
|
||||
labels[lb_idx].name = ist("code");
|
||||
labels[lb_idx].value = promex_hrsp_code[ctx->field_num - ST_I_PX_HRSP_1XX];
|
||||
val = stats[ctx->field_num];
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
val = stats[ctx->field_num];
|
||||
}
|
||||
|
||||
if (!promex_dump_ts(appctx, prefix, name, desc,
|
||||
type,
|
||||
promex_st_metrics[ctx->field_num].type,
|
||||
&val, labels, &out, max))
|
||||
goto full;
|
||||
next_px:
|
||||
@ -1216,15 +1332,14 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||
const char *check_state;
|
||||
|
||||
for (;ctx->field_num < ST_I_PX_MAX; ctx->field_num++) {
|
||||
if (!stat_cols_px[ctx->field_num].alt_name ||
|
||||
!(stat_cols_px[ctx->field_num].cap & STATS_PX_CAP_SRV))
|
||||
if (!(promex_st_metrics[ctx->field_num].flags & ctx->flags))
|
||||
continue;
|
||||
|
||||
name = promex_st_srv_metrics_names[ctx->field_num];
|
||||
desc = promex_st_metric_desc[ctx->field_num];
|
||||
|
||||
if (!isttest(name))
|
||||
name = ist(stat_cols_px[ctx->field_num].alt_name);
|
||||
name = promex_st_metrics[ctx->field_num].n;
|
||||
if (!isttest(desc))
|
||||
desc = ist(stat_cols_px[ctx->field_num].desc);
|
||||
|
||||
@ -1236,7 +1351,6 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||
|
||||
while (px) {
|
||||
struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
|
||||
enum promex_mt_type type;
|
||||
int lb_idx = 0;
|
||||
|
||||
labels[lb_idx].name = ist("proxy");
|
||||
@ -1266,9 +1380,6 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||
if ((ctx->flags & PROMEX_FL_NO_MAINT_SRV) && (sv->cur_admin & SRV_ADMF_MAINT))
|
||||
goto next_sv;
|
||||
|
||||
val = stats[ctx->field_num];
|
||||
type = promex_st_gettype(ctx->field_num, (val.type & FN_MASK));
|
||||
|
||||
switch (ctx->field_num) {
|
||||
case ST_I_PX_STATUS:
|
||||
state = promex_srv_status(sv);
|
||||
@ -1277,7 +1388,7 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||
labels[lb_idx+1].name = ist("state");
|
||||
labels[lb_idx+1].value = promex_srv_st[ctx->obj_state];
|
||||
if (!promex_dump_ts(appctx, prefix, name, desc,
|
||||
type,
|
||||
promex_st_metrics[ctx->field_num].type,
|
||||
&val, labels, &out, max))
|
||||
goto full;
|
||||
}
|
||||
@ -1327,7 +1438,7 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||
labels[lb_idx+1].name = ist("state");
|
||||
labels[lb_idx+1].value = ist(check_state);
|
||||
if (!promex_dump_ts(appctx, prefix, name, desc,
|
||||
type,
|
||||
promex_st_metrics[ctx->field_num].type,
|
||||
&val, labels, &out, max))
|
||||
goto full;
|
||||
}
|
||||
@ -1344,12 +1455,12 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||
secs = (double)sv->check.duration / 1000.0;
|
||||
val = mkf_flt(FN_DURATION, secs);
|
||||
break;
|
||||
|
||||
case ST_I_PX_REQ_TOT:
|
||||
if (px->mode != PR_MODE_HTTP) {
|
||||
sv = NULL;
|
||||
goto next_px;
|
||||
}
|
||||
val = stats[ctx->field_num];
|
||||
break;
|
||||
case ST_I_PX_HRSP_1XX:
|
||||
case ST_I_PX_HRSP_2XX:
|
||||
@ -1365,44 +1476,15 @@ static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
|
||||
ctx->flags &= ~PROMEX_FL_METRIC_HDR;
|
||||
labels[lb_idx+1].name = ist("code");
|
||||
labels[lb_idx+1].value = promex_hrsp_code[ctx->field_num - ST_I_PX_HRSP_1XX];
|
||||
break;
|
||||
|
||||
case ST_I_PX_AGENT_STATUS:
|
||||
if ((sv->agent.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) != CHK_ST_ENABLED)
|
||||
goto next_sv;
|
||||
|
||||
for (; ctx->obj_state < HCHK_STATUS_SIZE; ctx->obj_state++) {
|
||||
if (get_check_status_result(ctx->obj_state) < CHK_RES_FAILED)
|
||||
continue;
|
||||
val = mkf_u32(FO_STATUS, sv->agent.status == ctx->obj_state);
|
||||
check_state = get_check_status_info(ctx->obj_state);
|
||||
labels[lb_idx+1].name = ist("state");
|
||||
labels[lb_idx+1].value = ist(check_state);
|
||||
if (!promex_dump_ts(appctx, prefix, name, desc,
|
||||
type,
|
||||
&val, labels, &out, max))
|
||||
goto full;
|
||||
}
|
||||
ctx->obj_state = 0;
|
||||
goto next_sv;
|
||||
case ST_I_PX_AGENT_CODE:
|
||||
if ((sv->agent.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) != CHK_ST_ENABLED)
|
||||
goto next_sv;
|
||||
val = mkf_u32(FN_OUTPUT, (sv->agent.status < HCHK_STATUS_L57DATA) ? 0 : sv->agent.code);
|
||||
break;
|
||||
case ST_I_PX_AGENT_DURATION:
|
||||
if (sv->agent.status < HCHK_STATUS_CHECKED)
|
||||
goto next_sv;
|
||||
secs = (double)sv->agent.duration / 1000.0;
|
||||
val = mkf_flt(FN_DURATION, secs);
|
||||
val = stats[ctx->field_num];
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
val = stats[ctx->field_num];
|
||||
}
|
||||
|
||||
if (!promex_dump_ts(appctx, prefix, name, desc,
|
||||
type,
|
||||
promex_st_metrics[ctx->field_num].type,
|
||||
&val, labels, &out, max))
|
||||
goto full;
|
||||
next_sv:
|
||||
@ -1692,7 +1774,7 @@ static int promex_dump_metrics(struct appctx *appctx, struct stconn *sc, struct
|
||||
|
||||
switch (appctx->st1) {
|
||||
case PROMEX_DUMPER_INIT:
|
||||
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
||||
ctx->flags |= (PROMEX_FL_METRIC_HDR|PROMEX_FL_INFO_METRIC);
|
||||
ctx->obj_state = 0;
|
||||
ctx->field_num = ST_I_INF_NAME;
|
||||
appctx->st1 = PROMEX_DUMPER_GLOBAL;
|
||||
@ -1708,7 +1790,8 @@ static int promex_dump_metrics(struct appctx *appctx, struct stconn *sc, struct
|
||||
}
|
||||
}
|
||||
|
||||
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
||||
ctx->flags &= ~PROMEX_FL_INFO_METRIC;
|
||||
ctx->flags |= (PROMEX_FL_METRIC_HDR|PROMEX_FL_FRONT_METRIC);
|
||||
ctx->obj_state = 0;
|
||||
ctx->field_num = ST_I_PX_PXNAME;
|
||||
ctx->mod_field_num = 0;
|
||||
@ -1725,7 +1808,8 @@ static int promex_dump_metrics(struct appctx *appctx, struct stconn *sc, struct
|
||||
}
|
||||
}
|
||||
|
||||
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
||||
ctx->flags &= ~PROMEX_FL_FRONT_METRIC;
|
||||
ctx->flags |= (PROMEX_FL_METRIC_HDR|PROMEX_FL_LI_METRIC);
|
||||
ctx->obj_state = 0;
|
||||
ctx->field_num = ST_I_PX_PXNAME;
|
||||
ctx->mod_field_num = 0;
|
||||
@ -1742,7 +1826,8 @@ static int promex_dump_metrics(struct appctx *appctx, struct stconn *sc, struct
|
||||
}
|
||||
}
|
||||
|
||||
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
||||
ctx->flags &= ~PROMEX_FL_LI_METRIC;
|
||||
ctx->flags |= (PROMEX_FL_METRIC_HDR|PROMEX_FL_BACK_METRIC);
|
||||
ctx->obj_state = 0;
|
||||
ctx->field_num = ST_I_PX_PXNAME;
|
||||
ctx->mod_field_num = 0;
|
||||
@ -1759,7 +1844,8 @@ static int promex_dump_metrics(struct appctx *appctx, struct stconn *sc, struct
|
||||
}
|
||||
}
|
||||
|
||||
ctx->flags |= PROMEX_FL_METRIC_HDR;
|
||||
ctx->flags &= ~PROMEX_FL_BACK_METRIC;
|
||||
ctx->flags |= (PROMEX_FL_METRIC_HDR|PROMEX_FL_SRV_METRIC);
|
||||
ctx->obj_state = 0;
|
||||
ctx->field_num = ST_I_PX_PXNAME;
|
||||
ctx->mod_field_num = 0;
|
||||
@ -1776,6 +1862,7 @@ static int promex_dump_metrics(struct appctx *appctx, struct stconn *sc, struct
|
||||
}
|
||||
}
|
||||
|
||||
ctx->flags &= ~(PROMEX_FL_METRIC_HDR|PROMEX_FL_SRV_METRIC);
|
||||
ctx->flags |= (PROMEX_FL_METRIC_HDR|PROMEX_FL_MODULE_METRIC);
|
||||
ctx->field_num = 0;
|
||||
ctx->mod_field_num = 0;
|
||||
|
@ -25,7 +25,7 @@ end
|
||||
|
||||
# returns $node filled with the first node of ebroot $arg0
|
||||
define ebtree_first
|
||||
# browse ebtree left until encountering leaf
|
||||
# browse ebtree left until encoutering leaf
|
||||
set $node = (struct eb_node *)$arg0->b[0]
|
||||
while 1
|
||||
_ebtree_set_tag_node $node
|
||||
@ -76,7 +76,7 @@ end
|
||||
|
||||
# returns $node filled with the first node of ebroot $arg0
|
||||
define ebsctree_first
|
||||
# browse ebsctree left until encountering leaf
|
||||
# browse ebsctree left until encoutering leaf
|
||||
set $node = (struct eb32sc_node *)$arg0->b[0]
|
||||
while 1
|
||||
_ebsctree_set_tag_node $node
|
||||
|
@ -1,247 +0,0 @@
|
||||
-- This is an HTTP/2 tracer for a TCP proxy. It will decode the frames that are
|
||||
-- exchanged between the client and the server and indicate their direction,
|
||||
-- types, flags and lengths. Lines are prefixed with a connection number modulo
|
||||
-- 4096 that allows to sort out multiplexed exchanges. In order to use this,
|
||||
-- simply load this file in the global section and use it from a TCP proxy:
|
||||
--
|
||||
-- global
|
||||
-- lua-load "dev/h2/h2-tracer.lua"
|
||||
--
|
||||
-- listen h2_sniffer
|
||||
-- mode tcp
|
||||
-- bind :8002
|
||||
-- filter lua.h2-tracer #hex
|
||||
-- server s1 127.0.0.1:8003
|
||||
--
|
||||
|
||||
-- define the decoder's class here
|
||||
Dec = {}
|
||||
Dec.id = "Lua H2 tracer"
|
||||
Dec.flags = 0
|
||||
Dec.__index = Dec
|
||||
Dec.args = {} -- args passed by the filter's declaration
|
||||
Dec.cid = 0 -- next connection ID
|
||||
|
||||
-- prefix to indent responses
|
||||
res_pfx = " | "
|
||||
|
||||
-- H2 frame types
|
||||
h2ft = {
|
||||
[0] = "DATA",
|
||||
[1] = "HEADERS",
|
||||
[2] = "PRIORITY",
|
||||
[3] = "RST_STREAM",
|
||||
[4] = "SETTINGS",
|
||||
[5] = "PUSH_PROMISE",
|
||||
[6] = "PING",
|
||||
[7] = "GOAWAY",
|
||||
[8] = "WINDOW_UPDATE",
|
||||
[9] = "CONTINUATION",
|
||||
}
|
||||
|
||||
h2ff = {
|
||||
[0] = { [0] = "ES", [3] = "PADDED" }, -- data
|
||||
[1] = { [0] = "ES", [2] = "EH", [3] = "PADDED", [5] = "PRIORITY" }, -- headers
|
||||
[2] = { }, -- priority
|
||||
[3] = { }, -- rst_stream
|
||||
[4] = { [0] = "ACK" }, -- settings
|
||||
[5] = { [2] = "EH", [3] = "PADDED" }, -- push_promise
|
||||
[6] = { [0] = "ACK" }, -- ping
|
||||
[7] = { }, -- goaway
|
||||
[8] = { }, -- window_update
|
||||
[9] = { [2] = "EH" }, -- continuation
|
||||
}
|
||||
|
||||
function Dec:new()
|
||||
local dec = {}
|
||||
|
||||
setmetatable(dec, Dec)
|
||||
dec.do_hex = false
|
||||
if (Dec.args[1] == "hex") then
|
||||
dec.do_hex = true
|
||||
end
|
||||
|
||||
Dec.cid = Dec.cid+1
|
||||
-- mix the thread number when multithreading.
|
||||
dec.cid = Dec.cid + 64 * core.thread
|
||||
|
||||
-- state per dir. [1]=req [2]=res
|
||||
dec.st = {
|
||||
[1] = {
|
||||
hdr = { 0, 0, 0, 0, 0, 0, 0, 0, 0 },
|
||||
fofs = 0,
|
||||
flen = 0,
|
||||
ftyp = 0,
|
||||
fflg = 0,
|
||||
sid = 0,
|
||||
tot = 0,
|
||||
},
|
||||
[2] = {
|
||||
hdr = { 0, 0, 0, 0, 0, 0, 0, 0, 0 },
|
||||
fofs = 0,
|
||||
flen = 0,
|
||||
ftyp = 0,
|
||||
fflg = 0,
|
||||
sid = 0,
|
||||
tot = 0,
|
||||
},
|
||||
}
|
||||
return dec
|
||||
end
|
||||
|
||||
function Dec:start_analyze(txn, chn)
|
||||
if chn:is_resp() then
|
||||
io.write(string.format("[%03x] ", self.cid % 4096) .. res_pfx .. "### res start\n")
|
||||
else
|
||||
io.write(string.format("[%03x] ", self.cid % 4096) .. "### req start\n")
|
||||
end
|
||||
filter.register_data_filter(self, chn)
|
||||
end
|
||||
|
||||
function Dec:end_analyze(txn, chn)
|
||||
if chn:is_resp() then
|
||||
io.write(string.format("[%03x] ", self.cid % 4096) .. res_pfx .. "### res end: " .. self.st[2].tot .. " bytes total\n")
|
||||
else
|
||||
io.write(string.format("[%03x] ", self.cid % 4096) .. "### req end: " ..self.st[1].tot.. " bytes total\n")
|
||||
end
|
||||
end
|
||||
|
||||
function Dec:tcp_payload(txn, chn)
|
||||
local data = { }
|
||||
local dofs = 1
|
||||
local pfx = ""
|
||||
local dir = 1
|
||||
local sofs = 0
|
||||
local ft = ""
|
||||
local ff = ""
|
||||
|
||||
if chn:is_resp() then
|
||||
pfx = res_pfx
|
||||
dir = 2
|
||||
end
|
||||
|
||||
pfx = string.format("[%03x] ", self.cid % 4096) .. pfx
|
||||
|
||||
-- stream offset before processing
|
||||
sofs = self.st[dir].tot
|
||||
|
||||
if (chn:input() > 0) then
|
||||
data = chn:data()
|
||||
self.st[dir].tot = self.st[dir].tot + chn:input()
|
||||
end
|
||||
|
||||
if (chn:input() > 0 and self.do_hex ~= false) then
|
||||
io.write("\n" .. pfx .. "Hex:\n")
|
||||
for i = 1, #data do
|
||||
if ((i & 7) == 1) then io.write(pfx) end
|
||||
io.write(string.format("0x%02x ", data:sub(i, i):byte()))
|
||||
if ((i & 7) == 0 or i == #data) then io.write("\n") end
|
||||
end
|
||||
end
|
||||
|
||||
-- start at byte 1 in the <data> string
|
||||
dofs = 1
|
||||
|
||||
-- the first 24 bytes are expected to be an H2 preface on the request
|
||||
if (dir == 1 and sofs < 24) then
|
||||
-- let's not check it for now
|
||||
local bytes = self.st[dir].tot - sofs
|
||||
if (sofs + self.st[dir].tot >= 24) then
|
||||
-- skip what was missing from the preface
|
||||
dofs = dofs + 24 - sofs
|
||||
sofs = 24
|
||||
io.write(pfx .. "[PREFACE len=24]\n")
|
||||
else
|
||||
-- consume more preface bytes
|
||||
sofs = sofs + self.st[dir].tot
|
||||
return
|
||||
end
|
||||
end
|
||||
|
||||
-- parse contents as long as there are pending data
|
||||
|
||||
while true do
|
||||
-- check if we need to consume data from the current frame
|
||||
-- flen is the number of bytes left before the frame's end.
|
||||
if (self.st[dir].flen > 0) then
|
||||
if dofs > #data then return end -- missing data
|
||||
if (#data - dofs + 1 < self.st[dir].flen) then
|
||||
-- insufficient data
|
||||
self.st[dir].flen = self.st[dir].flen - (#data - dofs + 1)
|
||||
io.write(pfx .. string.format("%32s\n", "... -" .. (#data - dofs + 1) .. " = " .. self.st[dir].flen))
|
||||
dofs = #data + 1
|
||||
return
|
||||
else
|
||||
-- enough data to finish
|
||||
if (dofs == 1) then
|
||||
-- only print a partial size if the frame was interrupted
|
||||
io.write(pfx .. string.format("%32s\n", "... -" .. self.st[dir].flen .. " = 0"))
|
||||
end
|
||||
dofs = dofs + self.st[dir].flen
|
||||
self.st[dir].flen = 0
|
||||
end
|
||||
end
|
||||
|
||||
-- here, flen = 0, we're at the beginning of a new frame --
|
||||
|
||||
-- read possibly missing header bytes until dec.fofs == 9
|
||||
while self.st[dir].fofs < 9 do
|
||||
if dofs > #data then return end -- missing data
|
||||
self.st[dir].hdr[self.st[dir].fofs + 1] = data:sub(dofs, dofs):byte()
|
||||
dofs = dofs + 1
|
||||
self.st[dir].fofs = self.st[dir].fofs + 1
|
||||
end
|
||||
|
||||
-- we have a full frame header here
|
||||
if (self.do_hex ~= false) then
|
||||
io.write("\n" .. pfx .. string.format("hdr=%02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
|
||||
self.st[dir].hdr[1], self.st[dir].hdr[2], self.st[dir].hdr[3],
|
||||
self.st[dir].hdr[4], self.st[dir].hdr[5], self.st[dir].hdr[6],
|
||||
self.st[dir].hdr[7], self.st[dir].hdr[8], self.st[dir].hdr[9]))
|
||||
end
|
||||
|
||||
-- we have a full frame header, we'll be ready
|
||||
-- for a new frame once the data is gone
|
||||
self.st[dir].flen = self.st[dir].hdr[1] * 65536 +
|
||||
self.st[dir].hdr[2] * 256 +
|
||||
self.st[dir].hdr[3]
|
||||
self.st[dir].ftyp = self.st[dir].hdr[4]
|
||||
self.st[dir].fflg = self.st[dir].hdr[5]
|
||||
self.st[dir].sid = self.st[dir].hdr[6] * 16777216 +
|
||||
self.st[dir].hdr[7] * 65536 +
|
||||
self.st[dir].hdr[8] * 256 +
|
||||
self.st[dir].hdr[9]
|
||||
self.st[dir].fofs = 0
|
||||
|
||||
-- decode frame type
|
||||
if self.st[dir].ftyp <= 9 then
|
||||
ft = h2ft[self.st[dir].ftyp]
|
||||
else
|
||||
ft = string.format("TYPE_0x%02x\n", self.st[dir].ftyp)
|
||||
end
|
||||
|
||||
-- decode frame flags for frame type <ftyp>
|
||||
ff = ""
|
||||
for i = 7, 0, -1 do
|
||||
if (((self.st[dir].fflg >> i) & 1) ~= 0) then
|
||||
if self.st[dir].ftyp <= 9 and h2ff[self.st[dir].ftyp][i] ~= nil then
|
||||
ff = ff .. ((ff == "") and "" or "+")
|
||||
ff = ff .. h2ff[self.st[dir].ftyp][i]
|
||||
else
|
||||
ff = ff .. ((ff == "") and "" or "+")
|
||||
ff = ff .. string.format("0x%02x", 1<<i)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
io.write(pfx .. string.format("[%s %ssid=%u len=%u (bytes=%u)]\n",
|
||||
ft, (ff == "") and "" or ff .. " ",
|
||||
self.st[dir].sid, self.st[dir].flen,
|
||||
(#data - dofs + 1)))
|
||||
end
|
||||
end
|
||||
|
||||
core.register_filter("h2-tracer", Dec, function(dec, args)
|
||||
Dec.args = args
|
||||
return dec
|
||||
end)
|
@ -1,5 +1,4 @@
|
||||
#define _GNU_SOURCE
|
||||
#include <errno.h>
|
||||
#include <limits.h>
|
||||
#include <sched.h>
|
||||
#include <stdio.h>
|
||||
@ -12,22 +11,6 @@
|
||||
|
||||
static char prog_full_path[PATH_MAX];
|
||||
|
||||
long sysconf(int name)
|
||||
{
|
||||
if (name == _SC_NPROCESSORS_ONLN ||
|
||||
name == _SC_NPROCESSORS_CONF) {
|
||||
const char *ncpu = getenv("NCPU");
|
||||
int n;
|
||||
|
||||
n = ncpu ? atoi(ncpu) : CPU_SETSIZE;
|
||||
if (n < 0 || n > CPU_SETSIZE)
|
||||
n = CPU_SETSIZE;
|
||||
return n;
|
||||
}
|
||||
errno = EINVAL;
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* return a cpu_set having the first $NCPU set */
|
||||
int sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask)
|
||||
{
|
||||
|
@ -1,70 +0,0 @@
|
||||
BEGININPUT
|
||||
BEGINCONTEXT
|
||||
|
||||
HAProxy's development cycle consists in one development branch, and multiple
|
||||
maintenance branches.
|
||||
|
||||
All the development is made into the development branch exclusively. This
|
||||
includes mostly new features, doc updates, cleanups and or course, fixes.
|
||||
|
||||
The maintenance branches, also called stable branches, never see any
|
||||
development, and only receive ultra-safe fixes for bugs that affect them,
|
||||
that are picked from the development branch.
|
||||
|
||||
Branches are numbered in 0.1 increments. Every 6 months, upon a new major
|
||||
release, the development branch enters maintenance and a new development branch
|
||||
is created with a new, higher version. The current development branch is
|
||||
3.3-dev, and maintenance branches are 3.2 and below.
|
||||
|
||||
Fixes created in the development branch for issues that were introduced in an
|
||||
earlier branch are applied in descending order to each and every version till
|
||||
that branch that introduced the issue: 3.2 first, then 3.1, then 3.0, then 2.9
|
||||
and so on. This operation is called "backporting". A fix for an issue is never
|
||||
backported beyond the branch that introduced the issue. An important point is
|
||||
that the project maintainers really aim at zero regression in maintenance
|
||||
branches, so they're never willing to take any risk backporting patches that
|
||||
are not deemed strictly necessary.
|
||||
|
||||
Fixes consist of patches managed using the Git version control tool and are
|
||||
identified by a Git commit ID and a commit message. For this reason we
|
||||
indistinctly talk about backporting fixes, commits, or patches; all mean the
|
||||
same thing. When mentioning commit IDs, developers always use a short form
|
||||
made of the first 8 characters only, and expect the AI assistant to do the
|
||||
same.
|
||||
|
||||
It seldom happens that some fixes depend on changes that were brought by other
|
||||
patches that were not in some branches and that will need to be backported as
|
||||
well for the fix to work. In this case, such information is explicitly provided
|
||||
in the commit message by the patch's author in natural language.
|
||||
|
||||
Developers are serious and always indicate if a patch needs to be backported.
|
||||
Sometimes they omit the exact target branch, or they will say that the patch is
|
||||
"needed" in some older branch, but it means the same. If a commit message
|
||||
doesn't mention any backport instructions, it means that the commit does not
|
||||
have to be backported. And patches that are not strictly bug fixes nor doc
|
||||
improvements are normally not backported. For example, fixes for design
|
||||
limitations, architectural improvements and performance optimizations are
|
||||
considered too risky for a backport. Finally, all bug fixes are tagged as
|
||||
"BUG" at the beginning of their subject line. Patches that are not tagged as
|
||||
such are not bugs, and must never be backported unless their commit message
|
||||
explicitly requests so.
|
||||
|
||||
ENDCONTEXT
|
||||
|
||||
A developer is reviewing the development branch, trying to spot which commits
|
||||
need to be backported to maintenance branches. This person is already expert
|
||||
on HAProxy and everything related to Git, patch management, and the risks
|
||||
associated with backports, so he doesn't want to be told how to proceed nor to
|
||||
review the contents of the patch.
|
||||
|
||||
The goal for this developer is to get some help from the AI assistant to save
|
||||
some precious time on this tedious review work. In order to do a better job, he
|
||||
needs an accurate summary of the information and instructions found in each
|
||||
commit message. Specifically he needs to figure if the patch fixes a problem
|
||||
affecting an older branch or not, if it needs to be backported, if so to which
|
||||
branches, and if other patches need to be backported along with it.
|
||||
|
||||
The indented text block below after an "id" line and starting with a Subject line
|
||||
is a commit message from the HAProxy development branch that describes a patch
|
||||
applied to that branch, starting with its subject line, please read it carefully.
|
||||
|
@ -1,29 +0,0 @@
|
||||
|
||||
ENDINPUT
|
||||
BEGININSTRUCTION
|
||||
|
||||
You are an AI assistant that follows instruction extremely well. Help as much
|
||||
as you can, responding to a single question using a single response.
|
||||
|
||||
The developer wants to know if he needs to backport the patch above to fix
|
||||
maintenance branches, for which branches, and what possible dependencies might
|
||||
be mentioned in the commit message. Carefully study the commit message and its
|
||||
backporting instructions if any (otherwise it should probably not be backported),
|
||||
then provide a very concise and short summary that will help the developer decide
|
||||
to backport it, or simply to skip it.
|
||||
|
||||
Start by explaining in one or two sentences what you recommend for this one and why.
|
||||
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
|
||||
where X is a single word among:
|
||||
- "yes", if you recommend to backport the patch right now either because
|
||||
it explicitly states this or because it's a fix for a bug that affects
|
||||
a maintenance branch (3.2 or lower);
|
||||
- "wait", if this patch explicitly mentions that it must be backported, but
|
||||
only after waiting some time.
|
||||
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
|
||||
lack of explicit backport instructions, or it's just an improvement);
|
||||
- "uncertain" otherwise for cases not covered above
|
||||
|
||||
ENDINSTRUCTION
|
||||
|
||||
Explanation:
|
@ -362,7 +362,7 @@ option set-process-time <var name>
|
||||
latency added by the SPOE processing for the last handled event or group.
|
||||
|
||||
If several events or groups are processed for the same stream, this value
|
||||
will be overridden.
|
||||
will be overrideen.
|
||||
|
||||
See also: "option set-total-time".
|
||||
|
||||
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -376,9 +376,6 @@ mt_list_lock_prev(elt)
|
||||
Return A elt
|
||||
value: <===>
|
||||
|
||||
mt_list_try_lock_prev(elt)
|
||||
Does the same thing as mt_list_lock_prev(), except if the list is
|
||||
locked already, it returns { NULL, NULL } instead of waiting.
|
||||
|
||||
mt_list_lock_elem(elt)
|
||||
Locks the element only. Both of its pointers are replaced by two locked
|
||||
|
@ -204,14 +204,6 @@ the cache, when this option is set, objects are picked from the cache from the
|
||||
oldest one instead of the freshest one. This way even late memory corruptions
|
||||
have a chance to be detected.
|
||||
|
||||
Another non-destructive approach is to use "-dMbackup". A full copy of the
|
||||
object is made after its end, which eases inspection (e.g. of the parts
|
||||
scratched by the pool_item elements), and a comparison is made upon allocation
|
||||
of that object, just like with "-dMintegrity", causing a crash on mismatch. The
|
||||
initial 4 words corresponding to the list are ignored as well. Note that when
|
||||
both "-dMbackup" and "-dMintegrity" are used, the copy is performed before
|
||||
being scratched, and the comparison is done by "-dMintegrity" only.
|
||||
|
||||
When build option DEBUG_MEMORY_POOLS is set, or the boot-time option "-dMtag"
|
||||
is passed on the executable's command line, pool objects are allocated with
|
||||
one extra pointer compared to the requested size, so that the bytes that follow
|
||||
@ -350,9 +342,7 @@ struct pool_head *create_pool(char *name, uint size, uint flags)
|
||||
"-dMno-merge" is passed on the executable's command line, the pools
|
||||
also need to have the exact same name to be merged. In addition, unless
|
||||
MEM_F_EXACT is set in <flags>, the object size will usually be rounded
|
||||
up to the size of pointers (16 or 32 bytes). MEM_F_UAF may be set on a
|
||||
per-pool basis to enable the UAF detection only for this specific pool,
|
||||
saving the massive overhead of global usage. The name that will appear
|
||||
up to the size of pointers (16 or 32 bytes). The name that will appear
|
||||
in the pool upon merging is the name of the first created pool. The
|
||||
returned pointer is the new (or reused) pool head, or NULL upon error.
|
||||
Pools created this way must be destroyed using pool_destroy().
|
||||
|
@ -21,7 +21,7 @@ falls back to CLOCK_REALTIME. The former is more accurate as it really counts
|
||||
the time spent in the process, while the latter might also account for time
|
||||
stuck on paging in etc.
|
||||
|
||||
Then wdt_ping() is called to arm the timer. It's set to trigger every
|
||||
Then wdt_ping() is called to arm the timer. t's set to trigger every
|
||||
<wdt_warn_blocked_traffic_ns> interval. It is also called by wdt_handler()
|
||||
to reprogram a new wakeup after it has ticked.
|
||||
|
||||
@ -37,18 +37,15 @@ If the thread was not marked as stuck, it's verified that no progress was made
|
||||
for at least one second, in which case the TH_FL_STUCK flag is set. The lack of
|
||||
progress is measured by the distance between the thread's current cpu_time and
|
||||
its prev_cpu_time. If the lack of progress is at least as large as the warning
|
||||
threshold, then the signal is bounced to the faulty thread if it's not the
|
||||
current one. Since this bounce is based on the time spent without update, it
|
||||
already doesn't happen often.
|
||||
threshold and no context switch happened since last call, ha_stuck_warning() is
|
||||
called to emit a warning about that thread. In any case the context switch
|
||||
counter for that thread is updated.
|
||||
|
||||
Once on the faulty thread, two checks are performed:
|
||||
1) if the thread was already marked as stuck, then the thread is considered
|
||||
as definitely stuck, and ha_panic() is called. It will not return.
|
||||
|
||||
2) a check is made to verify if the scheduler is still ticking, by reading
|
||||
and setting a variable that only the scheduler can clear when leaving a
|
||||
task. If the scheduler didn't make any progress, ha_stuck_warning() is
|
||||
called to emit a warning about that thread.
|
||||
If the thread was already marked as stuck, then the thread is considered as
|
||||
definitely stuck. Then ha_panic() is directly called if the thread is the
|
||||
current one, otherwise ha_kill() is used to resend the signal directly to the
|
||||
target thread, which will in turn go through this handler and handle the panic
|
||||
itself.
|
||||
|
||||
Most of the time there's no panic of course, and a wdt_ping() is performed
|
||||
before leaving the handler to reprogram a check for that thread.
|
||||
@ -64,12 +61,12 @@ set TAINTED_WARN_BLOCKED_TRAFFIC.
|
||||
|
||||
ha_panic() uses the current thread's trash buffer to produce the messages, as
|
||||
we don't care about its contents since that thread will never return. However
|
||||
ha_stuck_warning() instead uses a local 8kB buffer in the thread's stack.
|
||||
ha_stuck_warning() instead uses a local 4kB buffer in the thread's stack.
|
||||
ha_panic() will call ha_thread_dump_fill() for each thread, to complete the
|
||||
buffer being filled with each thread's dump messages. ha_stuck_warning() only
|
||||
calls ha_thread_dump_one(), which works on the current thread. In both cases
|
||||
the message is then directly sent to fd #2 (stderr) and ha_thread_dump_done()
|
||||
is called to release the dumped thread.
|
||||
calls the function for the current thread. In both cases the message is then
|
||||
directly sent to fd #2 (stderr) and ha_thread_dump_one() is called to release
|
||||
the dumped thread.
|
||||
|
||||
Both print a few extra messages, but ha_panic() just ends by looping on abort()
|
||||
until the process dies.
|
||||
@ -88,12 +85,6 @@ to point to the target buffer. The thread_dump_buffer has 4 possible values:
|
||||
will keep their own copy of their own dump so that it can be later found in
|
||||
the core file for inspection.
|
||||
|
||||
A copy of the last valid thread_dump_buffer used is kept in last_dump_buffer,
|
||||
for easier post-mortem analysis. This one may be NULL or even invalid, but
|
||||
usually during a panic it will be valid, and may reveal useful hints even if it
|
||||
still contains the dump of the last warning. Usually this will point to a trash
|
||||
buffer or to stack area.
|
||||
|
||||
ha_thread_dump_fill() then either directly calls ha_thread_dump_one() if the
|
||||
target thread is the current thread, or sends the target thread DEBUGSIG
|
||||
(SIGURG) if it's a different thread. This signal is initialized at boot time
|
||||
@ -113,19 +104,13 @@ ha_dump_backtrace() before returning.
|
||||
ha_dump_backtrace() produces a backtrace into a local buffer (100 entries max),
|
||||
then dumps the code bytes nearby the crashing instrution, dumps pointers and
|
||||
tries to resolve function names, and sends all of that into the target buffer.
|
||||
On some architectures (x86_64, arm64), it will also try to detect and decode
|
||||
call instructions and resolve them to called functions.
|
||||
|
||||
3. Improvements
|
||||
---------------
|
||||
|
||||
The symbols resolution is extremely expensive, particularly for the warnings
|
||||
which should be fast. But we need it, it's just unfortunate that it strikes at
|
||||
the wrong moment. At least ha_dump_backtrace() does disable signals while it's
|
||||
resolving, in order to avoid unwanted re-entrance. In addition, the called
|
||||
function resolve_sym_name() uses some locking and refrains from calling the
|
||||
dladdr family of functions in a re-entrant way (in the worst case only well
|
||||
known symbols will be resolved)..
|
||||
the wrong moment.
|
||||
|
||||
In an ideal case, ha_dump_backtrace() would dump the pointers to a local array,
|
||||
which would then later be resolved asynchronously in a tasklet. This can work
|
||||
|
@ -1,7 +1,7 @@
|
||||
-----------------------
|
||||
HAProxy Starter Guide
|
||||
-----------------------
|
||||
version 3.3
|
||||
version 3.2
|
||||
|
||||
|
||||
This document is an introduction to HAProxy for all those who don't know it, as
|
||||
|
@ -893,9 +893,7 @@ Core class
|
||||
|
||||
**context**: init, task, action
|
||||
|
||||
This function returns a new object of a *httpclient* class. An *httpclient*
|
||||
object must be used to process one and only one request. It must never be
|
||||
reused to process several requests.
|
||||
This function returns a new object of a *httpclient* class.
|
||||
|
||||
:returns: A :ref:`httpclient_class` object.
|
||||
|
||||
@ -928,25 +926,12 @@ Core class
|
||||
its work and wants to give back the control to HAProxy without executing the
|
||||
remaining code. It can be seen as a multi-level "return".
|
||||
|
||||
.. js:function:: core.wait([milliseconds])
|
||||
|
||||
**context**: task, action
|
||||
|
||||
Give back the hand at the HAProxy scheduler. Unlike :js:func:`core.yield`
|
||||
the task will not be woken up automatically to resume as fast as possible.
|
||||
Instead, it will wait for an event to wake the task. If milliseconds argument
|
||||
is provided then the Lua excecution will be automatically resumed passed this
|
||||
delay even if no event caused the task to wake itself up.
|
||||
|
||||
:param integer milliseconds: automatic wakeup passed this delay. (optional)
|
||||
|
||||
.. js:function:: core.yield()
|
||||
|
||||
**context**: task, action
|
||||
|
||||
Give back the hand at the HAProxy scheduler. It is used when the LUA
|
||||
processing consumes a lot of processing time. Lua excecution will be resumed
|
||||
automatically (automatic reschedule).
|
||||
processing consumes a lot of processing time.
|
||||
|
||||
.. js:function:: core.parse_addr(address)
|
||||
|
||||
@ -1884,17 +1869,6 @@ Queue class
|
||||
|
||||
Use :js:func:`core.queue` to get a new Queue object.
|
||||
|
||||
.. js:function:: Queue.alarm()
|
||||
|
||||
**context**: task, action, service
|
||||
|
||||
Sets a wakeup alarm on the current Lua context so that when new data
|
||||
becomes available on the Queue, the current Lua context is woken up
|
||||
automatically. It can be combined with :js:func:`core.wait` to wait
|
||||
for Queue events.
|
||||
|
||||
:param class_queue queue: A :ref:`queue_class` to the current queue
|
||||
|
||||
.. js:function:: Queue.size(queue)
|
||||
|
||||
This function returns the number of items within the Queue.
|
||||
@ -2583,9 +2557,7 @@ HTTPClient class
|
||||
.. js:class:: HTTPClient
|
||||
|
||||
The httpclient class allows issue of outbound HTTP requests through a simple
|
||||
API without the knowledge of HAProxy internals. Any instance must be used to
|
||||
process one and only one request. It must never be reused to process several
|
||||
requests.
|
||||
API without the knowledge of HAProxy internals.
|
||||
|
||||
.. js:function:: HTTPClient.get(httpclient, request)
|
||||
.. js:function:: HTTPClient.head(httpclient, request)
|
||||
@ -3492,7 +3464,7 @@ Patref class
|
||||
in case of duplicated entries, only the first matching entry is returned.
|
||||
|
||||
.. Warning::
|
||||
Not meant to be shared between multiple contexts. If multiple contexts
|
||||
Not meant to be shared bewteen multiple contexts. If multiple contexts
|
||||
need to work on the same pattern reference, each context should have
|
||||
its own patref object.
|
||||
|
||||
@ -3522,7 +3494,7 @@ Patref class
|
||||
.. js:function:: Patref.commit(ref)
|
||||
|
||||
Tries to commit pending Patref object updates, that is updates made to the
|
||||
local object will be committed to the underlying pattern reference storage
|
||||
local object will be committed to the underlying patter reference storage
|
||||
in an atomic manner upon success. Upon failure, local pending updates are
|
||||
lost. Upon success, all other pending updates on the pattern reference
|
||||
(e.g.: "prepare" from the cli or from other Patref Lua objects) started
|
||||
@ -3914,31 +3886,16 @@ AppletTCP class
|
||||
:param class_AppletTCP applet: An :ref:`applettcp_class`
|
||||
:returns: a string. The string can be empty if we reach the end of the stream.
|
||||
|
||||
.. js:function:: AppletTCP.receive(applet, [size, [timeout]])
|
||||
.. js:function:: AppletTCP.receive(applet, [size])
|
||||
|
||||
Reads data from the TCP stream, according to the specified read *size*. If the
|
||||
*size* is missing, the function tries to read all the content of the stream
|
||||
until the end. An optional timeout may be specified in milliseconds. In this
|
||||
case the function will return no longer than this delay, with the amount of
|
||||
available data, or nil if there is no data. An empty string is returned if the
|
||||
connection is closed.
|
||||
until the end.
|
||||
|
||||
:param class_AppletTCP applet: An :ref:`applettcp_class`
|
||||
:param integer size: the required read size.
|
||||
:returns: return nil if the timeout has expired and no data was available but
|
||||
can still be received. Otherwise, a string is returned, possibly an empty
|
||||
string if the connection is closed.
|
||||
|
||||
.. js:function:: AppletTCP.try_receive(applet)
|
||||
|
||||
Reads available data from the TCP stream and returns immediately. Returns a
|
||||
string containing read bytes or nil if no bytes are available at that time. An
|
||||
empty string is returned if the connection is closed.
|
||||
|
||||
:param class_AppletTCP applet: An :ref:`applettcp_class`
|
||||
:returns: return nil if no data was available but can still be
|
||||
received. Otherwise, a string is returned, possibly an empty string if the
|
||||
connection is closed.
|
||||
:returns: always return a string, the string can be empty if the connection is
|
||||
closed.
|
||||
|
||||
.. js:function:: AppletTCP.send(appletmsg)
|
||||
|
||||
@ -4615,27 +4572,6 @@ HTTPMessage class
|
||||
data by default.
|
||||
:returns: an integer containing the amount of bytes copied or -1.
|
||||
|
||||
.. js:function:: HTTPMessage.set_body_len(http_msg, length)
|
||||
|
||||
This function changes the expected payload length of the HTTP message
|
||||
**http_msg**. **length** can be an integer value. In that case, a
|
||||
"Content-Length" header is added with the given value. It is also possible to
|
||||
pass the **"chunked"** string instead of an integer value to force the HTTP
|
||||
message to be chunk-encoded. In that case, a "Transfer-Encoding" header is
|
||||
added with the "chunked" value. In both cases, all existing "Content-Length"
|
||||
and "Transfer-Encoding" headers are removed.
|
||||
|
||||
This function should be used in the filter context to be able to alter the
|
||||
payload of the HTTP message. The internal state of the HTTP message is updated
|
||||
accordingly. :js:func:`HTTPMessage.add_header()` or
|
||||
:js:func:`HTTPMessage.set_header()` functions must be used in that case.
|
||||
|
||||
:param class_httpmessage http_msg: The manipulated HTTP message.
|
||||
:param type length: The new payload length to set. It can be an integer or
|
||||
the string "chunked".
|
||||
:returns: true if the payload length was successfully updated, false
|
||||
otherwise.
|
||||
|
||||
.. js:function:: HTTPMessage.set_eom(http_msg)
|
||||
|
||||
This function set the end of message for the HTTP message **http_msg**.
|
||||
|
@ -1,7 +1,7 @@
|
||||
------------------------
|
||||
HAProxy Management Guide
|
||||
------------------------
|
||||
version 3.3
|
||||
version 3.2
|
||||
|
||||
|
||||
This document describes how to start, stop, manage, and troubleshoot HAProxy,
|
||||
@ -192,11 +192,6 @@ list of options is :
|
||||
|
||||
-Ws : master-worker mode with support of `notify` type of systemd service.
|
||||
|
||||
-4 : force DNS resolvers to query and accept IPv4 addresses only ("A"
|
||||
records). This can be used when facing difficulties in certain
|
||||
environments lacking end-to-end dual-stack connectivity. It overrides
|
||||
the global "dns-accept-family" directive and forces it to "ipv4".
|
||||
|
||||
-c : only performs a check of the configuration files and exits before trying
|
||||
to bind. The exit status is zero if everything is OK, or non-zero if an
|
||||
error is encountered. Presence of warnings will be reported if any.
|
||||
@ -325,16 +320,6 @@ list of options is :
|
||||
last released. This works best with "no-merge", "cold-first" and "tag".
|
||||
Enabling this option will slightly increase the CPU usage.
|
||||
|
||||
- backup / no-backup:
|
||||
This option performs a copy of each released object at release time,
|
||||
allowing developers to inspect them. It also performs a comparison at
|
||||
allocation time to detect if anything changed in between, indicating a
|
||||
use-after-free condition. This doubles the memory usage and slightly
|
||||
increases the CPU usage (similar to "integrity"). If combined with
|
||||
"integrity", it still duplicates the contents but doesn't perform the
|
||||
comparison (which is performed by "integrity"). Just like "integrity",
|
||||
it works best with "no-merge", "cold-first" and "tag".
|
||||
|
||||
- no-global / global:
|
||||
Depending on the operating system, a process-wide global memory cache
|
||||
may be enabled if it is estimated that the standard allocator is too
|
||||
@ -409,9 +394,6 @@ list of options is :
|
||||
foreground. It is mainly used during development or during small tests, as
|
||||
Ctrl-C is enough to stop the process. Never use it in an init script.
|
||||
|
||||
-dc : enable CPU affinity debugging. The list of selected and evicted CPUs as
|
||||
well as their topology will be reported before starting.
|
||||
|
||||
-de : disable the use of the "epoll" poller. It is equivalent to the "global"
|
||||
section's keyword "noepoll". It is mostly useful when suspecting a bug
|
||||
related to this poller. On systems supporting epoll, the fallback will
|
||||
@ -1140,9 +1122,9 @@ CSV output format for monitoring tools. The same format is provided on the
|
||||
Unix socket.
|
||||
|
||||
Statistics are regroup in categories labelled as domains, corresponding to the
|
||||
multiple components of HAProxy. There are two domains available: proxy and
|
||||
resolvers. If not specified, the proxy domain is selected. Note that only the
|
||||
proxy statistics are printed on the HTTP page.
|
||||
multiple components of HAProxy. There are two domains available: proxy and dns.
|
||||
If not specified, the proxy domain is selected. Note that only the proxy
|
||||
statistics are printed on the HTTP page.
|
||||
|
||||
9.1. CSV format
|
||||
---------------
|
||||
@ -1521,10 +1503,9 @@ that the terminal is handled by the readline library which supports line
|
||||
editing and history, which is very convenient when issuing repeated commands
|
||||
(eg: watch a counter).
|
||||
|
||||
The socket supports three operation modes :
|
||||
- non-interactive, silent
|
||||
- interactive, silent
|
||||
- interactive with prompt
|
||||
The socket supports two operation modes :
|
||||
- interactive
|
||||
- non-interactive
|
||||
|
||||
The non-interactive mode is the default when socat connects to the socket. In
|
||||
this mode, a single line may be sent. It is processed as a whole, responses are
|
||||
@ -1538,25 +1519,12 @@ example :
|
||||
If a command needs to use a semi-colon or a backslash (eg: in a value), it
|
||||
must be preceded by a backslash ('\').
|
||||
|
||||
The interactive mode allows new commands to be sent after the ones from the
|
||||
previous lines finish. It exists in two variants, one silent, which works like
|
||||
the non-interactive mode except that the socket waits for a new command instead
|
||||
of closing, and one where a prompt is displayed ('>') at the beginning of the
|
||||
line. The interactive mode is preferred for advanced tools while the prompt
|
||||
mode is preferred for humans.
|
||||
|
||||
The mode can be changed using the "prompt" command. By default, it toggles the
|
||||
interactive+prompt modes. Entering "prompt" in interactive mode will switch to
|
||||
prompt mode. The command optionally takes a specific mode among which:
|
||||
|
||||
- "n" : non-interactive mode (single command and quits)
|
||||
- "i" : interactive mode (multiple commands, no prompt)
|
||||
- "p" : prompt mode (multiple commands with a prompt)
|
||||
|
||||
Since the default mode is non-interactive, "prompt" must be used as the first
|
||||
command in order to switch it, otherwise the previous command will cause the
|
||||
connection to be closed. Switching to non-interactive mode will result in the
|
||||
connection to be closed after all the commands of the same line complete.
|
||||
The interactive mode displays a prompt ('>') and waits for commands to be
|
||||
entered on the line, then processes them, and displays the prompt again to wait
|
||||
for a new command. This mode is entered via the "prompt" command which must be
|
||||
sent on the first line in non-interactive mode. The mode is a flip switch, if
|
||||
"prompt" is sent in interactive mode, it is disabled and the connection closes
|
||||
after processing the last command of the same line.
|
||||
|
||||
For this reason, when debugging by hand, it's quite common to start with the
|
||||
"prompt" command :
|
||||
@ -1567,9 +1535,6 @@ For this reason, when debugging by hand, it's quite common to start with the
|
||||
...
|
||||
>
|
||||
|
||||
Interactive tools might prefer starting with "prompt i" to switch to interactive
|
||||
mode without the prompt.
|
||||
|
||||
Optionally the process' uptime may be displayed in the prompt. In order to
|
||||
enable this, the "prompt timed" command will enable the prompt and toggle the
|
||||
displaying of the time. The uptime is displayed in format "d:hh:mm:ss" where
|
||||
@ -1651,29 +1616,6 @@ abort ssl crl-file <crlfile>
|
||||
|
||||
See also "set ssl crl-file" and "commit ssl crl-file".
|
||||
|
||||
acme renew <certificate>
|
||||
Starts an ACME certificate generation task with the given certificate name.
|
||||
The certificate must be linked to an acme section, see section 12.8 "ACME"
|
||||
of the configuration manual. See also "acme status".
|
||||
|
||||
acme status
|
||||
Show the status of every certificates that were configured with ACME.
|
||||
|
||||
This command outputs, separated by a tab:
|
||||
- The name of the certificate configured in haproxy
|
||||
- The acme section used in the configuration
|
||||
- The state of the acme task, either "Running", "Scheduled" or "Stopped"
|
||||
- The UTC expiration date of the certificate in ISO8601 format
|
||||
- The relative expiration time (0d if expired)
|
||||
- The UTC scheduled date of the certificate in ISO8601 format
|
||||
- The relative schedule time (0d if Running)
|
||||
|
||||
Example:
|
||||
$ echo "@1; acme status" | socat /tmp/master.sock - | column -t -s $'\t'
|
||||
# certificate section state expiration date (UTC) expires in scheduled date (UTC) scheduled in
|
||||
ecdsa.pem LE Running 2020-01-18T09:31:12Z 0d 0h00m00s 2020-01-15T21:31:12Z 0d 0h00m00s
|
||||
foobar.pem.rsa LE Scheduled 2025-08-04T11:50:54Z 89d 23h01m13s 2025-07-27T23:50:55Z 82d 11h01m14s
|
||||
|
||||
add acl [@<ver>] <acl> <pattern>
|
||||
Add an entry into the acl <acl>. <acl> is the #<id> or the <name> returned by
|
||||
"show acl". This command does not verify if the entry already exists. Entries
|
||||
@ -1724,9 +1666,8 @@ add server <backend>/<server> [args]*
|
||||
The <server> name must not be already used in the backend. A special
|
||||
restriction is put on the backend which must used a dynamic load-balancing
|
||||
algorithm. A subset of keywords from the server config file statement can be
|
||||
used to configure the server behavior (see "add server help" to list them).
|
||||
Also note that no settings will be reused from an hypothetical
|
||||
'default-server' statement in the same backend.
|
||||
used to configure the server behavior. Also note that no settings will be
|
||||
reused from an hypothetical 'default-server' statement in the same backend.
|
||||
|
||||
Currently a dynamic server is statically initialized with the "none"
|
||||
init-addr method. This means that no resolution will be undertaken if a FQDN
|
||||
@ -1756,10 +1697,78 @@ add server <backend>/<server> [args]*
|
||||
servers. Please refer to the "u-limit" global keyword documentation in this
|
||||
case.
|
||||
|
||||
add server help
|
||||
List the keywords supported for dynamic servers by the current haproxy
|
||||
version. Keyword syntax is similar to the server line from the configuration
|
||||
file, please refer to their individual documentation for details.
|
||||
Here is the list of the currently supported keywords :
|
||||
|
||||
- agent-addr
|
||||
- agent-check
|
||||
- agent-inter
|
||||
- agent-port
|
||||
- agent-send
|
||||
- allow-0rtt
|
||||
- alpn
|
||||
- addr
|
||||
- backup
|
||||
- ca-file
|
||||
- check
|
||||
- check-alpn
|
||||
- check-proto
|
||||
- check-send-proxy
|
||||
- check-sni
|
||||
- check-ssl
|
||||
- check-via-socks4
|
||||
- ciphers
|
||||
- ciphersuites
|
||||
- cookie
|
||||
- crl-file
|
||||
- crt
|
||||
- disabled
|
||||
- downinter
|
||||
- error-limit
|
||||
- fall
|
||||
- fastinter
|
||||
- force-sslv3/tlsv10/tlsv11/tlsv12/tlsv13
|
||||
- id
|
||||
- init-state
|
||||
- inter
|
||||
- maxconn
|
||||
- maxqueue
|
||||
- minconn
|
||||
- no-ssl-reuse
|
||||
- no-sslv3/tlsv10/tlsv11/tlsv12/tlsv13
|
||||
- no-tls-tickets
|
||||
- npn
|
||||
- observe
|
||||
- on-error
|
||||
- on-marked-down
|
||||
- on-marked-up
|
||||
- pool-low-conn
|
||||
- pool-max-conn
|
||||
- pool-purge-delay
|
||||
- port
|
||||
- proto
|
||||
- proxy-v2-options
|
||||
- rise
|
||||
- send-proxy
|
||||
- send-proxy-v2
|
||||
- send-proxy-v2-ssl
|
||||
- send-proxy-v2-ssl-cn
|
||||
- slowstart
|
||||
- sni
|
||||
- source
|
||||
- ssl
|
||||
- ssl-max-ver
|
||||
- ssl-min-ver
|
||||
- tfo
|
||||
- tls-tickets
|
||||
- track
|
||||
- usesrc
|
||||
- verify
|
||||
- verifyhost
|
||||
- weight
|
||||
- ws
|
||||
|
||||
Their syntax is similar to the server line from the configuration file,
|
||||
please refer to their individual documentation for details.
|
||||
|
||||
add ssl ca-file <cafile> <payload>
|
||||
Add a new certificate to a ca-file. This command is useful when you reached
|
||||
@ -1982,9 +1991,9 @@ commit ssl crl-file <crlfile>
|
||||
See also "new ssl crl-file", "set ssl crl-file", "abort ssl crl-file" and
|
||||
"add ssl crt-list".
|
||||
|
||||
debug counters [reset|show|on|off|all|bug|chk|cnt|glt|?]*
|
||||
debug counters [reset|show|all|bug|chk|cnt|glt|?]*
|
||||
List internal counters placed in the code, which may vary depending on some
|
||||
build options. Some of them depend on DEBUG_STRICT, others on DEBUG_COUNTERS.
|
||||
build options. Some of them depend on DEBUG_STRICT, others on DEBUG_GLITCHES.
|
||||
The command takes a combination of multiple arguments, some defining actions
|
||||
and others defining filters:
|
||||
- bug enables listing the counters for BUG_ON() statements
|
||||
@ -1992,8 +2001,6 @@ debug counters [reset|show|on|off|all|bug|chk|cnt|glt|?]*
|
||||
- chk enables listing the counters for CHECK_IF() statements
|
||||
- glt enables listing the counters for COUNT_GLITCH() statements
|
||||
- all enables showing counters that never triggered (value 0)
|
||||
- off action: disables updating of the COUNT_IF() counters
|
||||
- on action: enables updating of the COUNT_IF() counters
|
||||
- reset action: resets all specified counters
|
||||
- show action: shows all specified counters
|
||||
|
||||
@ -2347,27 +2354,15 @@ prepare map <map>
|
||||
committed. Version numbers are unsigned 32-bit values which wrap at the end,
|
||||
so care must be taken when comparing them in an external program.
|
||||
|
||||
prompt [help | n | i | p | timed]*
|
||||
Changes the behavior of the interactive mode and the prompt displayed at the
|
||||
beginning of the line in interactive mode:
|
||||
- "help" : displays the command's usage
|
||||
- "n" : switches to non-interactive mode
|
||||
- "i" : switches to interactive mode
|
||||
- "p" : switches to interactive + prompt mode
|
||||
- "timed" : toggles displaying the time in the prompt
|
||||
|
||||
Without any option, this will cycle through prompt mode then non-interactive
|
||||
mode. In non-interactive mode, the connection is closed after the last
|
||||
command of the current line compltes. In interactive mode, the connection is
|
||||
not closed after a command completes, so that a new one can be entered. In
|
||||
prompt mode, the interactive mode is still in use, and a prompt will appear
|
||||
at the beginning of the line, indicating to the user that the interpreter is
|
||||
waiting for a new command. The prompt consists in a right angle bracket
|
||||
followed by a space "> ".
|
||||
|
||||
The prompt mode is more suited to human users, the interactive mode to
|
||||
advanced scripts, and the non-interactive mode (default) to basic scripts.
|
||||
Note that the non-interactive mode is not available for the master socket.
|
||||
prompt
|
||||
Toggle the prompt at the beginning of the line and enter or leave interactive
|
||||
mode. In interactive mode, the connection is not closed after a command
|
||||
completes. Instead, the prompt will appear again, indicating the user that
|
||||
the interpreter is waiting for a new command. The prompt consists in a right
|
||||
angle bracket followed by a space "> ". This mode is particularly convenient
|
||||
when one wants to periodically check information such as stats or errors.
|
||||
It is also a good idea to enter interactive mode before issuing a "help"
|
||||
command.
|
||||
|
||||
quit
|
||||
Close the connection when in interactive mode.
|
||||
@ -2886,7 +2881,7 @@ show errors [<iid>|<proxy>] [request|response]
|
||||
error was at byte 23. This is the slash ('/') in header name
|
||||
"header/bizarre", which is not a valid HTTP character for a header name.
|
||||
|
||||
show events [<sink>] [-w] [-n] [-0]
|
||||
show events [<sink>] [-w] [-n]
|
||||
With no option, this lists all known event sinks and their types. With an
|
||||
option, it will dump all available events in the designated sink if it is of
|
||||
type buffer. If option "-w" is passed after the sink name, then once the end
|
||||
@ -2895,9 +2890,7 @@ show events [<sink>] [-w] [-n] [-0]
|
||||
be discarded) or by closing the session. Finally, option "-n" is used to
|
||||
directly seek to the end of the buffer, which is often convenient when
|
||||
combined with "-w" to only report new events. For convenience, "-wn" or "-nw"
|
||||
may be used to enable both options at once. By default, all events are
|
||||
delimited by a line feed character ('\n' or 10 or 0x0A). It is possible to
|
||||
change this to the NUL character ('\0' or 0) by passing the "-0" argument.
|
||||
may be used to enable both options at once.
|
||||
|
||||
show fd [-!plcfbsd]* [<fd>]
|
||||
Dump the list of either all open file descriptors or just the one number <fd>
|
||||
@ -3129,7 +3122,7 @@ show peers [dict|-] [<peers section>]
|
||||
table:0x55871b5b46a0 id=stkt update=1 localupdate=0 \
|
||||
commitupdate=0 syncing=0
|
||||
|
||||
show pools [byname|bysize|byusage] [detailed] [match <pfx>] [<nb>]
|
||||
show pools [byname|bysize|byusage] [match <pfx>] [<nb>]
|
||||
Dump the status of internal memory pools. This is useful to track memory
|
||||
usage when suspecting a memory leak for example. It does exactly the same
|
||||
as the SIGQUIT when running in foreground except that it does not flush the
|
||||
@ -3137,12 +3130,10 @@ show pools [byname|bysize|byusage] [detailed] [match <pfx>] [<nb>]
|
||||
sorted by pool name; if "bysize" is specified, it is sorted by item size in
|
||||
reverse order; if "byusage" is specified, it is sorted by total usage in
|
||||
reverse order, and only used entries are shown. It is also possible to limit
|
||||
the output to the <nb> first entries (e.g. when sorting by usage). It is
|
||||
possible to also dump more internal details, including the list of all pools
|
||||
that were merged together, by specifying "detailed". Finally, if "match"
|
||||
followed by a prefix is specified, then only pools whose name starts with
|
||||
this prefix will be shown. The reported total only concerns pools matching
|
||||
the filtering criteria. Example:
|
||||
the output to the <nb> first entries (e.g. when sorting by usage). Finally,
|
||||
if "match" followed by a prefix is specified, then only pools whose name
|
||||
starts with this prefix will be shown. The reported total only concerns pools
|
||||
matching the filtering criteria. Example:
|
||||
|
||||
$ socat - /tmp/haproxy.sock <<< "show pools match quic byusage"
|
||||
Dumping pools usage. Use SIGQUIT to flush them.
|
||||
@ -3207,11 +3198,11 @@ show quic [<format>] [<filter>]
|
||||
|
||||
An optional argument can be specified to control the verbosity. Its value can
|
||||
be interpreted in different way. The first possibility is to used predefined
|
||||
values, "oneline" for the default format, "stream" to list every active
|
||||
streams and "full" to display all information. Alternatively, a list of
|
||||
comma-delimited fields can be specified to restrict output. Currently
|
||||
supported values are "tp", "sock", "pktns", "cc" and "mux". Finally, "help"
|
||||
in the format will instead show a more detailed help message.
|
||||
values, "oneline" for the default format and "full" to display all
|
||||
information. Alternatively, a list of comma-delimited fields can be specified
|
||||
to restrict output. Currently supported values are "tp", "sock", "pktns",
|
||||
"cc" and "mux". Finally, "help" in the format will instead show a more
|
||||
detailed help message.
|
||||
|
||||
The final argument is used to restrict or extend the connection list. By
|
||||
default, connections on closing or draining state are not displayed. Use the
|
||||
@ -3226,29 +3217,7 @@ show servers conn [<backend>]
|
||||
The output consists in a header line showing the fields titles, then one
|
||||
server per line with for each, the backend name and ID, server name and ID,
|
||||
the address, port and a series or values. The number of fields varies
|
||||
depending on thread count. The exact format of the output may vary slightly
|
||||
across versions and depending on the number of threads. One needs to pay
|
||||
attention to the header line to match columns when extracting output values,
|
||||
and to the number of threads as the last columns are per-thread:
|
||||
|
||||
bkname/svname Backend name '/' server name
|
||||
bkid/svid Backend ID '/' server ID
|
||||
addr Server's IP address
|
||||
port Server's port (or zero if none)
|
||||
- Unused field, serves as a visual delimiter
|
||||
purge_delay Interval between connection purges, in milliseconds
|
||||
used_cur Number of connections currently in use
|
||||
used_max Highest value of used_cur since the process started
|
||||
need_est Floating estimate of total needed connections
|
||||
unsafe_nb Number of idle connections considered as "unsafe"
|
||||
safe_nb Number of idle connections considered as "safe"
|
||||
idle_lim Configured maximum number of idle connections
|
||||
idle_cur Total of the per-thread currently idle connections
|
||||
idle_per_thr[NB] Idle conns per thread for each one of the NB threads
|
||||
|
||||
HAProxy will kill a portion of <idle_cur> every <purge_delay> when the total
|
||||
of <idle_cur> + <used_cur> exceeds the estimate <need_est>. This estimate
|
||||
varies based on connection activity.
|
||||
depending on thread count.
|
||||
|
||||
Given the threaded nature of idle connections, it's important to understand
|
||||
that some values may change once read, and that as such, consistency within a
|
||||
@ -3365,52 +3334,41 @@ show sess [<options>*]
|
||||
output reports less entries than really exist because it will dump all
|
||||
existing streams up to the last one that was created before the command was
|
||||
entered; those which die in the mean time will not appear.
|
||||
For supported options, see below.
|
||||
For supported opitons, see below.
|
||||
|
||||
show sess [<id> | all | help] [<options>*]
|
||||
Display a lot of internal information about the matching streams. The command
|
||||
knows two output formats: a short one, which is the default when not asking
|
||||
for a specific stream identifier, and an extended one when listing designated
|
||||
streams. The short format, used by default with "show sess", only dumps one
|
||||
stream per line with a few info, and the stream identifier at the beginning
|
||||
of the line in hexadecimal (it corresponds to the pointer to the stream).
|
||||
show sess [<id> | older <age> | susp | all] [<options>*]
|
||||
Display a lot of internal information about the matching streams. In the
|
||||
first form, only the stream matching the specified stream identifier will
|
||||
be shown. This identifier is the first field at the beginning of the lines in
|
||||
the dumps of "show sess" (it corresponds to the stream pointer). In the
|
||||
second form, only streams older than <age> (in seconds by default) will be
|
||||
shown. Passing "susp" instead will only report entries that are considered as
|
||||
suspicious by the developers based on criteria that may in time or vary along
|
||||
versions. If "all" is used instead, then all streams will be dumped. Dumping
|
||||
many streams can produce a huge output, take a lot of time and be CPU
|
||||
intensive, so it's always better to only dump the minimum needed. Those
|
||||
information are useless to most users but may be used by haproxy developers
|
||||
to troubleshoot a complex bug. The output format is intentionally not
|
||||
documented so that it can freely evolve depending on demands. This output
|
||||
is meant to be interpreted while checking function strm_dump_to_buffer() in
|
||||
src/stream.c to figure the exact meaning of certain fields.
|
||||
|
||||
In the extended form, used by "show sess <id>" or "show sess all", streams
|
||||
are dumped with a huge amount of debugging details over multiple lines
|
||||
(around 20 each), and still start with their identifier. The delimiter
|
||||
between streams here is the identifier at the beginning of the line; extra
|
||||
lines belonging to the same stream start with one or multiple spaces (the
|
||||
stream is dumped indented). Dumping many streams can produce a huge output,
|
||||
take a lot of time and be CPU intensive, so it's always better to only dump
|
||||
the minimum needed. Those information are useless to most users but may be
|
||||
used by HAProxy developers to troubleshoot a complex bug. The exact output
|
||||
format is intentionally not documented so that it can freely evolve depending
|
||||
on requirements, including in stable branches. This output is meant to be
|
||||
interpreted while checking function strm_dump_to_buffer() in src/stream.c to
|
||||
figure the exact meaning of certain fields.
|
||||
It is possible to set some options to customize the dump. Here are the
|
||||
supported options:
|
||||
|
||||
The "help" argument will show the detailed usage of the command instead of
|
||||
dumping streams.
|
||||
- show-uri: Dump the transaction URI, as captured during the request
|
||||
analysis. It is only displayed if it was captured.
|
||||
|
||||
It is possible to set some options to customize the dump or apply some
|
||||
filters. Here are the supported options:
|
||||
- backend <b> only display streams attached to this backend
|
||||
- frontend <f> only display streams attached to this frontend
|
||||
- older <age> only display streams older than <age> seconds
|
||||
- server <b/s> only show streams attached to this backend+server
|
||||
- show-uri dump the transaction URI, as captured during the request
|
||||
analysis. It is only displayed if it was captured.
|
||||
- susp only show streams considered as suspicious by the developers
|
||||
based on criteria that may in time or vary along versions.
|
||||
- help: dump a more detailed help message instead
|
||||
|
||||
show stat [domain <resolvers|proxy>] [{<iid>|<proxy>} <type> <sid>] \
|
||||
[typed|json] [desc] [up|no-maint]
|
||||
Dump statistics. The domain is used to select which statistics to print;
|
||||
resolvers and proxy are available for now. By default, the CSV format is used;
|
||||
you can activate the extended typed output format described in the section
|
||||
above if "typed" is passed after the other arguments; or in JSON if "json" is
|
||||
passed after the other arguments. By passing <id>, <type> and <sid>, it is
|
||||
possible to dump only selected items :
|
||||
Dump statistics. The domain is used to select which statistics to print; dns
|
||||
and proxy are available for now. By default, the CSV format is used; you can
|
||||
activate the extended typed output format described in the section above if
|
||||
"typed" is passed after the other arguments; or in JSON if "json" is passed
|
||||
after the other arguments. By passing <id>, <type> and <sid>, it is possible
|
||||
to dump only selected items :
|
||||
- <iid> is a proxy ID, -1 to dump everything. Alternatively, a proxy name
|
||||
<proxy> may be specified. In this case, this proxy's ID will be used as
|
||||
the ID selector.
|
||||
@ -3817,7 +3775,7 @@ show ssl providers
|
||||
- fips
|
||||
- base
|
||||
|
||||
show ssl sni [-f <frontend>] [-A] [-t <offset>]
|
||||
show ssl sni [-f <frontend>] [-A]
|
||||
Dump every SNI configured for the designated frontend, or all frontends if no
|
||||
frontend was specified. It allows to see what SNI are offered for a frontend,
|
||||
and to identify if a SNI is defined multiple times by multiple certificates for
|
||||
@ -3826,12 +3784,6 @@ show ssl sni [-f <frontend>] [-A] [-t <offset>]
|
||||
The -A option allows to filter the list and only displays the certificates
|
||||
that are past the notAfter date, allowing to show only expired certificates.
|
||||
|
||||
The -t option takes an offset in seconds, or with a time unit (s, m, h, d),
|
||||
which is added to the current time, allowing to check which certificates
|
||||
expired after the offset when combined with -A.
|
||||
For example if you want to check which certificates would be expired in 30d,
|
||||
just do "show ssl sni -A -t 30d".
|
||||
|
||||
Columns are separated by a single \t, allowing to parse it simply.
|
||||
|
||||
The 'Frontend/Bind' column shows the frontend name followed by the bind line
|
||||
@ -3839,7 +3791,7 @@ show ssl sni [-f <frontend>] [-A] [-t <offset>]
|
||||
|
||||
The 'SNI' column shows the SNI, it can be either a CN, a SAN or a filter from a crt-list.
|
||||
The default certificates of a bind line, (which are either declared
|
||||
explicitly by 'default-crt' or is implicitly the first certificate of a bind
|
||||
explicitely by 'default-crt' or is implicitely the first certificate of a bind
|
||||
line when no 'strict-sni' is used) shows the '*' character in the SNI column.
|
||||
|
||||
The 'Negative Filter' column is the list of negative filters associated to a
|
||||
@ -3855,7 +3807,7 @@ show ssl sni [-f <frontend>] [-A] [-t <offset>]
|
||||
leaf certificate.
|
||||
|
||||
Example:
|
||||
$ echo "@1 show ssl sni -A -t 30d" | socat /var/run/haproxy-master.sock - | column -t -s $'\t'
|
||||
$ echo "@1 show ssl sni" | socat /var/run/haproxy-master.sock - | column -t -s $'\t'
|
||||
# Frontend/Bind SNI Negative Filter Type Filename NotAfter NotBefore
|
||||
li1/haproxy.cfg:10021 *.ex.lan !m1.ex.lan rsa example.lan.pem Jun 13 13:37:21 2024 GMT May 14 13:37:21 2024 GMT
|
||||
li1/haproxy.cfg:10021 machine10 - ecdsa machine10.pem.ecdsa Jun 13 13:37:21 2024 GMT May 14 13:37:21 2024 GMT
|
||||
@ -4342,11 +4294,6 @@ Example:
|
||||
master. Leaving processes are only accessible with the PID as relative process
|
||||
number are only usable with the current processes.
|
||||
|
||||
This prefix may be used as a wrapper before a command, indicating that this
|
||||
command and only this one will be sent to the designated process. In this
|
||||
case the full command ends at the end of line or semi-colon like any regular
|
||||
command.
|
||||
|
||||
Examples:
|
||||
|
||||
$ socat /var/run/haproxy-master.sock readline
|
||||
@ -4364,10 +4311,8 @@ Example:
|
||||
$ echo '@!1271 show info; @!1272 show info' | socat /var/run/haproxy-master.sock -
|
||||
[...]
|
||||
|
||||
The prefix may also be use as a standalone command to switch the default execution
|
||||
context to the designated process, indicating that all subsequent commands will all
|
||||
be executed in that process, until a new '@' command changes the execution context
|
||||
again.
|
||||
A prefix could be use as a command, which will send every next commands to
|
||||
the specified process.
|
||||
|
||||
Examples:
|
||||
|
||||
@ -4384,52 +4329,6 @@ Example:
|
||||
$ echo '@1; show info; show stat; @2; show info; show stat' | socat /var/run/haproxy-master.sock -
|
||||
[...]
|
||||
|
||||
Note about limitations: a few rare commands alter a CLI session's state
|
||||
(e.g. "set anon", "set timeout") and may not behave exactly similarly once
|
||||
run from the master CLI due to commands being sent one at a time on their own
|
||||
CLI session. Similarly, a few rare commands ("show events", "wait") actively
|
||||
monitor the CLI for input or closure and are immediately interrupted when the
|
||||
CLI is closed. These commands will not work as expected through the master
|
||||
CLI because the command's input is closed after each command. For such rare
|
||||
casesn the "@@" variant below might be more suited.
|
||||
|
||||
@@<[!]pid> [command...]
|
||||
This prefix or command is very similar to the "@" prefix documented above
|
||||
except that it enters the worker process, delivers the whole command line
|
||||
into it as-is and stays there until the command finishes. Semi-colons are
|
||||
delivered as well, allowing to execute a full pipelined command in a worker
|
||||
process. The connection with the work remains open until the list of commands
|
||||
completes. Any data sent after the commands will be forwarded to the worker
|
||||
process' CLI and may be consumed by the commands being executed and will be
|
||||
lost for the master process' CLI, offering a truly bidirectional connection
|
||||
with the worker process. As such, users of such commands must be very careful
|
||||
to wait for the command's completion before sending new commands to the
|
||||
master CLI.
|
||||
|
||||
Instead of executing a single command, it is also possible to open a fully
|
||||
interactive session on the worker process by not specifying any command
|
||||
(i.e. "@@1" on its own line). This session can be terminated either by
|
||||
closing the connection or by quitting the worker process (using the "quit"
|
||||
command). In this case, the prompt mode of the master socket (interactive,
|
||||
prompt, timed) is propagated into the worker process.
|
||||
|
||||
Examples:
|
||||
# gracefully close connections and delete a server once idle (wait max 10s)
|
||||
$ socat -t 11 /var/run/haproxy-master.sock - <<< \
|
||||
"@@1 disable server app2/srv36; \
|
||||
wait 10000 srv-removable app2/srv36; \
|
||||
del server app2/srv36"
|
||||
|
||||
# forcefully close connections and quickly delete a server
|
||||
$ socat /var/run/haproxy-master.sock - <<< \
|
||||
"@@1 disable server app2/srv36; \
|
||||
shutdown sessions server app2/srv36; \
|
||||
wait 100 srv-removable app2/srv36; \
|
||||
del server app2/srv36"
|
||||
|
||||
# show messages arriving to this ring in real time ("tail -f" equivalent)
|
||||
$ (echo "show events buf0 -w"; read) | socat /var/run/haproxy-master.sock -
|
||||
|
||||
expert-mode [on|off]
|
||||
This command activates the "expert-mode" for every worker accessed from the
|
||||
master CLI. Combined with "mcli-debug-mode" it also activates the command on
|
||||
|
@ -1,14 +0,0 @@
|
||||
global
|
||||
default-path config
|
||||
tune.lua.bool-sample-conversion normal
|
||||
# load all games here
|
||||
lua-load lua/trisdemo.lua
|
||||
|
||||
defaults
|
||||
timeout client 1h
|
||||
|
||||
# map one TCP port to each game
|
||||
.notice 'use "socat TCP-CONNECT:0:7001 STDIO,raw,echo=0" to start playing'
|
||||
frontend trisdemo
|
||||
bind :7001
|
||||
tcp-request content use-service lua.trisdemo
|
@ -1,251 +0,0 @@
|
||||
-- Example game of falling pieces for HAProxy CLI/Applet
|
||||
local board_width = 10
|
||||
local board_height = 20
|
||||
local game_name = "Lua Tris Demo"
|
||||
|
||||
-- Shapes with IDs for color mapping
|
||||
local pieces = {
|
||||
{id = 1, shape = {{1,1,1,1}}}, -- I (Cyan)
|
||||
{id = 2, shape = {{1,1},{1,1}}}, -- O (Yellow)
|
||||
{id = 3, shape = {{0,1,0},{1,1,1}}}, -- T (Purple)
|
||||
{id = 4, shape = {{0,1,1},{1,1,0}}}, -- S (Green)
|
||||
{id = 5, shape = {{1,1,0},{0,1,1}}}, -- Z (Red)
|
||||
{id = 6, shape = {{1,0,0},{1,1,1}}}, -- J (Blue)
|
||||
{id = 7, shape = {{0,0,1},{1,1,1}}} -- L (Orange)
|
||||
}
|
||||
|
||||
-- ANSI escape codes
|
||||
local clear_screen = "\27[2J"
|
||||
local cursor_home = "\27[H"
|
||||
local cursor_hide = "\27[?25l"
|
||||
local cursor_show = "\27[?25h"
|
||||
local reset_color = "\27[0m"
|
||||
|
||||
local color_codes = {
|
||||
[1] = "\27[1;36m", -- I: Cyan
|
||||
[2] = "\27[1;37m", -- O: White
|
||||
[3] = "\27[1;35m", -- T: Purple
|
||||
[4] = "\27[1;32m", -- S: Green
|
||||
[5] = "\27[1;31m", -- Z: Red
|
||||
[6] = "\27[1;34m", -- J: Blue
|
||||
[7] = "\27[1;33m" -- L: Yellow
|
||||
}
|
||||
|
||||
local function init_board()
|
||||
local board = {}
|
||||
for y = 1, board_height do
|
||||
board[y] = {}
|
||||
for x = 1, board_width do
|
||||
board[y][x] = 0 -- 0 for empty, piece ID for placed blocks
|
||||
end
|
||||
end
|
||||
return board
|
||||
end
|
||||
|
||||
local function can_place_piece(board, piece, px, py)
|
||||
for y = 1, #piece do
|
||||
for x = 1, #piece[1] do
|
||||
if piece[y][x] == 1 then
|
||||
local board_x = px + x - 1
|
||||
local board_y = py + y - 1
|
||||
if board_x < 1 or board_x > board_width or board_y > board_height or
|
||||
(board_y >= 1 and board[board_y][board_x] ~= 0) then
|
||||
return false
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
return true
|
||||
end
|
||||
|
||||
local function place_piece(board, piece, piece_id, px, py)
|
||||
for y = 1, #piece do
|
||||
for x = 1, #piece[1] do
|
||||
if piece[y][x] == 1 then
|
||||
local board_x = px + x - 1
|
||||
local board_y = py + y - 1
|
||||
if board_y >= 1 and board_y <= board_height then
|
||||
board[board_y][board_x] = piece_id -- Store piece ID for color
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
local function clear_lines(board)
|
||||
local lines_cleared = 0
|
||||
local y = board_height
|
||||
while y >= 1 do
|
||||
local full = true
|
||||
for x = 1, board_width do
|
||||
if board[y][x] == 0 then
|
||||
full = false
|
||||
break
|
||||
end
|
||||
end
|
||||
if full then
|
||||
table.remove(board, y)
|
||||
table.insert(board, 1, {})
|
||||
for x = 1, board_width do
|
||||
board[1][x] = 0
|
||||
end
|
||||
lines_cleared = lines_cleared + 1
|
||||
else
|
||||
y = y - 1
|
||||
end
|
||||
end
|
||||
return lines_cleared
|
||||
end
|
||||
|
||||
local function rotate_piece(piece, piece_id, px, py, board)
|
||||
local new_piece = {}
|
||||
for x = 1, #piece[1] do
|
||||
new_piece[x] = {}
|
||||
for y = 1, #piece do
|
||||
new_piece[x][#piece + 1 - y] = piece[y][x]
|
||||
end
|
||||
end
|
||||
if can_place_piece(board, new_piece, px, py) then
|
||||
return new_piece
|
||||
end
|
||||
return piece
|
||||
end
|
||||
|
||||
function render(applet, board, piece, piece_id, px, py, score)
|
||||
local output = cursor_home
|
||||
output = output .. game_name .. " - Lines: " .. score .. "\r\n"
|
||||
output = output .. "+" .. string.rep("-", board_width * 2) .. "+\r\n"
|
||||
for y = 1, board_height do
|
||||
output = output .. "|"
|
||||
for x = 1, board_width do
|
||||
local char = " "
|
||||
-- Current piece
|
||||
for py_idx = 1, #piece do
|
||||
for px_idx = 1, #piece[1] do
|
||||
if piece[py_idx][px_idx] == 1 then
|
||||
local board_x = px + px_idx - 1
|
||||
local board_y = py + py_idx - 1
|
||||
if board_x == x and board_y == y then
|
||||
char = color_codes[piece_id] .. "[]" .. reset_color
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
-- Placed blocks
|
||||
if board[y][x] ~= 0 then
|
||||
char = color_codes[board[y][x]] .. "[]" .. reset_color
|
||||
end
|
||||
output = output .. char
|
||||
end
|
||||
output = output .. "|\r\n"
|
||||
end
|
||||
output = output .. "+" .. string.rep("-", board_width * 2) .. "+\r\n"
|
||||
output = output .. "Use arrow keys to move, Up to rotate, q to quit"
|
||||
applet:send(output)
|
||||
end
|
||||
|
||||
function handler(applet)
|
||||
local board = init_board()
|
||||
local piece_idx = math.random(#pieces)
|
||||
local current_piece = pieces[piece_idx].shape
|
||||
local piece_id = pieces[piece_idx].id
|
||||
local piece_x = math.floor(board_width / 2) - math.floor(#current_piece[1] / 2)
|
||||
local piece_y = 1
|
||||
local score = 0
|
||||
local game_over = false
|
||||
local delay = 500
|
||||
|
||||
if not can_place_piece(board, current_piece, piece_x, piece_y) then
|
||||
game_over = true
|
||||
end
|
||||
|
||||
applet:send(cursor_hide)
|
||||
applet:send(clear_screen)
|
||||
|
||||
-- fall the piece by one line every delay
|
||||
local function fall_piece()
|
||||
while not game_over do
|
||||
piece_y = piece_y + 1
|
||||
if not can_place_piece(board, current_piece, piece_x, piece_y) then
|
||||
piece_y = piece_y - 1
|
||||
place_piece(board, current_piece, piece_id, piece_x, piece_y)
|
||||
score = score + clear_lines(board)
|
||||
piece_idx = math.random(#pieces)
|
||||
current_piece = pieces[piece_idx].shape
|
||||
piece_id = pieces[piece_idx].id
|
||||
piece_x = math.floor(board_width / 2) - math.floor(#current_piece[1] / 2)
|
||||
piece_y = 1
|
||||
if not can_place_piece(board, current_piece, piece_x, piece_y) then
|
||||
game_over = true
|
||||
end
|
||||
end
|
||||
core.msleep(delay)
|
||||
end
|
||||
end
|
||||
|
||||
core.register_task(fall_piece)
|
||||
|
||||
local function drop_piece()
|
||||
while can_place_piece(board, current_piece, piece_x, piece_y) do
|
||||
piece_y = piece_y + 1
|
||||
end
|
||||
piece_y = piece_y - 1
|
||||
place_piece(board, current_piece, piece_id, piece_x, piece_y)
|
||||
score = score + clear_lines(board)
|
||||
piece_idx = math.random(#pieces)
|
||||
current_piece = pieces[piece_idx].shape
|
||||
piece_id = pieces[piece_idx].id
|
||||
piece_x = math.floor(board_width / 2) - math.floor(#current_piece[1] / 2)
|
||||
piece_y = 1
|
||||
if not can_place_piece(board, current_piece, piece_x, piece_y) then
|
||||
game_over = true
|
||||
end
|
||||
render(applet, board, current_piece, piece_id, piece_x, piece_y, score)
|
||||
end
|
||||
|
||||
while not game_over do
|
||||
render(applet, board, current_piece, piece_id, piece_x, piece_y, score)
|
||||
|
||||
-- update the delay based on the score: 500 for 0 lines to 100ms for 100 lines.
|
||||
if score >= 100 then
|
||||
delay = 100
|
||||
else
|
||||
delay = 500 - 4*score
|
||||
end
|
||||
|
||||
local input = applet:receive(1, delay)
|
||||
if input then
|
||||
if input == "" or input == "q" then
|
||||
game_over = true
|
||||
elseif input == "\27" then
|
||||
local a = applet:receive(1, delay)
|
||||
if a == "[" then
|
||||
local b = applet:receive(1, delay)
|
||||
if b == "A" then -- Up arrow (rotate clockwise)
|
||||
current_piece = rotate_piece(current_piece, piece_id, piece_x, piece_y, board)
|
||||
elseif b == "B" then -- Down arrow (full drop)
|
||||
drop_piece()
|
||||
elseif b == "C" then -- Right arrow
|
||||
piece_x = piece_x + 1
|
||||
if not can_place_piece(board, current_piece, piece_x, piece_y) then
|
||||
piece_x = piece_x - 1
|
||||
end
|
||||
elseif b == "D" then -- Left arrow
|
||||
piece_x = piece_x - 1
|
||||
if not can_place_piece(board, current_piece, piece_x, piece_y) then
|
||||
piece_x = piece_x + 1
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
applet:send(clear_screen .. cursor_home .. "Game Over! Lines: " .. score .. "\r\n" .. cursor_show)
|
||||
end
|
||||
|
||||
-- works as a TCP applet
|
||||
core.register_service("trisdemo", "tcp", handler)
|
||||
|
||||
-- may also work on the CLI but requires an unbuffered handler
|
||||
core.register_cli({"trisdemo"}, "Play a simple falling pieces game", handler)
|
@ -1,84 +0,0 @@
|
||||
/* SPDX-License-Identifier: LGPL-2.1-or-later */
|
||||
#ifndef _ACME_T_H_
|
||||
#define _ACME_T_H_
|
||||
|
||||
#include <haproxy/istbuf.h>
|
||||
#include <haproxy/openssl-compat.h>
|
||||
|
||||
#define ACME_RETRY 5
|
||||
|
||||
/* acme section configuration */
|
||||
struct acme_cfg {
|
||||
char *filename; /* config filename */
|
||||
int linenum; /* config linenum */
|
||||
char *name; /* section name */
|
||||
char *directory; /* directory URL */
|
||||
char *map; /* storage for tokens + thumbprint */
|
||||
struct {
|
||||
char *contact; /* email associated to account */
|
||||
char *file; /* account key filename */
|
||||
EVP_PKEY *pkey; /* account PKEY */
|
||||
char *thumbprint; /* account PKEY JWS thumbprint */
|
||||
} account;
|
||||
|
||||
struct {
|
||||
int type; /* EVP_PKEY_EC or EVP_PKEY_RSA */
|
||||
int bits; /* bits for RSA */
|
||||
int curves; /* NID of curves */
|
||||
} key;
|
||||
char *challenge; /* HTTP-01, DNS-01, etc */
|
||||
struct acme_cfg *next;
|
||||
};
|
||||
|
||||
enum acme_st {
|
||||
ACME_RESSOURCES = 0,
|
||||
ACME_NEWNONCE,
|
||||
ACME_CHKACCOUNT,
|
||||
ACME_NEWACCOUNT,
|
||||
ACME_NEWORDER,
|
||||
ACME_AUTH,
|
||||
ACME_CHALLENGE,
|
||||
ACME_CHKCHALLENGE,
|
||||
ACME_FINALIZE,
|
||||
ACME_CHKORDER,
|
||||
ACME_CERTIFICATE,
|
||||
ACME_END
|
||||
};
|
||||
|
||||
enum http_st {
|
||||
ACME_HTTP_REQ,
|
||||
ACME_HTTP_RES,
|
||||
};
|
||||
|
||||
struct acme_auth {
|
||||
struct ist auth; /* auth URI */
|
||||
struct ist chall; /* challenge URI */
|
||||
struct ist token; /* token */
|
||||
void *next;
|
||||
};
|
||||
|
||||
/* acme task context */
|
||||
struct acme_ctx {
|
||||
enum acme_st state;
|
||||
enum http_st http_state;
|
||||
int retries;
|
||||
int retryafter;
|
||||
struct httpclient *hc;
|
||||
struct acme_cfg *cfg;
|
||||
struct ckch_store *store;
|
||||
struct {
|
||||
struct ist newNonce;
|
||||
struct ist newAccount;
|
||||
struct ist newOrder;
|
||||
} ressources;
|
||||
struct ist nonce;
|
||||
struct ist kid;
|
||||
struct ist order;
|
||||
struct acme_auth *auths;
|
||||
struct acme_auth *next_auth;
|
||||
X509_REQ *req;
|
||||
struct ist finalize;
|
||||
struct ist certificate;
|
||||
struct mt_list el;
|
||||
};
|
||||
#endif
|
@ -1,9 +0,0 @@
|
||||
/* SPDX-License-Identifier: LGPL-2.1-or-later */
|
||||
#ifndef _ACME_H_
|
||||
#define _ACME_H_
|
||||
|
||||
#include <haproxy/ssl_ckch-t.h>
|
||||
|
||||
int ckch_conf_acme_init(void *value, char *buf, struct ckch_data *d, int cli, const char *filename, int linenum, char **err);
|
||||
|
||||
#endif
|
@ -101,36 +101,29 @@ struct applet {
|
||||
struct appctx {
|
||||
enum obj_type obj_type; /* OBJ_TYPE_APPCTX */
|
||||
/* 3 unused bytes here */
|
||||
unsigned int st0; /* Main applet state. May be used by any applet */
|
||||
unsigned int st1; /* Applet substate. Mau be used by any applet */
|
||||
unsigned int st0; /* CLI state for stats, session state for peers */
|
||||
unsigned int st1; /* prompt/payload (bitwise OR of APPCTX_CLI_ST1_*) for stats, session error for peers */
|
||||
|
||||
unsigned int flags; /* APPCTX_FL_* */
|
||||
struct buffer inbuf;
|
||||
struct buffer outbuf;
|
||||
size_t to_forward;
|
||||
|
||||
struct buffer *chunk; /* used to store unfinished commands */
|
||||
struct applet *applet; /* applet this context refers to */
|
||||
struct session *sess; /* session for frontend applets (NULL for backend applets) */
|
||||
struct sedesc *sedesc; /* stream endpoint descriptor the applet is attached to */
|
||||
|
||||
struct {
|
||||
struct buffer *cmdline; /* used to store unfinished commands */
|
||||
|
||||
int severity_output; /* used within the cli_io_handler to format severity output of informational feedback */
|
||||
int level; /* the level of CLI which can be lowered dynamically */
|
||||
char payload_pat[8]; /* Payload pattern */
|
||||
char *payload; /* Pointer on the payload. NULL if no payload */
|
||||
uint32_t anon_key; /* the key to anonymise with the hash in cli */
|
||||
/* XXX 4 unused bytes here */
|
||||
int (*io_handler)(struct appctx *appctx); /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK */
|
||||
void (*io_release)(struct appctx *appctx); /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK,
|
||||
if the command is terminated or the session released */
|
||||
} cli_ctx; /* context dedicated to the CLI applet */
|
||||
|
||||
struct act_rule *rule; /* rule associated with the applet. */
|
||||
int (*io_handler)(struct appctx *appctx); /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK */
|
||||
void (*io_release)(struct appctx *appctx); /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK,
|
||||
if the command is terminated or the session released */
|
||||
int cli_severity_output; /* used within the cli_io_handler to format severity output of informational feedback */
|
||||
int cli_level; /* the level of CLI which can be lowered dynamically */
|
||||
char cli_payload_pat[8]; /* Payload pattern */
|
||||
uint32_t cli_anon_key; /* the key to anonymise with the hash in cli */
|
||||
struct buffer_wait buffer_wait; /* position in the list of objects waiting for a buffer */
|
||||
struct task *t; /* task associated to the applet */
|
||||
struct freq_ctr call_rate; /* appctx call rate */
|
||||
/* XXX 4 unused bytes here */
|
||||
struct mt_list wait_entry; /* entry in a list of waiters for an event (e.g. ring events) */
|
||||
|
||||
/* The pointer seen by application code is appctx->svcctx. In 2.7 the
|
||||
|
@ -58,7 +58,7 @@ size_t appctx_raw_snd_buf(struct appctx *appctx, struct buffer *buf, size_t coun
|
||||
size_t appctx_snd_buf(struct stconn *sc, struct buffer *buf, size_t count, unsigned int flags);
|
||||
|
||||
int appctx_fastfwd(struct stconn *sc, unsigned int count, unsigned int flags);
|
||||
ssize_t applet_append_line(void *ctx, struct ist v1, struct ist v2, size_t ofs, size_t len, char delim);
|
||||
ssize_t applet_append_line(void *ctx, struct ist v1, struct ist v2, size_t ofs, size_t len);
|
||||
static forceinline void applet_fl_set(struct appctx *appctx, uint on);
|
||||
static forceinline void applet_fl_clr(struct appctx *appctx, uint off);
|
||||
|
||||
@ -146,9 +146,6 @@ static inline void __appctx_free(struct appctx *appctx)
|
||||
#define appctx_wakeup(ctx) \
|
||||
_task_wakeup((ctx)->t, TASK_WOKEN_OTHER, MK_CALLER(WAKEUP_TYPE_APPCTX_WAKEUP, 0, 0))
|
||||
|
||||
#define appctx_schedule(ctx, w) \
|
||||
_task_schedule((ctx)->t, w, MK_CALLER(WAKEUP_TYPE_TASK_SCHEDULE, 0, 0))
|
||||
|
||||
/* returns the stream connector the appctx is attached to, via the sedesc */
|
||||
static inline struct stconn *appctx_sc(const struct appctx *appctx)
|
||||
{
|
||||
@ -282,92 +279,6 @@ static inline void applet_expect_data(struct appctx *appctx)
|
||||
se_fl_clr(appctx->sedesc, SE_FL_EXP_NO_DATA);
|
||||
}
|
||||
|
||||
/* Returns the buffer containing data pushed to the applet by the stream. For
|
||||
* applets using its own buffers it is the appctx input buffer. For legacy
|
||||
* applet, it is the output channel buffer.
|
||||
*/
|
||||
static inline struct buffer *applet_get_inbuf(struct appctx *appctx)
|
||||
{
|
||||
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
|
||||
return &appctx->inbuf;
|
||||
else
|
||||
return sc_ob(appctx_sc(appctx));
|
||||
}
|
||||
|
||||
/* Returns the buffer containing data pushed by the applets to the stream. For
|
||||
* applets using its own buffer it is the appctx output buffer. For legacy
|
||||
* applet, it is the input channel buffer.
|
||||
*/
|
||||
static inline struct buffer *applet_get_outbuf(struct appctx *appctx)
|
||||
{
|
||||
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
|
||||
return &appctx->outbuf;
|
||||
else
|
||||
return sc_ib(appctx_sc(appctx));
|
||||
}
|
||||
|
||||
/* Returns the amount of data in the input buffer (see applet_get_inbuf) */
|
||||
static inline size_t applet_input_data(const struct appctx *appctx)
|
||||
{
|
||||
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
|
||||
return b_data(&appctx->inbuf);
|
||||
else
|
||||
return co_data(sc_oc(appctx_sc(appctx)));
|
||||
}
|
||||
|
||||
/* Skips <len> bytes from the input buffer (see applet_get_inbuf).
|
||||
*
|
||||
* This is useful when data have been read directly from the buffer. It is
|
||||
* illegal to call this function with <len> causing a wrapping at the end of the
|
||||
* buffer. It's the caller's responsibility to ensure that <len> is never larger
|
||||
* than available ouput data.
|
||||
*/
|
||||
static inline void applet_skip_input(struct appctx *appctx, size_t len)
|
||||
{
|
||||
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
|
||||
b_del(&appctx->inbuf, len);
|
||||
else
|
||||
co_skip(sc_oc(appctx_sc(appctx)), len);
|
||||
}
|
||||
|
||||
/* Removes all bytes from the input buffer (see applet_get_inbuf).
|
||||
*/
|
||||
static inline void applet_reset_input(struct appctx *appctx)
|
||||
{
|
||||
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||
b_reset(&appctx->inbuf);
|
||||
applet_fl_clr(appctx, APPCTX_FL_INBLK_FULL);
|
||||
}
|
||||
else
|
||||
co_skip(sc_oc(appctx_sc(appctx)), co_data(sc_oc(appctx_sc(appctx))));
|
||||
}
|
||||
|
||||
/* Returns the amout of space available at the output buffer (see applet_get_outbuf).
|
||||
*/
|
||||
static inline size_t applet_output_room(const struct appctx *appctx)
|
||||
{
|
||||
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
|
||||
return b_room(&appctx->outbuf);
|
||||
else
|
||||
return channel_recv_max(sc_ic(appctx_sc(appctx)));
|
||||
}
|
||||
|
||||
/*Indicates that the applet have more data to deliver and it needs more room in
|
||||
* the output buffer to do so (see applet_get_outbuf).
|
||||
*
|
||||
* For applets using its own buffers, <room_needed> is not used and only
|
||||
* <appctx> flags are updated. For legacy applets, the amount of free space
|
||||
* required must be specified. In this last case, it is the caller
|
||||
* responsibility to be sure <room_needed> is valid.
|
||||
*/
|
||||
static inline void applet_need_room(struct appctx *appctx, size_t room_needed)
|
||||
{
|
||||
if (appctx->flags & APPCTX_FL_INOUT_BUFS)
|
||||
applet_have_more_data(appctx);
|
||||
else
|
||||
sc_need_room(appctx_sc(appctx), room_needed);
|
||||
}
|
||||
|
||||
/* Should only be used via wrappers applet_putchk() / applet_putchk_stress(). */
|
||||
static inline int _applet_putchk(struct appctx *appctx, struct buffer *chunk,
|
||||
int stress)
|
||||
@ -404,10 +315,9 @@ static inline int _applet_putchk(struct appctx *appctx, struct buffer *chunk,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* writes chunk <chunk> into the applet output buffer (see applet_get_outbuf).
|
||||
*
|
||||
* Returns the number of written bytes on success or -1 on error (lake of space,
|
||||
* shutdown, invalid call...)
|
||||
/* writes chunk <chunk> into the input channel of the stream attached to this
|
||||
* appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full error.
|
||||
* See ci_putchk() for the list of return codes.
|
||||
*/
|
||||
static inline int applet_putchk(struct appctx *appctx, struct buffer *chunk)
|
||||
{
|
||||
@ -420,10 +330,9 @@ static inline int applet_putchk_stress(struct appctx *appctx, struct buffer *chu
|
||||
return _applet_putchk(appctx, chunk, 1);
|
||||
}
|
||||
|
||||
/* writes <len> chars from <blk> into the applet output buffer (see applet_get_outbuf).
|
||||
*
|
||||
* Returns the number of written bytes on success or -1 on error (lake of space,
|
||||
* shutdown, invalid call...)
|
||||
/* writes <len> chars from <blk> into the input channel of the stream attached
|
||||
* to this appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full
|
||||
* error. See ci_putblk() for the list of return codes.
|
||||
*/
|
||||
static inline int applet_putblk(struct appctx *appctx, const char *blk, int len)
|
||||
{
|
||||
@ -455,11 +364,10 @@ static inline int applet_putblk(struct appctx *appctx, const char *blk, int len)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* writes chars from <str> up to the trailing zero (excluded) into the applet
|
||||
* output buffer (see applet_get_outbuf).
|
||||
*
|
||||
* Returns the number of written bytes on success or -1 on error (lake of space,
|
||||
* shutdown, invalid call...)
|
||||
/* writes chars from <str> up to the trailing zero (excluded) into the input
|
||||
* channel of the stream attached to this appctx's endpoint, and marks the
|
||||
* SC_FL_NEED_ROOM on a channel full error. See ci_putstr() for the list of
|
||||
* return codes.
|
||||
*/
|
||||
static inline int applet_putstr(struct appctx *appctx, const char *str)
|
||||
{
|
||||
@ -492,10 +400,9 @@ static inline int applet_putstr(struct appctx *appctx, const char *str)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* writes character <chr> into the applet's output buffer (see applet_get_outbuf).
|
||||
*
|
||||
* Returns the number of written bytes on success or -1 on error (lake of space,
|
||||
* shutdown, invalid call...)
|
||||
/* writes character <chr> into the input channel of the stream attached to this
|
||||
* appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full error.
|
||||
* See ci_putchr() for the list of return codes.
|
||||
*/
|
||||
static inline int applet_putchr(struct appctx *appctx, char chr)
|
||||
{
|
||||
@ -528,283 +435,6 @@ static inline int applet_putchr(struct appctx *appctx, char chr)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int applet_may_get(const struct appctx *appctx, size_t len)
|
||||
{
|
||||
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||
if (len > b_data(&appctx->inbuf)) {
|
||||
if (se_fl_test(appctx->sedesc, SE_FL_SHW))
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
else {
|
||||
const struct stconn *sc = appctx_sc(appctx);
|
||||
|
||||
if ((sc->flags & SC_FL_SHUT_DONE) || len > co_data(sc_oc(sc))) {
|
||||
if (sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
/* Gets one char from the applet input buffer (see appet_get_inbuf),
|
||||
*
|
||||
* Return values :
|
||||
* 1 : number of bytes read, equal to requested size.
|
||||
* =0 : not enough data available. <c> is left undefined.
|
||||
* <0 : no more bytes readable because output is shut.
|
||||
*
|
||||
* The status of the corresponding buffer is not changed. The caller must call
|
||||
* applet_skip_input() to update it.
|
||||
*/
|
||||
static inline int applet_getchar(const struct appctx *appctx, char *c)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = applet_may_get(appctx, 1);
|
||||
if (ret <= 0)
|
||||
return ret;
|
||||
*c = ((appctx->flags & APPCTX_FL_INOUT_BUFS)
|
||||
? *(b_head(&appctx->inbuf))
|
||||
: *(co_head(sc_oc(appctx_sc(appctx)))));
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Copies one full block of data from the applet input buffer (see
|
||||
* appet_get_inbuf).
|
||||
*
|
||||
* <len> bytes are capied, starting at the offset <offset>.
|
||||
*
|
||||
* Return values :
|
||||
* >0 : number of bytes read, equal to requested size.
|
||||
* =0 : not enough data available. <blk> is left undefined.
|
||||
* <0 : no more bytes readable because output is shut.
|
||||
*
|
||||
* The status of the corresponding buffer is not changed. The caller must call
|
||||
* applet_skip_input() to update it.
|
||||
*/
|
||||
static inline int applet_getblk(const struct appctx *appctx, char *blk, int len, int offset)
|
||||
{
|
||||
const struct buffer *buf;
|
||||
int ret;
|
||||
|
||||
ret = applet_may_get(appctx, len+offset);
|
||||
if (ret <= 0)
|
||||
return ret;
|
||||
|
||||
buf = ((appctx->flags & APPCTX_FL_INOUT_BUFS)
|
||||
? &appctx->inbuf
|
||||
: sc_ob(appctx_sc(appctx)));
|
||||
return b_getblk(buf, blk, len, offset);
|
||||
}
|
||||
|
||||
/* Gets one text block representing a word from the applet input buffer (see
|
||||
* appet_get_inbuf).
|
||||
*
|
||||
* The separator is waited for as long as some data can still be received and the
|
||||
* destination is not full. Otherwise, the string may be returned as is, without
|
||||
* the separator.
|
||||
*
|
||||
* Return values :
|
||||
* >0 : number of bytes read. Includes the separator if present before len or end.
|
||||
* =0 : no separator before end found. <str> is left undefined.
|
||||
* <0 : no more bytes readable because output is shut.
|
||||
*
|
||||
* The status of the corresponding buffer is not changed. The caller must call
|
||||
* applet_skip_input() to update it.
|
||||
*/
|
||||
static inline int applet_getword(const struct appctx *appctx, char *str, int len, char sep)
|
||||
{
|
||||
const struct buffer *buf;
|
||||
char *p;
|
||||
size_t input, max = len;
|
||||
int ret = 0;
|
||||
|
||||
ret = applet_may_get(appctx, 1);
|
||||
if (ret <= 0)
|
||||
goto out;
|
||||
|
||||
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||
buf = &appctx->inbuf;
|
||||
input = b_data(buf);
|
||||
}
|
||||
else {
|
||||
struct stconn *sc = appctx_sc(appctx);
|
||||
|
||||
buf = sc_ob(sc);
|
||||
input = co_data(sc_oc(sc));
|
||||
}
|
||||
|
||||
if (max > input) {
|
||||
max = input;
|
||||
str[max-1] = 0;
|
||||
}
|
||||
|
||||
p = b_head(buf);
|
||||
|
||||
while (max) {
|
||||
*str++ = *p;
|
||||
ret++;
|
||||
max--;
|
||||
if (*p == sep)
|
||||
goto out;
|
||||
p = b_next(buf, p);
|
||||
}
|
||||
|
||||
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||
if (ret < len && (ret < input || b_room(buf)) &&
|
||||
!se_fl_test(appctx->sedesc, SE_FL_SHW))
|
||||
ret = 0;
|
||||
}
|
||||
else {
|
||||
struct stconn *sc = appctx_sc(appctx);
|
||||
|
||||
if (ret < len && (ret < input || channel_may_recv(sc_oc(sc))) &&
|
||||
!(sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)))
|
||||
ret = 0;
|
||||
}
|
||||
out:
|
||||
if (max)
|
||||
*str = 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Gets one text block representing a line from the applet input buffer (see
|
||||
* appet_get_inbuf).
|
||||
*
|
||||
* The '\n' is waited for as long as some data can still be received and the
|
||||
* destination is not full. Otherwise, the string may be returned as is, without
|
||||
* the '\n'.
|
||||
*
|
||||
* Return values :
|
||||
* >0 : number of bytes read. Includes the \n if present before len or end.
|
||||
* =0 : no '\n' before end found. <str> is left undefined.
|
||||
* <0 : no more bytes readable because output is shut.
|
||||
*
|
||||
* The status of the corresponding buffer is not changed. The caller must call
|
||||
* applet_skip_input() to update it.
|
||||
*/
|
||||
static inline int applet_getline(const struct appctx *appctx, char *str, int len)
|
||||
{
|
||||
return applet_getword(appctx, str, len, '\n');
|
||||
}
|
||||
|
||||
/* Gets one or two blocks of data at once from the applet input buffer (see appet_get_inbuf),
|
||||
*
|
||||
* Data are not copied.
|
||||
*
|
||||
* Return values :
|
||||
* >0 : number of blocks filled (1 or 2). blk1 is always filled before blk2.
|
||||
* =0 : not enough data available. <blk*> are left undefined.
|
||||
* <0 : no more bytes readable because output is shut.
|
||||
*
|
||||
* The status of the corresponding buffer is not changed. The caller must call
|
||||
* applet_skip_input() to update it.
|
||||
*/
|
||||
static inline int applet_getblk_nc(const struct appctx *appctx, const char **blk1, size_t *len1, const char **blk2, size_t *len2)
|
||||
{
|
||||
const struct buffer *buf;
|
||||
size_t max;
|
||||
int ret;
|
||||
|
||||
ret = applet_may_get(appctx, 1);
|
||||
if (ret <= 0)
|
||||
return ret;
|
||||
|
||||
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||
buf = &appctx->inbuf;
|
||||
max = b_data(buf);
|
||||
}
|
||||
else {
|
||||
struct stconn *sc = appctx_sc(appctx);
|
||||
|
||||
buf = sc_ob(sc);
|
||||
max = co_data(sc_oc(sc));
|
||||
}
|
||||
|
||||
return b_getblk_nc(buf, blk1, len1, blk2, len2, 0, max);
|
||||
}
|
||||
|
||||
/* Gets one or two blocks of text representing a word from the applet input
|
||||
* buffer (see appet_get_inbuf).
|
||||
*
|
||||
* Data are not copied. The separator is waited for as long as some data can
|
||||
* still be received and the destination is not full. Otherwise, the string may
|
||||
* be returned as is, without the separator.
|
||||
*
|
||||
* Return values :
|
||||
* >0 : number of bytes read. Includes the separator if present before len or end.
|
||||
* =0 : no separator before end found. <str> is left undefined.
|
||||
* <0 : no more bytes readable because output is shut.
|
||||
*
|
||||
* The status of the corresponding buffer is not changed. The caller must call
|
||||
* applet_skip_input() to update it.
|
||||
*/
|
||||
static inline int applet_getword_nc(const struct appctx *appctx, const char **blk1, size_t *len1, const char **blk2, size_t *len2, char sep)
|
||||
{
|
||||
int ret;
|
||||
size_t l;
|
||||
|
||||
ret = applet_getblk_nc(appctx, blk1, len1, blk2, len2);
|
||||
if (unlikely(ret <= 0))
|
||||
return ret;
|
||||
|
||||
for (l = 0; l < *len1 && (*blk1)[l] != sep; l++);
|
||||
if (l < *len1 && (*blk1)[l] == sep) {
|
||||
*len1 = l + 1;
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (ret >= 2) {
|
||||
for (l = 0; l < *len2 && (*blk2)[l] != sep; l++);
|
||||
if (l < *len2 && (*blk2)[l] == sep) {
|
||||
*len2 = l + 1;
|
||||
return 2;
|
||||
}
|
||||
}
|
||||
|
||||
/* If we have found no LF and the buffer is full or the SC is shut, then
|
||||
* the resulting string is made of the concatenation of the pending
|
||||
* blocks (1 or 2).
|
||||
*/
|
||||
if (appctx->flags & APPCTX_FL_INOUT_BUFS) {
|
||||
if (b_full(&appctx->inbuf) || se_fl_test(appctx->sedesc, SE_FL_SHW))
|
||||
return ret;
|
||||
}
|
||||
else {
|
||||
struct stconn *sc = appctx_sc(appctx);
|
||||
|
||||
if (!channel_may_recv(sc_oc(sc)) || sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* No LF yet and not shut yet */
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* Gets one or two blocks of text representing a line from the applet input
|
||||
* buffer (see appet_get_inbuf).
|
||||
*
|
||||
* Data are not copied. The '\n' is waited for as long as some data can still be
|
||||
* received and the destination is not full. Otherwise, the string may be
|
||||
* returned as is, without the '\n'.
|
||||
*
|
||||
* Return values :
|
||||
* >0 : number of bytes read. Includes the \n if present before len or end.
|
||||
* =0 : no '\n' before end found. <str> is left undefined.
|
||||
* <0 : no more bytes readable because output is shut.
|
||||
*
|
||||
* The status of the corresponding buffer is not changed. The caller must call
|
||||
* applet_skip_input() to update it.
|
||||
*/
|
||||
static inline int applet_getline_nc(const struct appctx *appctx, const char **blk1, size_t *len1, const char **blk2, size_t *len2)
|
||||
{
|
||||
return applet_getword_nc(appctx, blk1, len1, blk2, len2, '\n');
|
||||
}
|
||||
|
||||
#endif /* _HAPROXY_APPLET_H */
|
||||
|
||||
/*
|
||||
|
@ -26,9 +26,9 @@
|
||||
#include <haproxy/compiler.h>
|
||||
|
||||
/* A few notes for the macros and functions here:
|
||||
* - this file is painful to edit, most operations exist in 2 variants,
|
||||
* no-thread, and threads (with gcc>=4.7). Be careful when modifying
|
||||
* it not to break any of them.
|
||||
* - this file is painful to edit, most operations exist in 3 variants,
|
||||
* no-thread, threads with gcc<4.7, threads with gcc>=4.7. Be careful when
|
||||
* modifying it not to break any of them.
|
||||
*
|
||||
* - macros named HA_ATOMIC_* are or use in the general case, they contain the
|
||||
* required memory barriers to guarantee sequential consistency
|
||||
@ -191,10 +191,122 @@
|
||||
|
||||
/* Threads are ENABLED, all atomic ops are made thread-safe. By extension they
|
||||
* can also be used for inter-process synchronization but one must verify that
|
||||
* the code still builds with threads disabled. Code below requires C11 atomics
|
||||
* as present in gcc >= 4.7 or clang.
|
||||
* the code still builds with threads disabled.
|
||||
*/
|
||||
|
||||
#if defined(__GNUC__) && (__GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ < 7) && !defined(__clang__)
|
||||
/* gcc < 4.7 */
|
||||
|
||||
#define HA_ATOMIC_LOAD(val) \
|
||||
({ \
|
||||
typeof((val)) __val_load = (val); \
|
||||
typeof(*(val)) __ret_val = \
|
||||
({ __sync_synchronize(); *(volatile typeof(__val_load))__val_load; }); \
|
||||
__sync_synchronize(); \
|
||||
__ret_val; \
|
||||
})
|
||||
|
||||
#define HA_ATOMIC_STORE(val, new) \
|
||||
({ \
|
||||
typeof((val)) __val_store = (val); \
|
||||
typeof(*(val)) __old_store; \
|
||||
typeof((new)) __new_store = (new); \
|
||||
do { __old_store = *__val_store; \
|
||||
} while (!__sync_bool_compare_and_swap(__val_store, __old_store, __new_store) && __ha_cpu_relax()); \
|
||||
})
|
||||
|
||||
#define HA_ATOMIC_XCHG(val, new) \
|
||||
({ \
|
||||
typeof((val)) __val_xchg = (val); \
|
||||
typeof(*(val)) __old_xchg; \
|
||||
typeof((new)) __new_xchg = (new); \
|
||||
do { __old_xchg = *__val_xchg; \
|
||||
} while (!__sync_bool_compare_and_swap(__val_xchg, __old_xchg, __new_xchg) && __ha_cpu_relax()); \
|
||||
__old_xchg; \
|
||||
})
|
||||
|
||||
#define HA_ATOMIC_AND(val, flags) do { __sync_and_and_fetch(val, flags); } while (0)
|
||||
#define HA_ATOMIC_OR(val, flags) do { __sync_or_and_fetch(val, flags); } while (0)
|
||||
#define HA_ATOMIC_ADD(val, i) do { __sync_add_and_fetch(val, i); } while (0)
|
||||
#define HA_ATOMIC_SUB(val, i) do { __sync_sub_and_fetch(val, i); } while (0)
|
||||
#define HA_ATOMIC_INC(val) do { __sync_add_and_fetch(val, 1); } while (0)
|
||||
#define HA_ATOMIC_DEC(val) do { __sync_sub_and_fetch(val, 1); } while (0)
|
||||
|
||||
#define HA_ATOMIC_AND_FETCH(val, flags) __sync_and_and_fetch(val, flags)
|
||||
#define HA_ATOMIC_OR_FETCH(val, flags) __sync_or_and_fetch(val, flags)
|
||||
#define HA_ATOMIC_ADD_FETCH(val, i) __sync_add_and_fetch(val, i)
|
||||
#define HA_ATOMIC_SUB_FETCH(val, i) __sync_sub_and_fetch(val, i)
|
||||
|
||||
#define HA_ATOMIC_FETCH_AND(val, flags) __sync_fetch_and_and(val, flags)
|
||||
#define HA_ATOMIC_FETCH_OR(val, flags) __sync_fetch_and_or(val, flags)
|
||||
#define HA_ATOMIC_FETCH_ADD(val, i) __sync_fetch_and_add(val, i)
|
||||
#define HA_ATOMIC_FETCH_SUB(val, i) __sync_fetch_and_sub(val, i)
|
||||
|
||||
#define HA_ATOMIC_BTS(val, bit) \
|
||||
({ \
|
||||
typeof(*(val)) __b_bts = (1UL << (bit)); \
|
||||
__sync_fetch_and_or((val), __b_bts) & __b_bts; \
|
||||
})
|
||||
|
||||
#define HA_ATOMIC_BTR(val, bit) \
|
||||
({ \
|
||||
typeof(*(val)) __b_btr = (1UL << (bit)); \
|
||||
__sync_fetch_and_and((val), ~__b_btr) & __b_btr; \
|
||||
})
|
||||
|
||||
/* the CAS is a bit complicated. The older API doesn't support returning the
|
||||
* value and the swap's result at the same time. So here we take what looks
|
||||
* like the safest route, consisting in using the boolean version guaranteeing
|
||||
* that the operation was performed or not, and we snoop a previous value. If
|
||||
* the compare succeeds, we return. If it fails, we return the previous value,
|
||||
* but only if it differs from the expected one. If it's the same it's a race
|
||||
* thus we try again to avoid confusing a possibly sensitive caller.
|
||||
*/
|
||||
#define HA_ATOMIC_CAS(val, old, new) \
|
||||
({ \
|
||||
typeof((val)) __val_cas = (val); \
|
||||
typeof((old)) __oldp_cas = (old); \
|
||||
typeof(*(old)) __oldv_cas; \
|
||||
typeof((new)) __new_cas = (new); \
|
||||
int __ret_cas; \
|
||||
do { \
|
||||
__oldv_cas = *__val_cas; \
|
||||
__ret_cas = __sync_bool_compare_and_swap(__val_cas, *__oldp_cas, __new_cas); \
|
||||
} while (!__ret_cas && *__oldp_cas == __oldv_cas && __ha_cpu_relax()); \
|
||||
if (!__ret_cas) \
|
||||
*__oldp_cas = __oldv_cas; \
|
||||
__ret_cas; \
|
||||
})
|
||||
|
||||
/* warning, n is a pointer to the double value for dwcas */
|
||||
#define HA_ATOMIC_DWCAS(val, o, n) __ha_cas_dw(val, o, n)
|
||||
|
||||
#define HA_ATOMIC_UPDATE_MAX(val, new) \
|
||||
({ \
|
||||
typeof(val) __val = (val); \
|
||||
typeof(*(val)) __old_max = *__val; \
|
||||
typeof(*(val)) __new_max = (new); \
|
||||
\
|
||||
while (__old_max < __new_max && \
|
||||
!HA_ATOMIC_CAS(__val, &__old_max, __new_max) && __ha_cpu_relax()); \
|
||||
*__val; \
|
||||
})
|
||||
|
||||
#define HA_ATOMIC_UPDATE_MIN(val, new) \
|
||||
({ \
|
||||
typeof(val) __val = (val); \
|
||||
typeof(*(val)) __old_min = *__val; \
|
||||
typeof(*(val)) __new_min = (new); \
|
||||
\
|
||||
while (__old_min > __new_min && \
|
||||
!HA_ATOMIC_CAS(__val, &__old_min, __new_min) && __ha_cpu_relax()); \
|
||||
*__val; \
|
||||
})
|
||||
|
||||
#else /* gcc */
|
||||
|
||||
/* gcc >= 4.7 or clang */
|
||||
|
||||
#define HA_ATOMIC_STORE(val, new) __atomic_store_n(val, new, __ATOMIC_RELEASE)
|
||||
#define HA_ATOMIC_LOAD(val) __atomic_load_n(val, __ATOMIC_ACQUIRE)
|
||||
#define HA_ATOMIC_XCHG(val, new) __atomic_exchange_n(val, new, __ATOMIC_ACQ_REL)
|
||||
@ -408,6 +520,8 @@
|
||||
/* warning, n is a pointer to the double value for dwcas */
|
||||
#define _HA_ATOMIC_DWCAS(val, o, n) __ha_cas_dw(val, o, n)
|
||||
|
||||
#endif /* gcc >= 4.7 */
|
||||
|
||||
/* Here come a few architecture-specific double-word CAS and barrier
|
||||
* implementations.
|
||||
*/
|
||||
@ -685,13 +799,12 @@ static __inline int __ha_cas_dw(void *target, void *compare, void *set)
|
||||
|
||||
#else /* unknown / unhandled architecture, fall back to generic barriers */
|
||||
|
||||
#define __ha_barrier_atomic_load() __atomic_thread_fence(__ATOMIC_ACQUIRE)
|
||||
#define __ha_barrier_atomic_store() __atomic_thread_fence(__ATOMIC_RELEASE)
|
||||
#define __ha_barrier_atomic_full() __atomic_thread_fence(__ATOMIC_SEQ_CST)
|
||||
#define __ha_barrier_load() __atomic_thread_fence(__ATOMIC_ACQUIRE)
|
||||
#define __ha_barrier_store() __atomic_thread_fence(__ATOMIC_RELEASE)
|
||||
#define __ha_barrier_full() __atomic_thread_fence(__ATOMIC_SEQ_CST)
|
||||
|
||||
#define __ha_barrier_atomic_load __sync_synchronize
|
||||
#define __ha_barrier_atomic_store __sync_synchronize
|
||||
#define __ha_barrier_atomic_full __sync_synchronize
|
||||
#define __ha_barrier_load __sync_synchronize
|
||||
#define __ha_barrier_store __sync_synchronize
|
||||
#define __ha_barrier_full __sync_synchronize
|
||||
/* Note: there is no generic DWCAS */
|
||||
|
||||
/* short-lived CPU relaxation */
|
||||
|
@ -144,12 +144,6 @@
|
||||
*/
|
||||
#define BE_WEIGHT_SCALE 16
|
||||
|
||||
/* LB parameters for all algorithms, with one instance per thread-group */
|
||||
struct lbprm_per_tgrp {
|
||||
union {
|
||||
struct lb_fwrr_per_tgrp fwrr;
|
||||
};
|
||||
};
|
||||
/* LB parameters for all algorithms */
|
||||
struct lbprm {
|
||||
union { /* LB parameters depending on the algo type */
|
||||
@ -168,15 +162,12 @@ struct lbprm {
|
||||
int wmult; /* ratio between user weight and effective weight */
|
||||
int wdiv; /* ratio between effective weight and user weight */
|
||||
int hash_balance_factor; /* load balancing factor * 100, 0 if disabled */
|
||||
unsigned int lb_free_list_nb; /* Number of elements in the free list */
|
||||
struct sample_expr *expr; /* sample expression for "balance (log-)hash" */
|
||||
char *arg_str; /* name of the URL parameter/header/cookie used for hashing */
|
||||
int arg_len; /* strlen(arg_str), computed only once */
|
||||
int arg_opt1; /* extra option 1 for the LB algo (algo-specific) */
|
||||
int arg_opt2; /* extra option 2 for the LB algo (algo-specific) */
|
||||
int arg_opt3; /* extra option 3 for the LB algo (algo-specific) */
|
||||
uint64_t lb_seq; /* sequence number for algos who need it */
|
||||
struct mt_list lb_free_list; /* LB tree elements available */
|
||||
__decl_thread(HA_RWLOCK_T lock);
|
||||
struct server *fbck; /* first backup server when !PR_O_USE_ALL_BK, or NULL */
|
||||
|
||||
@ -190,8 +181,6 @@ struct lbprm {
|
||||
void (*server_take_conn)(struct server *); /* to be called when connection is assigned */
|
||||
void (*server_drop_conn)(struct server *); /* to be called when connection is dropped */
|
||||
void (*server_requeue)(struct server *); /* function used to place the server where it must be */
|
||||
void (*proxy_deinit)(struct proxy *); /* to be called when we're destroying the proxy */
|
||||
void (*server_deinit)(struct server *); /* to be called when we're destroying the server */
|
||||
};
|
||||
|
||||
#endif /* _HAPROXY_BACKEND_T_H */
|
||||
|
@ -45,16 +45,6 @@ int assign_server_and_queue(struct stream *s);
|
||||
int alloc_bind_address(struct sockaddr_storage **ss,
|
||||
struct server *srv, struct proxy *be,
|
||||
struct stream *s);
|
||||
|
||||
int64_t be_calculate_conn_hash(struct server *srv, struct stream *strm,
|
||||
struct session *sess,
|
||||
struct sockaddr_storage *src,
|
||||
struct sockaddr_storage *dst,
|
||||
struct ist name);
|
||||
int be_reuse_connection(int64_t hash, struct session *sess,
|
||||
struct proxy *be, struct server *srv,
|
||||
struct stconn *sc, enum obj_type *target, int not_first_req);
|
||||
|
||||
int srv_redispatch_connect(struct stream *t);
|
||||
void back_try_conn_req(struct stream *s);
|
||||
void back_handle_st_req(struct stream *s);
|
||||
@ -86,7 +76,7 @@ static inline int be_usable_srv(struct proxy *be)
|
||||
/* set the time of last session on the backend */
|
||||
static inline void be_set_sess_last(struct proxy *be)
|
||||
{
|
||||
HA_ATOMIC_STORE(&be->be_counters.shared->tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
|
||||
be->be_counters.last_sess = ns_to_sec(now_ns);
|
||||
}
|
||||
|
||||
/* This function returns non-zero if the designated server will be
|
||||
|
@ -68,7 +68,7 @@
|
||||
#else // not x86
|
||||
|
||||
/* generic implementation, causes a segfault */
|
||||
static inline __attribute((always_inline,noreturn,unused)) void ha_crash_now(void)
|
||||
static inline __attribute((always_inline)) void ha_crash_now(void)
|
||||
{
|
||||
#if __GNUC_PREREQ__(5, 0)
|
||||
#pragma GCC diagnostic push
|
||||
@ -161,8 +161,6 @@ static __attribute__((noinline,noreturn,unused)) void abort_with_line(uint line)
|
||||
* COUNT_IF() invocation requires a special section ("dbg_cnt") hence a modern
|
||||
* linker.
|
||||
*/
|
||||
extern unsigned int debug_enable_counters;
|
||||
|
||||
#if !defined(USE_OBSOLETE_LINKER)
|
||||
|
||||
/* type of checks that can be verified. We cannot really distinguish between
|
||||
@ -224,44 +222,30 @@ extern __attribute__((__weak__)) struct debug_count __stop_dbg_cnt HA_SECTION_S
|
||||
_HA_ATOMIC_INC(&__dbg_cnt_##_line.count); \
|
||||
} while (0)
|
||||
|
||||
|
||||
/* Matrix for DEBUG_COUNTERS:
|
||||
* 0 : only BUG_ON() and CHECK_IF() are reported (super rare)
|
||||
* 1 : COUNT_GLITCH() are also reported (rare)
|
||||
* COUNT_IF() are also reported only if debug_enable_counters was set
|
||||
* 2 : COUNT_IF() are also reported unless debug_enable_counters was reset
|
||||
*/
|
||||
|
||||
/* Core of the COUNT_IF() macro, checks the condition and counts one hit if
|
||||
* true. It's only enabled at DEBUG_COUNTERS >= 1, and enabled by default if
|
||||
* DEBUG_COUNTERS >= 2.
|
||||
* true.
|
||||
*/
|
||||
# if defined(DEBUG_COUNTERS) && (DEBUG_COUNTERS >= 1)
|
||||
# define _COUNT_IF(cond, file, line, ...) \
|
||||
(unlikely(cond) ? ({ \
|
||||
if (debug_enable_counters) \
|
||||
__DBG_COUNT(cond, file, line, DBG_COUNT_IF, __VA_ARGS__); \
|
||||
1; /* let's return the true condition */ \
|
||||
#define _COUNT_IF(cond, file, line, ...) \
|
||||
(unlikely(cond) ? ({ \
|
||||
__DBG_COUNT(cond, file, line, DBG_COUNT_IF, __VA_ARGS__); \
|
||||
1; /* let's return the true condition */ \
|
||||
}) : 0)
|
||||
# else
|
||||
# define _COUNT_IF(cond, file, line, ...) DISGUISE(unlikely(cond) ? 1 : 0)
|
||||
# endif
|
||||
|
||||
/* DEBUG_COUNTERS enables counting the number of glitches per line of code. The
|
||||
/* DEBUG_GLITCHES enables counting the number of glitches per line of code. The
|
||||
* condition is empty (nothing to write there), except maybe __VA_ARGS at the
|
||||
* end.
|
||||
*/
|
||||
# if !defined(DEBUG_COUNTERS) || (DEBUG_COUNTERS == 0)
|
||||
# if !defined(DEBUG_GLITCHES)
|
||||
# define _COUNT_GLITCH(file, line, ...) do { } while (0)
|
||||
# else
|
||||
# define _COUNT_GLITCH(file, line, ...) do { \
|
||||
__DBG_COUNT(, file, line, DBG_GLITCH, __VA_ARGS__); \
|
||||
} while (0)
|
||||
# endif
|
||||
# endif
|
||||
|
||||
#else /* USE_OBSOLETE_LINKER not defined below */
|
||||
# define __DBG_COUNT(cond, file, line, type, ...) do { } while (0)
|
||||
# define _COUNT_IF(cond, file, line, ...) DISGUISE(unlikely(cond) ? 1 : 0)
|
||||
# define _COUNT_IF(cond, file, line, ...) DISGUISE(cond)
|
||||
# define _COUNT_GLITCH(file, line, ...) do { } while (0)
|
||||
#endif
|
||||
|
||||
|
46
include/haproxy/cbuf-t.h
Normal file
46
include/haproxy/cbuf-t.h
Normal file
@ -0,0 +1,46 @@
|
||||
/*
|
||||
* include/haprox/cbuf-t.h
|
||||
* This file contains definition for circular buffers.
|
||||
*
|
||||
* Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation, version 2.1
|
||||
* exclusively.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef _HAPROXY_CBUF_T_H
|
||||
#define _HAPROXY_CBUF_T_H
|
||||
#ifdef USE_QUIC
|
||||
#ifndef USE_OPENSSL
|
||||
#error "Must define USE_OPENSSL"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <stddef.h>
|
||||
#include <haproxy/list-t.h>
|
||||
|
||||
extern struct pool_head *pool_head_cbuf;
|
||||
|
||||
struct cbuf {
|
||||
/* buffer */
|
||||
unsigned char *buf;
|
||||
/* buffer size */
|
||||
size_t sz;
|
||||
/* Writer index */
|
||||
size_t wr;
|
||||
/* Reader index */
|
||||
size_t rd;
|
||||
};
|
||||
|
||||
#endif /* _HAPROXY_CBUF_T_H */
|
136
include/haproxy/cbuf.h
Normal file
136
include/haproxy/cbuf.h
Normal file
@ -0,0 +1,136 @@
|
||||
/*
|
||||
* include/haprox/cbuf.h
|
||||
* This file contains definitions and prototypes for circular buffers.
|
||||
* Inspired from Linux circular buffers (include/linux/circ_buf.h).
|
||||
*
|
||||
* Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation, version 2.1
|
||||
* exclusively.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef _HAPROXY_CBUF_H
|
||||
#define _HAPROXY_CBUF_H
|
||||
#ifdef USE_QUIC
|
||||
#ifndef USE_OPENSSL
|
||||
#error "Must define USE_OPENSSL"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#include <haproxy/atomic.h>
|
||||
#include <haproxy/list.h>
|
||||
#include <haproxy/cbuf-t.h>
|
||||
|
||||
struct cbuf *cbuf_new(unsigned char *buf, size_t sz);
|
||||
void cbuf_free(struct cbuf *cbuf);
|
||||
|
||||
/* Amount of data between <rd> and <wr> */
|
||||
#define CBUF_DATA(wr, rd, size) (((wr) - (rd)) & ((size) - 1))
|
||||
|
||||
/* Return the writer position in <cbuf>.
|
||||
* To be used only by the writer!
|
||||
*/
|
||||
static inline unsigned char *cb_wr(struct cbuf *cbuf)
|
||||
{
|
||||
return cbuf->buf + cbuf->wr;
|
||||
}
|
||||
|
||||
/* Reset the reader index.
|
||||
* To be used by a reader!
|
||||
*/
|
||||
static inline void cb_rd_reset(struct cbuf *cbuf)
|
||||
{
|
||||
cbuf->rd = 0;
|
||||
}
|
||||
|
||||
/* Reset the writer index.
|
||||
* To be used by a writer!
|
||||
*/
|
||||
static inline void cb_wr_reset(struct cbuf *cbuf)
|
||||
{
|
||||
cbuf->wr = 0;
|
||||
}
|
||||
|
||||
/* Increase <cbuf> circular buffer data by <count>.
|
||||
* To be used by a writer!
|
||||
*/
|
||||
static inline void cb_add(struct cbuf *cbuf, size_t count)
|
||||
{
|
||||
cbuf->wr = (cbuf->wr + count) & (cbuf->sz - 1);
|
||||
}
|
||||
|
||||
/* Return the reader position in <cbuf>.
|
||||
* To be used only by the reader!
|
||||
*/
|
||||
static inline unsigned char *cb_rd(struct cbuf *cbuf)
|
||||
{
|
||||
return cbuf->buf + cbuf->rd;
|
||||
}
|
||||
|
||||
/* Skip <count> byte in <cbuf> circular buffer.
|
||||
* To be used by a reader!
|
||||
*/
|
||||
static inline void cb_del(struct cbuf *cbuf, size_t count)
|
||||
{
|
||||
cbuf->rd = (cbuf->rd + count) & (cbuf->sz - 1);
|
||||
}
|
||||
|
||||
/* Return the amount of data left in <cbuf>.
|
||||
* To be used only by the writer!
|
||||
*/
|
||||
static inline size_t cb_data(struct cbuf *cbuf)
|
||||
{
|
||||
size_t rd;
|
||||
|
||||
rd = HA_ATOMIC_LOAD(&cbuf->rd);
|
||||
return CBUF_DATA(cbuf->wr, rd, cbuf->sz);
|
||||
}
|
||||
|
||||
/* Return the amount of room left in <cbuf> minus 1 to distinguish
|
||||
* the case where the buffer is full from the case where is is empty
|
||||
* To be used only by the write!
|
||||
*/
|
||||
static inline size_t cb_room(struct cbuf *cbuf)
|
||||
{
|
||||
size_t rd;
|
||||
|
||||
rd = HA_ATOMIC_LOAD(&cbuf->rd);
|
||||
return CBUF_DATA(rd, cbuf->wr + 1, cbuf->sz);
|
||||
}
|
||||
|
||||
/* Return the amount of contiguous data left in <cbuf>.
|
||||
* To be used only by the reader!
|
||||
*/
|
||||
static inline size_t cb_contig_data(struct cbuf *cbuf)
|
||||
{
|
||||
size_t end, n;
|
||||
|
||||
end = cbuf->sz - cbuf->rd;
|
||||
n = (HA_ATOMIC_LOAD(&cbuf->wr) + end) & (cbuf->sz - 1);
|
||||
return n < end ? n : end;
|
||||
}
|
||||
|
||||
/* Return the amount of contiguous space left in <cbuf>.
|
||||
* To be used only by the writer!
|
||||
*/
|
||||
static inline size_t cb_contig_space(struct cbuf *cbuf)
|
||||
{
|
||||
size_t end, n;
|
||||
|
||||
end = cbuf->sz - 1 - cbuf->wr;
|
||||
n = (HA_ATOMIC_LOAD(&cbuf->rd) + end) & (cbuf->sz - 1);
|
||||
return n <= end ? n : end + 1;
|
||||
}
|
||||
|
||||
#endif /* _HAPROXY_CBUF_H */
|
@ -23,7 +23,6 @@
|
||||
#define _HAPROXY_CFGPARSE_H
|
||||
|
||||
#include <haproxy/api.h>
|
||||
#include <haproxy/proxy-t.h>
|
||||
|
||||
struct hap_cpuset;
|
||||
struct proxy;
|
||||
@ -39,7 +38,6 @@ struct acl_cond;
|
||||
#define CFG_CRTLIST 5
|
||||
#define CFG_CRTSTORE 6
|
||||
#define CFG_TRACES 7
|
||||
#define CFG_ACME 8
|
||||
|
||||
/* various keyword modifiers */
|
||||
enum kw_mod {
|
||||
@ -111,14 +109,9 @@ extern char *cursection;
|
||||
extern int non_global_section_parsed;
|
||||
|
||||
extern struct proxy *curproxy;
|
||||
extern char initial_cwd[PATH_MAX];
|
||||
|
||||
int cfg_parse_global(const char *file, int linenum, char **args, int inv);
|
||||
int cfg_parse_listen(const char *file, int linenum, char **args, int inv);
|
||||
int cfg_parse_listen_match_option(const char *file, int linenum, int kwm,
|
||||
const struct cfg_opt config_opts[], int *err_code,
|
||||
char **args, int mode, int cap,
|
||||
int *options, int *no_options);
|
||||
int cfg_parse_traces(const char *file, int linenum, char **args, int inv);
|
||||
int cfg_parse_track_sc_num(unsigned int *track_sc_num,
|
||||
const char *arg, const char *end, char **err);
|
||||
|
@ -172,7 +172,6 @@ struct check {
|
||||
char desc[HCHK_DESC_LEN]; /* health check description */
|
||||
signed char use_ssl; /* use SSL for health checks (1: on, 0: server mode, -1: off) */
|
||||
int send_proxy; /* send a PROXY protocol header with checks */
|
||||
int reuse_pool; /* try to reuse idle connections */
|
||||
struct tcpcheck_rules *tcpcheck_rules; /* tcp-check send / expect rules */
|
||||
struct tcpcheck_rule *current_step; /* current step when using tcpcheck */
|
||||
int inter, fastinter, downinter; /* checks: time in milliseconds */
|
||||
@ -188,7 +187,6 @@ struct check {
|
||||
char **envp; /* the environment to use if running a process-based check */
|
||||
struct pid_list *curpid; /* entry in pid_list used for current process-based test, or -1 if not in test */
|
||||
struct sockaddr_storage addr; /* the address to check */
|
||||
char *pool_conn_name; /* conn name used on reuse */
|
||||
char *sni; /* Server name */
|
||||
char *alpn_str; /* ALPN to use for checks */
|
||||
int alpn_len; /* ALPN string length */
|
||||
|
@ -24,7 +24,7 @@
|
||||
|
||||
#include <haproxy/applet-t.h>
|
||||
|
||||
/* Access level for a stats socket (appctx->cli_ctx.level) */
|
||||
/* Access level for a stats socket (appctx->cli_level) */
|
||||
#define ACCESS_LVL_NONE 0x0000
|
||||
#define ACCESS_LVL_USER 0x0001
|
||||
#define ACCESS_LVL_OPER 0x0002
|
||||
@ -41,12 +41,11 @@
|
||||
#define ACCESS_MCLI_SEVERITY_STR 0x0200 /* 'set severity-output string' on master CLI */
|
||||
|
||||
/* flags for appctx->st1 */
|
||||
#define APPCTX_CLI_ST1_PAYLOAD (1 << 0)
|
||||
#define APPCTX_CLI_ST1_NOLF (1 << 1)
|
||||
#define APPCTX_CLI_ST1_LASTCMD (1 << 2)
|
||||
#define APPCTX_CLI_ST1_INTER (1 << 3) /* interactive mode (i.e. don't close after 1st cmd) */
|
||||
#define APPCTX_CLI_ST1_PROMPT (1 << 4) /* display prompt */
|
||||
#define APPCTX_CLI_ST1_TIMED (1 << 5) /* display timer in prompt */
|
||||
#define APPCTX_CLI_ST1_PROMPT (1 << 0)
|
||||
#define APPCTX_CLI_ST1_PAYLOAD (1 << 1)
|
||||
#define APPCTX_CLI_ST1_NOLF (1 << 2)
|
||||
#define APPCTX_CLI_ST1_TIMED (1 << 3)
|
||||
#define APPCTX_CLI_ST1_LASTCMD (1 << 4)
|
||||
|
||||
#define CLI_PREFIX_KW_NB 5
|
||||
#define CLI_MAX_MATCHES 5
|
||||
@ -56,8 +55,8 @@
|
||||
enum {
|
||||
CLI_ST_INIT = 0, /* initial state, must leave to zero ! */
|
||||
CLI_ST_END, /* final state, let's close */
|
||||
CLI_ST_PARSE_CMDLINE, /* wait for a full command line */
|
||||
CLI_ST_PROCESS_CMDLINE, /* process all commands on the command line */
|
||||
CLI_ST_GETREQ, /* wait for a request */
|
||||
CLI_ST_PARSEREQ, /* parse a request */
|
||||
CLI_ST_OUTPUT, /* all states after this one are responses */
|
||||
CLI_ST_PROMPT, /* display the prompt (first output, same code) */
|
||||
CLI_ST_PRINT, /* display const message in cli->msg */
|
||||
|
@ -55,24 +55,6 @@
|
||||
#define ___equals_1(x) ____equals_1(comma_for_one ## x 1)
|
||||
#define __equals_1(x) ___equals_1(x)
|
||||
|
||||
/* same but checks if defined as zero, useful to distinguish between -DFOO and
|
||||
* -DFOO=0.
|
||||
*/
|
||||
#define comma_for_zero0 ,
|
||||
#define _____equals_0(x, y, ...) (y)
|
||||
#define ____equals_0(x, ...) _____equals_0(x, 0)
|
||||
#define ___equals_0(x) ____equals_0(comma_for_zero ## x 1)
|
||||
#define __equals_0(x) ___equals_0(x)
|
||||
|
||||
/* same but checks if defined as empty, useful to distinguish between -DFOO= and
|
||||
* -DFOO=anything.
|
||||
*/
|
||||
#define comma_for_empty ,
|
||||
#define _____def_as_empty(x, y, ...) (y)
|
||||
#define ____def_as_empty(x, ...) _____def_as_empty(x, 0)
|
||||
#define ___def_as_empty(x) ____def_as_empty(comma_for_empty ## x 1)
|
||||
#define __def_as_empty(x) ___def_as_empty(x)
|
||||
|
||||
/* gcc 5 and clang 3 brought __has_attribute(), which is not well documented in
|
||||
* the case of gcc, but is convenient since handled at the preprocessor level.
|
||||
* In both cases it's possible to test for __has_attribute() using ifdef. When
|
||||
@ -301,11 +283,6 @@
|
||||
#define _TOSTR(x) #x
|
||||
#define TOSTR(x) _TOSTR(x)
|
||||
|
||||
/* concatenates the two strings after resolving possible macros */
|
||||
#undef CONCAT // Turns out NetBSD defines it to the same in exec_elf.h
|
||||
#define _CONCAT(a,b) a ## b
|
||||
#define CONCAT(a,b) _CONCAT(a,b)
|
||||
|
||||
/*
|
||||
* Gcc >= 3 provides the ability for the program to give hints to the
|
||||
* compiler about what branch of an if is most likely to be taken. This
|
||||
@ -522,18 +499,6 @@
|
||||
#define __decl_thread(decl)
|
||||
#endif
|
||||
|
||||
/* The __decl_thread_var() statement declares a variable when threads are enabled
|
||||
* or replaces it with an dummy statement to avoid placing a lone semi-colon. The
|
||||
* purpose is to condition the presence of some variables or to the fact that
|
||||
* threads are enabled, without having to enclose them inside an ugly
|
||||
* #ifdef USE_THREAD/#endif clause.
|
||||
*/
|
||||
#ifdef USE_THREAD
|
||||
#define __decl_thread_var(decl) decl
|
||||
#else
|
||||
#define __decl_thread_var(decl) enum { CONCAT(_dummy_var_decl_,__LINE__), }
|
||||
#endif
|
||||
|
||||
/* clang has a __has_feature() macro which reports true/false on a number of
|
||||
* internally supported features. Let's make sure this macro is always defined
|
||||
* and returns zero when not supported.
|
||||
@ -542,15 +507,4 @@
|
||||
#define __has_feature(x) 0
|
||||
#endif
|
||||
|
||||
/* gcc 15 throws warning if fixed-size char array does not contain a terminating
|
||||
* NUL. gcc has an attribute 'nonstring', which allows to suppress this warning
|
||||
* for such array declarations. But it's not the case for clang and other
|
||||
* compilers.
|
||||
*/
|
||||
#if __has_attribute(nonstring)
|
||||
#define __nonstring __attribute__ ((nonstring))
|
||||
#else
|
||||
#define __nonstring
|
||||
#endif
|
||||
|
||||
#endif /* _HAPROXY_COMPILER_H */
|
||||
|
@ -50,8 +50,6 @@ struct comp {
|
||||
struct comp_algo *algo_req; /* Algo to use for request */
|
||||
struct comp_type *types_req; /* Types to be compressed for requests */
|
||||
struct comp_type *types_res; /* Types to be compressed for responses */
|
||||
unsigned int minsize_res; /* Min response body size to be compressed */
|
||||
unsigned int minsize_req; /* Min request body size to be compressed */
|
||||
unsigned int flags;
|
||||
};
|
||||
|
||||
|
@ -332,7 +332,7 @@ enum proto_proxy_side {
|
||||
|
||||
/* ctl command used by mux->ctl() */
|
||||
enum mux_ctl_type {
|
||||
MUX_CTL_STATUS, /* Expects an int as output, sets it to a combination of MUX_CTL_STATUS flags */
|
||||
MUX_CTL_STATUS, /* Expects an int as output, sets it to a combinaison of MUX_CTL_STATUS flags */
|
||||
MUX_CTL_EXIT_STATUS, /* Expects an int as output, sets the mux exist/error/http status, if known or 0 */
|
||||
MUX_CTL_REVERSE_CONN, /* Notify about an active reverse connection accepted. */
|
||||
MUX_CTL_SUBS_RECV, /* Notify the mux it must wait for read events again */
|
||||
|
@ -75,7 +75,7 @@ int conn_send_socks4_proxy_request(struct connection *conn);
|
||||
int conn_recv_socks4_proxy_response(struct connection *conn);
|
||||
|
||||
/* If we delayed the mux creation because we were waiting for the handshake, do it now */
|
||||
int conn_create_mux(struct connection *conn, int *closed_connection);
|
||||
int conn_create_mux(struct connection *conn);
|
||||
int conn_notify_mux(struct connection *conn, int old_flags, int forced_wake);
|
||||
int conn_upgrade_mux_fe(struct connection *conn, void *ctx, struct buffer *buf,
|
||||
struct ist mux_proto, int mode);
|
||||
@ -708,19 +708,6 @@ static inline void conn_set_reverse(struct connection *conn, enum obj_type *targ
|
||||
conn->reverse.target = target;
|
||||
}
|
||||
|
||||
/* Returns idle-ping value for <conn> depending on its proxy side. */
|
||||
static inline int conn_idle_ping(const struct connection *conn)
|
||||
{
|
||||
if (conn_is_back(conn)) {
|
||||
struct server *srv = objt_server(conn->target);
|
||||
return srv ? srv->idle_ping : TICK_ETERNITY;
|
||||
}
|
||||
else {
|
||||
struct session *sess = conn->owner;
|
||||
return sess->listener->bind_conf->idle_ping;
|
||||
}
|
||||
}
|
||||
|
||||
/* Returns the listener instance for connection used for active reverse. */
|
||||
static inline struct listener *conn_active_reverse_listener(const struct connection *conn)
|
||||
{
|
||||
@ -797,7 +784,7 @@ static inline const char *tevt_evts2str(uint32_t evts)
|
||||
if (!evt)
|
||||
continue;
|
||||
|
||||
/* Backend location are displayed in capital letter */
|
||||
/* Backend location are displayed in captial letter */
|
||||
is_back = !!((evt >> 4) & 0x8);
|
||||
switch ((enum term_event_loc)((evt >> 4) & ~0x8)) {
|
||||
case tevt_loc_fd: tevt_evts_str[idx++] = (is_back ? 'F' : 'f'); break;
|
||||
|
@ -25,144 +25,108 @@
|
||||
|
||||
#include <haproxy/freq_ctr-t.h>
|
||||
|
||||
#define COUNTERS_SHARED_F_NONE 0x0000
|
||||
#define COUNTERS_SHARED_F_LOCAL 0x0001 // shared counter struct is actually process-local
|
||||
|
||||
// common to fe_counters_shared and be_counters_shared
|
||||
#define COUNTERS_SHARED \
|
||||
struct { \
|
||||
uint16_t flags; /* COUNTERS_SHARED_F flags */\
|
||||
};
|
||||
#define COUNTERS_SHARED_TG \
|
||||
struct { \
|
||||
unsigned long last_change; /* last time, when the state was changed */\
|
||||
long long srv_aborts; /* aborted responses during DATA phase caused by the server */\
|
||||
long long cli_aborts; /* aborted responses during DATA phase caused by the client */\
|
||||
long long internal_errors; /* internal processing errors */\
|
||||
long long failed_rewrites; /* failed rewrites (warning) */\
|
||||
long long bytes_out; /* number of bytes transferred from the server to the client */\
|
||||
long long bytes_in; /* number of bytes transferred from the client to the server */\
|
||||
long long denied_resp; /* blocked responses because of security concerns */\
|
||||
long long denied_req; /* blocked requests because of security concerns */\
|
||||
long long cum_sess; /* cumulated number of accepted connections */\
|
||||
/* compression counters, index 0 for requests, 1 for responses */\
|
||||
long long comp_in[2]; /* input bytes fed to the compressor */\
|
||||
long long comp_out[2]; /* output bytes emitted by the compressor */\
|
||||
long long comp_byp[2]; /* input bytes that bypassed the compressor (cpu/ram/bw limitation) */\
|
||||
struct freq_ctr sess_per_sec; /* sessions per second on this server */\
|
||||
}
|
||||
|
||||
// for convenience (generic pointer)
|
||||
struct counters_shared {
|
||||
COUNTERS_SHARED;
|
||||
struct {
|
||||
COUNTERS_SHARED_TG;
|
||||
} *tg[MAX_TGROUPS];
|
||||
};
|
||||
|
||||
/* counters used by listeners and frontends */
|
||||
struct fe_counters_shared_tg {
|
||||
COUNTERS_SHARED_TG;
|
||||
|
||||
long long denied_sess; /* denied session requests (tcp-req-sess rules) */
|
||||
long long denied_conn; /* denied connection requests (tcp-req-conn rules) */
|
||||
long long intercepted_req; /* number of monitoring or stats requests intercepted by the frontend */
|
||||
long long cum_conn; /* cumulated number of received connections */
|
||||
struct freq_ctr conn_per_sec; /* received connections per second on the frontend */
|
||||
|
||||
struct freq_ctr req_per_sec; /* HTTP requests per second on the frontend */
|
||||
|
||||
long long cum_sess_ver[3]; /* cumulated number of h1/h2/h3 sessions */
|
||||
union {
|
||||
struct {
|
||||
long long cum_req[4]; /* cumulated number of processed other/h1/h2/h3 requests */
|
||||
long long cache_hits; /* cache hits */
|
||||
long long cache_lookups;/* cache lookups */
|
||||
long long comp_rsp; /* number of compressed responses */
|
||||
long long rsp[6]; /* http response codes */
|
||||
} http;
|
||||
} p; /* protocol-specific stats */
|
||||
|
||||
long long failed_req; /* failed requests (eg: invalid or timeout) */
|
||||
};
|
||||
|
||||
struct fe_counters_shared {
|
||||
COUNTERS_SHARED;
|
||||
struct fe_counters_shared_tg *tg[MAX_TGROUPS];
|
||||
};
|
||||
|
||||
struct fe_counters {
|
||||
struct fe_counters_shared *shared; /* shared counters */
|
||||
unsigned int conn_max; /* max # of active sessions */
|
||||
long long cum_conn; /* cumulated number of received connections */
|
||||
long long cum_sess; /* cumulated number of accepted connections */
|
||||
long long cum_sess_ver[3]; /* cumulated number of h1/h2/h3 sessions */
|
||||
|
||||
unsigned int cps_max; /* maximum of new connections received per second */
|
||||
unsigned int sps_max; /* maximum of new connections accepted per second (sessions) */
|
||||
struct freq_ctr _sess_per_sec; /* sessions per second on this frontend, used to compute sps_max (internal use only) */
|
||||
struct freq_ctr _conn_per_sec; /* connections per second on this frontend, used to compute cps_max (internal use only) */
|
||||
|
||||
long long bytes_in; /* number of bytes transferred from the client to the server */
|
||||
long long bytes_out; /* number of bytes transferred from the server to the client */
|
||||
|
||||
/* compression counters, index 0 for requests, 1 for responses */
|
||||
long long comp_in[2]; /* input bytes fed to the compressor */
|
||||
long long comp_out[2]; /* output bytes emitted by the compressor */
|
||||
long long comp_byp[2]; /* input bytes that bypassed the compressor (cpu/ram/bw limitation) */
|
||||
|
||||
long long denied_req; /* blocked requests because of security concerns */
|
||||
long long denied_resp; /* blocked responses because of security concerns */
|
||||
long long failed_req; /* failed requests (eg: invalid or timeout) */
|
||||
long long denied_conn; /* denied connection requests (tcp-req-conn rules) */
|
||||
long long denied_sess; /* denied session requests (tcp-req-sess rules) */
|
||||
long long failed_rewrites; /* failed rewrites (warning) */
|
||||
long long internal_errors; /* internal processing errors */
|
||||
|
||||
long long cli_aborts; /* aborted responses during DATA phase caused by the client */
|
||||
long long srv_aborts; /* aborted responses during DATA phase caused by the server */
|
||||
long long intercepted_req; /* number of monitoring or stats requests intercepted by the frontend */
|
||||
|
||||
union {
|
||||
struct {
|
||||
unsigned int rps_max; /* maximum of new HTTP requests second observed */
|
||||
struct freq_ctr _req_per_sec; /* HTTP requests per second on the frontend, only used to compute rps_max */
|
||||
} http;
|
||||
} p; /* protocol-specific stats */
|
||||
};
|
||||
|
||||
struct be_counters_shared_tg {
|
||||
COUNTERS_SHARED_TG;
|
||||
|
||||
long long cum_lbconn; /* cumulated number of sessions processed by load balancing (BE only) */
|
||||
|
||||
long long connect; /* number of connection establishment attempts */
|
||||
long long reuse; /* number of connection reuses */
|
||||
unsigned long last_sess; /* last session time */
|
||||
|
||||
long long failed_checks, failed_hana; /* failed health checks and health analyses for servers */
|
||||
long long down_trans; /* up->down transitions */
|
||||
|
||||
union {
|
||||
struct {
|
||||
long long cum_req; /* cumulated number of processed HTTP requests */
|
||||
|
||||
long long cache_hits; /* cache hits */
|
||||
long long cache_lookups;/* cache lookups */
|
||||
long long cum_req[4]; /* cumulated number of processed other/h1/h2/h3 requests */
|
||||
long long comp_rsp; /* number of compressed responses */
|
||||
unsigned int rps_max; /* maximum of new HTTP requests second observed */
|
||||
long long rsp[6]; /* http response codes */
|
||||
|
||||
long long cache_lookups;/* cache lookups */
|
||||
long long cache_hits; /* cache hits */
|
||||
} http;
|
||||
} p; /* protocol-specific stats */
|
||||
|
||||
long long redispatches; /* retried and redispatched connections (BE only) */
|
||||
long long retries; /* retried and redispatched connections (BE only) */
|
||||
long long failed_resp; /* failed responses (BE only) */
|
||||
long long failed_conns; /* failed connect() attempts (BE only) */
|
||||
};
|
||||
struct freq_ctr sess_per_sec; /* sessions per second on this server */
|
||||
struct freq_ctr req_per_sec; /* HTTP requests per second on the frontend */
|
||||
struct freq_ctr conn_per_sec; /* received connections per second on the frontend */
|
||||
|
||||
struct be_counters_shared {
|
||||
COUNTERS_SHARED;
|
||||
struct be_counters_shared_tg *tg[MAX_TGROUPS];
|
||||
unsigned long last_change; /* last time, when the state was changed */
|
||||
};
|
||||
|
||||
/* counters used by servers and backends */
|
||||
struct be_counters {
|
||||
struct be_counters_shared *shared; /* shared counters */
|
||||
unsigned int conn_max; /* max # of active sessions */
|
||||
long long cum_sess; /* cumulated number of accepted connections */
|
||||
long long cum_lbconn; /* cumulated number of sessions processed by load balancing (BE only) */
|
||||
|
||||
unsigned int cps_max; /* maximum of new connections received per second */
|
||||
unsigned int sps_max; /* maximum of new connections accepted per second (sessions) */
|
||||
unsigned int nbpend_max; /* max number of pending connections with no server assigned yet */
|
||||
unsigned int cur_sess_max; /* max number of currently active sessions */
|
||||
|
||||
struct freq_ctr _sess_per_sec; /* sessions per second on this frontend, used to compute sps_max (internal use only) */
|
||||
long long bytes_in; /* number of bytes transferred from the client to the server */
|
||||
long long bytes_out; /* number of bytes transferred from the server to the client */
|
||||
|
||||
/* compression counters, index 0 for requests, 1 for responses */
|
||||
long long comp_in[2]; /* input bytes fed to the compressor */
|
||||
long long comp_out[2]; /* output bytes emitted by the compressor */
|
||||
long long comp_byp[2]; /* input bytes that bypassed the compressor (cpu/ram/bw limitation) */
|
||||
|
||||
long long denied_req; /* blocked requests because of security concerns */
|
||||
long long denied_resp; /* blocked responses because of security concerns */
|
||||
|
||||
long long connect; /* number of connection establishment attempts */
|
||||
long long reuse; /* number of connection reuses */
|
||||
long long failed_conns; /* failed connect() attempts (BE only) */
|
||||
long long failed_resp; /* failed responses (BE only) */
|
||||
long long cli_aborts; /* aborted responses during DATA phase caused by the client */
|
||||
long long srv_aborts; /* aborted responses during DATA phase caused by the server */
|
||||
long long retries; /* retried and redispatched connections (BE only) */
|
||||
long long redispatches; /* retried and redispatched connections (BE only) */
|
||||
long long failed_rewrites; /* failed rewrites (warning) */
|
||||
long long internal_errors; /* internal processing errors */
|
||||
|
||||
long long failed_checks, failed_hana; /* failed health checks and health analyses for servers */
|
||||
long long down_trans; /* up->down transitions */
|
||||
|
||||
unsigned int q_time, c_time, d_time, t_time; /* sums of conn_time, queue_time, data_time, total_time */
|
||||
unsigned int qtime_max, ctime_max, dtime_max, ttime_max; /* maximum of conn_time, queue_time, data_time, total_time observed */
|
||||
|
||||
union {
|
||||
struct {
|
||||
long long cum_req; /* cumulated number of processed HTTP requests */
|
||||
long long comp_rsp; /* number of compressed responses */
|
||||
unsigned int rps_max; /* maximum of new HTTP requests second observed */
|
||||
long long rsp[6]; /* http response codes */
|
||||
long long cache_lookups;/* cache lookups */
|
||||
long long cache_hits; /* cache hits */
|
||||
} http;
|
||||
} p; /* protocol-specific stats */
|
||||
|
||||
struct freq_ctr sess_per_sec; /* sessions per second on this server */
|
||||
|
||||
unsigned long last_sess; /* last session time */
|
||||
unsigned long last_change; /* last time, when the state was changed */
|
||||
};
|
||||
|
||||
#endif /* _HAPROXY_COUNTERS_T_H */
|
||||
|
@ -1,102 +0,0 @@
|
||||
/*
|
||||
* include/haproxy/counters.h
|
||||
* objects counters management
|
||||
*
|
||||
* Copyright 2025 HAProxy Technologies
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation, version 2.1
|
||||
* exclusively.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef _HAPROXY_COUNTERS_H
|
||||
# define _HAPROXY_COUNTERS_H
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#include <haproxy/counters-t.h>
|
||||
#include <haproxy/guid-t.h>
|
||||
|
||||
struct fe_counters_shared *counters_fe_shared_get(const struct guid_node *guid);
|
||||
struct be_counters_shared *counters_be_shared_get(const struct guid_node *guid);
|
||||
|
||||
void counters_fe_shared_drop(struct fe_counters_shared *counters);
|
||||
void counters_be_shared_drop(struct be_counters_shared *counters);
|
||||
|
||||
/* time oriented helper: get last time (relative to current time) on a given
|
||||
* <scounter> array, for <elem> member (one member per thread group) which is
|
||||
* assumed to be unsigned long type.
|
||||
*
|
||||
* wrapping is handled by taking the lowest diff between now and last counter.
|
||||
* But since wrapping is expected once every ~136 years (starting 01/01/1970),
|
||||
* perhaps it's not worth the extra CPU cost.. let's see.
|
||||
*/
|
||||
#define COUNTERS_SHARED_LAST_OFFSET(scounters, type, offset) \
|
||||
({ \
|
||||
unsigned long last = HA_ATOMIC_LOAD((type *)((char *)scounters[0] + offset));\
|
||||
unsigned long now_seconds = ns_to_sec(now_ns); \
|
||||
int it; \
|
||||
\
|
||||
for (it = 1; it < global.nbtgroups; it++) { \
|
||||
unsigned long cur = HA_ATOMIC_LOAD((type *)((char *)scounters[it] + offset));\
|
||||
if ((now_seconds - cur) < (now_seconds - last)) \
|
||||
last = cur; \
|
||||
} \
|
||||
last; \
|
||||
})
|
||||
|
||||
#define COUNTERS_SHARED_LAST(scounters, elem) \
|
||||
({ \
|
||||
int offset = offsetof(typeof(**scounters), elem); \
|
||||
unsigned long last = COUNTERS_SHARED_LAST_OFFSET(scounters, typeof(scounters[0]->elem), offset); \
|
||||
\
|
||||
last; \
|
||||
})
|
||||
|
||||
|
||||
/* generic unsigned integer addition for all <elem> members from
|
||||
* <scounters> array (one member per thread group)
|
||||
* <rfunc> is function taking pointer as parameter to read from the memory
|
||||
* location pointed to scounters[it].elem
|
||||
*/
|
||||
#define COUNTERS_SHARED_TOTAL_OFFSET(scounters, type, offset, rfunc) \
|
||||
({ \
|
||||
uint64_t __ret = 0; \
|
||||
int it; \
|
||||
\
|
||||
for (it = 0; it < global.nbtgroups; it++) \
|
||||
__ret += rfunc((type *)((char *)scounters[it] + offset)); \
|
||||
__ret; \
|
||||
})
|
||||
|
||||
#define COUNTERS_SHARED_TOTAL(scounters, elem, rfunc) \
|
||||
({ \
|
||||
int offset = offsetof(typeof(**scounters), elem); \
|
||||
uint64_t __ret = COUNTERS_SHARED_TOTAL_OFFSET(scounters, typeof(scounters[0]->elem), offset, rfunc);\
|
||||
\
|
||||
__ret; \
|
||||
})
|
||||
/* same as COUNTERS_SHARED_TOTAL but with <rfunc> taking 2 extras arguments:
|
||||
* <arg1> and <arg2>
|
||||
*/
|
||||
#define COUNTERS_SHARED_TOTAL_ARG2(scounters, elem, rfunc, arg1, arg2) \
|
||||
({ \
|
||||
uint64_t __ret = 0; \
|
||||
int it; \
|
||||
\
|
||||
for (it = 0; it < global.nbtgroups; it++) \
|
||||
__ret += rfunc(&scounters[it]->elem, arg1, arg2); \
|
||||
__ret; \
|
||||
})
|
||||
|
||||
#endif /* _HAPROXY_COUNTERS_H */
|
@ -1,68 +0,0 @@
|
||||
#ifndef _HAPROXY_CPU_TOPO_T_H
|
||||
#define _HAPROXY_CPU_TOPO_T_H
|
||||
|
||||
#include <haproxy/api-t.h>
|
||||
#include <haproxy/cpuset-t.h>
|
||||
|
||||
/* CPU state flags used with CPU topology detection (ha_cpu_topo.st). We try
|
||||
* hard to rely on known info. For example we don't claim a CPU is bound or
|
||||
* online if we don't know, reason why instead we store offline or excluded.
|
||||
* Other flags like DONT_USE indicate a user's choice while IGNORED indicates
|
||||
* the result of an automated selection. Two marks are available for allocation
|
||||
* algorithms to temporarily compare/select/evict CPUs. These must be cleared
|
||||
* after use.
|
||||
*/
|
||||
#define HA_CPU_F_EXCLUDED 0x0001 // this CPU was excluded at boot
|
||||
#define HA_CPU_F_OFFLINE 0x0002 // this CPU is known to be offline
|
||||
#define HA_CPU_F_DONT_USE 0x0004 // this CPU must not be used
|
||||
#define HA_CPU_F_IGNORED 0x0008 // this CPU will not be used
|
||||
#define HA_CPU_F_EXCL_MASK 0x000F // mask of bits that exclude a CPU
|
||||
#define HA_CPU_F_MARK1 0x0010 // for temporary internal use only
|
||||
#define HA_CPU_F_MARK2 0x0020 // for temporary internal use only
|
||||
#define HA_CPU_F_MARK_MASK 0x0030 // mask to drop the two marks above
|
||||
|
||||
/* CPU topology descriptor. All the ID and IDX fields are initialized to -1
|
||||
* when not known. The identifiers there are mostly assigned on the fly using
|
||||
* increments and have no particular representation except the fact that CPUs
|
||||
* having the same ID there share the same designated resource. The flags are
|
||||
* preset to zero.
|
||||
*/
|
||||
struct ha_cpu_topo {
|
||||
ushort st; // state flags (HA_CPU_F_*)
|
||||
short idx; // CPU index as passed to the OS. Initially the entry index.
|
||||
short ca_id[5]; // cache ID for each level (L0 to L4)
|
||||
short ts_id; // thread-set identifier (generally core number)
|
||||
short cl_gid; // cluster global identifier (group of more intimate cores)
|
||||
short cl_lid; // cluster local identifier (per {pkg,node})
|
||||
short no_id; // NUMA node identifier
|
||||
short pk_id; // package identifier
|
||||
short th_cnt; // number of siblings threads
|
||||
short th_id; // thread ID among siblings of the same core
|
||||
short capa; // estimated CPU relative capacity; more is better
|
||||
};
|
||||
|
||||
/* Description of a CPU cluster. */
|
||||
struct ha_cpu_cluster {
|
||||
uint idx; /* used when sorting, normally the entry index */
|
||||
uint capa; /* total capacity */
|
||||
uint nb_cores; /* total distinct cores */
|
||||
uint nb_cpu; /* total CPUs */
|
||||
};
|
||||
|
||||
/* Description of a CPU selection policy. For now it only associates an option
|
||||
* name with a callback function that is supposed to adjust the global.nbthread
|
||||
* and global.nbtgroups based on the policy, the topology, and the constraints
|
||||
* on the number of threads which must be between tmin and tmax included, and
|
||||
* the number of thread groups which must be between gmin and gmax included.
|
||||
* The callback also takes the policy number (cpu_policy) and a pointer to a
|
||||
* string to write an error to in case of failure (in which case ret must be
|
||||
* < 0 and the caller will fre the location). More settings might come later.
|
||||
*/
|
||||
struct ha_cpu_policy {
|
||||
const char *name; /* option name in the configuration */
|
||||
const char *desc; /* short description for help messages */
|
||||
int (*fct)(int policy, int tmin, int tmax, int gmin, int gmax, char **err);
|
||||
int arg; /* optional arg for the function */
|
||||
};
|
||||
|
||||
#endif /* _HAPROXY_CPU_TOPO_T_H */
|
@ -1,79 +0,0 @@
|
||||
#ifndef _HAPROXY_CPU_TOPO_H
|
||||
#define _HAPROXY_CPU_TOPO_H
|
||||
|
||||
#include <haproxy/api.h>
|
||||
#include <haproxy/cpuset-t.h>
|
||||
#include <haproxy/cpu_topo-t.h>
|
||||
|
||||
extern int cpu_topo_maxcpus;
|
||||
extern int cpu_topo_lastcpu;
|
||||
extern struct ha_cpu_topo *ha_cpu_topo;
|
||||
|
||||
/* non-zero if we're certain that taskset or similar was used to force CPUs */
|
||||
extern int cpu_mask_forced;
|
||||
|
||||
/* Detects CPUs that are online on the system. It may rely on FS access (e.g.
|
||||
* /sys on Linux). Returns the number of CPUs detected or 0 if the detection
|
||||
* failed.
|
||||
*/
|
||||
int ha_cpuset_detect_online(struct hap_cpuset *set);
|
||||
|
||||
/* Detects the CPUs that will be used based on the ones the process is bound to.
|
||||
* Returns non-zero on success, zero on failure. Note that it may not be
|
||||
* performed in the function above because some calls may rely on other items
|
||||
* being allocated (e.g. trash).
|
||||
*/
|
||||
int cpu_detect_usable(void);
|
||||
|
||||
/* detect the CPU topology based on info in /sys */
|
||||
int cpu_detect_topology(void);
|
||||
|
||||
/* fix missing info in the CPU topology */
|
||||
void cpu_fixup_topology(void);
|
||||
|
||||
/* compose clusters */
|
||||
void cpu_compose_clusters(void);
|
||||
|
||||
/* apply remaining topology-based cpu set restrictions */
|
||||
void cpu_refine_cpusets(void);
|
||||
|
||||
/* apply the chosen CPU policy. Returns < 0 on failure with a message in *err
|
||||
* that must be freed by the caller if non-null.
|
||||
*/
|
||||
int cpu_apply_policy(int tmin, int tmax, int gmin, int gmax, char **err);
|
||||
|
||||
/* Detects CPUs that are bound to the current process. Returns the number of
|
||||
* CPUs detected or 0 if the detection failed.
|
||||
*/
|
||||
int ha_cpuset_detect_bound(struct hap_cpuset *set);
|
||||
|
||||
/* Returns true if at least one cpu-map directive was configured, otherwise
|
||||
* false.
|
||||
*/
|
||||
int cpu_map_configured(void);
|
||||
|
||||
/* Dump the CPU topology <topo> for up to cpu_topo_maxcpus CPUs for
|
||||
* debugging purposes. Offline CPUs are skipped.
|
||||
*/
|
||||
void cpu_dump_topology(const struct ha_cpu_topo *topo);
|
||||
|
||||
/* re-order a CPU topology array by locality to help form groups. */
|
||||
void cpu_reorder_by_locality(struct ha_cpu_topo *topo, int entries);
|
||||
|
||||
/* re-order a CPU topology array by CPU index only, to undo the function above,
|
||||
* in case other calls need to be made on top of this.
|
||||
*/
|
||||
void cpu_reorder_by_index(struct ha_cpu_topo *topo, int entries);
|
||||
|
||||
/* re-order a CPU topology array by cluster id. */
|
||||
void cpu_reorder_by_cluster(struct ha_cpu_topo *topo, int entries);
|
||||
|
||||
/* Functions used by qsort to compare hardware CPUs (not meant to be used from
|
||||
* outside cpu_topo).
|
||||
*/
|
||||
int _cmp_cpu_index(const void *a, const void *b);
|
||||
int _cmp_cpu_locality(const void *a, const void *b);
|
||||
int _cmp_cpu_cluster(const void *a, const void *b);
|
||||
int _cmp_cpu_cluster_capa(const void *a, const void *b);
|
||||
|
||||
#endif /* _HAPROXY_CPU_TOPO_H */
|
@ -44,13 +44,15 @@ int ha_cpuset_ffs(const struct hap_cpuset *set);
|
||||
*/
|
||||
void ha_cpuset_assign(struct hap_cpuset *dst, struct hap_cpuset *src);
|
||||
|
||||
/* returns true if the sets are equal */
|
||||
int ha_cpuset_isequal(const struct hap_cpuset *dst, const struct hap_cpuset *src);
|
||||
|
||||
/* Returns the biggest index plus one usable on the platform.
|
||||
*/
|
||||
int ha_cpuset_size(void);
|
||||
|
||||
/* Detects CPUs that are bound to the current process. Returns the number of
|
||||
* CPUs detected or 0 if the detection failed.
|
||||
*/
|
||||
int ha_cpuset_detect_bound(struct hap_cpuset *set);
|
||||
|
||||
/* Parse cpu sets. Each CPU set is either a unique number between 0 and
|
||||
* ha_cpuset_size() - 1 or a range with two such numbers delimited by a dash
|
||||
* ('-'). Each CPU set can be a list of unique numbers or ranges separated by
|
||||
@ -60,18 +62,15 @@ int ha_cpuset_size(void);
|
||||
*/
|
||||
int parse_cpu_set(const char **args, struct hap_cpuset *cpu_set, char **err);
|
||||
|
||||
/* Print a cpu-set as compactly as possible and returns the output length.
|
||||
* Returns >size if it cannot emit anything due to length constraints, in which
|
||||
* case it will match what is at least needed to go further, and may return 0
|
||||
* for an empty set. It will emit series of comma-delimited ranges in the form
|
||||
* "beg[-end]".
|
||||
*/
|
||||
int print_cpu_set(char *output, size_t size, const struct hap_cpuset *cpu_set);
|
||||
|
||||
/* Parse a linux cpu map string representing to a numeric cpu mask map
|
||||
* The cpu map string is a list of 4-byte hex strings separated by commas, with
|
||||
* most-significant byte first, one bit per cpu number.
|
||||
*/
|
||||
void parse_cpumap(char *cpumap_str, struct hap_cpuset *cpu_set);
|
||||
|
||||
/* Returns true if at least one cpu-map directive was configured, otherwise
|
||||
* false.
|
||||
*/
|
||||
int cpu_map_configured(void);
|
||||
|
||||
#endif /* _HAPROXY_CPUSET_H */
|
||||
|
@ -28,18 +28,14 @@ extern unsigned int debug_commands_issued;
|
||||
extern unsigned int warn_blocked_issued;
|
||||
|
||||
void ha_task_dump(struct buffer *buf, const struct task *task, const char *pfx);
|
||||
void ha_thread_dump_one(struct buffer *buf, int is_caller);
|
||||
void ha_thread_dump_one(int thr, int from_signal);
|
||||
void ha_dump_backtrace(struct buffer *buf, const char *prefix, int dump);
|
||||
void ha_backtrace_to_stderr(void);
|
||||
void ha_panic(void);
|
||||
void ha_stuck_warning(void);
|
||||
void ha_stuck_warning(int thr);
|
||||
|
||||
void post_mortem_add_component(const char *name, const char *version,
|
||||
const char *toolchain, const char *toolchain_opts,
|
||||
const char *build_settings, const char *path);
|
||||
|
||||
#ifdef DEBUG_UNIT
|
||||
void list_unittests();
|
||||
#endif
|
||||
|
||||
#endif /* _HAPROXY_DEBUG_H */
|
||||
|
@ -44,7 +44,7 @@
|
||||
* doesn't engage us too far.
|
||||
*/
|
||||
#ifndef MAX_TGROUPS
|
||||
#define MAX_TGROUPS 32
|
||||
#define MAX_TGROUPS 16
|
||||
#endif
|
||||
|
||||
#define MAX_THREADS_PER_GROUP __WORDSIZE
|
||||
@ -53,7 +53,7 @@
|
||||
* long bits if more tgroups are enabled.
|
||||
*/
|
||||
#ifndef MAX_THREADS
|
||||
#define MAX_THREADS ((((MAX_TGROUPS) > 1) ? 16 : 1) * (MAX_THREADS_PER_GROUP))
|
||||
#define MAX_THREADS ((((MAX_TGROUPS) > 1) ? 4 : 1) * (MAX_THREADS_PER_GROUP))
|
||||
#endif
|
||||
|
||||
#endif // USE_THREAD
|
||||
@ -349,11 +349,6 @@
|
||||
#define SRV_CHK_INTER_THRES 1000
|
||||
#endif
|
||||
|
||||
/* INET6 connectivity caching interval (in ms) */
|
||||
#ifndef INET6_CONNECTIVITY_CACHE_TIME
|
||||
#define INET6_CONNECTIVITY_CACHE_TIME 30000
|
||||
#endif
|
||||
|
||||
/* Specifies the string used to report the version and release date on the
|
||||
* statistics page. May be defined to the empty string ("") to permanently
|
||||
* disable the feature.
|
||||
@ -599,20 +594,6 @@
|
||||
# define DEBUG_STRICT 1
|
||||
#endif
|
||||
|
||||
/* Let's make DEBUG_THREAD default to 1, and make sure it has a value */
|
||||
#ifndef DEBUG_THREAD
|
||||
# if defined(USE_THREAD)
|
||||
# define DEBUG_THREAD 1
|
||||
# else
|
||||
# define DEBUG_THREAD 0
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/* Let's make DEBUG_COUNTERS default to 1 to have glitches counters by default */
|
||||
#ifndef DEBUG_COUNTERS
|
||||
# define DEBUG_COUNTERS 1
|
||||
#endif
|
||||
|
||||
#if !defined(DEBUG_MEMORY_POOLS)
|
||||
# define DEBUG_MEMORY_POOLS 1
|
||||
#endif
|
||||
@ -621,46 +602,4 @@
|
||||
#define MAX_SELF_USE_QUEUE 9
|
||||
#endif
|
||||
|
||||
/*
|
||||
* FWLC defines
|
||||
*/
|
||||
|
||||
/*
|
||||
* How many mt_lists we use per tree elements.
|
||||
* The more lists we have, the less likely it
|
||||
* will be that we'll have contention when
|
||||
* inserting/removing an element, but the more
|
||||
* costly it will be to look up servers.
|
||||
*/
|
||||
#ifndef FWLC_LISTS_NB
|
||||
#define FWLC_LISTS_NB 4
|
||||
#endif /* FWLC_LISTS_NB */
|
||||
|
||||
/*
|
||||
* How many entries we want to keep in the
|
||||
* free list, before trying to use some.
|
||||
* We want to keep some nodes in the tree,
|
||||
* to avoid having to re-allocate one and
|
||||
* modify the tree, which requires the
|
||||
* write lock and is costly, but we
|
||||
* don't want to have too much, to save
|
||||
* memory.
|
||||
*/
|
||||
#ifndef FWLC_MIN_FREE_ENTRIES
|
||||
#define FWLC_MIN_FREE_ENTRIES 500
|
||||
#endif /* FWLC_MIN_FREE_ENTRIES */
|
||||
|
||||
/*
|
||||
* QUIC
|
||||
*/
|
||||
|
||||
/* Memory usage in bytes on Tx side, 0 for unlimited. */
|
||||
#ifndef QUIC_MAX_TX_MEM
|
||||
#define QUIC_MAX_TX_MEM 0
|
||||
#endif
|
||||
|
||||
#ifndef STKTABLE_MAX_UPDATES_AT_ONCE
|
||||
#define STKTABLE_MAX_UPDATES_AT_ONCE 100
|
||||
#endif /* STKTABLE_MAX_UPDATES_AT_ONCE */
|
||||
|
||||
#endif /* _HAPROXY_DEFAULTS_H */
|
||||
|
@ -42,11 +42,6 @@
|
||||
#define DNS_TCP_MSG_MAX_SIZE 65535
|
||||
#define DNS_TCP_MSG_RING_MAX_SIZE (1 + 1 + 3 + DNS_TCP_MSG_MAX_SIZE) // varint_bytes(DNS_TCP_MSG_MAX_SIZE) == 3
|
||||
|
||||
/* threshold to consider that the link to dns server is failing
|
||||
* and we should stop creating new sessions
|
||||
*/
|
||||
#define DNS_MAX_DSS_CONSECUTIVE_ERRORS 100
|
||||
|
||||
/* DNS request or response header structure */
|
||||
struct dns_header {
|
||||
uint16_t id;
|
||||
@ -84,7 +79,7 @@ struct dns_additional_record {
|
||||
struct dns_stream_server {
|
||||
struct server *srv;
|
||||
struct dns_ring *ring_req;
|
||||
int consecutive_errors; /* number of errors since last successful query (atomically updated without lock) */
|
||||
int max_slots;
|
||||
int maxconn;
|
||||
int idle_conns;
|
||||
int cur_conns;
|
||||
|
@ -242,7 +242,6 @@ struct poller {
|
||||
void (*term)(struct poller *p); /* termination of this poller */
|
||||
int (*test)(struct poller *p); /* pre-init check of the poller */
|
||||
int (*fork)(struct poller *p); /* post-fork re-opening */
|
||||
void (*fixup_tgid_takeover)(struct poller *p, const int fd, const int old_tid, const int old_tgid); /* Fixup anything necessary after a FD takeover across tgids */
|
||||
const char *name; /* poller name */
|
||||
unsigned int flags; /* HAP_POLL_F_* */
|
||||
int pref; /* try pollers with higher preference first */
|
||||
|
@ -364,20 +364,6 @@ static inline void fd_lock_tgid(int fd, uint desired_tgid)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Try to lock the tgid, keeping the current tgid value.
|
||||
* Returns 1 on success, or 0 on failure.
|
||||
*/
|
||||
static inline int fd_lock_tgid_cur(int fd)
|
||||
{
|
||||
uint old = _HA_ATOMIC_LOAD(&fdtab[fd].refc_tgid) & 0x7fff;
|
||||
|
||||
if (_HA_ATOMIC_CAS(&fdtab[fd].refc_tgid, &old, (old | 0x8000) + 0x10000))
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* Grab a reference to the FD's TGID, and return the tgid. Note that a TGID of
|
||||
* zero indicates the FD was closed, thus also fails (i.e. no need to drop it).
|
||||
* On non-zero (success), the caller must release it using fd_drop_tgid().
|
||||
@ -387,10 +373,6 @@ static inline uint fd_take_tgid(int fd)
|
||||
uint old;
|
||||
|
||||
old = _HA_ATOMIC_FETCH_ADD(&fdtab[fd].refc_tgid, 0x10000) & 0xffff;
|
||||
while (old & 0x8000) {
|
||||
old = _HA_ATOMIC_LOAD(&fdtab[fd].refc_tgid) & 0xffff;
|
||||
__ha_cpu_relax();
|
||||
}
|
||||
if (likely(old))
|
||||
return old;
|
||||
HA_ATOMIC_SUB(&fdtab[fd].refc_tgid, 0x10000);
|
||||
@ -416,11 +398,6 @@ static inline uint fd_grab_tgid(int fd, uint desired_tgid)
|
||||
uint old;
|
||||
|
||||
old = _HA_ATOMIC_FETCH_ADD(&fdtab[fd].refc_tgid, 0x10000) & 0xffff;
|
||||
/* If the tgid is locked, wait until it no longer is */
|
||||
while (old & 0x8000) {
|
||||
old = _HA_ATOMIC_LOAD(&fdtab[fd].refc_tgid) & 0xffff;
|
||||
__ha_cpu_relax();
|
||||
}
|
||||
if (likely(old == desired_tgid))
|
||||
return 1;
|
||||
HA_ATOMIC_SUB(&fdtab[fd].refc_tgid, 0x10000);
|
||||
@ -448,22 +425,6 @@ static inline void fd_claim_tgid(int fd, uint desired_tgid)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the FD's TGID.
|
||||
* This should be called with the lock held, and will drop the lock once
|
||||
* the TGID is updated.
|
||||
* The reference counter is however preserved.
|
||||
*/
|
||||
static inline void fd_update_tgid(int fd, uint desired_tgid)
|
||||
{
|
||||
unsigned int orig_tgid = fdtab[fd].refc_tgid;
|
||||
unsigned int new_tgid;
|
||||
/* Remove the lock, and switch to the new tgid */
|
||||
do {
|
||||
new_tgid = (orig_tgid & 0xffff0000) | desired_tgid;
|
||||
} while (!_HA_ATOMIC_CAS(&fdtab[fd].refc_tgid, &orig_tgid, new_tgid) && __ha_cpu_relax());
|
||||
}
|
||||
|
||||
/* atomically read the running mask if the tgid matches, or returns zero if it
|
||||
* does not match. This is meant for use in code paths where the bit is expected
|
||||
* to be present and will be sufficient to protect against a short-term group
|
||||
@ -499,7 +460,6 @@ static inline long fd_clr_running(int fd)
|
||||
static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), int tgid, unsigned long thread_mask)
|
||||
{
|
||||
extern void sock_conn_iocb(int);
|
||||
struct tgroup_info *tginfo = &ha_tgroup_info[tgid - 1];
|
||||
int newstate;
|
||||
|
||||
/* conn_fd_handler should support edge-triggered FDs */
|
||||
@ -529,7 +489,7 @@ static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), int tgid
|
||||
BUG_ON(fdtab[fd].state != 0);
|
||||
BUG_ON(tgid < 1 || tgid > MAX_TGROUPS);
|
||||
|
||||
thread_mask &= tginfo->threads_enabled;
|
||||
thread_mask &= tg->threads_enabled;
|
||||
BUG_ON(thread_mask == 0);
|
||||
|
||||
fd_claim_tgid(fd, tgid);
|
||||
|
@ -28,9 +28,7 @@
|
||||
#include <haproxy/ticks.h>
|
||||
|
||||
/* exported functions from freq_ctr.c */
|
||||
ullong _freq_ctr_total_from_values(uint period, int pend, uint tick, ullong past, ullong curr);
|
||||
ullong freq_ctr_total(const struct freq_ctr *ctr, uint period, int pend);
|
||||
ullong freq_ctr_total_estimate(const struct freq_ctr *ctr, uint period, int pend);
|
||||
int freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq);
|
||||
uint update_freq_ctr_period_slow(struct freq_ctr *ctr, uint period, uint inc);
|
||||
|
||||
@ -94,16 +92,6 @@ static inline uint read_freq_ctr_period(const struct freq_ctr *ctr, uint period)
|
||||
return div64_32(total, period);
|
||||
}
|
||||
|
||||
/* same as read_freq_ctr_period() above except that it doesn't lock and may
|
||||
* return incorrect values. This is only meant for use in signal handlers.
|
||||
*/
|
||||
static inline uint read_freq_ctr_period_estimate(const struct freq_ctr *ctr, uint period)
|
||||
{
|
||||
ullong total = freq_ctr_total_estimate(ctr, period, -1);
|
||||
|
||||
return div64_32(total, period);
|
||||
}
|
||||
|
||||
/* same as read_freq_ctr_period() above except that floats are used for the
|
||||
* output so that low rates can be more precise.
|
||||
*/
|
||||
|
@ -79,15 +79,14 @@
|
||||
#define GTUNE_DISABLE_H2_WEBSOCKET (1<<21)
|
||||
#define GTUNE_DISABLE_ACTIVE_CLOSE (1<<22)
|
||||
#define GTUNE_QUICK_EXIT (1<<23)
|
||||
/* (1<<24) unused */
|
||||
#define GTUNE_QUIC_SOCK_PER_CONN (1<<24)
|
||||
#define GTUNE_NO_QUIC (1<<25)
|
||||
#define GTUNE_USE_FAST_FWD (1<<26)
|
||||
#define GTUNE_LISTENER_MQ_FAIR (1<<27)
|
||||
#define GTUNE_LISTENER_MQ_OPT (1<<28)
|
||||
#define GTUNE_LISTENER_MQ_ANY (GTUNE_LISTENER_MQ_FAIR | GTUNE_LISTENER_MQ_OPT)
|
||||
|
||||
/* subsystem-specific debugging options for tune.debug */
|
||||
#define GDBG_CPU_AFFINITY (1U<< 0)
|
||||
#define GTUNE_QUIC_CC_HYSTART (1<<29)
|
||||
#define GTUNE_QUIC_NO_UDP_GSO (1<<30)
|
||||
|
||||
#define NO_ZERO_COPY_FWD 0x0001 /* Globally disable zero-copy FF */
|
||||
#define NO_ZERO_COPY_FWD_PT 0x0002 /* disable zero-copy FF for PT (recv & send are disabled automatically) */
|
||||
@ -110,13 +109,6 @@ enum {
|
||||
SSL_SERVER_VERIFY_REQUIRED = 1,
|
||||
};
|
||||
|
||||
/* Takeover across thread groups */
|
||||
enum threadgroup_takeover {
|
||||
NO_THREADGROUP_TAKEOVER = 0,
|
||||
RESTRICTED_THREADGROUP_TAKEOVER = 1,
|
||||
FULL_THREADGROUP_TAKEOVER = 2,
|
||||
};
|
||||
|
||||
/* bit values to go with "warned" above */
|
||||
#define WARN_ANY 0x00000001 /* any warning was emitted */
|
||||
#define WARN_FORCECLOSE_DEPRECATED 0x00000002
|
||||
@ -169,10 +161,9 @@ struct global {
|
||||
unsigned char cluster_secret[16]; /* 128 bits of an SHA1 digest of a secret defined as ASCII string */
|
||||
struct {
|
||||
int maxpollevents; /* max number of poll events at once */
|
||||
int max_rules_at_once; /* max number of rules executed in a single evaluation loop */
|
||||
int max_rules_at_once; /* max number of rules excecuted in a single evaluation loop */
|
||||
int maxaccept; /* max number of consecutive accept() */
|
||||
int options; /* various tuning options */
|
||||
uint debug; /* various debugging options (GDBG_*) */
|
||||
int runqueue_depth;/* max number of tasks to run at once */
|
||||
uint recv_enough; /* how many input bytes at once are "enough" */
|
||||
uint bufsize; /* buffer size in bytes, defaults to BUFSIZE */
|
||||
@ -184,8 +175,6 @@ struct global {
|
||||
uint client_rcvbuf; /* set client rcvbuf to this value if not null */
|
||||
uint server_sndbuf; /* set server sndbuf to this value if not null */
|
||||
uint server_rcvbuf; /* set server rcvbuf to this value if not null */
|
||||
uint client_notsent_lowat; /* set client tcp_notsent_lowat to this value if not null */
|
||||
uint server_notsent_lowat; /* set client tcp_notsent_lowat to this value if not null */
|
||||
uint frontend_sndbuf; /* set frontend dgram sndbuf to this value if not null */
|
||||
uint frontend_rcvbuf; /* set frontend dgram rcvbuf to this value if not null */
|
||||
uint backend_sndbuf; /* set backend dgram sndbuf to this value if not null */
|
||||
@ -197,7 +186,6 @@ struct global {
|
||||
int pattern_cache; /* max number of entries in the pattern cache. */
|
||||
int sslcachesize; /* SSL cache size in session, defaults to 20000 */
|
||||
int comp_maxlevel; /* max HTTP compression level */
|
||||
uint glitch_kill_maxidle; /* have glitches kill only below this level of idle */
|
||||
int pool_low_ratio; /* max ratio of FDs used before we stop using new idle connections */
|
||||
int pool_high_ratio; /* max ratio of FDs used before we start killing idle connections when creating new connections */
|
||||
int pool_low_count; /* max number of opened fd before we stop using new idle connections */
|
||||
@ -211,16 +199,12 @@ struct global {
|
||||
int default_shards; /* default shards for listeners, or -1 (by-thread) or -2 (by-group) */
|
||||
uint max_checks_per_thread; /* if >0, no more than this concurrent checks per thread */
|
||||
uint ring_queues; /* if >0, #ring queues, otherwise equals #thread groups */
|
||||
enum threadgroup_takeover tg_takeover; /* Policy for threadgroup takeover */
|
||||
#ifdef USE_QUIC
|
||||
unsigned int quic_backend_max_idle_timeout;
|
||||
unsigned int quic_frontend_max_idle_timeout;
|
||||
unsigned int quic_frontend_glitches_threshold;
|
||||
unsigned int quic_frontend_max_data;
|
||||
unsigned int quic_frontend_max_streams_bidi;
|
||||
uint64_t quic_frontend_max_tx_mem;
|
||||
size_t quic_frontend_max_window_size;
|
||||
unsigned int quic_frontend_stream_data_ratio;
|
||||
unsigned int quic_retry_threshold;
|
||||
unsigned int quic_reorder_ratio;
|
||||
unsigned int quic_max_frame_loss;
|
||||
|
@ -158,6 +158,7 @@ int h1_headers_to_hdr_list(char *start, const char *stop,
|
||||
struct http_hdr *hdr, unsigned int hdr_num,
|
||||
struct h1m *h1m, union h1_sl *slp);
|
||||
|
||||
int h1_parse_cont_len_header(struct h1m *h1m, struct ist *value);
|
||||
int h1_parse_xfer_enc_header(struct h1m *h1m, struct ist value);
|
||||
void h1_parse_connection_header(struct h1m *h1m, struct ist *value);
|
||||
void h1_parse_upgrade_header(struct h1m *h1m, struct ist value);
|
||||
|
@ -38,7 +38,7 @@ int h1_parse_msg_tlrs(struct h1m *h1m, struct htx *dsthtx,
|
||||
struct buffer *srcbuf, size_t ofs, size_t max);
|
||||
|
||||
/* Returns the URI of an HTX message in the most common format for a H1 peer. It
|
||||
* is the path part of an absolute URI when the URI was normalized, otherwise
|
||||
* is the path part of an absolute URI when the URI was normalized, ortherwise
|
||||
* it is the whole URI, as received. Concretely, it is only a special case for
|
||||
* URIs received from H2 clients, to be able to send a relative path the H1
|
||||
* servers.
|
||||
|
@ -60,7 +60,6 @@
|
||||
#define CLASS_CERTCACHE "CertCache"
|
||||
#define CLASS_PROXY_LIST "ProxyList"
|
||||
#define CLASS_SERVER_LIST "ServerList"
|
||||
#define CLASS_QUEUE "Queue"
|
||||
|
||||
struct stream;
|
||||
|
||||
|
@ -50,7 +50,6 @@
|
||||
#define HLUA_INIT(__hlua) do { (__hlua)->T = 0; } while(0)
|
||||
|
||||
/* Lua HAProxy integration functions. */
|
||||
void hlua_yield_asap(lua_State *L);
|
||||
const char *hlua_traceback(lua_State *L, const char* sep);
|
||||
void hlua_ctx_destroy(struct hlua *lua);
|
||||
void hlua_init();
|
||||
|
@ -232,52 +232,6 @@ static inline int http_path_has_forbidden_char(const struct ist ist, const char
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Checks whether the :authority pseudo header contains dangerous chars that
|
||||
* might affect its reassembly. We want to catch anything below 0x21, above
|
||||
* 0x7e, as well as '@', '[', ']', '/','?', '#', '\', CR, LF, NUL. Then we
|
||||
* fall back to the slow path and decide. Brackets are used for IP-literal and
|
||||
* deserve special case, that is better handled in the slow path. The function
|
||||
* returns 0 if no forbidden char is presnet, non-zero otherwise.
|
||||
*/
|
||||
static inline int http_authority_has_forbidden_char(const struct ist ist)
|
||||
{
|
||||
size_t ofs, len = istlen(ist);
|
||||
const char *p = istptr(ist);
|
||||
int brackets = 0;
|
||||
uchar c;
|
||||
|
||||
/* Many attempts with various methods have shown that moderately recent
|
||||
* compilers (gcc >= 9, clang >= 13) will arrange the code below as an
|
||||
* evaluation tree that remains efficient at -O2 and above (~1.2ns per
|
||||
* char). The immediate next efficient one is the bitmap from 64-bit
|
||||
* registers but it's extremely sensitive to code arrangements and
|
||||
* optimization.
|
||||
*/
|
||||
for (ofs = 0; ofs < len; ofs++) {
|
||||
c = p[ofs];
|
||||
|
||||
if (unlikely(c < 0x21 || c > 0x7e ||
|
||||
c == '#' || c == '/' || c == '?' || c == '@' ||
|
||||
c == '[' || c == '\\' || c == ']')) {
|
||||
/* all of them must be rejected, except '[' which may
|
||||
* only appear at the beginning, and ']' which may
|
||||
* only appear at the end or before a colon.
|
||||
*/
|
||||
if ((c == '[' && ofs == 0) ||
|
||||
(c == ']' && (ofs == len - 1 || p[ofs + 1] == ':'))) {
|
||||
/* that's an IP-literal (see RFC3986#3.2), it's
|
||||
* OK for now.
|
||||
*/
|
||||
brackets ^= 1;
|
||||
} else {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
/* there must be no opening bracket left nor lone closing one */
|
||||
return brackets;
|
||||
}
|
||||
|
||||
/* Checks status code array <array> for the presence of status code <status>.
|
||||
* Returns non-zero if the code is present, zero otherwise. Any status code is
|
||||
* permitted.
|
||||
|
@ -168,7 +168,7 @@ enum {
|
||||
REDIRECT_FLAG_FROM_REQ = 4, /* redirect rule on the request path */
|
||||
REDIRECT_FLAG_IGNORE_EMPTY = 8, /* silently ignore empty location expressions */
|
||||
REDIRECT_FLAG_KEEP_QS = 16, /* append the query string to location, if any */
|
||||
REDIRECT_FLAG_COOKIE_FMT = 32, /* The cookie value is a log-format string */
|
||||
REDIRECT_FLAG_COOKIE_FMT = 32, /* The cookie value is a log-format stirng*/
|
||||
};
|
||||
|
||||
/* Redirect types (location, prefix, extended ) */
|
||||
|
@ -61,10 +61,4 @@ struct per_thread_deinit_fct {
|
||||
void (*fct)();
|
||||
};
|
||||
|
||||
struct unittest_fct {
|
||||
struct list list;
|
||||
const char *name;
|
||||
int (*fct)(int argc, char **argv);
|
||||
};
|
||||
|
||||
#endif /* _HAPROXY_INIT_T_H */
|
||||
|
@ -32,17 +32,6 @@ void hap_register_per_thread_init(int (*fct)());
|
||||
void hap_register_per_thread_deinit(void (*fct)());
|
||||
void hap_register_per_thread_free(void (*fct)());
|
||||
|
||||
|
||||
#ifdef DEBUG_UNIT
|
||||
void hap_register_unittest(const char *name, int (*fct)(int, char **));
|
||||
/* Simplified way to register a unit test */
|
||||
#define REGISTER_UNITTEST(name, fct) \
|
||||
INITCALL2(STG_REGISTER, hap_register_unittest, name, (fct))
|
||||
#else
|
||||
#define hap_register_unittest(a,b) ({})
|
||||
#define REGISTER_UNITTEST(name, fct)
|
||||
#endif
|
||||
|
||||
/* simplified way to declare a pre-check callback in a file */
|
||||
#define REGISTER_PRE_CHECK(fct) \
|
||||
INITCALL1(STG_REGISTER, hap_register_pre_check, (fct))
|
||||
|
@ -1,18 +0,0 @@
|
||||
/* SPDX-License-Identifier: LGPL-2.1-or-later */
|
||||
|
||||
#ifndef _HAPROXY_JWK_H_
|
||||
#define _HAPROXY_JWK_H_
|
||||
|
||||
#include <haproxy/openssl-compat.h>
|
||||
#include <haproxy/jwt-t.h>
|
||||
|
||||
int bn2base64url(const BIGNUM *bn, char *dst, size_t dsize);
|
||||
int EVP_PKEY_to_pub_jwk(EVP_PKEY *pkey, char *dst, size_t dsize);
|
||||
enum jwt_alg EVP_PKEY_to_jws_alg(EVP_PKEY *pkey);
|
||||
int jws_b64_payload(char *payload, char *dst, size_t dsize);
|
||||
int jws_b64_protected(enum jwt_alg alg, char *kid, char *jwk, char *nonce, char *url, char *dst, size_t dsize);
|
||||
int jws_b64_signature(EVP_PKEY *pkey, enum jwt_alg alg, char *b64protected, char *b64payload, char *dst, size_t dsize);
|
||||
int jws_flattened(char *protected, char *payload, char *signature, char *dst, size_t dsize);
|
||||
int jws_thumbprint(EVP_PKEY *pkey, char *dst, size_t dsize);
|
||||
|
||||
#endif /* ! _HAPROXY_JWK_H_ */
|
@ -23,7 +23,6 @@
|
||||
#define _HAPROXY_LB_FWRR_T_H
|
||||
|
||||
#include <import/ebtree-t.h>
|
||||
#include <haproxy/thread-t.h>
|
||||
|
||||
/* This structure is used to apply fast weighted round robin on a server group */
|
||||
struct fwrr_group {
|
||||
@ -33,17 +32,12 @@ struct fwrr_group {
|
||||
struct eb_root *next; /* servers to be placed at next run */
|
||||
int curr_pos; /* current position in the tree */
|
||||
int curr_weight; /* total weight of the current time range */
|
||||
};
|
||||
|
||||
struct lb_fwrr_per_tgrp {
|
||||
struct fwrr_group act; /* weighted round robin on the active servers */
|
||||
struct fwrr_group bck; /* weighted round robin on the backup servers */
|
||||
__decl_thread(HA_RWLOCK_T lock);
|
||||
int next_weight; /* total weight of the next time range */
|
||||
};
|
||||
|
||||
struct lb_fwrr {
|
||||
int next_weight_act; /* total weight of the next time range on active servers, for all trees */
|
||||
int next_weight_bck; /* total weight of the next time range on backup servers, for all trees */
|
||||
struct fwrr_group act; /* weighted round robin on the active servers */
|
||||
struct fwrr_group bck; /* weighted round robin on the backup servers */
|
||||
};
|
||||
|
||||
#endif /* _HAPROXY_LB_FWRR_T_H */
|
||||
|
@ -121,7 +121,6 @@ enum li_status {
|
||||
#define BC_SSL_O_NONE 0x0000
|
||||
#define BC_SSL_O_NO_TLS_TICKETS 0x0100 /* disable session resumption tickets */
|
||||
#define BC_SSL_O_PREF_CLIE_CIPH 0x0200 /* prefer client ciphers */
|
||||
#define BC_SSL_O_STRICT_SNI 0x0400 /* refuse negotiation if sni doesn't match a certificate */
|
||||
#endif
|
||||
|
||||
struct tls_version_filter {
|
||||
@ -170,6 +169,7 @@ struct bind_conf {
|
||||
unsigned long long ca_ignerr_bitfield[IGNERR_BF_SIZE]; /* ignored verify errors in handshake if depth > 0 */
|
||||
unsigned long long crt_ignerr_bitfield[IGNERR_BF_SIZE]; /* ignored verify errors in handshake if depth == 0 */
|
||||
void *initial_ctx; /* SSL context for initial negotiation */
|
||||
int strict_sni; /* refuse negotiation if sni doesn't match a certificate */
|
||||
int ssl_options; /* ssl options */
|
||||
struct eb_root sni_ctx; /* sni_ctx tree of all known certs full-names sorted by name */
|
||||
struct eb_root sni_w_ctx; /* sni_ctx tree of all known certs wildcards sorted by name */
|
||||
@ -193,7 +193,6 @@ struct bind_conf {
|
||||
unsigned int analysers; /* bitmap of required protocol analysers */
|
||||
int maxseg; /* for TCP, advertised MSS */
|
||||
int tcp_ut; /* for TCP, user timeout */
|
||||
int idle_ping; /* MUX idle-ping interval in ms */
|
||||
int maxaccept; /* if set, max number of connections accepted at once (-1 when disabled) */
|
||||
unsigned int backlog; /* if set, listen backlog */
|
||||
int maxconn; /* maximum connections allowed on this listener */
|
||||
@ -244,7 +243,7 @@ struct listener {
|
||||
struct fe_counters *counters; /* statistics counters */
|
||||
struct mt_list wait_queue; /* link element to make the listener wait for something (LI_LIMITED) */
|
||||
char *name; /* listener's name */
|
||||
char *label; /* listener's label */
|
||||
|
||||
unsigned int thr_conn[MAX_THREADS_PER_GROUP]; /* number of connections per thread for the group */
|
||||
|
||||
struct list by_fe; /* chaining in frontend's list of listeners */
|
||||
|
@ -36,7 +36,6 @@ extern struct pool_head *pool_head_uniqueid;
|
||||
extern const char *log_levels[];
|
||||
extern char *log_format;
|
||||
extern char httpclient_log_format[];
|
||||
extern char httpsclient_log_format[];
|
||||
extern char default_tcp_log_format[];
|
||||
extern char clf_tcp_log_format[];
|
||||
extern char default_http_log_format[];
|
||||
@ -145,7 +144,7 @@ void app_log(struct list *loggers, struct buffer *tag, int level, const char *fo
|
||||
*/
|
||||
int add_to_logformat_list(char *start, char *end, int type, struct lf_expr *lf_expr, char **err);
|
||||
|
||||
ssize_t syslog_applet_append_event(void *ctx, struct ist v1, struct ist v2, size_t ofs, size_t len, char delim);
|
||||
ssize_t syslog_applet_append_event(void *ctx, struct ist v1, struct ist v2, size_t ofs, size_t len);
|
||||
|
||||
/*
|
||||
* Parse the log_format string and fill a linked list.
|
||||
|
@ -70,9 +70,6 @@
|
||||
#define H2_CF_ERROR 0x01000000 //A read error was detected (handled has an abort)
|
||||
#define H2_CF_WAIT_INLIST 0x02000000 // there is at least one stream blocked by another stream in send_list/fctl_list
|
||||
|
||||
#define H2_CF_IDL_PING 0x04000000 // timer task scheduled for a PING emission
|
||||
#define H2_CF_IDL_PING_SENT 0x08000000 // PING emitted, or will be on next tasklet run, waiting for ACK
|
||||
|
||||
/* This function is used to report flags in debugging tools. Please reflect
|
||||
* below any single-bit flag addition above in the same order via the
|
||||
* __APPEND_FLAG macro. The new end of the buffer is returned.
|
||||
|
@ -17,7 +17,6 @@
|
||||
#include <haproxy/quic_frame-t.h>
|
||||
#include <haproxy/quic_pacing-t.h>
|
||||
#include <haproxy/quic_stream-t.h>
|
||||
#include <haproxy/quic_utils-t.h>
|
||||
#include <haproxy/stconn-t.h>
|
||||
#include <haproxy/task-t.h>
|
||||
#include <haproxy/time-t.h>
|
||||
@ -28,20 +27,16 @@ enum qcs_type {
|
||||
QCS_SRV_BIDI,
|
||||
QCS_CLT_UNI,
|
||||
QCS_SRV_UNI,
|
||||
};
|
||||
|
||||
enum qcc_app_st {
|
||||
QCC_APP_ST_NULL,
|
||||
QCC_APP_ST_INIT,
|
||||
QCC_APP_ST_SHUT,
|
||||
} __attribute__((packed));
|
||||
/* Must be the last one */
|
||||
QCS_MAX_TYPES
|
||||
};
|
||||
|
||||
struct qcc {
|
||||
struct connection *conn;
|
||||
uint64_t nb_sc; /* number of attached stream connectors */
|
||||
uint64_t nb_hreq; /* number of in-progress http requests */
|
||||
uint32_t flags; /* QC_CF_* */
|
||||
enum qcc_app_st app_st; /* application layer state */
|
||||
int glitches; /* total number of glitches on this connection */
|
||||
|
||||
/* flow-control fields set by us enforced on our side. */
|
||||
@ -110,6 +105,9 @@ struct qcc {
|
||||
void *ctx; /* Application layer context */
|
||||
};
|
||||
|
||||
/* Maximum size of stream Rx buffer. */
|
||||
#define QC_S_RX_BUF_SZ (global.tune.bufsize - NCB_RESERVED_SZ)
|
||||
|
||||
/* QUIC stream states
|
||||
*
|
||||
* On initialization a stream is put on idle state. It is opened as soon as
|
||||
@ -132,15 +130,6 @@ enum qcs_state {
|
||||
QC_SS_CLO, /* closed */
|
||||
} __attribute__((packed));
|
||||
|
||||
/* STREAM receive buffer. Can handle out-of-order storage.
|
||||
* Can be used as a tree node to allocate multiple entries ordered by offsets.
|
||||
*/
|
||||
struct qc_stream_rxbuf {
|
||||
struct eb64_node off_node; /* base offset of current buffer, node for QCS rx.bufs */
|
||||
struct ncbuf ncb; /* data storage with support for out of order offset */
|
||||
uint64_t off_end; /* first offset directly outside of current buffer */
|
||||
};
|
||||
|
||||
struct qcs {
|
||||
struct qcc *qcc;
|
||||
struct sedesc *sd;
|
||||
@ -149,17 +138,15 @@ struct qcs {
|
||||
void *ctx; /* app-ops context */
|
||||
|
||||
struct {
|
||||
uint64_t offset; /* read offset */
|
||||
uint64_t offset; /* absolute current base offset of ncbuf */
|
||||
uint64_t offset_max; /* maximum absolute offset received */
|
||||
struct eb_root bufs; /* receive buffers tree ordered by offset */
|
||||
struct ncbuf ncbuf; /* receive buffer - can handle out-of-order offset frames */
|
||||
struct buffer app_buf; /* receive buffer used by stconn layer */
|
||||
uint64_t msd; /* current max-stream-data limit to enforce */
|
||||
uint64_t msd_base; /* max-stream-data previous to latest update */
|
||||
struct bdata_ctr data; /* data utilization counter. Note that <tot> is now used for now as accounting may be difficult with ncbuf. */
|
||||
uint64_t msd_init; /* initial max-stream-data */
|
||||
} rx;
|
||||
struct {
|
||||
struct quic_fctl fc; /* stream flow control applied on sending */
|
||||
struct quic_frame *msd_frm; /* MAX_STREAM_DATA frame prepared */
|
||||
} tx;
|
||||
|
||||
struct eb64_node by_id;
|
||||
@ -202,7 +189,7 @@ struct qcc_app_ops {
|
||||
/* Initialize <qcs> stream app context or leave it to NULL if rejected. */
|
||||
int (*attach)(struct qcs *qcs, void *conn_ctx);
|
||||
|
||||
/* Convert received HTTP payload to HTX. Returns amount of decoded bytes from <b> or a negative error code. */
|
||||
/* Convert received HTTP payload to HTX. */
|
||||
ssize_t (*rcv_buf)(struct qcs *qcs, struct buffer *b, int fin);
|
||||
|
||||
/* Convert HTX to HTTP payload for sending. */
|
||||
@ -232,9 +219,9 @@ struct qcc_app_ops {
|
||||
|
||||
#define QC_CF_ERRL 0x00000001 /* fatal error detected locally, connection should be closed soon */
|
||||
#define QC_CF_ERRL_DONE 0x00000002 /* local error properly handled, connection can be released */
|
||||
#define QC_CF_IS_BACK 0x00000004 /* backend side */
|
||||
/* unused 0x00000004 */
|
||||
#define QC_CF_CONN_FULL 0x00000008 /* no stream buffers available on connection */
|
||||
/* unused 0x00000010 */
|
||||
#define QC_CF_APP_SHUT 0x00000010 /* Application layer shutdown done. */
|
||||
#define QC_CF_ERR_CONN 0x00000020 /* fatal error reported by transport layer */
|
||||
#define QC_CF_WAIT_HS 0x00000040 /* MUX init before QUIC handshake completed (0-RTT) */
|
||||
|
||||
@ -251,8 +238,9 @@ static forceinline char *qcc_show_flags(char *buf, size_t len, const char *delim
|
||||
_(QC_CF_ERRL,
|
||||
_(QC_CF_ERRL_DONE,
|
||||
_(QC_CF_CONN_FULL,
|
||||
_(QC_CF_APP_SHUT,
|
||||
_(QC_CF_ERR_CONN,
|
||||
_(QC_CF_WAIT_HS)))));
|
||||
_(QC_CF_WAIT_HS))))));
|
||||
/* epilogue */
|
||||
_(~0U);
|
||||
return buf;
|
||||
|
@ -46,12 +46,15 @@ int qcc_recv_max_stream_data(struct qcc *qcc, uint64_t id, uint64_t max);
|
||||
int qcc_recv_reset_stream(struct qcc *qcc, uint64_t id, uint64_t err, uint64_t final_size);
|
||||
int qcc_recv_stop_sending(struct qcc *qcc, uint64_t id, uint64_t err);
|
||||
|
||||
static inline int qmux_stream_rx_bufsz(void)
|
||||
{
|
||||
return global.tune.bufsize - NCB_RESERVED_SZ;
|
||||
}
|
||||
|
||||
/* Bit shift to get the stream sub ID for internal use which is obtained
|
||||
* shifting the stream IDs by this value, knowing that the
|
||||
* QCS_ID_TYPE_SHIFT less significant bits identify the stream ID
|
||||
* types (client initiated bidirectional, server initiated bidirectional,
|
||||
* client initiated unidirectional, server initiated bidirectional).
|
||||
* Note that there is no reference to such stream sub IDs in the RFC.
|
||||
*/
|
||||
#define QCS_ID_TYPE_MASK 0x3
|
||||
#define QCS_ID_TYPE_SHIFT 2
|
||||
/* The less significant bit of a stream ID is set for a server initiated stream */
|
||||
#define QCS_ID_SRV_INTIATOR_BIT 0x1
|
||||
/* This bit is set for unidirectional streams */
|
||||
@ -74,6 +77,16 @@ static inline int quic_stream_is_remote(struct qcc *qcc, uint64_t id)
|
||||
return !quic_stream_is_local(qcc, id);
|
||||
}
|
||||
|
||||
static inline int quic_stream_is_uni(uint64_t id)
|
||||
{
|
||||
return id & QCS_ID_DIR_BIT;
|
||||
}
|
||||
|
||||
static inline int quic_stream_is_bidi(uint64_t id)
|
||||
{
|
||||
return !quic_stream_is_uni(id);
|
||||
}
|
||||
|
||||
static inline char *qcs_st_to_str(enum qcs_state st)
|
||||
{
|
||||
switch (st) {
|
||||
|
@ -87,9 +87,10 @@ static forceinline char *spop_strm_show_flags(char *buf, size_t len, const char
|
||||
|
||||
/* SPOP connection state (spop_conn->state) */
|
||||
enum spop_conn_st {
|
||||
SPOP_CS_HA_HELLO = 0, /* init done, waiting for sending HELLO frame */
|
||||
SPOP_CS_AGENT_HELLO, /* HELLO frame sent, waiting for agent HELLO frame to define the connection settings */
|
||||
SPOP_CS_RUNNING, /* HELLO handshake finished, exchange NOTIFY/ACK frames */
|
||||
SPOP_CS_HA_HELLO = 0, /* init done, waiting for sending HELLO frame */
|
||||
SPOP_CS_AGENT_HELLO, /* HELLO frame sent, waiting for agent HELLO frame to define the connection settings */
|
||||
SPOP_CS_FRAME_H, /* HELLO handshake finished, waiting for a frame header */
|
||||
SPOP_CS_FRAME_P, /* Frame header received, waiting for a frame data */
|
||||
SPOP_CS_ERROR, /* send DISCONNECT frame to be able ti close the connection */
|
||||
SPOP_CS_CLOSING, /* DISCONNECT frame sent, waiting for the agent DISCONNECT frame before closing */
|
||||
SPOP_CS_CLOSED, /* Agent DISCONNECT frame received and close the connection ASAP */
|
||||
@ -102,7 +103,8 @@ static inline const char *spop_conn_st_to_str(enum spop_conn_st st)
|
||||
switch (st) {
|
||||
case SPOP_CS_HA_HELLO : return "HHL";
|
||||
case SPOP_CS_AGENT_HELLO: return "AHL";
|
||||
case SPOP_CS_RUNNING : return "RUN";
|
||||
case SPOP_CS_FRAME_H : return "FRH";
|
||||
case SPOP_CS_FRAME_P : return "FRP";
|
||||
case SPOP_CS_ERROR : return "ERR";
|
||||
case SPOP_CS_CLOSING : return "CLI";
|
||||
case SPOP_CS_CLOSED : return "CLO";
|
||||
|
@ -46,25 +46,7 @@
|
||||
|
||||
#ifdef USE_QUIC_OPENSSL_COMPAT
|
||||
#include <haproxy/quic_openssl_compat.h>
|
||||
#else
|
||||
#if defined(OSSL_FUNC_SSL_QUIC_TLS_CRYPTO_SEND)
|
||||
/* This macro is defined by the new OpenSSL 3.5.0 QUIC TLS API and it is not
|
||||
* defined by quictls.
|
||||
*/
|
||||
#define HAVE_OPENSSL_QUIC
|
||||
#define SSL_set_quic_transport_params SSL_set_quic_tls_transport_params
|
||||
#define SSL_set_quic_early_data_enabled SSL_set_quic_tls_early_data_enabled
|
||||
#define SSL_quic_read_level(arg) -1
|
||||
|
||||
enum ssl_encryption_level_t {
|
||||
ssl_encryption_initial = 0,
|
||||
ssl_encryption_early_data,
|
||||
ssl_encryption_handshake,
|
||||
ssl_encryption_application
|
||||
};
|
||||
|
||||
#endif
|
||||
#endif /* USE_QUIC_OPENSSL_COMPAT */
|
||||
|
||||
#if defined(OPENSSL_IS_AWSLC)
|
||||
#define OPENSSL_NO_DH
|
||||
@ -146,14 +128,6 @@ enum ssl_encryption_level_t {
|
||||
#define HAVE_SSL_SET_SECURITY_LEVEL
|
||||
#endif
|
||||
|
||||
#if ((defined(LIBRESSL_VERSION_NUMBER) && (LIBRESSL_VERSION_NUMBER >= 0x3030600L)) || (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L) || defined(OPENSSL_IS_AWSLC)) && !defined(USE_OPENSSL_WOLFSSL)
|
||||
#define HAVE_JWS
|
||||
#endif
|
||||
|
||||
#if (defined(HAVE_JWS) && defined(HAVE_ASN1_TIME_TO_TM))
|
||||
#define HAVE_ACME
|
||||
#endif
|
||||
|
||||
#if !defined(HAVE_SSL_SET_SECURITY_LEVEL)
|
||||
/* define a nope function for set_security_level */
|
||||
#define SSL_CTX_set_security_level(ctx, level) ({})
|
||||
@ -430,7 +404,7 @@ static inline unsigned long ERR_peek_error_func(const char **func)
|
||||
#define SSL_OP_CIPHER_SERVER_PREFERENCE 0
|
||||
#endif
|
||||
|
||||
/* needs OpenSSL >= 0.9.7 and renegotiation options on WolfSSL */
|
||||
/* needs OpenSSL >= 0.9.7 and renegotation options on WolfSSL */
|
||||
#if !defined(SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION) || \
|
||||
(defined(USE_OPENSSL_WOLFSSL) && !defined(HAVE_SECURE_RENEGOTIATION) && !defined(HAVE_SERVER_RENEGOTIATION_INFO))
|
||||
#undef SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION
|
||||
|
@ -38,7 +38,7 @@ extern int (*const pat_index_fcts[PAT_MATCH_NUM])(struct pattern_expr *, struct
|
||||
extern void (*const pat_prune_fcts[PAT_MATCH_NUM])(struct pattern_expr *);
|
||||
extern struct pattern *(*const pat_match_fcts[PAT_MATCH_NUM])(struct sample *, struct pattern_expr *, int);
|
||||
|
||||
/* This is the root of the list of all available pattern_ref values. */
|
||||
/* This is the root of the list of all pattern_ref avalaibles. */
|
||||
extern struct list pattern_reference;
|
||||
|
||||
int pattern_finalize_config(void);
|
||||
|
@ -27,7 +27,6 @@
|
||||
|
||||
#define MEM_F_SHARED 0x1
|
||||
#define MEM_F_EXACT 0x2
|
||||
#define MEM_F_UAF 0x4
|
||||
|
||||
/* A special pointer for the pool's free_list that indicates someone is
|
||||
* currently manipulating it. Serves as a short-lived lock.
|
||||
@ -52,7 +51,6 @@
|
||||
#define POOL_DBG_TAG 0x00000080 // place a tag at the end of the area
|
||||
#define POOL_DBG_POISON 0x00000100 // poison memory area on pool_alloc()
|
||||
#define POOL_DBG_UAF 0x00000200 // enable use-after-free protection
|
||||
#define POOL_DBG_BACKUP 0x00000400 // backup the object contents on free()
|
||||
|
||||
|
||||
/* This is the head of a thread-local cache */
|
||||
@ -64,17 +62,6 @@ struct pool_cache_head {
|
||||
ulong fill_pattern; /* pattern used to fill the area on free */
|
||||
} THREAD_ALIGNED(64);
|
||||
|
||||
/* This describes a pool registration, which is what was passed to
|
||||
* create_pool() and that might have been merged with an existing pool.
|
||||
*/
|
||||
struct pool_registration {
|
||||
struct list list; /* link element */
|
||||
char name[12]; /* name of the pool */
|
||||
unsigned int size; /* expected object size */
|
||||
unsigned int flags; /* MEM_F_* */
|
||||
unsigned int align; /* expected alignment; 0=unspecified */
|
||||
};
|
||||
|
||||
/* This represents one item stored in the thread-local cache. <by_pool> links
|
||||
* the object to the list of objects in the pool, and <by_lru> links the object
|
||||
* to the local thread's list of hottest objects. This way it's possible to
|
||||
@ -127,11 +114,9 @@ struct pool_head {
|
||||
unsigned int flags; /* MEM_F_* */
|
||||
unsigned int users; /* number of pools sharing this zone */
|
||||
unsigned int alloc_sz; /* allocated size (includes hidden fields) */
|
||||
unsigned int sum_size; /* sum of all registered users' size */
|
||||
struct list list; /* list of all known pools */
|
||||
void *base_addr; /* allocation address, for free() */
|
||||
char name[12]; /* name of the pool */
|
||||
struct list regs; /* registrations: alt names for this pool */
|
||||
|
||||
/* heavily read-write part */
|
||||
THREAD_ALIGN(64);
|
||||
|
@ -30,6 +30,4 @@ extern struct protocol proto_quic6;
|
||||
|
||||
extern struct quic_dghdlr *quic_dghdlrs;
|
||||
|
||||
extern THREAD_LOCAL struct cshared quic_mem_diff;
|
||||
|
||||
#endif /* _HAPROXY_PROTO_QUIC_H */
|
||||
|
@ -45,19 +45,15 @@
|
||||
#include <haproxy/uri_auth-t.h>
|
||||
#include <haproxy/http_ext-t.h>
|
||||
|
||||
/* values for proxy->mode, only one value per proxy.
|
||||
*
|
||||
* values are bitfield compatible so that functions may
|
||||
* take a bitfield of compatible modes as parameter
|
||||
*/
|
||||
/* values for proxy->mode */
|
||||
enum pr_mode {
|
||||
PR_MODES = 0x00,
|
||||
PR_MODE_TCP = 0x01,
|
||||
PR_MODE_HTTP = 0x02,
|
||||
PR_MODE_CLI = 0x04,
|
||||
PR_MODE_SYSLOG = 0x08,
|
||||
PR_MODE_PEERS = 0x10,
|
||||
PR_MODE_SPOP = 0x20,
|
||||
PR_MODE_TCP = 0,
|
||||
PR_MODE_HTTP,
|
||||
PR_MODE_CLI,
|
||||
PR_MODE_SYSLOG,
|
||||
PR_MODE_PEERS,
|
||||
PR_MODE_SPOP,
|
||||
PR_MODES
|
||||
} __attribute__((packed));
|
||||
|
||||
enum PR_SRV_STATE_FILE {
|
||||
@ -114,12 +110,11 @@ enum PR_SRV_STATE_FILE {
|
||||
#define PR_O_HTTP_CLO 0x01000000 /* HTTP close mode (httpclose) */
|
||||
#define PR_O_HTTP_SCL 0x02000000 /* HTTP server close mode (http-server-close) */
|
||||
#define PR_O_HTTP_MODE 0x03000000 /* MASK to retrieve the HTTP mode */
|
||||
/* unused: 0x04000000 */
|
||||
|
||||
#define PR_O_HTTP_DROP_REQ_TRLS 0x04000000 /* Drop the request trailers when forwarding to the server */
|
||||
#define PR_O_HTTP_DROP_RES_TRLS 0x08000000 /* Drop response trailers when forwarding to the client */
|
||||
|
||||
#define PR_O_TCPCHK_SSL 0x10000000 /* at least one TCPCHECK connect rule requires SSL */
|
||||
#define PR_O_CONTSTATS 0x20000000 /* continuous counters */
|
||||
#define PR_O_TCPCHK_SSL 0x08000000 /* at least one TCPCHECK connect rule requires SSL */
|
||||
#define PR_O_CONTSTATS 0x10000000 /* continuous counters */
|
||||
/* unused: 0x20000000 */
|
||||
#define PR_O_DISABLE404 0x40000000 /* Disable a server on a 404 response to a health-check */
|
||||
/* unused: 0x80000000 */
|
||||
|
||||
@ -156,8 +151,7 @@ enum PR_SRV_STATE_FILE {
|
||||
#define PR_O2_RSTRICT_REQ_HDR_NAMES_DEL 0x00800000 /* remove request header names containing chars outside of [0-9a-zA-Z-] charset */
|
||||
#define PR_O2_RSTRICT_REQ_HDR_NAMES_NOOP 0x01000000 /* preserve request header names containing chars outside of [0-9a-zA-Z-] charset */
|
||||
#define PR_O2_RSTRICT_REQ_HDR_NAMES_MASK 0x01c00000 /* mask for restrict-http-header-names option */
|
||||
|
||||
/* unused : 0x02000000 ... 0x08000000 */
|
||||
/* unused : 0x0000000..0x80000000 */
|
||||
|
||||
/* server health checks */
|
||||
#define PR_O2_CHK_NONE 0x00000000 /* no L7 health checks configured (TCP by default) */
|
||||
@ -167,29 +161,6 @@ enum PR_SRV_STATE_FILE {
|
||||
#define PR_O2_CHK_ANY 0xF0000000 /* Mask to cover any check */
|
||||
/* end of proxy->options2 */
|
||||
|
||||
/* bits for proxy->options3 */
|
||||
|
||||
/* bits for log-forward proxies */
|
||||
#define PR_O3_DONTPARSELOG 0x00000001 /* don't parse log messages */
|
||||
#define PR_O3_ASSUME_RFC6587_NTF 0x00000002 /* assume that we are going to receive just non-transparent framing messages */
|
||||
|
||||
/* unused: 0x00000004 to 0x00000008 */
|
||||
|
||||
#define PR_O3_LOGF_HOST_REPLACE 0x00000010
|
||||
#define PR_O3_LOGF_HOST_FILL 0x00000020
|
||||
#define PR_O3_LOGF_HOST_KEEP 0x00000040
|
||||
#define PR_O3_LOGF_HOST_APPEND 0x00000080
|
||||
#define PR_O3_LOGF_HOST 0x000000F0
|
||||
|
||||
/* bits for hash-preserve-affinity */
|
||||
#define PR_O3_HASHAFNTY_ALWS 0x00000000 /* always preserve hash affinity */
|
||||
#define PR_O3_HASHAFNTY_MAXCONN 0x00000100 /* preserve hash affinity until maxconn is reached */
|
||||
#define PR_O3_HASHAFNTY_MAXQUEUE 0x00000200 /* preserve hash affinity until maxqueue is reached */
|
||||
#define PR_O3_HASHAFNTY_MASK 0x00000300 /* mask for hash-preserve-affinity */
|
||||
|
||||
/* unused: 0x00000400 to 0x80000000 */
|
||||
/* end of proxy->options3 */
|
||||
|
||||
/* Cookie settings for pr->ck_opts */
|
||||
#define PR_CK_RW 0x00000001 /* rewrite all direct cookies with the right serverid */
|
||||
#define PR_CK_IND 0x00000002 /* keep only indirect cookies */
|
||||
@ -300,10 +271,9 @@ struct error_snapshot {
|
||||
char buf[VAR_ARRAY]; /* copy of the beginning of the message for bufsize bytes */
|
||||
};
|
||||
|
||||
/* Each proxy will have one occurrence of this structure per thread group */
|
||||
/* Each proxy will have one occurence of this structure per thread group */
|
||||
struct proxy_per_tgroup {
|
||||
struct queue queue;
|
||||
struct lbprm_per_tgrp lbprm;
|
||||
} THREAD_ALIGNED(64);
|
||||
|
||||
struct proxy {
|
||||
@ -311,15 +281,10 @@ struct proxy {
|
||||
char flags; /* bit field PR_FL_* */
|
||||
enum pr_mode mode; /* mode = PR_MODE_TCP, PR_MODE_HTTP, ... */
|
||||
char cap; /* supported capabilities (PR_CAP_*) */
|
||||
/* 4-bytes hole */
|
||||
|
||||
struct list global_list; /* list member for global proxy list */
|
||||
|
||||
unsigned int maxconn; /* max # of active streams on the frontend */
|
||||
|
||||
int options; /* PR_O_REDISP, PR_O_TRANSP, ... */
|
||||
int options2; /* PR_O2_* */
|
||||
int options3; /* PR_O3_* */
|
||||
unsigned int ck_opts; /* PR_CK_* (cookie options) */
|
||||
unsigned int fe_req_ana, be_req_ana; /* bitmap of common request protocol analysers for the frontend and backend */
|
||||
unsigned int fe_rsp_ana, be_rsp_ana; /* bitmap of common response protocol analysers for the frontend and backend */
|
||||
@ -455,7 +420,6 @@ struct proxy {
|
||||
/* used only during configuration parsing */
|
||||
int no_options; /* PR_O_REDISP, PR_O_TRANSP, ... */
|
||||
int no_options2; /* PR_O2_* */
|
||||
int no_options3; /* PR_O3_* */
|
||||
|
||||
struct {
|
||||
const char *file; /* file where the section appears */
|
||||
|
@ -33,14 +33,12 @@
|
||||
#include <haproxy/thread.h>
|
||||
|
||||
extern struct proxy *proxies_list;
|
||||
extern struct list proxies;
|
||||
extern struct eb_root used_proxy_id; /* list of proxy IDs in use */
|
||||
extern unsigned int error_snapshot_id; /* global ID assigned to each error then incremented */
|
||||
extern struct eb_root proxy_by_name; /* tree of proxies sorted by name */
|
||||
|
||||
extern const struct cfg_opt cfg_opts[];
|
||||
extern const struct cfg_opt cfg_opts2[];
|
||||
extern const struct cfg_opt cfg_opts3[];
|
||||
|
||||
struct task *manage_proxy(struct task *t, void *context, unsigned int state);
|
||||
void proxy_cond_pause(struct proxy *p);
|
||||
@ -52,7 +50,6 @@ int resume_proxy(struct proxy *p);
|
||||
void stop_proxy(struct proxy *p);
|
||||
int stream_set_backend(struct stream *s, struct proxy *be);
|
||||
|
||||
void deinit_proxy(struct proxy *p);
|
||||
void free_proxy(struct proxy *p);
|
||||
const char *proxy_cap_str(int cap);
|
||||
const char *proxy_mode_str(int mode);
|
||||
@ -72,7 +69,6 @@ void proxy_destroy_all_unref_defaults(void);
|
||||
void proxy_ref_defaults(struct proxy *px, struct proxy *defpx);
|
||||
void proxy_unref_defaults(struct proxy *px);
|
||||
void proxy_unref_or_destroy_defaults(struct proxy *px);
|
||||
int setup_new_proxy(struct proxy *px, const char *name, unsigned int cap, char **errmsg);
|
||||
struct proxy *alloc_new_proxy(const char *name, unsigned int cap,
|
||||
char **errmsg);
|
||||
struct proxy *parse_new_proxy(const char *name, unsigned int cap,
|
||||
@ -136,24 +132,22 @@ static inline void proxy_reset_timeouts(struct proxy *proxy)
|
||||
/* increase the number of cumulated connections received on the designated frontend */
|
||||
static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
|
||||
{
|
||||
_HA_ATOMIC_INC(&fe->fe_counters.shared->tg[tgid - 1]->cum_conn);
|
||||
_HA_ATOMIC_INC(&fe->fe_counters.cum_conn);
|
||||
if (l && l->counters)
|
||||
_HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->cum_conn);
|
||||
update_freq_ctr(&fe->fe_counters.shared->tg[tgid - 1]->conn_per_sec, 1);
|
||||
_HA_ATOMIC_INC(&l->counters->cum_conn);
|
||||
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.cps_max,
|
||||
update_freq_ctr(&fe->fe_counters._conn_per_sec, 1));
|
||||
update_freq_ctr(&fe->fe_counters.conn_per_sec, 1));
|
||||
}
|
||||
|
||||
/* increase the number of cumulated connections accepted by the designated frontend */
|
||||
static inline void proxy_inc_fe_sess_ctr(struct listener *l, struct proxy *fe)
|
||||
{
|
||||
|
||||
_HA_ATOMIC_INC(&fe->fe_counters.shared->tg[tgid - 1]->cum_sess);
|
||||
_HA_ATOMIC_INC(&fe->fe_counters.cum_sess);
|
||||
if (l && l->counters)
|
||||
_HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->cum_sess);
|
||||
update_freq_ctr(&fe->fe_counters.shared->tg[tgid - 1]->sess_per_sec, 1);
|
||||
_HA_ATOMIC_INC(&l->counters->cum_sess);
|
||||
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.sps_max,
|
||||
update_freq_ctr(&fe->fe_counters._sess_per_sec, 1));
|
||||
update_freq_ctr(&fe->fe_counters.sess_per_sec, 1));
|
||||
}
|
||||
|
||||
/* increase the number of cumulated HTTP sessions on the designated frontend.
|
||||
@ -163,21 +157,20 @@ static inline void proxy_inc_fe_cum_sess_ver_ctr(struct listener *l, struct prox
|
||||
unsigned int http_ver)
|
||||
{
|
||||
if (http_ver == 0 ||
|
||||
http_ver > sizeof(fe->fe_counters.shared->tg[tgid - 1]->cum_sess_ver) / sizeof(*fe->fe_counters.shared->tg[tgid - 1]->cum_sess_ver))
|
||||
http_ver > sizeof(fe->fe_counters.cum_sess_ver) / sizeof(*fe->fe_counters.cum_sess_ver))
|
||||
return;
|
||||
|
||||
_HA_ATOMIC_INC(&fe->fe_counters.shared->tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
|
||||
_HA_ATOMIC_INC(&fe->fe_counters.cum_sess_ver[http_ver - 1]);
|
||||
if (l && l->counters)
|
||||
_HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
|
||||
_HA_ATOMIC_INC(&l->counters->cum_sess_ver[http_ver - 1]);
|
||||
}
|
||||
|
||||
/* increase the number of cumulated streams on the designated backend */
|
||||
static inline void proxy_inc_be_ctr(struct proxy *be)
|
||||
{
|
||||
_HA_ATOMIC_INC(&be->be_counters.shared->tg[tgid - 1]->cum_sess);
|
||||
update_freq_ctr(&be->be_counters.shared->tg[tgid - 1]->sess_per_sec, 1);
|
||||
_HA_ATOMIC_INC(&be->be_counters.cum_sess);
|
||||
HA_ATOMIC_UPDATE_MAX(&be->be_counters.sps_max,
|
||||
update_freq_ctr(&be->be_counters._sess_per_sec, 1));
|
||||
update_freq_ctr(&be->be_counters.sess_per_sec, 1));
|
||||
}
|
||||
|
||||
/* increase the number of cumulated requests on the designated frontend.
|
||||
@ -187,15 +180,14 @@ static inline void proxy_inc_be_ctr(struct proxy *be)
|
||||
static inline void proxy_inc_fe_req_ctr(struct listener *l, struct proxy *fe,
|
||||
unsigned int http_ver)
|
||||
{
|
||||
if (http_ver >= sizeof(fe->fe_counters.shared->tg[tgid - 1]->p.http.cum_req) / sizeof(*fe->fe_counters.shared->tg[tgid - 1]->p.http.cum_req))
|
||||
if (http_ver >= sizeof(fe->fe_counters.p.http.cum_req) / sizeof(*fe->fe_counters.p.http.cum_req))
|
||||
return;
|
||||
|
||||
_HA_ATOMIC_INC(&fe->fe_counters.shared->tg[tgid - 1]->p.http.cum_req[http_ver]);
|
||||
_HA_ATOMIC_INC(&fe->fe_counters.p.http.cum_req[http_ver]);
|
||||
if (l && l->counters)
|
||||
_HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->p.http.cum_req[http_ver]);
|
||||
update_freq_ctr(&fe->fe_counters.shared->tg[tgid - 1]->req_per_sec, 1);
|
||||
_HA_ATOMIC_INC(&l->counters->p.http.cum_req[http_ver]);
|
||||
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.p.http.rps_max,
|
||||
update_freq_ctr(&fe->fe_counters.p.http._req_per_sec, 1));
|
||||
update_freq_ctr(&fe->fe_counters.req_per_sec, 1));
|
||||
}
|
||||
|
||||
/* Returns non-zero if the proxy is configured to retry a request if we got that status, 0 otherwise */
|
||||
|
@ -8,9 +8,6 @@
|
||||
|
||||
size_t qcs_http_rcv_buf(struct qcs *qcs, struct buffer *buf, size_t count,
|
||||
char *fin);
|
||||
|
||||
int qcs_http_handle_standalone_fin(struct qcs *qcs);
|
||||
|
||||
size_t qcs_http_snd_buf(struct qcs *qcs, struct buffer *buf, size_t count,
|
||||
char *fin);
|
||||
|
||||
|
@ -107,11 +107,11 @@ struct quic_cc_path {
|
||||
/* Congestion window. */
|
||||
uint64_t cwnd;
|
||||
/* The current maximum congestion window value reached. */
|
||||
uint64_t cwnd_last_max;
|
||||
/* Max limit on congestion window size. */
|
||||
uint64_t limit_max;
|
||||
/* Min limit on congestion window size. */
|
||||
uint64_t limit_min;
|
||||
uint64_t mcwnd;
|
||||
/* The maximum congestion window value which can be reached. */
|
||||
uint64_t max_cwnd;
|
||||
/* Minimum congestion window. */
|
||||
uint64_t min_cwnd;
|
||||
/* Prepared data to be sent (in bytes). */
|
||||
uint64_t prep_in_flight;
|
||||
/* Outstanding data (in bytes). */
|
||||
@ -123,7 +123,7 @@ struct quic_cc_path {
|
||||
uint32_t recovery_start_ts;
|
||||
};
|
||||
|
||||
/* pacing can be optionally activated on top of the algorithm */
|
||||
/* pacing can be optionnaly activated on top of the algorithm */
|
||||
#define QUIC_CC_ALGO_FL_OPT_PACING 0x01
|
||||
|
||||
struct quic_cc_algo {
|
||||
|
@ -29,11 +29,9 @@
|
||||
#include <haproxy/api.h>
|
||||
#include <haproxy/buf.h>
|
||||
#include <haproxy/chunk.h>
|
||||
#include <haproxy/proto_quic.h>
|
||||
#include <haproxy/quic_cc-t.h>
|
||||
#include <haproxy/quic_conn-t.h>
|
||||
#include <haproxy/quic_loss.h>
|
||||
#include <haproxy/thread.h>
|
||||
|
||||
void quic_cc_init(struct quic_cc *cc, struct quic_cc_algo *algo, struct quic_conn *qc);
|
||||
void quic_cc_event(struct quic_cc *cc, struct quic_cc_event *ev);
|
||||
@ -93,10 +91,9 @@ static inline void quic_cc_path_init(struct quic_cc_path *path, int ipv4, unsign
|
||||
*(size_t *)&path->mtu = max_dgram_sz;
|
||||
path->initial_wnd = QUIC_MIN(10 * max_dgram_sz, QUIC_MAX(max_dgram_sz << 1, 14720U));
|
||||
path->cwnd = path->initial_wnd;
|
||||
cshared_add(&quic_mem_diff, path->cwnd);
|
||||
path->cwnd_last_max = path->cwnd;
|
||||
path->limit_max = max_cwnd;
|
||||
path->limit_min = max_dgram_sz << 1;
|
||||
path->mcwnd = path->cwnd;
|
||||
path->max_cwnd = max_cwnd;
|
||||
path->min_cwnd = max_dgram_sz << 1;
|
||||
path->prep_in_flight = 0;
|
||||
path->in_flight = 0;
|
||||
path->ifae_pkts = 0;
|
||||
@ -118,9 +115,7 @@ static inline size_t quic_cc_path_prep_data(struct quic_cc_path *path)
|
||||
return path->cwnd - path->prep_in_flight;
|
||||
}
|
||||
|
||||
void quic_cc_path_reset(struct quic_cc_path *path);
|
||||
void quic_cc_path_set(struct quic_cc_path *path, uint64_t val);
|
||||
void quic_cc_path_inc(struct quic_cc_path *path, uint64_t val);
|
||||
int quic_cwnd_may_increase(const struct quic_cc_path *path);
|
||||
|
||||
#endif /* USE_QUIC */
|
||||
#endif /* _PROTO_QUIC_CC_H */
|
||||
|
@ -28,22 +28,22 @@
|
||||
|
||||
#include <sys/socket.h>
|
||||
|
||||
#include <import/ebtree-t.h>
|
||||
#include <haproxy/cbuf-t.h>
|
||||
#include <haproxy/list.h>
|
||||
#include <haproxy/show_flags-t.h>
|
||||
|
||||
#include <haproxy/api-t.h>
|
||||
#include <haproxy/buf-t.h>
|
||||
#include <haproxy/listener-t.h>
|
||||
#include <haproxy/openssl-compat.h>
|
||||
#include <haproxy/mux_quic-t.h>
|
||||
#include <haproxy/quic_cid-t.h>
|
||||
#include <haproxy/quic_cc-t.h>
|
||||
#include <haproxy/quic_frame-t.h>
|
||||
#include <haproxy/quic_loss-t.h>
|
||||
#include <haproxy/quic_openssl_compat-t.h>
|
||||
#include <haproxy/quic_stats-t.h>
|
||||
#include <haproxy/quic_tls-t.h>
|
||||
#include <haproxy/quic_tp-t.h>
|
||||
#include <haproxy/show_flags-t.h>
|
||||
#include <haproxy/ssl_sock-t.h>
|
||||
#include <haproxy/task-t.h>
|
||||
#include <haproxy/task.h>
|
||||
|
||||
#include <import/ebtree-t.h>
|
||||
|
||||
typedef unsigned long long ull;
|
||||
|
||||
@ -100,9 +100,6 @@ typedef unsigned long long ull;
|
||||
/* Default congestion window size. 480 kB, equivalent to the legacy value which was 30*bufsize */
|
||||
#define QUIC_DFLT_MAX_WINDOW_SIZE 491520
|
||||
|
||||
/* Default ratio applied for max-stream-data-bidi-remote derived from max-data */
|
||||
#define QUIC_DFLT_FRONT_STREAM_DATA_RATIO 90
|
||||
|
||||
/*
|
||||
* 0 1 2 3
|
||||
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
@ -228,9 +225,6 @@ struct quic_version {
|
||||
extern const struct quic_version quic_versions[];
|
||||
extern const size_t quic_versions_nb;
|
||||
extern const struct quic_version *preferred_version;
|
||||
extern const struct quic_version *quic_version_draft_29;
|
||||
extern const struct quic_version *quic_version_1;
|
||||
extern const struct quic_version *quic_version_2;
|
||||
|
||||
/* unused: 0x01 */
|
||||
/* Flag the packet number space as requiring an ACK frame to be sent. */
|
||||
@ -282,10 +276,6 @@ struct quic_conn_cntrs {
|
||||
long long streams_blocked_uni; /* total number of times STREAMS_BLOCKED_UNI frame was received */
|
||||
};
|
||||
|
||||
struct connection;
|
||||
struct qcc;
|
||||
struct qcc_app_ops;
|
||||
|
||||
#define QUIC_CONN_COMMON \
|
||||
struct { \
|
||||
/* Connection owned socket FD. */ \
|
||||
@ -308,7 +298,6 @@ struct qcc_app_ops;
|
||||
/* Number of received bytes. */ \
|
||||
uint64_t rx; \
|
||||
} bytes; \
|
||||
size_t max_udp_payload; \
|
||||
/* First DCID used by client on its Initial packet. */ \
|
||||
struct quic_cid odcid; \
|
||||
/* DCID of our endpoint - not updated when a new DCID is used */ \
|
||||
@ -319,7 +308,7 @@ struct qcc_app_ops;
|
||||
* with a connection \
|
||||
*/ \
|
||||
struct eb_root *cids; \
|
||||
enum obj_type *target; \
|
||||
struct listener *li; /* only valid for frontend connections */ \
|
||||
/* Idle timer task */ \
|
||||
struct task *idle_timer_task; \
|
||||
unsigned int idle_expire; \
|
||||
@ -342,10 +331,7 @@ struct quic_conn {
|
||||
int tps_tls_ext;
|
||||
int state;
|
||||
enum qc_mux_state mux_state; /* status of the connection/mux layer */
|
||||
#ifdef HAVE_OPENSSL_QUIC
|
||||
uint32_t prot_level;
|
||||
#endif
|
||||
#if defined(USE_QUIC_OPENSSL_COMPAT) || defined(HAVE_OPENSSL_QUIC)
|
||||
#ifdef USE_QUIC_OPENSSL_COMPAT
|
||||
unsigned char enc_params[QUIC_TP_MAX_ENCLEN]; /* encoded QUIC transport parameters */
|
||||
size_t enc_params_len;
|
||||
#endif
|
||||
@ -394,10 +380,10 @@ struct quic_conn {
|
||||
/* RX buffer */
|
||||
struct buffer buf;
|
||||
struct list pkt_list;
|
||||
|
||||
/* first unhandled streams ID, set by MUX after release */
|
||||
uint64_t stream_max_uni;
|
||||
uint64_t stream_max_bidi;
|
||||
struct {
|
||||
/* Number of open or closed streams */
|
||||
uint64_t nb_streams;
|
||||
} strms[QCS_MAX_TYPES];
|
||||
} rx;
|
||||
struct {
|
||||
struct quic_tls_kp prv_rx;
|
||||
@ -460,7 +446,6 @@ struct quic_conn_closed {
|
||||
#define QUIC_FL_CONN_HPKTNS_DCD (1U << 16) /* Handshake packet number space discarded */
|
||||
#define QUIC_FL_CONN_PEER_VALIDATED_ADDR (1U << 17) /* Peer address is considered as validated for this connection. */
|
||||
#define QUIC_FL_CONN_NO_TOKEN_RCVD (1U << 18) /* Client dit not send any token */
|
||||
#define QUIC_FL_CONN_SCID_RECEIVED (1U << 19) /* (client only: first Initial received. */
|
||||
/* gap here */
|
||||
#define QUIC_FL_CONN_TO_KILL (1U << 24) /* Unusable connection, to be killed */
|
||||
#define QUIC_FL_CONN_TX_TP_RECEIVED (1U << 25) /* Peer transport parameters have been received (used for the transmitting part) */
|
||||
|
@ -69,8 +69,7 @@ struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
|
||||
struct quic_connection_id *conn_id,
|
||||
struct sockaddr_storage *local_addr,
|
||||
struct sockaddr_storage *peer_addr,
|
||||
int token, void *owner,
|
||||
struct connection *conn);
|
||||
int server, int token, void *owner);
|
||||
int quic_build_post_handshake_frames(struct quic_conn *qc);
|
||||
const struct quic_version *qc_supported_version(uint32_t version);
|
||||
int quic_peer_validated_addr(struct quic_conn *qc);
|
||||
@ -128,11 +127,7 @@ static inline void quic_conn_mv_cids_to_cc_conn(struct quic_conn_closed *cc_conn
|
||||
|
||||
}
|
||||
|
||||
/* Allocate the underlying required memory for <ncbuf> non-contiguous buffer.
|
||||
* Does nothing if buffer is already allocated.
|
||||
*
|
||||
* Returns the buffer instance or NULL on allocation failure.
|
||||
*/
|
||||
/* Allocate the underlying required memory for <ncbuf> non-contiguous buffer */
|
||||
static inline struct ncbuf *quic_get_ncbuf(struct ncbuf *ncbuf)
|
||||
{
|
||||
struct buffer buf = BUF_NULL;
|
||||
@ -140,8 +135,8 @@ static inline struct ncbuf *quic_get_ncbuf(struct ncbuf *ncbuf)
|
||||
if (!ncb_is_null(ncbuf))
|
||||
return ncbuf;
|
||||
|
||||
if (!b_alloc(&buf, DB_MUX_RX))
|
||||
return NULL;
|
||||
b_alloc(&buf, DB_MUX_RX);
|
||||
BUG_ON(b_is_null(&buf));
|
||||
|
||||
*ncbuf = ncb_make(buf.area, buf.size, 0);
|
||||
ncb_init(ncbuf, 0);
|
||||
@ -164,22 +159,6 @@ static inline void quic_free_ncbuf(struct ncbuf *ncbuf)
|
||||
*ncbuf = NCBUF_NULL;
|
||||
}
|
||||
|
||||
/* Return the address of the QUIC counters attached to the proxy of
|
||||
* the owner of the connection whose object type address is <o> for
|
||||
* listener and servers, or NULL for others object type.
|
||||
*/
|
||||
static inline void *qc_counters(enum obj_type *o, const struct stats_module *m)
|
||||
{
|
||||
struct proxy *p;
|
||||
struct listener *l = objt_listener(o);
|
||||
struct server *s = objt_server(o);
|
||||
|
||||
p = l ? l->bind_conf->frontend :
|
||||
s ? s->proxy : NULL;
|
||||
|
||||
return p ? EXTRA_COUNTERS_GET(p->extra_counters_fe, m) : NULL;
|
||||
}
|
||||
|
||||
void chunk_frm_appendf(struct buffer *buf, const struct quic_frame *frm);
|
||||
void quic_set_connection_close(struct quic_conn *qc, const struct quic_err err);
|
||||
void quic_set_tls_alert(struct quic_conn *qc, int alert);
|
||||
@ -191,7 +170,7 @@ int qc_notify_send(struct quic_conn *qc);
|
||||
|
||||
void qc_check_close_on_released_mux(struct quic_conn *qc);
|
||||
|
||||
int quic_conn_release(struct quic_conn *qc);
|
||||
void quic_conn_release(struct quic_conn *qc);
|
||||
|
||||
void qc_kill_conn(struct quic_conn *qc);
|
||||
|
||||
|
@ -5,7 +5,7 @@
|
||||
#include <haproxy/quic_cc-t.h>
|
||||
|
||||
struct quic_pacer {
|
||||
const struct quic_cc *cc; /* Congestion controller algo used for this connection */
|
||||
const struct quic_cc *cc; /* Congestion controler algo used for this connection */
|
||||
ullong cur; /* Nanosecond timestamp of the last credit reloading */
|
||||
uint credit; /* Number of packets which can be emitted in a single burst */
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user