mirror of
https://github.com/isc-projects/bind9.git
synced 2026-02-28 20:41:18 -05:00
The ability to conveniently mark tests which should only be run when the
CI_ENABLE_ALL_TESTS environment variable is set seems to be useful on a
general level and therefore it should not be limited to the "timeouts"
system test, where it is currently used.
pytest documentation [1] suggests to reuse commonly used test markers by
putting them all in a single Python module which then has to be imported
by test files that want to use the markers defined therein. Follow that
advice by creating a new bin/tests/system/pytest_custom_markers.py
Python module containing the relevant marker definitions.
Note that "import pytest_custom_markers" works from a test-specific
subdirectory because pytest modifies sys.path so that it contains the
paths to all parent directories containing a conftest.py file (and
bin/tests/system/ is one). PyLint does not like that, though, so add a
relevant PyLint suppression.
The above changes make bin/tests/system/timeouts/conftest.py redundant,
so remove it.
[1] https://docs.pytest.org/en/7.0.x/how-to/skipping.html#id1
(cherry picked from commit 00392921f0)
353 lines
11 KiB
Bash
#!/bin/sh

# Copyright (C) Internet Systems Consortium, Inc. ("ISC")
#
# SPDX-License-Identifier: MPL-2.0
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at https://mozilla.org/MPL/2.0/.
#
# See the COPYRIGHT file distributed with this work for additional
# information regarding copyright ownership.

#
# Run a system test.
#
# Paths substituted by the build system at configure time.
top_builddir=@top_builddir@
builddir=@abs_builddir@
srcdir=@abs_srcdir@

# Pull in shared helpers (echoinfo/echofail/echowarn, $PERL, $PYTEST, ...).
# shellcheck source=conf.sh
. "${builddir}/conf.sh"

# Refuse to run as root unless the build was explicitly configured for
# developer use; some tests behave differently with root privileges.
if [ "$(id -u)" -eq "0" ] && [ "@DEVELOPER_MODE@" != "yes" ]; then
  echofail "Refusing to run test as root. Build with --enable-developer to override." >&2
  exit 1
fi

# Make the build/source locations visible to child scripts (start.pl, tests).
export builddir
export srcdir
# Print the current timestamp as ISO 8601 with a numeric UTC offset,
# e.g. 2024-01-31T12:34:56+0000.  Defined with a subshell body so it
# cannot affect the caller's environment.
date_with_args() (
  date "+%Y-%m-%dT%T%z"
)
# Default option values; command-line options below may override them.
stopservers=true
# baseport == 0 means random
baseport=0

# SYSTEMTEST_NO_CLEAN=1 keeps test artifacts around after a passing run.
if [ "${SYSTEMTEST_NO_CLEAN:-0}" -eq 1 ]; then
  clean=false
else
  clean=true
fi

do_run=false
restart=false
log_flags="-r"
# Parse command-line options.  Long options arrive via the "-" case of
# getopts as "-$OPTARG"; they are normalized by splitting OPTARG at the
# first '=' so that long and short options share the case arms below.
while getopts "sknp:r-:t" OPT; do
  # Accumulate the parsed flags so they can be forwarded to "make check".
  log_flags="$log_flags -$OPT$OPTARG"
  if [ "$OPT" = "-" ] && [ -n "$OPTARG" ]; then
    OPT="${OPTARG%%=*}"
    OPTARG="${OPTARG#$OPT}"
    OPTARG="${OPTARG#=}"
  fi

  # shellcheck disable=SC2214
  case "$OPT" in
    k | keep) stopservers=false ;;
    n | noclean) clean=false ;;
    p | port) baseport=$OPTARG ;;
    r | run) do_run=true ;;
    s | skip) exit 77 ;;
    t | restart) restart=true ;;
    -) break ;;
    *)
      echo "invalid option" >&2
      exit 1
      ;;
  esac
done

shift $((OPTIND - 1))
# Unless invoked with -r/--run, delegate to "make check" with a sanitized
# environment ("env -"), passing through only an explicit allow-list of
# variables; make then re-invokes this script with -r for each test.
if ! $do_run; then
  if [ "$baseport" -eq 0 ]; then
    log_flags="$log_flags -p 5300"
  fi
  env - \
    CYGWIN="$CYGWIN" \
    SYSTEMTEST_FORCE_COLOR="$SYSTEMTEST_FORCE_COLOR" \
    SYSTEMTEST_NO_CLEAN="$SYSTEMTEST_NO_CLEAN" \
    SLOT="$SLOT" \
    OPENSSL_CONF="$OPENSSL_CONF" \
    SOFTHSM2_CONF="$SOFTHSM2_CONF" \
    SOFTHSM2_MODULE="$SOFTHSM2_MODULE" \
    PATH="$PATH" \
    ${LD_LIBRARY_PATH:+"LD_LIBRARY_PATH=${LD_LIBRARY_PATH}"} \
    TESTS="$*" \
    TEST_SUITE_LOG=run.log \
    LOG_DRIVER_FLAGS="--verbose yes --color-tests yes" \
    LOG_FLAGS="$log_flags" \
    TEST_LARGE_MAP="${TEST_LARGE_MAP}" \
    CI_ENABLE_ALL_TESTS="${CI_ENABLE_ALL_TESTS}" \
    make -e check
  exit $?
fi
# A test directory name is mandatory in -r/--run mode.
if [ $# -eq 0 ]; then
  echofail "Usage: $0 [-k] [-n] [-p <PORT>] test-directory [test-options]" >&2
  exit 1
fi

# Strip a trailing slash and any leading path components, then consume
# the argument; the remaining "$@" is passed to the test's own scripts.
systest=$(basename "${1%%/}")
shift

if [ ! -d "${srcdir}/$systest" ]; then
  echofail "$0: $systest: no such test" >&2
  exit 1
fi
# For out-of-tree builds, copy the shared support files and the test's
# own directory from the source tree into the build tree.  The .prepared
# marker files avoid recopying on subsequent runs.
if [ "${srcdir}" != "${builddir}" ]; then
  if [ ! -d common ] || [ ! -r common/.prepared ]; then
    cp -a "${srcdir}/common" "${builddir}"
  fi
  # Some tests require additional files to work for out-of-tree test runs.
  for file in ckdnsrps.sh conftest.py digcomp.pl ditch.pl fromhex.pl kasp.sh packet.pl pytest_custom_markers.py start.pl stop.pl testcrypto.sh; do
    if [ ! -r "${file}" ]; then
      cp -a "${srcdir}/${file}" "${builddir}"
    fi
  done
  if [ ! -d "$systest" ] || [ ! -r "$systest/.prepared" ]; then
    mkdir -p "${builddir}/$systest"
    cp -a "${srcdir}/$systest" "${builddir}/"
    touch "$systest/.prepared"
  fi
fi

# Re-check in the build tree (the copy above may have failed).
if [ ! -d "${systest}" ]; then
  echofail "$0: $systest: no such test" >&2
  exit 1
fi
# Determine which ports to use for this system test.  get_ports.sh emits
# variable assignments (PORT, TLSPORT, CONTROLPORT, ...) which are
# evaluated into the current shell.
eval "$(cd "${srcdir}" && ./get_ports.sh -p "$baseport" -t "$systest")"
# Start all servers used by the system test.  Ensure all log files written
# during a system test (tests.sh + potentially multiple *.py scripts) are
# retained for each run by calling start.pl with the --restart command-line
# option for all invocations except the first one.
start_servers() {
  echoinfo "I:$systest:starting servers"
  # $run counts completed test phases; any phase after the first (or an
  # explicit -t/--restart) must preserve existing logs.
  if $restart || [ "$run" -gt 0 ]; then
    restart_opt="--restart"
  fi
  # ${restart_opt} is intentionally unquoted so that an empty value
  # expands to no argument at all.
  if ! $PERL start.pl ${restart_opt} --port "$PORT" "$systest"; then
    echoinfo "I:$systest:starting servers failed"
    return 1
  fi
}
# Stop the test's servers unless -k/--keep was given ($stopservers=false).
# Returns non-zero if stop.pl fails so the caller can mark the run failed.
stop_servers() {
  if $stopservers; then
    echoinfo "I:$systest:stopping servers"
    if ! $PERL stop.pl "$systest"; then
      echoinfo "I:$systest:stopping servers failed"
      return 1
    fi
  fi
}
# Announce the test run and the port assignments in the log format
# consumed by the log driver (S:/T:/A:/I: prefixes).
echostart "S:$systest:$(date_with_args)"
echoinfo "T:$systest:1:A"
echoinfo "A:$systest:System test $systest"
echoinfo "I:$systest:PORTS:${PORT},${TLSPORT},${HTTPPORT},${HTTPSPORT},${EXTRAPORT1},${EXTRAPORT2},${EXTRAPORT3},${EXTRAPORT4},${EXTRAPORT5},${EXTRAPORT6},${EXTRAPORT7},${EXTRAPORT8},${CONTROLPORT}"

# Verify that the 10.53.0.* interface aliases the tests rely on are set up.
$PERL "${srcdir}/testsock.pl" -p "$PORT" || {
  echowarn "I:$systest:Network interface aliases not set up. Skipping test."
  echowarn "R:$systest:FAIL"
  echoend "E:$systest:$(date_with_args)"
  exit 1
}

# Check for test-specific prerequisites.
test ! -f "$systest/prereq.sh" || (cd "${systest}" && $SHELL prereq.sh "$@")
result=$?

if [ $result -eq 0 ]; then
  : prereqs ok
else
  echowarn "I:$systest:Prerequisites missing, skipping test."
  echowarn "R:$systest:SKIPPED"
  # 77 is the automake convention for a skipped test.
  echoend "E:$systest:$(date_with_args)"
  exit 77
fi
# Clean up files left from any potential previous runs except when
# started with the --restart option.
if ! $restart; then
  if test -f "$systest/clean.sh"; then
    if ! (cd "${systest}" && $SHELL clean.sh "$@"); then
      echowarn "I:$systest:clean.sh script failed"
      echofail "R:$systest:FAIL"
      echoend "E:$systest:$(date_with_args)"
      exit 1
    fi
  fi
fi
# Set up any dynamically generated test data
if test -f "$systest/setup.sh"; then
  if ! (cd "${systest}" && $SHELL setup.sh "$@"); then
    echowarn "I:$systest:setup.sh script failed"
    echofail "R:$systest:FAIL"
    echoend "E:$systest:$(date_with_args)"
    exit 1
  fi
fi
status=0
# Number of test phases (tests.sh and/or pytest files) actually executed.
run=0
# Run the tests
if [ -r "$systest/tests.sh" ]; then
  if start_servers; then
    (cd "$systest" && $SHELL tests.sh "$@")
    status=$?
    run=$((run + 1))
    stop_servers || status=1
  else
    status=1
  fi
fi
# Run any pytest-based tests (tests*.py), but only if the shell phase
# passed.  Each pytest file gets its own server start/stop cycle.
if [ $status -eq 0 ]; then
  if [ -n "$PYTEST" ]; then
    for test in $(cd "${systest}" && find . -name "tests*.py"); do
      rm -f "$systest/$test.status"
      if start_servers; then
        run=$((run + 1))
        test_status=0
        # pytest's exit code is captured in a status file because the
        # pipeline's own exit status is that of cat_d, not pytest.
        (cd "$systest" && "$PYTEST" -v "$test" "$@" || echo "$?" > "$test.status") | SYSTESTDIR="$systest" cat_d
        if [ -f "$systest/$test.status" ]; then
          # pytest exit code 5 means no tests were collected.
          if [ "$(cat "$systest/$test.status")" = "5" ]; then
            echowarn "R:$systest:SKIPPED"
          else
            echo_i "FAILED"
            test_status=$(cat "$systest/$test.status")
          fi
        fi
        status=$((status + test_status))
        stop_servers || status=1
      else
        status=1
      fi
      # Stop at the first failing pytest file.
      if [ $status -ne 0 ]; then
        break
      fi
    done
    rm -f "$systest/$test.status"
  else
    echoinfo "I:$systest:pytest not installed, skipping python tests"
  fi
fi
# A test directory with neither tests.sh nor tests*.py is an error.
if [ "$run" -eq "0" ]; then
  echoinfo "I:$systest:No tests were found and run"
  status=255
fi

# With -k/--keep the servers stay up; skip the post-mortem analysis and
# cleanup below and report the current status immediately.
if $stopservers; then
  :
else
  exit $status
fi
# List core dump files under the test directory, one per line, sorted.
# Already-archived (*.gz) dumps and saved backtraces (*.txt) are excluded.
get_core_dumps() {
  find "$systest/" \( -name 'core' -or -name 'core.*' -or -name '*.core' \) ! -name '*.gz' ! -name '*.txt' | sort
}
# Post-mortem analysis: fail the test if core dumps, assertion failures,
# or sanitizer reports were produced, and log backtraces for any cores.
core_dumps=$(get_core_dumps | tr '\n' ' ')
assertion_failures=$(find "$systest/" -name named.run -exec grep "assertion failure" {} + | wc -l)
sanitizer_summaries=$(find "$systest/" -name 'tsan.*' | wc -l)
if [ -n "$core_dumps" ]; then
  status=1
  echoinfo "I:$systest:Core dump(s) found: $core_dumps"
  get_core_dumps | while read -r coredump; do
    export SYSTESTDIR="$systest"
    echoinfo "D:$systest:backtrace from $coredump:"
    echoinfo "D:$systest:--------------------------------------------------------------------------------"
    # Extract the crashing binary's path from gdb's "Core was generated
    # by `...'" banner.
    binary=$(gdb --batch --core="$coredump" 2>/dev/null | sed -ne "s|Core was generated by \`\([^' ]*\)[' ].*|\1|p")
    if [ ! -f "${binary}" ]; then
      # The recorded path may be a libtool wrapper; look for the real
      # binary under .libs/ in the build tree.
      binary=$(find "${top_builddir}" -path "*/.libs/${binary}" -type f)
    fi
    # Short backtrace to the log...
    "${top_builddir}/libtool" --mode=execute gdb \
      -batch \
      -ex bt \
      -core="$coredump" \
      -- \
      "$binary" 2>/dev/null | sed -n '/^Core was generated by/,$p' | cat_d
    echoinfo "D:$systest:--------------------------------------------------------------------------------"
    coredump_backtrace="${coredump}-backtrace.txt"
    echoinfo "D:$systest:full backtrace from $coredump saved in $coredump_backtrace"
    # ...and a full backtrace (run.gdb commands) to a file next to the core.
    "${top_builddir}/libtool" --mode=execute gdb \
      -batch \
      -command=run.gdb \
      -core="$coredump" \
      -- \
      "$binary" > "$coredump_backtrace" 2>&1
    echoinfo "D:$systest:core dump $coredump archived as $coredump.gz"
    gzip -1 "${coredump}"
  done
elif [ "$assertion_failures" -ne 0 ]; then
  status=1
  SYSTESTDIR="$systest"
  echoinfo "I:$systest:$assertion_failures assertion failure(s) found"
  find "$systest/" -name 'tsan.*' -exec grep "SUMMARY: " {} + | sort -u | cat_d
elif [ "$sanitizer_summaries" -ne 0 ]; then
  status=1
  echoinfo "I:$systest:$sanitizer_summaries sanitizer report(s) found"
fi
# For in-tree runs: report files left behind in the test directory after
# cleanup, using git status (untracked files plus ignored named.run /
# named.memstats files).  No-op outside a git checkout.
print_outstanding_files() {
  if test -d "${srcdir}/../../../.git"; then
    git status -su --ignored "${systest}/" 2>/dev/null \
      | sed -n -e 's|^?? \(.*\)|I:'"${systest}"':file \1 not removed|p' \
        -e 's|^!! \(.*/named.run\)$|I:'"${systest}"':file \1 not removed|p' \
        -e 's|^!! \(.*/named.memstats\)$|I:'"${systest}"':file \1 not removed|p'
  fi
}
# For out-of-tree runs: list files in the build-tree copy of the test
# directory that are not tracked in the source tree (and are not the
# .prepared marker or Makefile).  No-op outside a git checkout.
print_outstanding_files_oot() {
  if test -d "${srcdir}/../../../.git"; then
    git -C "${srcdir}/${systest}" ls-files | sed "s|^|${systest}/|" > gitfiles.txt
    find "${systest}/" -type f ! -name .prepared ! -name Makefile > testfiles.txt
    # Whole-line, fixed-string difference: testfiles not in gitfiles.
    grep -F -x -v -f gitfiles.txt testfiles.txt
    rm -f gitfiles.txt testfiles.txt
  fi
}
# Report the final result and, on success, clean up test artifacts
# (unless -n/--noclean, SYSTEMTEST_NO_CLEAN, or -t/--restart was used).
if [ $status -ne 0 ]; then
  echofail "R:$systest:FAIL"
else
  echopass "R:$systest:PASS"
  if $clean && ! $restart; then
    (cd "${systest}" && $SHELL clean.sh "$@")
    if [ "${srcdir}" = "${builddir}" ]; then
      print_outstanding_files
    else
      # Out-of-tree: remove copied-but-untracked leftovers and any
      # directories that are now empty.
      print_outstanding_files_oot | xargs rm -f
      find "${systest}/" \( -type d -empty \) -delete 2>/dev/null
    fi
  fi
fi
# Warn about excessively large named.run logs; oversized logs usually
# indicate a test doing far more disk I/O than necessary.
NAMED_RUN_LINES_THRESHOLD=200000
find "${systest}" -type f -name "named.run" -exec wc -l {} \; | awk "\$1 > ${NAMED_RUN_LINES_THRESHOLD} { print \$2 }" | sort | while read -r LOG_FILE; do
  echowarn "I:${systest}:${LOG_FILE} contains more than ${NAMED_RUN_LINES_THRESHOLD} lines, consider tweaking the test to limit disk I/O"
done

echoend "E:$systest:$(date_with_args)"

exit $status